Repository: curoverse/arvados
Branch: main
Commit: a4bf6884f81c
Files: 3341
Total size: 18.6 MB

Directory structure:
gitextract_zydnvh49/
├── .gitignore
├── .licenseignore
├── AUTHORS
├── CITATION.cff
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── COPYING
├── Makefile
├── README.md
├── SECURITY.md
├── agpl-3.0.txt
├── apache-2.0.txt
├── build/
│   ├── README
│   ├── build_docker_image.py
│   ├── check-copyright-notices
│   ├── create-plot-data-from-log.sh
│   ├── docker/
│   │   └── python-venv.Dockerfile
│   ├── go-python-package-scripts/
│   │   ├── postinst
│   │   └── prerm
│   ├── package-testing/
│   │   ├── common-test-packages.sh
│   │   ├── deb-common-test-packages.sh
│   │   ├── rpm-common-test-packages.sh
│   │   ├── test-package-arvados-api-server.sh
│   │   ├── test-package-arvados-client.sh
│   │   ├── test-package-arvados-docker-cleaner.sh
│   │   ├── test-package-python3-arvados-cwl-runner.sh
│   │   ├── test-package-python3-arvados-python-client.sh
│   │   ├── test-package-python3-crunchstat-summary.sh
│   │   └── test-package-python3-python-arvados-fuse.sh
│   ├── pypkg_info.py
│   ├── rails-package-scripts/
│   │   ├── README.md
│   │   ├── arvados-api-server.sh
│   │   ├── postinst.sh
│   │   ├── postrm.sh
│   │   └── prerm.sh
│   ├── requirements.build-packages.txt
│   ├── requirements.build.txt
│   ├── requirements.tests.txt
│   ├── run-build-packages-all-targets.sh
│   ├── run-build-packages-one-target.sh
│   ├── run-build-packages-python-and-ruby.sh
│   ├── run-build-packages.sh
│   ├── run-build-test-packages-one-target.sh
│   ├── run-library.sh
│   ├── run-tests.sh
│   └── version-at-commit.sh
├── cc-by-sa-3.0.txt
├── cmd/
│   └── arvados-server/
│       ├── arvados-controller.service
│       ├── arvados-dispatch-cloud.service
│       ├── arvados-dispatch-lsf.service
│       ├── arvados-health.service
│       ├── arvados-ws.service
│       ├── cmd.go
│       ├── crunch-dispatch-slurm.service
│       ├── keep-balance.service
│       ├── keep-web.service
│       ├── keepproxy.service
│       └── keepstore.service
├── contrib/
│   ├── R-sdk/
│   │   ├── .Rbuildignore
│   │   ├── .gitignore
│   │   ├── ArvadosR.Rproj
│   │   ├── DESCRIPTION
│   │   ├── Makefile
│   │   ├── NAMESPACE
│   │   ├── R/
│   │   │   ├── ArvadosFile.R
│   │   │   ├── ArvadosR.R
│   │   │   ├── Collection.R
│   │   │   ├── CollectionTree.R
│   │   │   ├── HttpParser.R
│   │   │   ├── HttpRequest.R
│   │   │   ├── RESTService.R
│   │   │   ├── Subcollection.R
│   │   │   ├── util.R
│   │   │   └── zzz.R
│   │   ├── README.md
│   │   ├── arvados-v1-discovery.json
│   │   ├── generateApi.R
│   │   ├── install_deps.R
│   │   ├── run_test.R
│   │   └── tests/
│   │       ├── testthat/
│   │       │   ├── fakes/
│   │       │   │   ├── FakeArvados.R
│   │       │   │   ├── FakeHttpParser.R
│   │       │   │   ├── FakeHttpRequest.R
│   │       │   │   └── FakeRESTService.R
│   │       │   ├── test-ArvadosFile.R
│   │       │   ├── test-Collection.R
│   │       │   ├── test-CollectionTree.R
│   │       │   ├── test-HttpParser.R
│   │       │   ├── test-HttpRequest.R
│   │       │   ├── test-RESTService.R
│   │       │   ├── test-Subcollection.R
│   │       │   └── test-util.R
│   │       └── testthat.R
│   ├── README.md
│   ├── arvados-bootstrap/
│   │   ├── LICENSE-2.0.txt
│   │   ├── README.md
│   │   ├── pyproject.toml
│   │   └── src/
│   │       └── arv_bootstrap/
│   │           ├── __init__.py
│   │           ├── export_import.py
│   │           ├── federation_migrate.py
│   │           ├── seed.py
│   │           └── stubapi.py
│   ├── arvbash/
│   │   └── arvbash.sh
│   └── java-sdk-v2/
│       ├── .gitignore
│       ├── .licenseignore
│       ├── COPYING
│       ├── README.md
│       ├── agpl-3.0.txt
│       ├── apache-2.0.txt
│       ├── build.gradle
│       ├── gradle.properties
│       ├── settings.gradle
│       └── src/
│           ├── main/
│           │   ├── java/
│           │   │   └── org/
│           │   │       └── arvados/
│           │   │           └── client/
│           │   │               ├── api/
│           │   │               │   ├── client/
│           │   │               │   │   ├── BaseApiClient.java
│           │   │               │   │   ├── BaseStandardApiClient.java
│           │   │               │   │   ├── CollectionsApiClient.java
│           │   │               │   │   ├── ConfigApiClient.java
│           │   │               │   │   ├── CountingFileRequestBody.java
│           │   │               │   │   ├── CountingRequestBody.java
│           │   │               │   │   ├── CountingStreamRequestBody.java
│           │   │               │   │   ├── GroupsApiClient.java
│           │   │               │   │   ├── KeepWebApiClient.java
│           │   │               │   │   ├── LinksApiClient.java
│           │   │               │   │   ├── ProgressListener.java
│           │   │               │   │   ├── UsersApiClient.java
│           │   │               │   │   └── factory/
│           │   │               │   │       └── OkHttpClientFactory.java
│           │   │               │   └── model/
│           │   │               │       ├── ApiError.java
│           │   │               │       ├── ArvadosConfig.java
│           │   │               │       ├── Collection.java
│           │   │               │       ├── CollectionList.java
│           │   │               │       ├── CollectionReplaceFiles.java
│           │   │               │       ├── Group.java
│           │   │               │       ├── GroupList.java
│           │   │               │       ├── Item.java
│           │   │               │       ├── ItemList.java
│           │   │               │       ├── KeepService.java
│           │   │               │       ├── KeepServiceList.java
│           │   │               │       ├── Link.java
│           │   │               │       ├── LinkList.java
│           │   │               │       ├── RuntimeConstraints.java
│           │   │               │       ├── User.java
│           │   │               │       ├── UserList.java
│           │   │               │       └── argument/
│           │   │               │           ├── Argument.java
│           │   │               │           ├── ContentsGroup.java
│           │   │               │           ├── Filter.java
│           │   │               │           ├── ListArgument.java
│           │   │               │           └── UntrashGroup.java
│           │   │               ├── common/
│           │   │               │   ├── Characters.java
│           │   │               │   ├── Headers.java
│           │   │               │   └── Patterns.java
│           │   │               ├── config/
│           │   │               │   ├── ConfigProvider.java
│           │   │               │   ├── ExternalConfigProvider.java
│           │   │               │   ├── FileConfigProvider.java
│           │   │               │   └── WebDAVConfigFetcher.java
│           │   │               ├── exception/
│           │   │               │   ├── ArvadosApiException.java
│           │   │               │   └── ArvadosClientException.java
│           │   │               ├── facade/
│           │   │               │   └── ArvadosFacade.java
│           │   │               ├── logic/
│           │   │               │   ├── collection/
│           │   │               │   │   ├── CollectionFactory.java
│           │   │               │   │   ├── FileToken.java
│           │   │               │   │   ├── ManifestDecoder.java
│           │   │               │   │   ├── ManifestFactory.java
│           │   │               │   │   └── ManifestStream.java
│           │   │               │   └── keep/
│           │   │               │       ├── FileDownloader.java
│           │   │               │       ├── FileUploader.java
│           │   │               │       ├── KeepLocator.java
│           │   │               │       └── exception/
│           │   │               │           ├── DownloadFolderAlreadyExistsException.java
│           │   │               │           └── FileAlreadyExistsException.java
│           │   │               └── utils/
│           │   │                   ├── FileMerge.java
│           │   │                   └── FileSplit.java
│           │   └── resources/
│           │       └── reference.conf
│           └── test/
│               ├── java/
│               │   └── org/
│               │       └── arvados/
│               │           └── client/
│               │               ├── api/
│               │               │   └── client/
│               │               │       ├── BaseStandardApiClientTest.java
│               │               │       ├── CollectionsApiClientTest.java
│               │               │       ├── GroupsApiClientTest.java
│               │               │       ├── KeepWebApiClientTest.java
│               │               │       ├── LinkApiClientTest.java
│               │               │       ├── UsersApiClientTest.java
│               │               │       └── factory/
│               │               │           └── OkHttpClientFactoryTest.java
│               │               ├── config/
│               │               │   ├── ExternalConfigProviderTest.java
│               │               │   └── WebDAVConfigFetcherTest.java
│               │               ├── facade/
│               │               │   ├── ArvadosFacadeIntegrationTest.java
│               │               │   └── ArvadosFacadeTest.java
│               │               ├── junit/
│               │               │   └── categories/
│               │               │       └── IntegrationTests.java
│               │               ├── logic/
│               │               │   ├── collection/
│               │               │   │   ├── FileTokenTest.java
│               │               │   │   ├── ManifestDecoderTest.java
│               │               │   │   ├── ManifestFactoryTest.java
│               │               │   │   └── ManifestStreamTest.java
│               │               │   └── keep/
│               │               │       ├── FileDownloaderTest.java
│               │               │       └── KeepLocatorTest.java
│               │               ├── test/
│               │               │   └── utils/
│               │               │       ├── ApiClientTestUtils.java
│               │               │       ├── ArvadosClientIntegrationTest.java
│               │               │       ├── ArvadosClientMockedWebServerTest.java
│               │               │       ├── ArvadosClientUnitTest.java
│               │               │       ├── FileTestUtils.java
│               │               │       └── RequestMethod.java
│               │               └── utils/
│               │                   ├── FileMergeTest.java
│               │                   └── FileSplitTest.java
│               └── resources/
│                   ├── application.conf
│                   ├── integration-tests-application.conf
│                   ├── integration-tests-application.conf.example
│                   ├── mockito-extensions/
│                   │   └── org.mockito.plugins.MockMaker
│                   ├── org/
│                   │   └── arvados/
│                   │       └── client/
│                   │           └── api/
│                   │               └── client/
│                   │                   ├── collections-create-manifest.json
│                   │                   ├── collections-create-simple.json
│                   │                   ├── collections-download-file.json
│                   │                   ├── collections-get.json
│                   │                   ├── collections-list.json
│                   │                   ├── groups-get.json
│                   │                   ├── groups-list.json
│                   │                   ├── keep-client-test-file.txt
│                   │                   ├── keep-client-upload-response.json
│                   │                   ├── keep-services-accessible-disk-only.json
│                   │                   ├── keep-services-accessible.json
│                   │                   ├── keep-services-get.json
│                   │                   ├── keep-services-list.json
│                   │                   ├── keep-services-not-accessible.json
│                   │                   ├── links-create.json
│                   │                   ├── links-get.json
│                   │                   ├── links-list.json
│                   │                   ├── users-create.json
│                   │                   ├── users-get.json
│                   │                   ├── users-list.json
│                   │                   └── users-system.json
│                   └── selfsigned.keystore.jks
├── doc/
│   ├── Gemfile
│   ├── README.textile
│   ├── Rakefile
│   ├── _config.yml
│   ├── _includes/
│   │   ├── _admin_list_collections_without_property_py.liquid
│   │   ├── _admin_set_property_to_collections_under_project_py.liquid
│   │   ├── _admin_update_collection_property_py.liquid
│   │   ├── _assign_volume_uuid.liquid
│   │   ├── _branchname.liquid
│   │   ├── _container_glob_patterns.liquid
│   │   ├── _container_published_ports.liquid
│   │   ├── _container_runtime_constraints.liquid
│   │   ├── _container_scheduling_parameters.liquid
│   │   ├── _contrib_component.liquid
│   │   ├── _download_installer.liquid
│   │   ├── _example_sdk_go.liquid
│   │   ├── _google_analytics.liquid
│   │   ├── _hpc_max_gateway_tunnels.liquid
│   │   ├── _html_tags.liquid
│   │   ├── _install_ansible.liquid
│   │   ├── _install_ca_cert.liquid
│   │   ├── _install_compute_docker.liquid
│   │   ├── _install_compute_fuse.liquid
│   │   ├── _install_cuda.liquid
│   │   ├── _install_debian_key.liquid
│   │   ├── _install_docker_cleaner.liquid
│   │   ├── _install_packages.liquid
│   │   ├── _install_postgres_database.liquid
│   │   ├── _install_rails_command.liquid
│   │   ├── _install_ruby_and_bundler.liquid
│   │   ├── _matomo_analytics.liquid
│   │   ├── _metadata_vocabulary_example.liquid
│   │   ├── _mount_types.liquid
│   │   ├── _multi_host_install_custom_certificates.liquid
│   │   ├── _navbar_left.liquid
│   │   ├── _navbar_top.liquid
│   │   ├── _notebox_begin.liquid
│   │   ├── _notebox_begin_warning.liquid
│   │   ├── _notebox_end.liquid
│   │   ├── _restart_api.liquid
│   │   ├── _setup_debian_repo.liquid
│   │   ├── _setup_redhat_repo.liquid
│   │   ├── _singularity_mksquashfs_configuration.liquid
│   │   ├── _ssh_addkey.liquid
│   │   ├── _ssh_intro.liquid
│   │   ├── _ssl_config_multi.liquid
│   │   ├── _ssl_config_single.liquid
│   │   ├── _start_service.liquid
│   │   ├── _supportedlinux.liquid
│   │   ├── _tutorial_expectations.liquid
│   │   ├── _tutorial_expectations_workstation.liquid
│   │   ├── _tutorial_hello_cwl.liquid
│   │   ├── _webring.liquid
│   │   └── _what_is_cwl.liquid
│   ├── _layouts/
│   │   └── default.html.liquid
│   ├── admin/
│   │   ├── User account states.odg
│   │   ├── cloudtest.html.textile.liquid
│   │   ├── collection-managed-properties.html.textile.liquid
│   │   ├── collection-versioning.html.textile.liquid
│   │   ├── config-urls.html.textile.liquid
│   │   ├── config.html.textile.liquid
│   │   ├── controlling-container-reuse.html.textile.liquid
│   │   ├── diagnostics.html.textile.liquid
│   │   ├── dispatch.html.textile.liquid
│   │   ├── federation.html.textile.liquid
│   │   ├── group-management.html.textile.liquid
│   │   ├── health-checks.html.textile.liquid
│   │   ├── index.html.textile.liquid
│   │   ├── inspect.html.textile.liquid
│   │   ├── keep-balance.html.textile.liquid
│   │   ├── keep-faster-gc-s3.html.textile.liquid
│   │   ├── keep-measuring-deduplication.html.textile.liquid
│   │   ├── keep-recovering-data.html.textile.liquid
│   │   ├── link-accounts.html.textile.liquid
│   │   ├── logging.html.textile.liquid
│   │   ├── logs-table-management.html.textile.liquid
│   │   ├── maintenance-and-upgrading.html.textile.liquid
│   │   ├── management-token.html.textile.liquid
│   │   ├── memory-cpu-profiling.html.textile.liquid
│   │   ├── metadata-vocabulary.html.textile.liquid
│   │   ├── metrics.html.textile.liquid
│   │   ├── migrating-providers.html.textile.liquid
│   │   ├── reassign-ownership.html.textile.liquid
│   │   ├── restricting-upload-download.html.textile.liquid
│   │   ├── scoped-tokens.html.textile.liquid
│   │   ├── spot-instances.html.textile.liquid
│   │   ├── storage-classes.html.textile.liquid
│   │   ├── token-expiration-policy.html.textile.liquid
│   │   ├── upgrading.html.textile.liquid
│   │   ├── user-activity.html.textile.liquid
│   │   ├── user-management-cli.html.textile.liquid
│   │   └── user-management.html.textile.liquid
│   ├── api/
│   │   ├── dispatch.html.textile.liquid
│   │   ├── execution.html.textile.liquid
│   │   ├── index.html.textile.liquid
│   │   ├── keep-s3.html.textile.liquid
│   │   ├── keep-web-urls.html.textile.liquid
│   │   ├── keep-webdav.html.textile.liquid
│   │   ├── methods/
│   │   │   ├── api_client_authorizations.html.textile.liquid
│   │   │   ├── authorized_keys.html.textile.liquid
│   │   │   ├── collections.html.textile.liquid
│   │   │   ├── computed_permissions.html.textile.liquid
│   │   │   ├── container_requests.html.textile.liquid
│   │   │   ├── containers.html.textile.liquid
│   │   │   ├── credentials.html.textile.liquid
│   │   │   ├── groups.html.textile.liquid
│   │   │   ├── keep_services.html.textile.liquid
│   │   │   ├── links.html.textile.liquid
│   │   │   ├── logs.html.textile.liquid
│   │   │   ├── user_agreements.html.textile.liquid
│   │   │   ├── users.html.textile.liquid
│   │   │   ├── virtual_machines.html.textile.liquid
│   │   │   └── workflows.html.textile.liquid
│   │   ├── methods.html.textile.liquid
│   │   ├── permission-model.html.textile.liquid
│   │   ├── projects.html.textile.liquid
│   │   ├── properties.html.textile.liquid
│   │   ├── requests.html.textile.liquid
│   │   ├── resources.html.textile.liquid
│   │   └── tokens.html.textile.liquid
│   ├── architecture/
│   │   ├── Arvados_arch.odg
│   │   ├── Arvados_federation.odg
│   │   ├── dispatchcloud.html.textile.liquid
│   │   ├── federation.html.textile.liquid
│   │   ├── hpc.html.textile.liquid
│   │   ├── index.html.textile.liquid
│   │   ├── keep-clients.html.textile.liquid
│   │   ├── keep-components-overview.html.textile.liquid
│   │   ├── keep-data-lifecycle.html.textile.liquid
│   │   ├── manifest-format.html.textile.liquid
│   │   ├── singularity.html.textile.liquid
│   │   └── storage.html.textile.liquid
│   ├── css/
│   │   ├── R.css
│   │   ├── badges.css
│   │   ├── bootstrap-theme.css
│   │   ├── bootstrap.css
│   │   ├── button-override.css
│   │   ├── carousel-override.css
│   │   ├── code.css
│   │   ├── font-awesome.css
│   │   ├── images.css
│   │   ├── layout.css
│   │   └── nav-list.css
│   ├── development/
│   │   ├── CodingStandards.md
│   │   ├── DevelopmentProcess.md
│   │   ├── DistroVersions.md
│   │   ├── Prerequisites.md
│   │   ├── RunningTests.md
│   │   ├── UpdatingDependencies.md
│   │   ├── git.conf
│   │   ├── prepare-commit-msg.sh
│   │   └── release/
│   │       ├── Checklist.md
│   │       ├── FastqPipeline.md
│   │       ├── JavaSDK.md
│   │       ├── ManualTests.md
│   │       └── Zenodo.md
│   ├── examples/
│   │   └── config/
│   │       └── zzzzz.yml
│   ├── fonts/
│   │   └── FontAwesome.otf
│   ├── index.html.liquid
│   ├── install/
│   │   ├── arvbox.html.textile.liquid
│   │   ├── config.html.textile.liquid
│   │   ├── configure-azure-blob-storage.html.textile.liquid
│   │   ├── configure-fs-storage.html.textile.liquid
│   │   ├── configure-s3-object-storage.html.textile.liquid
│   │   ├── container-shell-access.html.textile.liquid
│   │   ├── crunch2/
│   │   │   ├── install-compute-node-docker.html.textile.liquid
│   │   │   └── install-compute-node-singularity.html.textile.liquid
│   │   ├── crunch2-cloud/
│   │   │   ├── install-compute-node.html.textile.liquid
│   │   │   └── install-dispatch-cloud.html.textile.liquid
│   │   ├── crunch2-lsf/
│   │   │   └── install-dispatch.html.textile.liquid
│   │   ├── crunch2-slurm/
│   │   │   ├── configure-slurm.html.textile.liquid
│   │   │   ├── install-dispatch.html.textile.liquid
│   │   │   └── install-test.html.textile.liquid
│   │   ├── diagnostics.html.textile.liquid
│   │   ├── index.html.textile.liquid
│   │   ├── install-api-server.html.textile.liquid
│   │   ├── install-docker.html.textile.liquid
│   │   ├── install-keep-balance.html.textile.liquid
│   │   ├── install-keep-web.html.textile.liquid
│   │   ├── install-keepproxy.html.textile.liquid
│   │   ├── install-keepstore.html.textile.liquid
│   │   ├── install-manual-prerequisites.html.textile.liquid
│   │   ├── install-multi-host.html.textile.liquid
│   │   ├── install-postgresql.html.textile.liquid
│   │   ├── install-shell-server.html.textile.liquid
│   │   ├── install-single-host.html.textile.liquid
│   │   ├── install-webshell.html.textile.liquid
│   │   ├── install-workbench2-app.html.textile.liquid
│   │   ├── install-ws.html.textile.liquid
│   │   ├── new_cluster_checklist_AWS.xlsx
│   │   ├── new_cluster_checklist_Azure.xlsx
│   │   ├── new_cluster_checklist_slurm.xlsx
│   │   ├── nginx.html.textile.liquid
│   │   ├── packages.html.textile.liquid
│   │   ├── proxy-chain.odg
│   │   ├── ruby.html.textile.liquid
│   │   ├── salt-multi-host.html.textile.liquid
│   │   ├── salt-single-host.html.textile.liquid
│   │   ├── setup-login.html.textile.liquid
│   │   └── workbench.html.textile.liquid
│   ├── js/
│   │   └── bootstrap.js
│   ├── pysdk_pdoc.py
│   ├── sdk/
│   │   ├── cli/
│   │   │   ├── index.html.textile.liquid
│   │   │   ├── install.html.textile.liquid
│   │   │   ├── reference.html.textile.liquid
│   │   │   └── subcommands.html.textile.liquid
│   │   ├── fuse/
│   │   │   ├── install.html.textile.liquid
│   │   │   └── options.html.textile.liquid
│   │   ├── go/
│   │   │   ├── example.html.textile.liquid
│   │   │   └── index.html.textile.liquid
│   │   ├── index.html.textile.liquid
│   │   ├── java-v2/
│   │   │   ├── example.html.textile.liquid
│   │   │   ├── index.html.textile.liquid
│   │   │   └── javadoc.html.textile.liquid
│   │   ├── python/
│   │   │   ├── api-client.html.textile.liquid
│   │   │   ├── arvados-cwl-runner.html.textile.liquid
│   │   │   ├── cookbook.html.textile.liquid
│   │   │   ├── events.html.textile.liquid
│   │   │   ├── python.html.textile.liquid
│   │   │   └── sdk-python.html.textile.liquid
│   │   └── ruby/
│   │       ├── example.html.textile.liquid
│   │       └── index.html.textile.liquid
│   ├── user/
│   │   ├── copying/
│   │   │   ├── LICENSE-2.0.html
│   │   │   ├── agpl-3.0.html
│   │   │   ├── by-sa-3.0.html
│   │   │   └── copying.html.textile.liquid
│   │   ├── cwl/
│   │   │   ├── arvados-vscode-training.html.md.liquid
│   │   │   ├── bwa-mem/
│   │   │   │   ├── bwa-mem-input-local.yml
│   │   │   │   ├── bwa-mem-input-mixed.yml
│   │   │   │   ├── bwa-mem-input-uuids.yml
│   │   │   │   ├── bwa-mem-input.yml
│   │   │   │   ├── bwa-mem-template.yml
│   │   │   │   └── bwa-mem.cwl
│   │   │   ├── costanalyzer.html.textile.liquid
│   │   │   ├── crunchstat-summary.html.textile.liquid
│   │   │   ├── cwl-extensions.html.textile.liquid
│   │   │   ├── cwl-run-options.html.textile.liquid
│   │   │   ├── cwl-runner.html.textile.liquid
│   │   │   ├── cwl-style.html.textile.liquid
│   │   │   ├── cwl-versions.html.textile.liquid
│   │   │   ├── federated/
│   │   │   │   ├── FileOnCluster.yml
│   │   │   │   ├── colors_to_select.txt
│   │   │   │   ├── extract.cwl
│   │   │   │   ├── extract.py
│   │   │   │   ├── feddemo.cwl
│   │   │   │   ├── items1.csv
│   │   │   │   ├── items2.csv
│   │   │   │   ├── items3.csv
│   │   │   │   ├── merge.cwl
│   │   │   │   ├── merge.py
│   │   │   │   └── shards.yml
│   │   │   ├── federated-workflow.odg
│   │   │   ├── federated-workflows.html.textile.liquid
│   │   │   └── rnaseq-cwl-training.html.textile.liquid
│   │   ├── debugging/
│   │   │   └── container-shell-access.html.textile.liquid
│   │   ├── getting_started/
│   │   │   ├── check-environment.html.textile.liquid
│   │   │   ├── community.html.textile.liquid
│   │   │   ├── setup-cli.html.textile.liquid
│   │   │   ├── ssh-access-unix.html.textile.liquid
│   │   │   ├── ssh-access-windows.html.textile.liquid
│   │   │   ├── vm-login-with-webshell.html.textile.liquid
│   │   │   └── workbench.html.textile.liquid
│   │   ├── index.html.textile.liquid
│   │   ├── reference/
│   │   │   ├── api-tokens.html.textile.liquid
│   │   │   └── cookbook.html.textile.liquid
│   │   ├── topics/
│   │   │   ├── arv-copy.html.textile.liquid
│   │   │   ├── arv-docker.html.textile.liquid
│   │   │   ├── arvados-sync-external-sources.html.textile.liquid
│   │   │   ├── collection-versioning.html.textile.liquid
│   │   │   ├── external-inputs.html.textile.liquid
│   │   │   ├── link-accounts.html.textile.liquid
│   │   │   ├── service-containers.html.textile.liquid
│   │   │   ├── storage-classes.html.textile.liquid
│   │   │   └── workbench-migration.html.textile.liquid
│   │   └── tutorials/
│   │       ├── tutorial-keep-collection-lifecycle.html.textile.liquid
│   │       ├── tutorial-keep-get.html.textile.liquid
│   │       ├── tutorial-keep-mount-gnu-linux.html.textile.liquid
│   │       ├── tutorial-keep-mount-os-x.html.textile.liquid
│   │       ├── tutorial-keep-mount-windows.html.textile.liquid
│   │       ├── tutorial-keep.html.textile.liquid
│   │       ├── tutorial-projects.html.textile.liquid
│   │       ├── tutorial-workflow-workbench.html.textile.liquid
│   │       ├── wgs-tutorial.html.textile.liquid
│   │       └── writing-cwl-workflow.html.textile.liquid
│   ├── zenweb-fix-body.rb
│   ├── zenweb-liquid.rb
│   └── zenweb-textile.rb
├── go.mod
├── go.sum
├── lib/
│   ├── boot/
│   │   ├── cert.go
│   │   ├── cmd.go
│   │   ├── example.sh
│   │   ├── helpers.go
│   │   ├── nginx.go
│   │   ├── passenger.go
│   │   ├── postgresql.go
│   │   ├── rails_db.go
│   │   ├── rails_db_test.go
│   │   ├── service.go
│   │   ├── supervisor.go
│   │   ├── supervisor_test.go
│   │   └── workbench2.go
│   ├── cli/
│   │   ├── external.go
│   │   ├── flags.go
│   │   ├── get.go
│   │   └── get_test.go
│   ├── cloud/
│   │   ├── azure/
│   │   │   ├── azure.go
│   │   │   └── azure_test.go
│   │   ├── cloudtest/
│   │   │   ├── cmd.go
│   │   │   ├── tester.go
│   │   │   └── tester_test.go
│   │   ├── ec2/
│   │   │   ├── ec2.go
│   │   │   └── ec2_test.go
│   │   ├── interfaces.go
│   │   ├── loopback/
│   │   │   ├── loopback.go
│   │   │   └── loopback_test.go
│   │   ├── price.go
│   │   └── price_test.go
│   ├── cmd/
│   │   ├── cmd.go
│   │   ├── cmd_test.go
│   │   └── parseflags.go
│   ├── cmdtest/
│   │   └── leakcheck.go
│   ├── config/
│   │   ├── cmd.go
│   │   ├── cmd_test.go
│   │   ├── config.default.yml
│   │   ├── default.go
│   │   ├── deprecated.go
│   │   ├── deprecated_keepstore.go
│   │   ├── deprecated_keepstore_test.go
│   │   ├── deprecated_test.go
│   │   ├── export.go
│   │   ├── export_test.go
│   │   ├── load.go
│   │   ├── load_test.go
│   │   └── uptodate_test.go
│   ├── controller/
│   │   ├── api/
│   │   │   └── routable.go
│   │   ├── auth_test.go
│   │   ├── cmd.go
│   │   ├── dblock/
│   │   │   ├── dblock.go
│   │   │   └── dblock_test.go
│   │   ├── fed_generic.go
│   │   ├── federation/
│   │   │   ├── collection_test.go
│   │   │   ├── conn.go
│   │   │   ├── federation_test.go
│   │   │   ├── generate.go
│   │   │   ├── generated.go
│   │   │   ├── generated_test.go
│   │   │   ├── group_test.go
│   │   │   ├── list.go
│   │   │   ├── list_test.go
│   │   │   ├── login_test.go
│   │   │   ├── logout_test.go
│   │   │   └── user_test.go
│   │   ├── federation.go
│   │   ├── federation_test.go
│   │   ├── handler.go
│   │   ├── handler_test.go
│   │   ├── integration_test.go
│   │   ├── localdb/
│   │   │   ├── authorized_key.go
│   │   │   ├── authorized_key_test.go
│   │   │   ├── collection.go
│   │   │   ├── collection_test.go
│   │   │   ├── conn.go
│   │   │   ├── container.go
│   │   │   ├── container_gateway.go
│   │   │   ├── container_gateway_test.go
│   │   │   ├── container_request.go
│   │   │   ├── container_request_test.go
│   │   │   ├── container_test.go
│   │   │   ├── docker_test.go
│   │   │   ├── group.go
│   │   │   ├── group_test.go
│   │   │   ├── link.go
│   │   │   ├── link_test.go
│   │   │   ├── localdb_test.go
│   │   │   ├── log_activity.go
│   │   │   ├── log_activity_test.go
│   │   │   ├── login.go
│   │   │   ├── login_docker_test/
│   │   │   │   ├── add_example_user.ldif
│   │   │   │   ├── run_controller.sh
│   │   │   │   ├── setup_pam_test.sh
│   │   │   │   ├── setup_suite.sh
│   │   │   │   ├── setup_suite_users.sh
│   │   │   │   ├── start_controller_container.sh
│   │   │   │   └── teardown_suite.sh
│   │   │   ├── login_docker_test.go
│   │   │   ├── login_ldap.go
│   │   │   ├── login_oidc.go
│   │   │   ├── login_oidc_test.go
│   │   │   ├── login_pam.go
│   │   │   ├── login_pam_static.go
│   │   │   ├── login_test.go
│   │   │   ├── login_testuser.go
│   │   │   ├── login_testuser_test.go
│   │   │   ├── logout.go
│   │   │   └── testdata/
│   │   │       ├── dsa.pub
│   │   │       ├── ecdsa-sk.pub
│   │   │       ├── ecdsa.pub
│   │   │       ├── ed25519-sk.pub
│   │   │       ├── ed25519.pub
│   │   │       ├── generate
│   │   │       └── rsa.pub
│   │   ├── proxy.go
│   │   ├── rails_restart_test.go
│   │   ├── railsproxy/
│   │   │   └── railsproxy.go
│   │   ├── router/
│   │   │   ├── checker_test.go
│   │   │   ├── error.go
│   │   │   ├── request.go
│   │   │   ├── request_test.go
│   │   │   ├── response.go
│   │   │   ├── router.go
│   │   │   └── router_test.go
│   │   ├── rpc/
│   │   │   ├── conn.go
│   │   │   └── conn_test.go
│   │   ├── semaphore.go
│   │   ├── server_test.go
│   │   └── trash.go
│   ├── crunchrun/
│   │   ├── background.go
│   │   ├── bufthenwrite.go
│   │   ├── cgroup.go
│   │   ├── cgroup_test.go
│   │   ├── container_gateway.go
│   │   ├── copier.go
│   │   ├── copier_test.go
│   │   ├── crunchrun.go
│   │   ├── crunchrun_test.go
│   │   ├── cuda.go
│   │   ├── docker.go
│   │   ├── docker_test.go
│   │   ├── executor.go
│   │   ├── executor_test.go
│   │   ├── integration_test.go
│   │   ├── logging.go
│   │   ├── logging_test.go
│   │   ├── logscanner.go
│   │   ├── logscanner_test.go
│   │   ├── singularity.go
│   │   ├── singularity_test.go
│   │   └── testdata/
│   │       └── fakestat/
│   │           ├── cgroup.procs
│   │           └── cgroupid/
│   │               ├── cgroup.procs
│   │               └── memory.stat
│   ├── crunchstat/
│   │   ├── command.go
│   │   ├── crunchstat.go
│   │   ├── crunchstat_test.go
│   │   └── testdata/
│   │       ├── debian10/
│   │       │   ├── proc/
│   │       │   │   ├── 3288/
│   │       │   │   │   ├── cgroup
│   │       │   │   │   ├── cpuset
│   │       │   │   │   └── net/
│   │       │   │   │       └── dev
│   │       │   │   ├── cpuinfo
│   │       │   │   ├── mounts
│   │       │   │   └── self/
│   │       │   │       └── smaps
│   │       │   └── sys/
│   │       │       └── fs/
│   │       │           └── cgroup/
│   │       │               └── user.slice/
│   │       │                   ├── cpu.max
│   │       │                   ├── io.stat
│   │       │                   └── user-1000.slice/
│   │       │                       └── session-7.scope/
│   │       │                           ├── cpu.stat
│   │       │                           ├── memory.current
│   │       │                           ├── memory.stat
│   │       │                           └── memory.swap.current
│   │       ├── debian11/
│   │       │   ├── proc/
│   │       │   │   ├── 4153022/
│   │       │   │   │   ├── cgroup
│   │       │   │   │   ├── cpuset
│   │       │   │   │   └── net/
│   │       │   │   │       └── dev
│   │       │   │   ├── cpuinfo
│   │       │   │   ├── mounts
│   │       │   │   └── self/
│   │       │   │       └── smaps
│   │       │   └── sys/
│   │       │       └── fs/
│   │       │           └── cgroup/
│   │       │               └── user.slice/
│   │       │                   ├── cpu.max
│   │       │                   ├── cpuset.cpus.effective
│   │       │                   ├── io.stat
│   │       │                   └── user-1000.slice/
│   │       │                       └── session-5424.scope/
│   │       │                           ├── cpu.stat
│   │       │                           ├── memory.current
│   │       │                           ├── memory.stat
│   │       │                           └── memory.swap.current
│   │       ├── debian12/
│   │       │   ├── proc/
│   │       │   │   ├── 1115883/
│   │       │   │   │   ├── cgroup
│   │       │   │   │   ├── cpuset
│   │       │   │   │   └── net/
│   │       │   │   │       └── dev
│   │       │   │   ├── cpuinfo
│   │       │   │   ├── mounts
│   │       │   │   └── self/
│   │       │   │       └── smaps
│   │       │   └── sys/
│   │       │       └── fs/
│   │       │           └── cgroup/
│   │       │               └── user.slice/
│   │       │                   ├── cpuset.cpus.effective
│   │       │                   ├── io.stat
│   │       │                   └── user-1000.slice/
│   │       │                       └── session-4.scope/
│   │       │                           ├── cpu.max
│   │       │                           ├── cpu.stat
│   │       │                           ├── memory.current
│   │       │                           ├── memory.stat
│   │       │                           └── memory.swap.current
│   │       ├── ubuntu1804/
│   │       │   ├── proc/
│   │       │   │   ├── 2523/
│   │       │   │   │   ├── cgroup
│   │       │   │   │   ├── cpuset
│   │       │   │   │   └── net/
│   │       │   │   │       └── dev
│   │       │   │   ├── cpuinfo
│   │       │   │   ├── mounts
│   │       │   │   └── self/
│   │       │   │       └── smaps
│   │       │   └── sys/
│   │       │       └── fs/
│   │       │           └── cgroup/
│   │       │               ├── blkio/
│   │       │               │   └── user.slice/
│   │       │               │       └── blkio.throttle.io_service_bytes
│   │       │               ├── cpu,cpuacct/
│   │       │               │   └── user.slice/
│   │       │               │       └── cpuacct.stat
│   │       │               ├── cpuset/
│   │       │               │   └── cpuset.cpus
│   │       │               ├── memory/
│   │       │               │   └── user.slice/
│   │       │               │       └── memory.stat
│   │       │               └── unified/
│   │       │                   └── user.slice/
│   │       │                       └── user-1000.slice/
│   │       │                           └── session-1.scope/
│   │       │                               └── cpu.stat
│   │       ├── ubuntu2004/
│   │       │   ├── proc/
│   │       │   │   ├── 1360/
│   │       │   │   │   ├── cgroup
│   │       │   │   │   ├── cpuset
│   │       │   │   │   └── net/
│   │       │   │   │       └── dev
│   │       │   │   ├── cpuinfo
│   │       │   │   ├── mounts
│   │       │   │   └── self/
│   │       │   │       └── smaps
│   │       │   └── sys/
│   │       │       └── fs/
│   │       │           └── cgroup/
│   │       │               ├── blkio/
│   │       │               │   └── user.slice/
│   │       │               │       └── blkio.throttle.io_service_bytes
│   │       │               ├── cpu,cpuacct/
│   │       │               │   └── user.slice/
│   │       │               │       └── cpuacct.stat
│   │       │               ├── cpuset/
│   │       │               │   └── cpuset.cpus
│   │       │               ├── memory/
│   │       │               │   └── user.slice/
│   │       │               │       └── user-1000.slice/
│   │       │               │           └── session-2.scope/
│   │       │               │               └── memory.stat
│   │       │               └── unified/
│   │       │                   └── user.slice/
│   │       │                       └── user-1000.slice/
│   │       │                           └── session-2.scope/
│   │       │                               └── cpu.stat
│   │       └── ubuntu2204/
│   │           ├── proc/
│   │           │   ├── 1967/
│   │           │   │   ├── cgroup
│   │           │   │   ├── cpuset
│   │           │   │   └── net/
│   │           │   │       └── dev
│   │           │   ├── cpuinfo
│   │           │   ├── mounts
│   │           │   └── self/
│   │           │       └── smaps
│   │           └── sys/
│   │               └── fs/
│   │                   └── cgroup/
│   │                       └── user.slice/
│   │                           ├── cpu.max
│   │                           ├── cpuset.cpus.effective
│   │                           ├── io.stat
│   │                           └── user-1000.slice/
│   │                               └── session-1.scope/
│   │                                   ├── cpu.stat
│   │                                   ├── memory.current
│   │                                   ├── memory.stat
│   │                                   └── memory.swap.current
│   ├── ctrlctx/
│   │   ├── auth.go
│   │   ├── auth_test.go
│   │   ├── db.go
│   │   └── db_test.go
│   ├── deduplicationreport/
│   │   ├── command.go
│   │   ├── report.go
│   │   └── report_test.go
│   ├── diagnostics/
│   │   ├── cmd.go
│   │   └── docker_image_test.go
│   ├── dispatchcloud/
│   │   ├── adminclient.go
│   │   ├── cmd.go
│   │   ├── container/
│   │   │   ├── node_size.go
│   │   │   ├── node_size_test.go
│   │   │   ├── queue.go
│   │   │   └── queue_test.go
│   │   ├── dispatcher.go
│   │   ├── dispatcher_test.go
│   │   ├── driver.go
│   │   ├── gocheck_test.go
│   │   ├── logger.go
│   │   ├── readme.go
│   │   ├── readme_states.txt
│   │   ├── scheduler/
│   │   │   ├── fix_stale_locks.go
│   │   │   ├── gocheck_test.go
│   │   │   ├── interfaces.go
│   │   │   ├── run_queue.go
│   │   │   ├── run_queue_test.go
│   │   │   ├── scheduler.go
│   │   │   ├── sync.go
│   │   │   └── sync_test.go
│   │   ├── sshexecutor/
│   │   │   ├── executor.go
│   │   │   └── executor_test.go
│   │   ├── test/
│   │   │   ├── doc.go
│   │   │   ├── fixtures.go
│   │   │   ├── queue.go
│   │   │   ├── ssh_service.go
│   │   │   ├── sshkey_dispatch
│   │   │   ├── sshkey_dispatch.pub
│   │   │   ├── sshkey_vm
│   │   │   ├── sshkey_vm.pub
│   │   │   └── stub_driver.go
│   │   └── worker/
│   │       ├── gocheck_test.go
│   │       ├── pool.go
│   │       ├── pool_test.go
│   │       ├── runner.go
│   │       ├── throttle.go
│   │       ├── throttle_test.go
│   │       ├── verify.go
│   │       ├── worker.go
│   │       └── worker_test.go
│   ├── lsf/
│   │   ├── dispatch.go
│   │   ├── dispatch_test.go
│   │   ├── lsfcli.go
│   │   └── lsfqueue.go
│   ├── mount/
│   │   ├── command.go
│   │   ├── command_test.go
│   │   ├── fs.go
│   │   └── fs_test.go
│   ├── pam/
│   │   ├── .gitignore
│   │   ├── README
│   │   ├── docker_test.go
│   │   ├── fpm-info.sh
│   │   ├── pam-configs-arvados
│   │   ├── pam_arvados.go
│   │   ├── pam_c.go
│   │   └── testclient.go
│   ├── recovercollection/
│   │   ├── cmd.go
│   │   └── cmd_test.go
│   ├── selfsigned/
│   │   ├── cert.go
│   │   └── cert_test.go
│   ├── service/
│   │   ├── cmd.go
│   │   ├── cmd_test.go
│   │   ├── error.go
│   │   ├── log.go
│   │   └── tls.go
│   └── webdavfs/
│       ├── fs.go
│       └── fs_test.go
├── sdk/
│   ├── cli/
│   │   ├── .gitignore
│   │   ├── Gemfile
│   │   ├── LICENSE-2.0.txt
│   │   ├── Rakefile
│   │   ├── arvados-cli.gemspec
│   │   ├── bin/
│   │   │   ├── arv
│   │   │   └── arv-tag
│   │   └── test/
│   │       ├── binstub_arv-mount/
│   │       │   └── arv-mount
│   │       ├── binstub_clean_fail/
│   │       │   └── arv-mount
│   │       ├── binstub_docker_noop/
│   │       │   └── docker.io
│   │       ├── binstub_output_coll_owner/
│   │       │   └── python
│   │       ├── binstub_sanity_check/
│   │       │   ├── docker.io
│   │       │   └── true
│   │       ├── test_arv-collection-create.rb
│   │       ├── test_arv-get.rb
│   │       ├── test_arv-keep-get.rb
│   │       ├── test_arv-keep-put.rb
│   │       ├── test_arv-tag.rb
│   │       └── test_arv-ws.rb
│   ├── cwl/
│   │   ├── LICENSE-2.0.txt
│   │   ├── MANIFEST.in
│   │   ├── README.rst
│   │   ├── arvados_cwl/
│   │   │   ├── __init__.py
│   │   │   ├── arv-cwl-schema-v1.0.yml
│   │   │   ├── arv-cwl-schema-v1.1.yml
│   │   │   ├── arv-cwl-schema-v1.2.yml
│   │   │   ├── arvcontainer.py
│   │   │   ├── arvdocker.py
│   │   │   ├── arvtool.py
│   │   │   ├── arvworkflow.py
│   │   │   ├── context.py
│   │   │   ├── done.py
│   │   │   ├── executor.py
│   │   │   ├── fsaccess.py
│   │   │   ├── pathmapper.py
│   │   │   ├── perf.py
│   │   │   ├── runner.py
│   │   │   └── util.py
│   │   ├── arvados_version.py
│   │   ├── bin/
│   │   │   ├── arvados-cwl-runner
│   │   │   └── cwl-runner
│   │   ├── fpm-info.sh
│   │   ├── pyproject.toml
│   │   ├── setup.py
│   │   └── tests/
│   │       ├── 10380-trailing-slash-dir.cwl
│   │       ├── 12213-keepref-expr.cwl
│   │       ├── 12213-keepref-job.yml
│   │       ├── 12213-keepref-tool.cwl
│   │       ├── 12213-keepref-wf.cwl
│   │       ├── 12418-glob-empty-collection.cwl
│   │       ├── 13931-size-job.yml
│   │       ├── 13931-size.cwl
│   │       ├── 13976-keepref-wf.cwl
│   │       ├── 15241-writable-dir-job.json
│   │       ├── 15241-writable-dir.cwl
│   │       ├── 15295-bad-keep-ref.cwl
│   │       ├── 16169-no-listing-hint.cwl
│   │       ├── 16377-missing-default.cwl
│   │       ├── 17004-output-props.cwl
│   │       ├── 17267-broken-schemas.cwl
│   │       ├── 17521-dot-slash-glob.cwl
│   │       ├── 17801-runtime-outdir.cwl
│   │       ├── 17858-pack-visit-crash.cwl
│   │       ├── 17879-ignore-sbg-fields-job.yml
│   │       ├── 17879-ignore-sbg-fields.cwl
│   │       ├── 18888-download_def.cwl
│   │       ├── 18994-basename/
│   │       │   ├── check.cwl
│   │       │   ├── rename.cwl
│   │       │   ├── wf_ren.cwl
│   │       │   └── whale.txt
│   │       ├── 19070-copy-deps.cwl
│   │       ├── 19109-upload-secondary/
│   │       │   ├── file1.txt
│   │       │   ├── file1.txt.tbi
│   │       │   ├── file2.txt
│   │       │   └── file2.txt.tbi
│   │       ├── 19109-upload-secondary.cwl
│   │       ├── 19109-upload-secondary.yml
│   │       ├── 19678-name-id.cwl
│   │       ├── 19678-name-id.yml
│   │       ├── 22466/
│   │       │   ├── fake.bam
│   │       │   └── input.yml
│   │       ├── 22466-output-glob-expressions-secondaryfile.cwl
│   │       ├── __init__.py
│   │       ├── arvados-tests.yml
│   │       ├── badkeep.yml
│   │       ├── cat.cwl
│   │       ├── cat2.cwl
│   │       ├── chipseq/
│   │       │   ├── chip-seq-single.json
│   │       │   ├── cwl-packed.json
│   │       │   └── data/
│   │       │       └── Genomes/
│   │       │           ├── Blacklist/
│   │       │           │   └── lists2/
│   │       │           │       └── hg38-blacklist.v2.bed
│   │       │           ├── Drosophila_melanogaster/
│   │       │           │   └── dmel_r6.16/
│   │       │           │       ├── Bowtie2Index/
│   │       │           │       │   └── genome.fa
│   │       │           │       └── WholeGenome/
│   │       │           │           ├── genome.dict
│   │       │           │           ├── genome.fa
│   │       │           │           └── genome.fa.fai
│   │       │           └── Homo_sapiens/
│   │       │               └── GRCh38.p2/
│   │       │                   ├── Bowtie2Index/
│   │       │                   │   └── genome.fa
│   │       │                   └── WholeGenome/
│   │       │                       ├── genome.dict
│   │       │                       ├── genome.fa
│   │       │                       └── genome.fa.fai
│   │       ├── collection_per_tool/
│   │       │   ├── a.txt
│   │       │   ├── b.txt
│   │       │   ├── c.txt
│   │       │   ├── collection_per_tool.cwl
│   │       │   ├── collection_per_tool_packed.cwl
│   │       │   ├── collection_per_tool_wrapper.cwl
│   │       │   ├── step1.cwl
│   │       │   └── step2.cwl
│   │       ├── conftest.py
│   │       ├── container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt
│   │       ├── container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt
│   │       ├── dir-job.yml
│   │       ├── dir-job2.yml
│   │       ├── fake-keep-mount/
│   │       │   └── fake_collection_dir/
│   │       │       └── subdir/
│   │       │           └── banana.txt
│   │       ├── federation/
│   │       │   ├── README
│   │       │   ├── cases/
│   │       │   │   ├── base-case.cwl
│   │       │   │   ├── cat.cwl
│   │       │   │   ├── hint-on-tool.cwl
│   │       │   │   ├── hint-on-wf.cwl
│   │       │   │   ├── md5sum-tool-hint.cwl
│   │       │   │   ├── md5sum.cwl
│   │       │   │   ├── remote-case.cwl
│   │       │   │   ├── rev-input-to-output.cwl
│   │       │   │   ├── rev.cwl
│   │       │   │   ├── runner-home-step-remote.cwl
│   │       │   │   ├── runner-remote-step-home.cwl
│   │       │   │   ├── scatter-gather.cwl
│   │       │   │   ├── threestep-remote.cwl
│   │       │   │   ├── twostep-both-remote.cwl
│   │       │   │   ├── twostep-home-to-remote.cwl
│   │       │   │   ├── twostep-remote-copy-to-home.cwl
│   │       │   │   └── twostep-remote-to-home.cwl
│   │       │   ├── data/
│   │       │   │   ├── base-case-input.txt
│   │       │   │   ├── hint-on-tool.txt
│   │       │   │   ├── hint-on-wf.txt
│   │       │   │   ├── remote-case-input.txt
│   │       │   │   ├── runner-home-step-remote-input.txt
│   │       │   │   ├── runner-remote-step-home-input.txt
│   │       │   │   ├── scatter-gather-s1.txt
│   │       │   │   ├── scatter-gather-s2.txt
│   │       │   │   ├── scatter-gather-s3.txt
│   │       │   │   ├── threestep-remote.txt
│   │       │   │   ├── twostep-both-remote.txt
│   │       │   │   ├── twostep-home-to-remote.txt
│   │       │   │   ├── twostep-remote-copy-to-home.txt
│   │       │   │   └── twostep-remote-to-home.txt
│   │       │   ├── framework/
│   │       │   │   ├── check-exist.cwl
│   │       │   │   ├── check_exist.py
│   │       │   │   ├── dockerbuild.cwl
│   │       │   │   ├── prepare.cwl
│   │       │   │   ├── prepare.py
│   │       │   │   ├── run-acr.cwl
│   │       │   │   └── testcase.cwl
│   │       │   └── main.cwl
│   │       ├── hello.yml
│   │       ├── hg19/
│   │       │   ├── hg19.fa
│   │       │   ├── hg19.fa.amb
│   │       │   ├── hg19.fa.ann
│   │       │   └── hg19.fa.fai
│   │       ├── hw.py
│   │       ├── input/
│   │       │   └── blorp.txt
│   │       ├── keep-dir-test-input.cwl
│   │       ├── keep-dir-test-input2.cwl
│   │       ├── keep-dir-test-input3.cwl
│   │       ├── listing-job.yml
│   │       ├── makes_intermediates/
│   │       │   ├── echo.cwl
│   │       │   ├── hello1.txt
│   │       │   ├── run_in_single.cwl
│   │       │   └── subwf.cwl
│   │       ├── matcher.py
│   │       ├── mock_discovery.py
│   │       ├── noreuse.cwl
│   │       ├── octo.yml
│   │       ├── oom/
│   │       │   ├── 19975-oom-mispelled.cwl
│   │       │   ├── 19975-oom.cwl
│   │       │   ├── 19975-oom3.cwl
│   │       │   ├── fakeoom.py
│   │       │   ├── fakeoom.yml
│   │       │   ├── fakeoom2.py
│   │       │   ├── fakeoom2.yml
│   │       │   ├── fakeoom3.py
│   │       │   └── fakeoom3.yml
│   │       ├── order/
│   │       │   ├── empty_order.json
│   │       │   └── inputs_test_order.json
│   │       ├── scripts/
│   │       │   └── download_all_data.sh
│   │       ├── secondary/
│   │       │   ├── dir/
│   │       │   │   ├── hg19.fa
│   │       │   │   ├── hg19.fa.amb
│   │       │   │   ├── hg19.fa.ann
│   │       │   │   └── hg19.fa.fai
│   │       │   ├── ls.cwl
│   │       │   ├── sub.cwl
│   │       │   ├── wf-job.yml
│   │       │   └── wf.cwl
│   │       ├── secondaryFiles/
│   │       │   ├── example1.cwl
│   │       │   ├── example3.cwl
│   │       │   ├── hello.txt
│   │       │   ├── hello.txt.idx
│   │       │   └── inp3.yml
│   │       ├── secret_test_job.yml
│   │       ├── stdout.cwl
│   │       ├── submit_test_job.json
│   │       ├── submit_test_job_missing.json
│   │       ├── submit_test_job_s3.json
│   │       ├── submit_test_job_with_inconsistent_uuids.json
│   │       ├── submit_test_job_with_mismatched_uuids.json
│   │       ├── submit_test_job_with_uuids.json
│   │       ├── test_conformance.py
│   │       ├── test_container.py
│   │       ├── test_copy_deps.py
│   │       ├── test_fsaccess.py
│   │       ├── test_integration.py
│   │       ├── test_make_output.py
│   │       ├── test_pathmapper.py
│   │       ├── test_submit.py
│   │       ├── test_tq.py
│   │       ├── test_urljoin.py
│   │       ├── test_util.py
│   │       ├── testdir/
│   │       │   ├── a
│   │       │   ├── b
│   │       │   └── c/
│   │       │       └── d
│   │       ├── tmp1/
│   │       │   └── tmp2/
│   │       │       └── tmp3/
│   │       │           └── .gitkeep
│   │       ├── tool/
│   │       │   ├── blub.txt
│   │       │   ├── blub.txt.cat
│   │       │   ├── submit_tool.cwl
│   │       │   ├── submit_tool_map.cwl
│   │       │   ├── tool_with_sf.cwl
│   │       │   └── tool_with_sf.yml
│   │       ├── wf/
│   │       │   ├── 16169-step.cwl
│   │       │   ├── check_mem.py
│   │       │   ├── echo-subwf.cwl
│   │       │   ├── echo-wf.cwl
│   │       │   ├── echo_a.cwl
│   │       │   ├── echo_b.cwl
│   │       │   ├── expect_arvworkflow.cwl
│   │       │   ├── expect_packed.cwl
│   │       │   ├── expect_upload_packed.cwl
│   │       │   ├── expect_upload_wrapper.cwl
│   │       │   ├── expect_upload_wrapper_altname.cwl
│   │       │   ├── expect_upload_wrapper_map.cwl
│   │       │   ├── hello.txt
│   │       │   ├── indir1/
│   │       │   │   └── hello2.txt
│   │       │   ├── inputs_test.cwl
│   │       │   ├── listing_deep.cwl
│   │       │   ├── listing_none.cwl
│   │       │   ├── listing_shallow.cwl
│   │       │   ├── output_dir.cwl
│   │       │   ├── output_dir_wf.cwl
│   │       │   ├── revsort/
│   │       │   │   ├── revsort.cwl
│   │       │   │   ├── revtool.cwl
│   │       │   │   └── sorttool.cwl
│   │       │   ├── runin-reqs-wf.cwl
│   │       │   ├── runin-reqs-wf2.cwl
│   │       │   ├── runin-reqs-wf3.cwl
│   │       │   ├── runin-reqs-wf4.cwl
│   │       │   ├── runin-reqs-wf5.cwl
│   │       │   ├── runin-wf.cwl
│   │       │   ├── runin-with-ttl-wf.cwl
│   │       │   ├── runseparate-wf.cwl
│   │       │   ├── scatter2.cwl
│   │       │   ├── scatter2_subwf.cwl
│   │       │   ├── secret_job.cwl
│   │       │   ├── secret_wf.cwl
│   │       │   ├── submit_keepref_wf.cwl
│   │       │   ├── submit_storage_class_wf.cwl
│   │       │   ├── submit_wf.cwl
│   │       │   ├── submit_wf_map.cwl
│   │       │   ├── submit_wf_no_reuse.cwl
│   │       │   ├── submit_wf_packed.cwl
│   │       │   ├── submit_wf_process_properties.cwl
│   │       │   ├── submit_wf_runner_resources.cwl
│   │       │   ├── submit_wf_wrapper.cwl
│   │       │   └── trick_defaults2.cwl
│   │       └── wf-defaults/
│   │           ├── default-dir1.cwl
│   │           ├── default-dir2.cwl
│   │           ├── default-dir3.cwl
│   │           ├── default-dir4.cwl
│   │           ├── default-dir5.cwl
│   │           ├── default-dir6.cwl
│   │           ├── default-dir6a.cwl
│   │           ├── default-dir7.cwl
│   │           ├── default-dir7a.cwl
│   │           ├── default-dir8.cwl
│   │           ├── inp1/
│   │           │   └── hello.txt
│   │           ├── wf1.cwl
│   │           ├── wf2.cwl
│   │           ├── wf3.cwl
│   │           ├── wf4.cwl
│   │           ├── wf5.cwl
│   │           ├── wf6.cwl
│   │           ├── wf7.cwl
│   │           └── wf8.cwl
│   ├── go/
│   │   ├── arvados/
│   │   │   ├── api.go
│   │   │   ├── api_client_authorization.go
│   │   │   ├── authorized_key.go
│   │   │   ├── blob_signature.go
│   │   │   ├── blob_signature_test.go
│   │   │   ├── block_segment.go
│   │   │   ├── block_segment_test.go
│   │   │   ├── byte_size.go
│   │   │   ├── byte_size_test.go
│   │   │   ├── client.go
│   │   │   ├── client_test.go
│   │   │   ├── collection.go
│   │   │   ├── collection_test.go
│   │   │   ├── config.go
│   │   │   ├── config_test.go
│   │   │   ├── container.go
│   │   │   ├── container_gateway.go
│   │   │   ├── context.go
│   │   │   ├── contextgroup.go
│   │   │   ├── credential.go
│   │   │   ├── doc.go
│   │   │   ├── duration.go
│   │   │   ├── duration_test.go
│   │   │   ├── error.go
│   │   │   ├── fs_backend.go
│   │   │   ├── fs_base.go
│   │   │   ├── fs_collection.go
│   │   │   ├── fs_collection_test.go
│   │   │   ├── fs_deferred.go
│   │   │   ├── fs_filehandle.go
│   │   │   ├── fs_getternode.go
│   │   │   ├── fs_lookup.go
│   │   │   ├── fs_project.go
│   │   │   ├── fs_project_test.go
│   │   │   ├── fs_site.go
│   │   │   ├── fs_site_test.go
│   │   │   ├── fs_users.go
│   │   │   ├── group.go
│   │   │   ├── integration_test_cluster.go
│   │   │   ├── keep_block.go
│   │   │   ├── keep_cache.go
│   │   │   ├── keep_cache_test.go
│   │   │   ├── keep_metrics.go
│   │   │   ├── keep_service.go
│   │   │   ├── keep_service_test.go
│   │   │   ├── limiter.go
│   │   │   ├── limiter_test.go
│   │   │   ├── link.go
│   │   │   ├── log.go
│   │   │   ├── login.go
│   │   │   ├── node.go
│   │   │   ├── postgresql.go
│   │   │   ├── resource_list.go
│   │   │   ├── resource_list_test.go
│   │   │   ├── throttle.go
│   │   │   ├── tls_certs.go
│   │   │   ├── tls_certs_test.go
│   │   │   ├── tls_certs_test_showenv.go
│   │   │   ├── user.go
│   │   │   ├── virtual_machine.go
│   │   │   ├── vocabulary.go
│   │   │   ├── vocabulary_test.go
│   │   │   └── workflow.go
│   │   ├── arvadosclient/
│   │   │   ├── arvadosclient.go
│   │   │   ├── arvadosclient_test.go
│   │   │   └── pool.go
│   │   ├── arvadostest/
│   │   │   ├── api.go
│   │   │   ├── api_test.go
│   │   │   ├── busybox_image.go
│   │   │   ├── db.go
│   │   │   ├── fixtures.go
│   │   │   ├── keep_stub.go
│   │   │   ├── manifest.go
│   │   │   ├── metrics.go
│   │   │   ├── oidc_provider.go
│   │   │   ├── proxy.go
│   │   │   ├── run_servers.go
│   │   │   └── stub.go
│   │   ├── asyncbuf/
│   │   │   ├── buf.go
│   │   │   └── buf_test.go
│   │   ├── auth/
│   │   │   ├── auth.go
│   │   │   ├── handlers.go
│   │   │   ├── handlers_test.go
│   │   │   └── salt.go
│   │   ├── blockdigest/
│   │   │   ├── blockdigest.go
│   │   │   ├── blockdigest_test.go
│   │   │   └── testing.go
│   │   ├── config/
│   │   │   ├── dump.go
│   │   │   └── load.go
│   │   ├── ctxlog/
│   │   │   └── log.go
│   │   ├── dispatch/
│   │   │   ├── dispatch.go
│   │   │   ├── dispatch_test.go
│   │   │   ├── throttle.go
│   │   │   └── throttle_test.go
│   │   ├── health/
│   │   │   ├── aggregator.go
│   │   │   ├── aggregator_test.go
│   │   │   ├── handler.go
│   │   │   └── handler_test.go
│   │   ├── httpserver/
│   │   │   ├── error.go
│   │   │   ├── httpserver.go
│   │   │   ├── id_generator.go
│   │   │   ├── inspect.go
│   │   │   ├── inspect_test.go
│   │   │   ├── log.go
│   │   │   ├── logger.go
│   │   │   ├── logger_test.go
│   │   │   ├── metrics.go
│   │   │   ├── request_limiter.go
│   │   │   ├── request_limiter_test.go
│   │   │   └── responsewriter.go
│   │   ├── keepclient/
│   │   │   ├── collectionreader.go
│   │   │   ├── collectionreader_test.go
│   │   │   ├── discover.go
│   │   │   ├── discover_test.go
│   │   │   ├── gateway_shim.go
│   │   │   ├── hashcheck.go
│   │   │   ├── hashcheck_test.go
│   │   │   ├── keepclient.go
│   │   │   ├── keepclient_test.go
│   │   │   ├── perms.go
│   │   │   ├── root_sorter.go
│   │   │   ├── root_sorter_test.go
│   │   │   └── support.go
│   │   └── stats/
│   │       ├── duration.go
│   │       └── duration_test.go
│   ├── python/
│   │   ├── LICENSE-2.0.txt
│   │   ├── MANIFEST.in
│   │   ├── README.rst
│   │   ├── arvados/
│   │   │   ├── __init__.py
│   │   │   ├── _internal/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── basedirs.py
│   │   │   │   ├── diskcache.py
│   │   │   │   ├── downloaderbase.py
│   │   │   │   ├── http_to_keep.py
│   │   │   │   ├── pycurl.py
│   │   │   │   ├── report_template.py
│   │   │   │   ├── s3_to_keep.py
│   │   │   │   ├── streams.py
│   │   │   │   └── to_keep_util.py
│   │   │   ├── api.py
│   │   │   ├── arvfile.py
│   │   │   ├── cache.py
│   │   │   ├── collection.py
│   │   │   ├── commands/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── _util.py
│   │   │   │   ├── arv_copy.py
│   │   │   │   ├── arvcli.py
│   │   │   │   ├── get.py
│   │   │   │   ├── keepdocker.py
│   │   │   │   ├── ls.py
│   │   │   │   ├── put.py
│   │   │   │   ├── run.py
│   │   │   │   └── ws.py
│   │   │   ├── config.py
│   │   │   ├── errors.py
│   │   │   ├── events.py
│   │   │   ├── keep.py
│   │   │   ├── logging.py
│   │   │   ├── retry.py
│   │   │   ├── safeapi.py
│   │   │   ├── util.py
│   │   │   └── vocabulary.py
│   │   ├── arvados-v1-discovery.json
│   │   ├── arvados_version.py
│   │   ├── bin/
│   │   │   ├── arv-copy
│   │   │   ├── arv-get
│   │   │   ├── arv-keepdocker
│   │   │   ├── arv-ls
│   │   │   ├── arv-normalize
│   │   │   ├── arv-put
│   │   │   └── arv-ws
│   │   ├── discovery2pydoc.py
│   │   ├── fpm-info.sh
│   │   ├── pyproject.toml
│   │   ├── pytest.ini
│   │   ├── setup.py
│   │   └── tests/
│   │       ├── __init__.py
│   │       ├── arvados_testutil.py
│   │       ├── conftest.py
│   │       ├── data/
│   │       │   ├── 1000G_ref_manifest
│   │       │   ├── hello-world-README.txt
│   │       │   └── jlake_manifest
│   │       ├── keepstub.py
│   │       ├── manifest_examples.py
│   │       ├── nginx.conf
│   │       ├── performance/
│   │       │   ├── __init__.py
│   │       │   ├── performance_profiler.py
│   │       │   └── test_a_sample.py
│   │       ├── run_test_server.py
│   │       ├── test_api.py
│   │       ├── test_arv_copy.py
│   │       ├── test_arv_get.py
│   │       ├── test_arv_keepdocker.py
│   │       ├── test_arv_ls.py
│   │       ├── test_arv_normalize.py
│   │       ├── test_arv_put.py
│   │       ├── test_arv_ws.py
│   │       ├── test_arvcli.py
│   │       ├── test_arvfile.py
│   │       ├── test_basedirs.py
│   │       ├── test_benchmark_collections.py
│   │       ├── test_cmd_util.py
│   │       ├── test_collections.py
│   │       ├── test_computed_permissions.py
│   │       ├── test_config.py
│   │       ├── test_errors.py
│   │       ├── test_events.py
│   │       ├── test_http_cache.py
│   │       ├── test_http_to_keep.py
│   │       ├── test_internal.py
│   │       ├── test_keep_client.py
│   │       ├── test_keep_locator.py
│   │       ├── test_retry.py
│   │       ├── test_retry_job_helpers.py
│   │       ├── test_s3_to_keep.py
│   │       ├── test_storage_classes.py
│   │       ├── test_stream.py
│   │       ├── test_util.py
│   │       └── test_vocabulary.py
│   ├── ruby/
│   │   ├── .gitignore
│   │   ├── Gemfile
│   │   ├── LICENSE-2.0.txt
│   │   ├── README
│   │   ├── Rakefile
│   │   ├── arvados.gemspec
│   │   ├── lib/
│   │   │   ├── arvados/
│   │   │   │   ├── collection.rb
│   │   │   │   ├── google_api_client.rb
│   │   │   │   └── keep.rb
│   │   │   └── arvados.rb
│   │   └── test/
│   │       ├── sdk_fixtures.rb
│   │       ├── test_big_request.rb
│   │       ├── test_collection.rb
│   │       ├── test_keep_manifest.rb
│   │       └── test_request_id.rb
│   └── ruby-google-api-client/
│       ├── .gitignore
│       ├── .rspec
│       ├── .travis.yml
│       ├── .yardopts
│       ├── CHANGELOG.md
│       ├── CONTRIBUTING.md
│       ├── Gemfile
│       ├── LICENSE
│       ├── README.md
│       ├── Rakefile
│       ├── arvados-google-api-client.gemspec
│       ├── lib/
│       │   ├── cacerts.pem
│       │   ├── compat/
│       │   │   └── multi_json.rb
│       │   └── google/
│       │       ├── api_client/
│       │       │   ├── auth/
│       │       │   │   ├── compute_service_account.rb
│       │       │   │   ├── file_storage.rb
│       │       │   │   ├── installed_app.rb
│       │       │   │   ├── jwt_asserter.rb
│       │       │   │   ├── key_utils.rb
│       │       │   │   ├── pkcs12.rb
│       │       │   │   ├── storage.rb
│       │       │   │   └── storages/
│       │       │   │       ├── file_store.rb
│       │       │   │       └── redis_store.rb
│       │       │   ├── batch.rb
│       │       │   ├── charset.rb
│       │       │   ├── client_secrets.rb
│       │       │   ├── discovery/
│       │       │   │   ├── api.rb
│       │       │   │   ├── media.rb
│       │       │   │   ├── method.rb
│       │       │   │   ├── resource.rb
│       │       │   │   └── schema.rb
│       │       │   ├── discovery.rb
│       │       │   ├── environment.rb
│       │       │   ├── errors.rb
│       │       │   ├── logging.rb
│       │       │   ├── media.rb
│       │       │   ├── railtie.rb
│       │       │   ├── reference.rb
│       │       │   ├── request.rb
│       │       │   ├── result.rb
│       │       │   ├── service/
│       │       │   │   ├── batch.rb
│       │       │   │   ├── request.rb
│       │       │   │   ├── resource.rb
│       │       │   │   ├── result.rb
│       │       │   │   ├── simple_file_store.rb
│       │       │   │   └── stub_generator.rb
│       │       │   ├── service.rb
│       │       │   ├── service_account.rb
│       │       │   └── version.rb
│       │       └── api_client.rb
│       ├── rakelib/
│       │   ├── gem.rake
│       │   ├── git.rake
│       │   ├── metrics.rake
│       │   ├── spec.rake
│       │   ├── wiki.rake
│       │   └── yard.rake
│       ├── script/
│       │   ├── package
│       │   └── release
│       ├── spec/
│       │   ├── fixtures/
│       │   │   └── files/
│       │   │       ├── auth_stored_credentials.json
│       │   │       ├── client_secrets.json
│       │   │       ├── privatekey.p12
│       │   │       ├── sample.txt
│       │   │       ├── secret.pem
│       │   │       └── zoo.json
│       │   ├── google/
│       │   │   ├── api_client/
│       │   │   │   ├── auth/
│       │   │   │   │   ├── storage_spec.rb
│       │   │   │   │   └── storages/
│       │   │   │   │       ├── file_store_spec.rb
│       │   │   │   │       └── redis_store_spec.rb
│       │   │   │   ├── batch_spec.rb
│       │   │   │   ├── client_secrets_spec.rb
│       │   │   │   ├── discovery_spec.rb
│       │   │   │   ├── gzip_spec.rb
│       │   │   │   ├── media_spec.rb
│       │   │   │   ├── request_spec.rb
│       │   │   │   ├── result_spec.rb
│       │   │   │   ├── service_account_spec.rb
│       │   │   │   ├── service_spec.rb
│       │   │   │   └── simple_file_store_spec.rb
│       │   │   └── api_client_spec.rb
│       │   └── spec_helper.rb
│       └── yard/
│           ├── bin/
│           │   └── yard-wiki
│           ├── lib/
│           │   ├── yard/
│           │   │   ├── cli/
│           │   │   │   └── wiki.rb
│           │   │   ├── rake/
│           │   │   │   └── wikidoc_task.rb
│           │   │   ├── serializers/
│           │   │   │   └── wiki_serializer.rb
│           │   │   └── templates/
│           │   │       └── helpers/
│           │   │           └── wiki_helper.rb
│           │   └── yard-google-code.rb
│           └── templates/
│               └── default/
│                   ├── class/
│                   │   └── setup.rb
│                   ├── docstring/
│                   │   └── setup.rb
│                   ├── method/
│                   │   └── setup.rb
│                   ├── method_details/
│                   │   └── setup.rb
│                   ├── module/
│                   │   └── setup.rb
│                   └── tags/
│                       └── setup.rb
├── services/
│   ├── api/
│   │   ├── .gitignore
│   │   ├── Gemfile
│   │   ├── Passengerfile.json
│   │   ├── README
│   │   ├── Rakefile
│   │   ├── app/
│   │   │   ├── assets/
│   │   │   │   ├── config/
│   │   │   │   │   └── manifest.js
│   │   │   │   └── stylesheets/
│   │   │   │       ├── application.css
│   │   │   │       └── scaffolds.css.scss
│   │   │   ├── controllers/
│   │   │   │   ├── application_controller.rb
│   │   │   │   ├── arvados/
│   │   │   │   │   └── v1/
│   │   │   │   │       ├── api_client_authorizations_controller.rb
│   │   │   │   │       ├── authorized_keys_controller.rb
│   │   │   │   │       ├── collections_controller.rb
│   │   │   │   │       ├── computed_permissions_controller.rb
│   │   │   │   │       ├── container_requests_controller.rb
│   │   │   │   │       ├── containers_controller.rb
│   │   │   │   │       ├── credentials_controller.rb
│   │   │   │   │       ├── groups_controller.rb
│   │   │   │   │       ├── keep_services_controller.rb
│   │   │   │   │       ├── links_controller.rb
│   │   │   │   │       ├── logs_controller.rb
│   │   │   │   │       ├── management_controller.rb
│   │   │   │   │       ├── schema_controller.rb
│   │   │   │   │       ├── user_agreements_controller.rb
│   │   │   │   │       ├── users_controller.rb
│   │   │   │   │       ├── virtual_machines_controller.rb
│   │   │   │   │       └── workflows_controller.rb
│   │   │   │   ├── database_controller.rb
│   │   │   │   ├── static_controller.rb
│   │   │   │   ├── sys_controller.rb
│   │   │   │   └── user_sessions_controller.rb
│   │   │   ├── helpers/
│   │   │   │   └── application_helper.rb
│   │   │   ├── mailers/
│   │   │   │   ├── .gitkeep
│   │   │   │   ├── admin_notifier.rb
│   │   │   │   ├── profile_notifier.rb
│   │   │   │   └── user_notifier.rb
│   │   │   ├── middlewares/
│   │   │   │   └── arvados_api_token.rb
│   │   │   ├── models/
│   │   │   │   ├── .gitkeep
│   │   │   │   ├── api_client_authorization.rb
│   │   │   │   ├── application_record.rb
│   │   │   │   ├── arvados_model.rb
│   │   │   │   ├── authorized_key.rb
│   │   │   │   ├── blob.rb
│   │   │   │   ├── collection.rb
│   │   │   │   ├── computed_permission.rb
│   │   │   │   ├── container.rb
│   │   │   │   ├── container_port.rb
│   │   │   │   ├── container_request.rb
│   │   │   │   ├── credential.rb
│   │   │   │   ├── database_seeds.rb
│   │   │   │   ├── frozen_group.rb
│   │   │   │   ├── group.rb
│   │   │   │   ├── jsonb_type.rb
│   │   │   │   ├── keep_service.rb
│   │   │   │   ├── link.rb
│   │   │   │   ├── log.rb
│   │   │   │   ├── trashed_group.rb
│   │   │   │   ├── user.rb
│   │   │   │   ├── user_agreement.rb
│   │   │   │   ├── virtual_machine.rb
│   │   │   │   └── workflow.rb
│   │   │   └── views/
│   │   │       ├── admin_notifier/
│   │   │       │   ├── new_inactive_user.text.erb
│   │   │       │   └── new_user.text.erb
│   │   │       ├── layouts/
│   │   │       │   └── application.html.erb
│   │   │       ├── profile_notifier/
│   │   │       │   └── profile_created.text.erb
│   │   │       ├── static/
│   │   │       │   ├── intro.html.erb
│   │   │       │   └── login_failure.html.erb
│   │   │       ├── user_notifier/
│   │   │       │   └── account_is_setup.text.erb
│   │   │       └── user_sessions/
│   │   │           ├── create.html.erb
│   │   │           └── failure.html.erb
│   │   ├── arvados-railsapi.service
│   │   ├── bin/
│   │   │   ├── bundle
│   │   │   ├── rails
│   │   │   ├── rake
│   │   │   ├── setup
│   │   │   └── update
│   │   ├── config/
│   │   │   ├── application.default.yml
│   │   │   ├── application.rb
│   │   │   ├── application.yml.example
│   │   │   ├── arvados_config.rb
│   │   │   ├── boot.rb
│   │   │   ├── cable.yml
│   │   │   ├── database.yml.example
│   │   │   ├── environment.rb
│   │   │   ├── environments/
│   │   │   │   ├── development.rb.example
│   │   │   │   ├── production.rb.example
│   │   │   │   └── test.rb.example
│   │   │   ├── initializers/
│   │   │   │   ├── andand.rb
│   │   │   │   ├── app_version.rb
│   │   │   │   ├── application_controller_renderer.rb
│   │   │   │   ├── assets.rb
│   │   │   │   ├── authorization.rb
│   │   │   │   ├── backtrace_silencers.rb
│   │   │   │   ├── clear_empty_content_type.rb
│   │   │   │   ├── common_api_template.rb
│   │   │   │   ├── content_security_policy.rb
│   │   │   │   ├── cookies_serializer.rb
│   │   │   │   ├── current_api_client.rb
│   │   │   │   ├── custom_types.rb
│   │   │   │   ├── db_current_time.rb
│   │   │   │   ├── db_timeout.rb
│   │   │   │   ├── filter_parameter_logging.rb
│   │   │   │   ├── fix_www_decode.rb
│   │   │   │   ├── inflections.rb
│   │   │   │   ├── kind_and_etag.rb
│   │   │   │   ├── lograge.rb
│   │   │   │   ├── mime_types.rb
│   │   │   │   ├── net_http.rb
│   │   │   │   ├── oj_mimic_json.rb
│   │   │   │   ├── permissions_policy.rb
│   │   │   │   ├── permit_all_parameters.rb
│   │   │   │   ├── reload_config.rb
│   │   │   │   ├── request_id_middleware.rb
│   │   │   │   ├── session_store.rb
│   │   │   │   ├── time_format.rb
│   │   │   │   ├── time_zone.rb
│   │   │   │   └── wrap_parameters.rb
│   │   │   ├── locales/
│   │   │   │   └── en.yml
│   │   │   ├── puma.rb
│   │   │   ├── routes.rb
│   │   │   ├── secrets.yml
│   │   │   ├── spring.rb
│   │   │   └── unbound.template
│   │   ├── config.ru
│   │   ├── db/
│   │   │   ├── migrate/
│   │   │   │   ├── 20121016005009_create_collections.rb
│   │   │   │   ├── 20130105203021_create_metadata.rb
│   │   │   │   ├── 20130105224358_rename_metadata_class.rb
│   │   │   │   ├── 20130105224618_rename_collection_created_by_client.rb
│   │   │   │   ├── 20130107181109_add_uuid_to_collections.rb
│   │   │   │   ├── 20130107212832_create_nodes.rb
│   │   │   │   ├── 20130109175700_create_pipelines.rb
│   │   │   │   ├── 20130109220548_create_pipeline_invocations.rb
│   │   │   │   ├── 20130113214204_add_index_to_collections_and_metadata.rb
│   │   │   │   ├── 20130116024233_create_specimens.rb
│   │   │   │   ├── 20130116215213_create_projects.rb
│   │   │   │   ├── 20130118002239_rename_metadata_attributes.rb
│   │   │   │   ├── 20130122020042_create_users.rb
│   │   │   │   ├── 20130122201442_create_logs.rb
│   │   │   │   ├── 20130122221616_add_modified_at_to_logs.rb
│   │   │   │   ├── 20130123174514_add_uuid_index_to_users.rb
│   │   │   │   ├── 20130123180224_create_api_clients.rb
│   │   │   │   ├── 20130123180228_create_api_client_authorizations.rb
│   │   │   │   ├── 20130125220425_rename_created_by_to_owner.rb
│   │   │   │   ├── 20130128202518_rename_metadata_to_links.rb
│   │   │   │   ├── 20130128231343_add_properties_to_specimen.rb
│   │   │   │   ├── 20130130205749_add_manifest_text_to_collection.rb
│   │   │   │   ├── 20130203104818_create_jobs.rb
│   │   │   │   ├── 20130203104824_create_job_steps.rb
│   │   │   │   ├── 20130203115329_add_priority_to_jobs.rb
│   │   │   │   ├── 20130207195855_add_index_on_timestamps.rb
│   │   │   │   ├── 20130218181504_add_properties_to_pipeline_invocations.rb
│   │   │   │   ├── 20130226170000_remove_native_target_from_links.rb
│   │   │   │   ├── 20130313175417_rename_projects_to_groups.rb
│   │   │   │   ├── 20130315155820_add_is_locked_by_to_jobs.rb
│   │   │   │   ├── 20130315183626_add_log_to_jobs.rb
│   │   │   │   ├── 20130315213205_add_tasks_summary_to_jobs.rb
│   │   │   │   ├── 20130318002138_add_resource_limits_to_jobs.rb
│   │   │   │   ├── 20130319165853_rename_job_command_to_script.rb
│   │   │   │   ├── 20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb
│   │   │   │   ├── 20130319194637_rename_pipelines_to_pipeline_templates.rb
│   │   │   │   ├── 20130319201431_rename_job_steps_to_job_tasks.rb
│   │   │   │   ├── 20130319235957_add_default_owner_to_users.rb
│   │   │   │   ├── 20130320000107_add_default_owner_to_api_client_authorizations.rb
│   │   │   │   ├── 20130326173804_create_commits.rb
│   │   │   │   ├── 20130326182917_create_commit_ancestors.rb
│   │   │   │   ├── 20130415020241_rename_orvos_to_arvados.rb
│   │   │   │   ├── 20130425024459_create_keep_disks.rb
│   │   │   │   ├── 20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb
│   │   │   │   ├── 20130523060112_add_created_by_job_task_to_job_tasks.rb
│   │   │   │   ├── 20130523060213_add_qsequence_to_job_tasks.rb
│   │   │   │   ├── 20130524042319_fix_job_task_qsequence_type.rb
│   │   │   │   ├── 20130528134100_update_nodes_index.rb
│   │   │   │   ├── 20130606183519_create_authorized_keys.rb
│   │   │   │   ├── 20130608053730_create_virtual_machines.rb
│   │   │   │   ├── 20130610202538_create_repositories.rb
│   │   │   │   ├── 20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb
│   │   │   │   ├── 20130612042554_add_name_unique_index_to_repositories.rb
│   │   │   │   ├── 20130617150007_add_is_trusted_to_api_clients.rb
│   │   │   │   ├── 20130626002829_add_is_active_to_users.rb
│   │   │   │   ├── 20130626022810_activate_all_admins.rb
│   │   │   │   ├── 20130627154537_create_traits.rb
│   │   │   │   ├── 20130627184333_create_humans.rb
│   │   │   │   ├── 20130708163414_rename_foreign_uuid_attributes.rb
│   │   │   │   ├── 20130708182912_rename_job_foreign_uuid_attributes.rb
│   │   │   │   ├── 20130708185153_rename_user_default_owner.rb
│   │   │   │   ├── 20130724153034_add_scopes_to_api_client_authorizations.rb
│   │   │   │   ├── 20131007180607_rename_resource_limits_to_runtime_constraints.rb
│   │   │   │   ├── 20140117231056_normalize_collection_uuid.rb
│   │   │   │   ├── 20140124222114_fix_link_kind_underscores.rb
│   │   │   │   ├── 20140129184311_normalize_collection_uuids_in_script_parameters.rb
│   │   │   │   ├── 20140317135600_add_nondeterministic_column_to_job.rb
│   │   │   │   ├── 20140319160547_separate_repository_from_script_version.rb
│   │   │   │   ├── 20140321191343_add_repository_column_to_job.rb
│   │   │   │   ├── 20140324024606_add_output_is_persistent_to_job.rb
│   │   │   │   ├── 20140325175653_remove_kind_columns.rb
│   │   │   │   ├── 20140402001908_add_system_group.rb
│   │   │   │   ├── 20140407184311_rename_log_info_to_properties.rb
│   │   │   │   ├── 20140421140924_add_group_class_to_groups.rb
│   │   │   │   ├── 20140421151939_rename_auth_keys_user_index.rb
│   │   │   │   ├── 20140421151940_timestamps_not_null.rb
│   │   │   │   ├── 20140422011506_pipeline_instance_state.rb
│   │   │   │   ├── 20140423132913_add_object_owner_to_logs.rb
│   │   │   │   ├── 20140423133559_new_scope_format.rb
│   │   │   │   ├── 20140501165548_add_unique_name_index_to_links.rb
│   │   │   │   ├── 20140519205916_create_keep_services.rb
│   │   │   │   ├── 20140527152921_add_description_to_pipeline_templates.rb
│   │   │   │   ├── 20140530200539_add_supplied_script_version.rb
│   │   │   │   ├── 20140601022548_remove_name_from_collections.rb
│   │   │   │   ├── 20140602143352_remove_active_and_success_from_pipeline_instances.rb
│   │   │   │   ├── 20140607150616_rename_folder_to_project.rb
│   │   │   │   ├── 20140611173003_add_docker_locator_to_jobs.rb
│   │   │   │   ├── 20140627210837_anonymous_group.rb
│   │   │   │   ├── 20140709172343_job_task_serial_qsequence.rb
│   │   │   │   ├── 20140714184006_empty_collection.rb
│   │   │   │   ├── 20140811184643_collection_use_regular_uuids.rb
│   │   │   │   ├── 20140817035914_add_unique_name_constraints.rb
│   │   │   │   ├── 20140818125735_add_not_null_constraint_to_group_name.rb
│   │   │   │   ├── 20140826180337_remove_output_is_persistent_column.rb
│   │   │   │   ├── 20140828141043_job_priority_fixup.rb
│   │   │   │   ├── 20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb
│   │   │   │   ├── 20140911221252_add_description_to_pipeline_instances_and_jobs.rb
│   │   │   │   ├── 20140918141529_change_user_owner_uuid_not_null.rb
│   │   │   │   ├── 20140918153541_add_properties_to_node.rb
│   │   │   │   ├── 20140918153705_add_state_to_job.rb
│   │   │   │   ├── 20140924091559_add_job_uuid_to_nodes.rb
│   │   │   │   ├── 20141111133038_add_arvados_sdk_version_to_jobs.rb
│   │   │   │   ├── 20141208164553_owner_uuid_index.rb
│   │   │   │   ├── 20141208174553_descriptions_are_strings.rb
│   │   │   │   ├── 20141208174653_collection_file_names.rb
│   │   │   │   ├── 20141208185217_search_index.rb
│   │   │   │   ├── 20150122175935_no_description_in_search_index.rb
│   │   │   │   ├── 20150123142953_full_text_search.rb
│   │   │   │   ├── 20150203180223_set_group_class_on_anonymous_group.rb
│   │   │   │   ├── 20150206210804_all_users_can_read_anonymous_group.rb
│   │   │   │   ├── 20150206230342_rename_replication_attributes.rb
│   │   │   │   ├── 20150216193428_collection_name_owner_unique_only_non_expired.rb
│   │   │   │   ├── 20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb
│   │   │   │   ├── 20150312151136_change_collection_expires_at_to_datetime.rb
│   │   │   │   ├── 20150317132720_add_username_to_users.rb
│   │   │   │   ├── 20150324152204_backward_compatibility_for_user_repositories.rb
│   │   │   │   ├── 20150423145759_no_filenames_in_collection_search_index.rb
│   │   │   │   ├── 20150512193020_read_only_on_keep_services.rb
│   │   │   │   ├── 20150526180251_leading_space_on_full_text_index.rb
│   │   │   │   ├── 20151202151426_create_containers_and_requests.rb
│   │   │   │   ├── 20151215134304_fix_containers_index.rb
│   │   │   │   ├── 20151229214707_add_exit_code_to_containers.rb
│   │   │   │   ├── 20160208210629_add_uuid_to_api_client_authorization.rb
│   │   │   │   ├── 20160209155729_add_uuid_to_api_token_search_index.rb
│   │   │   │   ├── 20160324144017_add_components_to_job.rb
│   │   │   │   ├── 20160506175108_add_auths_to_container.rb
│   │   │   │   ├── 20160509143250_add_auth_and_lock_to_container_index.rb
│   │   │   │   ├── 20160808151559_create_workflows.rb
│   │   │   │   ├── 20160819195557_add_script_parameters_digest_to_jobs.rb
│   │   │   │   ├── 20160819195725_populate_script_parameters_digest.rb
│   │   │   │   ├── 20160901210110_repair_script_parameters_digest.rb
│   │   │   │   ├── 20160909181442_rename_workflow_to_definition.rb
│   │   │   │   ├── 20160926194129_add_container_count.rb
│   │   │   │   ├── 20161019171346_add_use_existing_to_container_requests.rb
│   │   │   │   ├── 20161111143147_add_scheduling_parameters_to_container.rb
│   │   │   │   ├── 20161115171221_add_output_and_log_uuid_to_container_request.rb
│   │   │   │   ├── 20161115174218_add_output_and_log_uuids_to_container_request_search_index.rb
│   │   │   │   ├── 20161213172944_full_text_search_indexes.rb
│   │   │   │   ├── 20161222153434_split_expiry_to_trash_and_delete.rb
│   │   │   │   ├── 20161223090712_add_output_name_to_container_requests.rb
│   │   │   │   ├── 20170102153111_add_output_name_to_container_request_search_index.rb
│   │   │   │   ├── 20170105160301_add_output_name_to_cr_fts_index.rb
│   │   │   │   ├── 20170105160302_set_finished_at_on_finished_pipeline_instances.rb
│   │   │   │   ├── 20170216170823_no_cr_mounts_and_workflow_def_in_full_text_search_index.rb
│   │   │   │   ├── 20170301225558_no_downgrade_after_json.rb
│   │   │   │   ├── 20170319063406_serialized_columns_accept_null.rb
│   │   │   │   ├── 20170328215436_add_portable_data_hash_index_to_collections.rb
│   │   │   │   ├── 20170330012505_add_output_ttl_to_container_requests.rb
│   │   │   │   ├── 20170419173031_add_created_by_job_task_index_to_job_tasks.rb
│   │   │   │   ├── 20170419173712_add_object_owner_index_to_logs.rb
│   │   │   │   ├── 20170419175801_add_requesting_container_index_to_container_requests.rb
│   │   │   │   ├── 20170628185847_jobs_yaml_to_json.rb
│   │   │   │   ├── 20170704160233_yaml_to_json.rb
│   │   │   │   ├── 20170706141334_json_collection_properties.rb
│   │   │   │   ├── 20170824202826_trashable_groups.rb
│   │   │   │   ├── 20170906224040_materialized_permission_view.rb
│   │   │   │   ├── 20171027183824_add_index_to_containers.rb
│   │   │   │   ├── 20171208203841_fix_trash_flag_follow.rb
│   │   │   │   ├── 20171212153352_add_gin_index_to_collection_properties.rb
│   │   │   │   ├── 20180216203422_add_storage_classes_to_collections.rb
│   │   │   │   ├── 20180228220311_add_secret_mounts_to_containers.rb
│   │   │   │   ├── 20180313180114_change_container_priority_bigint.rb
│   │   │   │   ├── 20180501182859_add_redirect_to_user_uuid_to_users.rb
│   │   │   │   ├── 20180514135529_add_container_auth_uuid_index.rb
│   │   │   │   ├── 20180607175050_properties_to_jsonb.rb
│   │   │   │   ├── 20180608123145_add_properties_to_groups.rb
│   │   │   │   ├── 20180806133039_index_all_filenames.rb
│   │   │   │   ├── 20180820130357_add_pdh_and_trash_index_to_collections.rb
│   │   │   │   ├── 20180820132617_add_lock_index_to_containers.rb
│   │   │   │   ├── 20180820135808_drop_pdh_index_from_collections.rb
│   │   │   │   ├── 20180824152014_add_md5_index_to_containers.rb
│   │   │   │   ├── 20180824155207_add_queue_index_to_containers.rb
│   │   │   │   ├── 20180904110712_add_runtime_status_to_containers.rb
│   │   │   │   ├── 20180913175443_add_version_info_to_collections.rb
│   │   │   │   ├── 20180915155335_set_current_version_uuid_on_collections.rb
│   │   │   │   ├── 20180917200000_replace_full_text_indexes.rb
│   │   │   │   ├── 20180917205609_recompute_file_names_index.rb
│   │   │   │   ├── 20180919001158_recreate_collection_unique_name_index.rb
│   │   │   │   ├── 20181001175023_add_preserve_version_to_collections.rb
│   │   │   │   ├── 20181004131141_add_current_version_uuid_to_collection_search_index.rb
│   │   │   │   ├── 20181005192222_add_container_runtime_token.rb
│   │   │   │   ├── 20181011184200_add_runtime_token_to_container.rb
│   │   │   │   ├── 20181213183234_add_expression_index_to_links.rb
│   │   │   │   ├── 20190214214814_add_container_lock_count.rb
│   │   │   │   ├── 20190322174136_add_file_info_to_collection.rb
│   │   │   │   ├── 20190422144631_fill_missing_modified_at.rb
│   │   │   │   ├── 20190523180148_add_trigram_index_for_text_search.rb
│   │   │   │   ├── 20190808145904_drop_commit_ancestors.rb
│   │   │   │   ├── 20190809135453_remove_commits_table.rb
│   │   │   │   ├── 20190905151603_enforce_unique_identity_url.rb
│   │   │   │   ├── 20200501150153_permission_table.rb
│   │   │   │   ├── 20200602141328_fix_roles_projects.rb
│   │   │   │   ├── 20200914203202_public_favorites_project.rb
│   │   │   │   ├── 20201103170213_refresh_trashed_groups.rb
│   │   │   │   ├── 20201105190435_refresh_permissions.rb
│   │   │   │   ├── 20201202174753_fix_collection_versions_timestamps.rb
│   │   │   │   ├── 20210108033940_add_gateway_address_to_containers.rb
│   │   │   │   ├── 20210126183521_add_interactive_session_started_to_containers.rb
│   │   │   │   ├── 20210621204455_add_container_output_storage_class.rb
│   │   │   │   ├── 20210816191509_drop_fts_index.rb
│   │   │   │   ├── 20211027154300_delete_disabled_user_tokens_and_keys.rb
│   │   │   │   ├── 20220224203102_add_frozen_by_uuid_to_groups.rb
│   │   │   │   ├── 20220301155729_frozen_groups.rb
│   │   │   │   ├── 20220303204419_add_frozen_by_uuid_to_group_search_index.rb
│   │   │   │   ├── 20220401153101_fix_created_at_indexes.rb
│   │   │   │   ├── 20220505112900_add_output_properties.rb
│   │   │   │   ├── 20220726034131_write_via_all_users.rb
│   │   │   │   ├── 20220804133317_add_cost_to_containers.rb
│   │   │   │   ├── 20221219165512_dedup_permission_links.rb
│   │   │   │   ├── 20221230155924_bigint_id.rb
│   │   │   │   ├── 20230421142716_add_name_index_to_collections_and_groups.rb
│   │   │   │   ├── 20230503224107_priority_update_functions.rb
│   │   │   │   ├── 20230815160000_jsonb_exists_functions.rb
│   │   │   │   ├── 20230821000000_priority_update_fix.rb
│   │   │   │   ├── 20230922000000_add_btree_name_index_to_collections_and_groups.rb
│   │   │   │   ├── 20231013000000_compute_permission_index.rb
│   │   │   │   ├── 20240329173437_add_output_glob_to_containers.rb
│   │   │   │   ├── 20240402162733_add_output_glob_index_to_containers.rb
│   │   │   │   ├── 20240604183200_exclude_uuids_and_hashes_from_text_search.rb
│   │   │   │   ├── 20240618121312_create_uuid_locks.rb
│   │   │   │   ├── 20240627201747_set_default_api_client_id.rb
│   │   │   │   ├── 20240820202230_exclude_container_image_from_text_search.rb
│   │   │   │   ├── 20241118110000_index_on_container_request_name.rb
│   │   │   │   ├── 20250115145250_drop_fts_index_again.rb
│   │   │   │   ├── 20250312141843_add_refreshes_at_to_api_client_authorizations.rb
│   │   │   │   ├── 20250315222222_add_services_and_published_ports.rb
│   │   │   │   ├── 20250402131700_add_collection_uuid_to_workflows.rb
│   │   │   │   ├── 20250422103000_create_credentials_table.rb
│   │   │   │   ├── 20250426201300_priority_update_check_trash_at.rb
│   │   │   │   ├── 20250527181323_add_container_ports.rb
│   │   │   │   └── 20251006181234_enforce_required_credential_fields.rb
│   │   │   ├── seeds.rb
│   │   │   └── structure.sql
│   │   ├── fpm-info.sh
│   │   ├── lib/
│   │   │   ├── 20200501150153_permission_table_constants.rb
│   │   │   ├── app_version.rb
│   │   │   ├── arvados_model_updates.rb
│   │   │   ├── assets/
│   │   │   │   └── .gitkeep
│   │   │   ├── audit_logs.rb
│   │   │   ├── can_be_an_owner.rb
│   │   │   ├── common_api_template.rb
│   │   │   ├── config_loader.rb
│   │   │   ├── create_ancestor_view.sql
│   │   │   ├── create_permission_view.sql
│   │   │   ├── current_api_client.rb
│   │   │   ├── db_current_time.rb
│   │   │   ├── fix_collection_versions_timestamps.rb
│   │   │   ├── fix_roles_projects.rb
│   │   │   ├── group_pdhs.rb
│   │   │   ├── has_uuid.rb
│   │   │   ├── kind_and_etag.rb
│   │   │   ├── load_param.rb
│   │   │   ├── log_reuse_info.rb
│   │   │   ├── migrate_yaml_to_json.rb
│   │   │   ├── record_filters.rb
│   │   │   ├── request_error.rb
│   │   │   ├── safe_json.rb
│   │   │   ├── safer_file_store.rb
│   │   │   ├── salvage_collection.rb
│   │   │   ├── serializers.rb
│   │   │   ├── tasks/
│   │   │   │   ├── .gitkeep
│   │   │   │   ├── config.rake
│   │   │   │   ├── delete_old_container_logs.rake
│   │   │   │   ├── manage_long_lived_tokens.rake
│   │   │   │   ├── statement_timeout.rake
│   │   │   │   └── test_tasks.rake
│   │   │   ├── trashable.rb
│   │   │   ├── update_permissions.rb
│   │   │   ├── update_priorities.rb
│   │   │   ├── validate_serialized.rb
│   │   │   └── whitelist_update.rb
│   │   ├── public/
│   │   │   ├── 404.html
│   │   │   ├── 422.html
│   │   │   ├── 500.html
│   │   │   └── robots.txt
│   │   ├── script/
│   │   │   ├── populate-file-info-columns-in-collections.rb
│   │   │   ├── rails
│   │   │   ├── rake_test.sh
│   │   │   ├── restart-dns-server
│   │   │   ├── salvage_collection.rb
│   │   │   └── setup-new-user.rb
│   │   └── test/
│   │       ├── factories/
│   │       │   ├── api_client_authorization.rb
│   │       │   ├── group.rb
│   │       │   ├── link.rb
│   │       │   └── user.rb
│   │       ├── fixtures/
│   │       │   ├── .gitkeep
│   │       │   ├── api_client_authorizations.yml
│   │       │   ├── authorized_keys.yml
│   │       │   ├── collections.yml
│   │       │   ├── container_requests.yml
│   │       │   ├── containers.yml
│   │       │   ├── files/
│   │       │   │   └── proc_stat
│   │       │   ├── groups.yml
│   │       │   ├── keep_services.yml
│   │       │   ├── links.yml
│   │       │   ├── logs.yml
│   │       │   ├── users.yml
│   │       │   ├── virtual_machines.yml
│   │       │   └── workflows.yml
│   │       ├── functional/
│   │       │   ├── .gitkeep
│   │       │   ├── application_controller_test.rb
│   │       │   ├── arvados/
│   │       │   │   └── v1/
│   │       │   │       ├── api_client_authorizations_controller_test.rb
│   │       │   │       ├── authorized_keys_controller_test.rb
│   │       │   │       ├── collections_controller_test.rb
│   │       │   │       ├── computed_permissions_controller_test.rb
│   │       │   │       ├── container_requests_controller_test.rb
│   │       │   │       ├── containers_controller_test.rb
│   │       │   │       ├── filters_test.rb
│   │       │   │       ├── groups_controller_test.rb
│   │       │   │       ├── keep_services_controller_test.rb
│   │       │   │       ├── links_controller_test.rb
│   │       │   │       ├── logs_controller_test.rb
│   │       │   │       ├── management_controller_test.rb
│   │       │   │       ├── query_test.rb
│   │       │   │       ├── schema_controller_test.rb
│   │       │   │       ├── user_agreements_controller_test.rb
│   │       │   │       ├── users_controller_test.rb
│   │       │   │       └── virtual_machines_controller_test.rb
│   │       │   ├── database_controller_test.rb
│   │       │   ├── sys_controller_test.rb
│   │       │   └── user_sessions_controller_test.rb
│   │       ├── helpers/
│   │       │   ├── container_test_helper.rb
│   │       │   ├── docker_migration_helper.rb
│   │       │   ├── manifest_examples.rb
│   │       │   ├── time_block.rb
│   │       │   └── users_test_helper.rb
│   │       ├── integration/
│   │       │   ├── .gitkeep
│   │       │   ├── api_client_authorizations_api_test.rb
│   │       │   ├── api_client_authorizations_scopes_test.rb
│   │       │   ├── bundler_version_test.rb
│   │       │   ├── collections_api_test.rb
│   │       │   ├── 
collections_performance_test.rb │ │ │ ├── computed_permissions_test.rb │ │ │ ├── container_auth_test.rb │ │ │ ├── container_dispatch_test.rb │ │ │ ├── container_request_test.rb │ │ │ ├── credentials_test.rb │ │ │ ├── cross_origin_test.rb │ │ │ ├── database_reset_test.rb │ │ │ ├── discovery_document_test.rb │ │ │ ├── errors_test.rb │ │ │ ├── groups_test.rb │ │ │ ├── http_quirks_test.rb │ │ │ ├── keep_proxy_test.rb │ │ │ ├── logging_test.rb │ │ │ ├── login_workflow_test.rb │ │ │ ├── noop_deep_munge_test.rb │ │ │ ├── passenger_config_test.rb │ │ │ ├── permissions_test.rb │ │ │ ├── pipeline_test.rb │ │ │ ├── reader_tokens_test.rb │ │ │ ├── remote_user_test.rb │ │ │ ├── select_test.rb │ │ │ ├── serialized_encoding_test.rb │ │ │ ├── user_sessions_test.rb │ │ │ ├── users_test.rb │ │ │ ├── valid_links_test.rb │ │ │ └── workflows_test.rb │ │ ├── performance/ │ │ │ ├── links_index_test.rb │ │ │ └── permission_test.rb │ │ ├── test_helper.rb │ │ └── unit/ │ │ ├── .gitkeep │ │ ├── api_client_authorization_test.rb │ │ ├── app_version_test.rb │ │ ├── application_test.rb │ │ ├── arvados_model_test.rb │ │ ├── authorized_key_test.rb │ │ ├── blob_test.rb │ │ ├── collection_performance_test.rb │ │ ├── collection_test.rb │ │ ├── container_request_test.rb │ │ ├── container_test.rb │ │ ├── credential_test.rb │ │ ├── group_pdhs_test.rb │ │ ├── group_test.rb │ │ ├── keep_disk_test.rb │ │ ├── keep_service_test.rb │ │ ├── link_test.rb │ │ ├── log_test.rb │ │ ├── owner_test.rb │ │ ├── permission_test.rb │ │ ├── salvage_collection_test.rb │ │ ├── seralizer_test.rb │ │ ├── time_zone_test.rb │ │ ├── user_notifier_test.rb │ │ ├── user_test.rb │ │ ├── virtual_machine_test.rb │ │ └── workflow_test.rb │ ├── crunch-dispatch-slurm/ │ │ ├── crunch-dispatch-slurm.go │ │ ├── crunch-dispatch-slurm_test.go │ │ ├── node_type.go │ │ ├── priority.go │ │ ├── priority_test.go │ │ ├── script.go │ │ ├── script_test.go │ │ ├── slurm.go │ │ ├── squeue.go │ │ ├── squeue_test.go │ │ └── usage.go │ ├── dockercleaner/ │ │ ├── MANIFEST.in │ │ ├── README.rst │ │ ├── agpl-3.0.txt │ │ ├── arvados-docker-cleaner.service │ │ ├── arvados_docker/ │ │ │ ├── __init__.py │ │ │ └── cleaner.py │ │ ├── arvados_version.py │ │ ├── bin/ │ │ │ └── arvados-docker-cleaner │ │ ├── pyproject.toml │ │ ├── setup.py │ │ └── tests/ │ │ ├── __init__.py │ │ └── test_cleaner.py │ ├── fuse/ │ │ ├── MANIFEST.in │ │ ├── README.rst │ │ ├── agpl-3.0.txt │ │ ├── arvados_fuse/ │ │ │ ├── __init__.py │ │ │ ├── command.py │ │ │ ├── crunchstat.py │ │ │ ├── fresh.py │ │ │ ├── fusedir.py │ │ │ ├── fusefile.py │ │ │ └── unmount.py │ │ ├── arvados_version.py │ │ ├── bin/ │ │ │ └── arv-mount │ │ ├── fpm-info.sh │ │ ├── pyproject.toml │ │ ├── setup.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── fstest.py │ │ ├── integration_test.py │ │ ├── mount_test_base.py │ │ ├── prof.py │ │ ├── test_cache.py │ │ ├── test_command_args.py │ │ ├── test_concurrency.py │ │ ├── test_crunchstat.py │ │ ├── test_exec.py │ │ ├── test_inodes.py │ │ ├── test_mount.py │ │ ├── test_mount_filters.py │ │ ├── test_mount_type.py │ │ ├── test_retry.py │ │ ├── test_tmp_collection.py │ │ ├── test_token_expiry.py │ │ └── test_unmount.py │ ├── keep-balance/ │ │ ├── balance.go │ │ ├── balance_run_test.go │ │ ├── balance_test.go │ │ ├── block_state.go │ │ ├── block_state_test.go │ │ ├── change_set.go │ │ ├── change_set_test.go │ │ ├── collection.go │ │ ├── collection_test.go │ │ ├── integration_test.go │ │ ├── keep_service.go │ │ ├── main.go │ │ ├── main_test.go │ │ ├── metrics.go │ │ └── server.go │ ├── keepproxy/ │ │ ├── 
keepproxy.go │ │ ├── keepproxy_test.go │ │ ├── pkg-extras/ │ │ │ └── etc/ │ │ │ ├── default/ │ │ │ │ └── keepproxy │ │ │ └── init.d/ │ │ │ └── keepproxy │ │ └── proxy_client.go │ ├── login-sync/ │ │ ├── .gitignore │ │ ├── Gemfile │ │ ├── Rakefile │ │ ├── agpl-3.0.txt │ │ ├── arvados-login-sync.gemspec │ │ ├── bin/ │ │ │ └── arvados-login-sync │ │ └── test/ │ │ ├── binstub_new_user/ │ │ │ └── useradd │ │ ├── stubs.rb │ │ └── test_add_user.rb │ ├── workbench2/ │ │ ├── .gitignore │ │ ├── .npmrc │ │ ├── .yarn/ │ │ │ └── releases/ │ │ │ └── yarn-3.2.0.cjs │ │ ├── .yarnrc │ │ ├── .yarnrc.yml │ │ ├── AUTHORS │ │ ├── COPYING │ │ ├── Makefile │ │ ├── README.md │ │ ├── __mocks__/ │ │ │ └── popper.js.js │ │ ├── agpl-3.0.txt │ │ ├── apache-2.0.txt │ │ ├── cc-by-sa-3.0.txt │ │ ├── config/ │ │ │ ├── env.js │ │ │ ├── getHttpsConfig.js │ │ │ ├── jest/ │ │ │ │ ├── babelTransform.js │ │ │ │ ├── cssTransform.js │ │ │ │ └── fileTransform.js │ │ │ ├── modules.js │ │ │ ├── paths.js │ │ │ ├── webpack/ │ │ │ │ └── persistentCache/ │ │ │ │ └── createEnvironmentHash.js │ │ │ ├── webpack.config.js │ │ │ └── webpackDevServer.config.js │ │ ├── cypress/ │ │ │ ├── e2e/ │ │ │ │ ├── auth-action.cy.js │ │ │ │ ├── auth-middleware.cy.js │ │ │ │ ├── banner-tooltip.cy.js │ │ │ │ ├── collection.cy.js │ │ │ │ ├── context-menu.cy.js │ │ │ │ ├── create-workflow.cy.js │ │ │ │ ├── dashboard.cy.js │ │ │ │ ├── delete-multiple-files.cy.js │ │ │ │ ├── details-card.cy.js │ │ │ │ ├── details-panel.cy.js │ │ │ │ ├── external-credentials.cy.js │ │ │ │ ├── favorites.cy.js │ │ │ │ ├── group-manage.cy.js │ │ │ │ ├── login.cy.js │ │ │ │ ├── multiselect-toolbar.cy.js │ │ │ │ ├── page-not-found.cy.js │ │ │ │ ├── process.cy.js │ │ │ │ ├── project.cy.js │ │ │ │ ├── search.cy.js │ │ │ │ ├── sharing.cy.js │ │ │ │ ├── side-panel.cy.js │ │ │ │ ├── trash.cy.js │ │ │ │ ├── user-preferences.cy.js │ │ │ │ ├── user-profile.cy.js │ │ │ │ ├── virtual-machine-admin.cy.js │ │ │ │ └── workflow.cy.js │ │ │ ├── fixtures/ │ │ │ │ ├── .gitkeep │ │ │ │ ├── files/ │ │ │ │ │ ├── banner.html │ │ │ │ │ └── tooltips.txt │ │ │ │ ├── webdav-propfind-outputs.xml │ │ │ │ ├── workflow-with-optional-inputs.yaml │ │ │ │ ├── workflow_directory_array.yaml │ │ │ │ ├── workflow_with_array_fields.yaml │ │ │ │ ├── workflow_with_default_array_fields.yaml │ │ │ │ └── workflow_with_secret_input.yaml │ │ │ ├── plugins/ │ │ │ │ └── index.js │ │ │ └── support/ │ │ │ ├── commands.js │ │ │ ├── commands.ts │ │ │ ├── component-index.html │ │ │ ├── component.ts │ │ │ ├── e2e.js │ │ │ ├── index.d.ts │ │ │ └── msToolbarTooltips.js │ │ ├── cypress.config.ts │ │ ├── etc/ │ │ │ └── arvados/ │ │ │ └── workbench2/ │ │ │ └── workbench2.example.json │ │ ├── package.json │ │ ├── public/ │ │ │ ├── file-viewers-example.json │ │ │ ├── index.html │ │ │ ├── manifest.json │ │ │ └── webshell/ │ │ │ ├── README │ │ │ ├── index.html │ │ │ ├── keyboard.html │ │ │ ├── shell_in_a_box.js │ │ │ └── styles.css │ │ ├── scripts/ │ │ │ ├── build.js │ │ │ └── start.js │ │ ├── src/ │ │ │ ├── common/ │ │ │ │ ├── app-info.ts │ │ │ │ ├── array-utils.ts │ │ │ │ ├── codes.ts │ │ │ │ ├── config.ts │ │ │ │ ├── custom-theme.ts │ │ │ │ ├── file.ts │ │ │ │ ├── formatters.cy.js │ │ │ │ ├── formatters.ts │ │ │ │ ├── frozen-resources.ts │ │ │ │ ├── getuser.ts │ │ │ │ ├── html-sanitize.ts │ │ │ │ ├── labels.ts │ │ │ │ ├── link-update-name.ts │ │ │ │ ├── menu-action-set-actions.ts │ │ │ │ ├── objects.ts │ │ │ │ ├── plugintypes.ts │ │ │ │ ├── redirect-to.ts │ │ │ │ ├── regexp.ts │ │ │ │ ├── resource-to-menu-kind.ts │ │ │ │ ├── 
resource-to-menukind.cy.js │ │ │ │ ├── service-provider.ts │ │ │ │ ├── unionize.ts │ │ │ │ ├── url.cy.js │ │ │ │ ├── url.ts │ │ │ │ ├── use-async-interval.cy.js │ │ │ │ ├── use-async-interval.ts │ │ │ │ ├── usePrevious.tsx │ │ │ │ ├── useStateWithValidation.ts │ │ │ │ ├── webdav.cy.js │ │ │ │ ├── webdav.ts │ │ │ │ └── xml.ts │ │ │ ├── components/ │ │ │ │ ├── autocomplete/ │ │ │ │ │ └── autocomplete.tsx │ │ │ │ ├── breadcrumbs/ │ │ │ │ │ ├── breadcrumbs.cy.js │ │ │ │ │ └── breadcrumbs.tsx │ │ │ │ ├── checkbox-field/ │ │ │ │ │ └── checkbox-field.tsx │ │ │ │ ├── chips/ │ │ │ │ │ └── chips.tsx │ │ │ │ ├── chips-input/ │ │ │ │ │ └── chips-input.tsx │ │ │ │ ├── code-snippet/ │ │ │ │ │ ├── code-snippet.tsx │ │ │ │ │ └── virtual-code-snippet.tsx │ │ │ │ ├── collection-panel-files/ │ │ │ │ │ └── collection-panel-files.tsx │ │ │ │ ├── column-selector/ │ │ │ │ │ ├── column-selector.cy.js │ │ │ │ │ └── column-selector.tsx │ │ │ │ ├── conditional-tabs/ │ │ │ │ │ ├── conditional-tabs.cy.js │ │ │ │ │ └── conditional-tabs.tsx │ │ │ │ ├── confirmation-dialog/ │ │ │ │ │ └── confirmation-dialog.tsx │ │ │ │ ├── context-menu/ │ │ │ │ │ └── context-menu.tsx │ │ │ │ ├── copy-to-clipboard/ │ │ │ │ │ └── copy-result-to-clipboard.ts │ │ │ │ ├── copy-to-clipboard-snackbar/ │ │ │ │ │ └── copy-to-clipboard-snackbar.tsx │ │ │ │ ├── dashboard/ │ │ │ │ │ ├── dashboard-item-row.tsx │ │ │ │ │ ├── dashboard.tsx │ │ │ │ │ ├── favorite-pins/ │ │ │ │ │ │ ├── favorite-pins-item.tsx │ │ │ │ │ │ └── favorite-pins-section.tsx │ │ │ │ │ ├── recent-workflow-runs.tsx │ │ │ │ │ └── recently-visited.tsx │ │ │ │ ├── data-explorer/ │ │ │ │ │ ├── data-explorer.cy.js │ │ │ │ │ └── data-explorer.tsx │ │ │ │ ├── data-table/ │ │ │ │ │ ├── data-column.ts │ │ │ │ │ ├── data-table.cy.js │ │ │ │ │ └── data-table.tsx │ │ │ │ ├── data-table-default-view/ │ │ │ │ │ └── data-table-default-view.tsx │ │ │ │ ├── data-table-filters/ │ │ │ │ │ ├── data-table-filters-popover.cy.js │ │ │ │ │ ├── data-table-filters-popover.tsx │ │ │ │ │ ├── data-table-filters-tree.tsx │ │ │ │ │ └── data-table-filters.ts │ │ │ │ ├── data-table-multiselect-popover/ │ │ │ │ │ └── data-table-multiselect-popover.tsx │ │ │ │ ├── date-picker/ │ │ │ │ │ ├── date-picker.cy.js │ │ │ │ │ └── date-picker.tsx │ │ │ │ ├── default-code-snippet/ │ │ │ │ │ ├── default-code-snippet.tsx │ │ │ │ │ └── default-virtual-code-snippet.tsx │ │ │ │ ├── default-view/ │ │ │ │ │ └── default-view.tsx │ │ │ │ ├── details-attribute/ │ │ │ │ │ └── details-attribute.tsx │ │ │ │ ├── dialog-actions/ │ │ │ │ │ └── dialog-actions.tsx │ │ │ │ ├── dialog-form/ │ │ │ │ │ ├── dialog-form.tsx │ │ │ │ │ └── dialog-text-field.tsx │ │ │ │ ├── dropdown-menu/ │ │ │ │ │ ├── dropdown-menu.cy.js │ │ │ │ │ └── dropdown-menu.tsx │ │ │ │ ├── expand-chevron-right/ │ │ │ │ │ └── expand-chevron-right.tsx │ │ │ │ ├── file-tree/ │ │ │ │ │ ├── file-thumbnail.cy.js │ │ │ │ │ ├── file-thumbnail.tsx │ │ │ │ │ ├── file-tree-data.ts │ │ │ │ │ └── file-tree-item.tsx │ │ │ │ ├── file-upload/ │ │ │ │ │ ├── file-upload.tsx │ │ │ │ │ └── upload-input.tsx │ │ │ │ ├── float-input/ │ │ │ │ │ └── float-input.tsx │ │ │ │ ├── form-dialog/ │ │ │ │ │ └── form-dialog.tsx │ │ │ │ ├── form-field/ │ │ │ │ │ └── form-field.tsx │ │ │ │ ├── icon/ │ │ │ │ │ └── icon.tsx │ │ │ │ ├── int-input/ │ │ │ │ │ └── int-input.tsx │ │ │ │ ├── list-item-text-icon/ │ │ │ │ │ └── list-item-text-icon.tsx │ │ │ │ ├── loading/ │ │ │ │ │ ├── circular-suspense.tsx │ │ │ │ │ ├── inline-pulser.tsx │ │ │ │ │ └── three-dots.tsx │ │ │ │ ├── loading-indicator/ │ │ │ │ │ └── 
loading-indicator.tsx │ │ │ │ ├── multi-panel-view/ │ │ │ │ │ ├── multi-panel-view.cy.js │ │ │ │ │ └── multi-panel-view.tsx │ │ │ │ ├── multiselect-toolbar/ │ │ │ │ │ ├── MultiselectToolbar.tsx │ │ │ │ │ ├── MultiselectToolbar.utils.ts │ │ │ │ │ ├── ms-toolbar-overflow-menu.tsx │ │ │ │ │ └── ms-toolbar-overflow-wrapper.tsx │ │ │ │ ├── overview-panel/ │ │ │ │ │ └── overview-panel.tsx │ │ │ │ ├── popover/ │ │ │ │ │ ├── helpers.ts │ │ │ │ │ ├── popover.cy.js │ │ │ │ │ └── popover.tsx │ │ │ │ ├── progress-button/ │ │ │ │ │ └── progress-button.tsx │ │ │ │ ├── radio-field/ │ │ │ │ │ └── radio-field.tsx │ │ │ │ ├── refresh-button/ │ │ │ │ │ ├── refresh-button.cy.js │ │ │ │ │ └── refresh-button.tsx │ │ │ │ ├── rich-text-editor-link/ │ │ │ │ │ └── rich-text-editor-link.tsx │ │ │ │ ├── search-input/ │ │ │ │ │ ├── search-input.cy.js │ │ │ │ │ └── search-input.tsx │ │ │ │ ├── select-field/ │ │ │ │ │ └── select-field.tsx │ │ │ │ ├── string-array-input/ │ │ │ │ │ ├── string-array-mui-input.cy.js │ │ │ │ │ └── string-array-mui-input.tsx │ │ │ │ ├── subprocess-filter/ │ │ │ │ │ └── subprocess-filter.tsx │ │ │ │ ├── switch-field/ │ │ │ │ │ └── switch-field.tsx │ │ │ │ ├── tabbedList/ │ │ │ │ │ └── tabbed-list.tsx │ │ │ │ ├── text-field/ │ │ │ │ │ └── text-field.tsx │ │ │ │ ├── tree/ │ │ │ │ │ ├── tree.cy.js │ │ │ │ │ ├── tree.tsx │ │ │ │ │ └── virtual-tree.tsx │ │ │ │ ├── warning/ │ │ │ │ │ └── warning.tsx │ │ │ │ ├── warning-collection/ │ │ │ │ │ └── warning-collection.tsx │ │ │ │ └── workflow-inputs-form/ │ │ │ │ ├── validators.ts │ │ │ │ └── workflow-input.tsx │ │ │ ├── cypress/ │ │ │ │ ├── mocks/ │ │ │ │ │ └── service-provider.ts │ │ │ │ └── utils/ │ │ │ │ └── contains-action-subset.ts │ │ │ ├── index.css │ │ │ ├── index.tsx │ │ │ ├── lib/ │ │ │ │ ├── cwl-svg/ │ │ │ │ │ ├── assets/ │ │ │ │ │ │ └── styles/ │ │ │ │ │ │ ├── _variables.scss │ │ │ │ │ │ ├── style.css │ │ │ │ │ │ ├── style.scss │ │ │ │ │ │ ├── theme.css │ │ │ │ │ │ ├── theme.scss │ │ │ │ │ │ └── themes/ │ │ │ │ │ │ └── rabix-dark/ │ │ │ │ │ │ ├── _variables.scss │ │ │ │ │ │ ├── theme.css │ │ │ │ │ │ └── theme.scss │ │ │ │ │ ├── behaviors/ │ │ │ │ │ │ └── edge-panning.ts │ │ │ │ │ ├── graph/ │ │ │ │ │ │ ├── connectable.ts │ │ │ │ │ │ ├── edge.ts │ │ │ │ │ │ ├── graph-node.ts │ │ │ │ │ │ ├── io-port.ts │ │ │ │ │ │ ├── step-node.ts │ │ │ │ │ │ ├── template-parser.ts │ │ │ │ │ │ └── workflow.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── plugins/ │ │ │ │ │ │ ├── arrange/ │ │ │ │ │ │ │ └── arrange.ts │ │ │ │ │ │ ├── deletion/ │ │ │ │ │ │ │ └── deletion.ts │ │ │ │ │ │ ├── edge-hover/ │ │ │ │ │ │ │ └── edge-hover.ts │ │ │ │ │ │ ├── node-move/ │ │ │ │ │ │ │ └── node-move.ts │ │ │ │ │ │ ├── plugin-base.ts │ │ │ │ │ │ ├── plugin.ts │ │ │ │ │ │ ├── port-drag/ │ │ │ │ │ │ │ ├── _variables.scss │ │ │ │ │ │ │ ├── port-drag.ts │ │ │ │ │ │ │ ├── style.css │ │ │ │ │ │ │ ├── style.scss │ │ │ │ │ │ │ ├── theme.css │ │ │ │ │ │ │ ├── theme.dark.css │ │ │ │ │ │ │ ├── theme.dark.scss │ │ │ │ │ │ │ └── theme.scss │ │ │ │ │ │ ├── selection/ │ │ │ │ │ │ │ ├── _variables.scss │ │ │ │ │ │ │ ├── selection.ts │ │ │ │ │ │ │ ├── style.css │ │ │ │ │ │ │ ├── style.scss │ │ │ │ │ │ │ ├── theme.css │ │ │ │ │ │ │ ├── theme.dark.css │ │ │ │ │ │ │ ├── theme.dark.scss │ │ │ │ │ │ │ └── theme.scss │ │ │ │ │ │ ├── validate/ │ │ │ │ │ │ │ ├── validate.css │ │ │ │ │ │ │ ├── validate.scss │ │ │ │ │ │ │ └── validate.ts │ │ │ │ │ │ └── zoom/ │ │ │ │ │ │ ├── index.ts │ │ │ │ │ │ └── zoom.ts │ │ │ │ │ └── utils/ │ │ │ │ │ ├── dom-events.ts │ │ │ │ │ ├── dynamic-stylesheet.ts │ │ │ │ │ ├── 
event-hub.ts │ │ │ │ │ ├── geometry.ts │ │ │ │ │ ├── html-utils.ts │ │ │ │ │ ├── perf.ts │ │ │ │ │ ├── svg-dumper.ts │ │ │ │ │ └── svg-utils.ts │ │ │ │ ├── resource-properties.cy.js │ │ │ │ └── resource-properties.ts │ │ │ ├── models/ │ │ │ │ ├── api-client-authorization.ts │ │ │ │ ├── client-authorization.ts │ │ │ │ ├── collection-file.ts │ │ │ │ ├── collection.ts │ │ │ │ ├── container-request.ts │ │ │ │ ├── container.ts │ │ │ │ ├── details.ts │ │ │ │ ├── empty.ts │ │ │ │ ├── external-credential.ts │ │ │ │ ├── file-viewers-config.ts │ │ │ │ ├── group.ts │ │ │ │ ├── keep-manifest.ts │ │ │ │ ├── keep-services.ts │ │ │ │ ├── link-account.ts │ │ │ │ ├── link.ts │ │ │ │ ├── log.ts │ │ │ │ ├── mount-types.ts │ │ │ │ ├── node.ts │ │ │ │ ├── object-types.ts │ │ │ │ ├── permission.ts │ │ │ │ ├── process.ts │ │ │ │ ├── project.ts │ │ │ │ ├── repositories.ts │ │ │ │ ├── resource.ts │ │ │ │ ├── runtime-constraints.ts │ │ │ │ ├── runtime-status.ts │ │ │ │ ├── scheduling-parameters.ts │ │ │ │ ├── search-bar.ts │ │ │ │ ├── session.ts │ │ │ │ ├── ssh-key.ts │ │ │ │ ├── tag.ts │ │ │ │ ├── test-utils.ts │ │ │ │ ├── tree.cy.js │ │ │ │ ├── tree.ts │ │ │ │ ├── user.cy.js │ │ │ │ ├── user.ts │ │ │ │ ├── virtual-machines.ts │ │ │ │ ├── vocabulary.cy.js │ │ │ │ ├── vocabulary.ts │ │ │ │ └── workflow.ts │ │ │ ├── plugins/ │ │ │ │ ├── README.md │ │ │ │ ├── blank/ │ │ │ │ │ └── index.tsx │ │ │ │ ├── example/ │ │ │ │ │ ├── exampleComponents.tsx │ │ │ │ │ └── index.tsx │ │ │ │ └── root-redirect/ │ │ │ │ └── index.tsx │ │ │ ├── plugins.tsx │ │ │ ├── react-app-env.d.ts │ │ │ ├── routes/ │ │ │ │ ├── route-change-handlers.ts │ │ │ │ └── routes.ts │ │ │ ├── services/ │ │ │ │ ├── ancestors-service/ │ │ │ │ │ └── ancestors-service.ts │ │ │ │ ├── api/ │ │ │ │ │ ├── api-actions.ts │ │ │ │ │ ├── filter-builder.cy.js │ │ │ │ │ ├── filter-builder.ts │ │ │ │ │ ├── order-builder.cy.js │ │ │ │ │ ├── order-builder.ts │ │ │ │ │ ├── url-builder.cy.js │ │ │ │ │ └── url-builder.ts │ │ │ │ ├── api-client-authorization-service/ │ │ │ │ │ ├── api-client-authorization-service.cy.js │ │ │ │ │ └── api-client-authorization-service.ts │ │ │ │ ├── auth-service/ │ │ │ │ │ └── auth-service.ts │ │ │ │ ├── authorized-keys-service/ │ │ │ │ │ └── authorized-keys-service.ts │ │ │ │ ├── collection-service/ │ │ │ │ │ ├── collection-service-files-response.cy.js │ │ │ │ │ ├── collection-service-files-response.ts │ │ │ │ │ ├── collection-service.cy.js │ │ │ │ │ └── collection-service.ts │ │ │ │ ├── common-service/ │ │ │ │ │ ├── common-resource-service.cy.js │ │ │ │ │ ├── common-resource-service.ts │ │ │ │ │ ├── common-service.cy.js │ │ │ │ │ ├── common-service.ts │ │ │ │ │ └── trashable-resource-service.ts │ │ │ │ ├── container-request-service/ │ │ │ │ │ └── container-request-service.ts │ │ │ │ ├── container-service/ │ │ │ │ │ └── container-service.ts │ │ │ │ ├── external-credentials/ │ │ │ │ │ └── external-credentials-service.ts │ │ │ │ ├── favorite-service/ │ │ │ │ │ ├── favorite-service.cy.js │ │ │ │ │ └── favorite-service.ts │ │ │ │ ├── file-viewers-config-service/ │ │ │ │ │ └── file-viewers-config-service.ts │ │ │ │ ├── groups-service/ │ │ │ │ │ ├── groups-service.cy.js │ │ │ │ │ └── groups-service.ts │ │ │ │ ├── keep-service/ │ │ │ │ │ └── keep-service.ts │ │ │ │ ├── link-account-service/ │ │ │ │ │ └── link-account-service.ts │ │ │ │ ├── link-service/ │ │ │ │ │ └── link-service.ts │ │ │ │ ├── log-service/ │ │ │ │ │ ├── log-service.cy.js │ │ │ │ │ └── log-service.ts │ │ │ │ ├── permission-service/ │ │ │ │ │ └── permission-service.ts │ │ │ │ ├── 
project-service/ │ │ │ │ │ ├── project-service.cy.js │ │ │ │ │ └── project-service.ts │ │ │ │ ├── repositories-service/ │ │ │ │ │ └── repositories-service.ts │ │ │ │ ├── search-service/ │ │ │ │ │ └── search-service.ts │ │ │ │ ├── services.ts │ │ │ │ ├── tag-service/ │ │ │ │ │ └── tag-service.ts │ │ │ │ ├── user-service/ │ │ │ │ │ └── user-service.ts │ │ │ │ ├── virtual-machines-service/ │ │ │ │ │ └── virtual-machines-service.ts │ │ │ │ ├── vocabulary-service/ │ │ │ │ │ └── vocabulary-service.ts │ │ │ │ └── workflow-service/ │ │ │ │ └── workflow-service.ts │ │ │ ├── store/ │ │ │ │ ├── advanced-tab/ │ │ │ │ │ └── advanced-tab.tsx │ │ │ │ ├── all-processes-panel/ │ │ │ │ │ ├── all-processes-panel-action.ts │ │ │ │ │ └── all-processes-panel-middleware-service.ts │ │ │ │ ├── api-client-authorizations/ │ │ │ │ │ ├── api-client-authorizations-actions.ts │ │ │ │ │ └── api-client-authorizations-middleware-service.ts │ │ │ │ ├── app-info/ │ │ │ │ │ ├── app-info-actions.ts │ │ │ │ │ └── app-info-reducer.ts │ │ │ │ ├── auth/ │ │ │ │ │ ├── auth-action-session.ts │ │ │ │ │ ├── auth-action-ssh.ts │ │ │ │ │ ├── auth-action.ts │ │ │ │ │ ├── auth-middleware.ts │ │ │ │ │ ├── auth-reducer.cy.js │ │ │ │ │ ├── auth-reducer.ts │ │ │ │ │ └── cluster-badges.ts │ │ │ │ ├── banner/ │ │ │ │ │ ├── banner-action.ts │ │ │ │ │ └── banner-reducer.ts │ │ │ │ ├── breadcrumbs/ │ │ │ │ │ └── breadcrumbs-actions.ts │ │ │ │ ├── collection-panel/ │ │ │ │ │ ├── collection-panel-action.ts │ │ │ │ │ ├── collection-panel-files/ │ │ │ │ │ │ ├── collection-panel-files-actions.ts │ │ │ │ │ │ ├── collection-panel-files-reducer.cy.js │ │ │ │ │ │ ├── collection-panel-files-reducer.ts │ │ │ │ │ │ └── collection-panel-files-state.ts │ │ │ │ │ └── collection-panel-reducer.ts │ │ │ │ ├── collections/ │ │ │ │ │ ├── collection-copy-actions.ts │ │ │ │ │ ├── collection-create-actions.ts │ │ │ │ │ ├── collection-info-actions.ts │ │ │ │ │ ├── collection-move-actions.ts │ │ │ │ │ ├── collection-partial-copy-actions.ts │ │ │ │ │ ├── collection-partial-move-actions.ts │ │ │ │ │ ├── collection-update-actions.ts │ │ │ │ │ ├── collection-upload-actions.ts │ │ │ │ │ └── collection-version-actions.ts │ │ │ │ ├── collections-content-address-panel/ │ │ │ │ │ ├── collections-content-address-middleware-service.ts │ │ │ │ │ └── collections-content-address-panel-actions.ts │ │ │ │ ├── context-menu/ │ │ │ │ │ ├── context-menu-actions.ts │ │ │ │ │ ├── context-menu-filters.ts │ │ │ │ │ ├── context-menu-reducer.ts │ │ │ │ │ └── context-menu.ts │ │ │ │ ├── copy-dialog/ │ │ │ │ │ └── copy-dialog.ts │ │ │ │ ├── data-explorer/ │ │ │ │ │ ├── data-explorer-action.ts │ │ │ │ │ ├── data-explorer-middleware-service.ts │ │ │ │ │ ├── data-explorer-middleware.cy.js │ │ │ │ │ ├── data-explorer-middleware.ts │ │ │ │ │ ├── data-explorer-reducer.cy.js │ │ │ │ │ └── data-explorer-reducer.ts │ │ │ │ ├── description-dialog/ │ │ │ │ │ └── description-dialog-actions.ts │ │ │ │ ├── details-panel/ │ │ │ │ │ ├── details-panel-action.ts │ │ │ │ │ └── details-panel-reducer.ts │ │ │ │ ├── dialog/ │ │ │ │ │ ├── dialog-actions.ts │ │ │ │ │ ├── dialog-reducer.cy.js │ │ │ │ │ ├── dialog-reducer.ts │ │ │ │ │ └── with-dialog.ts │ │ │ │ ├── external-credentials/ │ │ │ │ │ ├── external-credential-dialog-data.ts │ │ │ │ │ ├── external-credentials-actions.cy.js │ │ │ │ │ ├── external-credentials-actions.ts │ │ │ │ │ └── external-credentials-middleware-service.ts │ │ │ │ ├── favorite-panel/ │ │ │ │ │ ├── favorite-panel-action.ts │ │ │ │ │ └── favorite-panel-middleware-service.ts │ │ │ │ ├── 
favorite-pins/ │ │ │ │ │ └── favorite-pins-middleware-service.ts │ │ │ │ ├── favorites/ │ │ │ │ │ ├── favorites-actions.ts │ │ │ │ │ ├── favorites-links-reducer.tsx │ │ │ │ │ └── favorites-reducer.ts │ │ │ │ ├── file-selection/ │ │ │ │ │ └── file-selection-actions.ts │ │ │ │ ├── file-uploader/ │ │ │ │ │ ├── file-uploader-actions.ts │ │ │ │ │ └── file-uploader-reducer.ts │ │ │ │ ├── file-viewers/ │ │ │ │ │ ├── file-viewers-actions.ts │ │ │ │ │ └── file-viewers-selectors.ts │ │ │ │ ├── group-details-panel/ │ │ │ │ │ ├── group-details-panel-actions.ts │ │ │ │ │ ├── group-details-panel-members-middleware-service.test.js │ │ │ │ │ ├── group-details-panel-members-middleware-service.ts │ │ │ │ │ ├── group-details-panel-permissions-middleware-service.test.js │ │ │ │ │ └── group-details-panel-permissions-middleware-service.ts │ │ │ │ ├── groups-panel/ │ │ │ │ │ ├── groups-panel-actions.ts │ │ │ │ │ ├── groups-panel-middleware-service.cy.js │ │ │ │ │ └── groups-panel-middleware-service.ts │ │ │ │ ├── keep-services/ │ │ │ │ │ ├── keep-services-actions.ts │ │ │ │ │ └── keep-services-reducer.ts │ │ │ │ ├── link-account-panel/ │ │ │ │ │ ├── link-account-panel-actions.ts │ │ │ │ │ ├── link-account-panel-reducer.cy.js │ │ │ │ │ └── link-account-panel-reducer.ts │ │ │ │ ├── link-panel/ │ │ │ │ │ ├── link-panel-actions.ts │ │ │ │ │ └── link-panel-middleware-service.ts │ │ │ │ ├── move-to-dialog/ │ │ │ │ │ └── move-to-dialog.ts │ │ │ │ ├── multiselect/ │ │ │ │ │ ├── multiselect-actions.tsx │ │ │ │ │ └── multiselect-reducer.tsx │ │ │ │ ├── navigation/ │ │ │ │ │ └── navigation-action.ts │ │ │ │ ├── not-found-panel/ │ │ │ │ │ └── not-found-panel-action.tsx │ │ │ │ ├── open-in-new-tab/ │ │ │ │ │ └── open-in-new-tab.actions.ts │ │ │ │ ├── owner-name/ │ │ │ │ │ ├── owner-name-actions.ts │ │ │ │ │ └── owner-name-reducer.ts │ │ │ │ ├── process-logs-panel/ │ │ │ │ │ ├── process-logs-panel-actions.cy.js │ │ │ │ │ ├── process-logs-panel-actions.ts │ │ │ │ │ ├── process-logs-panel-reducer.ts │ │ │ │ │ └── process-logs-panel.ts │ │ │ │ ├── process-panel/ │ │ │ │ │ ├── process-panel-actions.ts │ │ │ │ │ ├── process-panel-reducer.ts │ │ │ │ │ └── process-panel.ts │ │ │ │ ├── processes/ │ │ │ │ │ ├── process-copy-actions.cy.js │ │ │ │ │ ├── process-copy-actions.ts │ │ │ │ │ ├── process-input-actions.ts │ │ │ │ │ ├── process-update-actions.ts │ │ │ │ │ ├── process.ts │ │ │ │ │ ├── processes-actions.ts │ │ │ │ │ └── processes-middleware-service.ts │ │ │ │ ├── progress-indicator/ │ │ │ │ │ ├── progress-indicator-actions.ts │ │ │ │ │ ├── progress-indicator-reducer.ts │ │ │ │ │ └── with-progress.ts │ │ │ │ ├── project-panel/ │ │ │ │ │ ├── project-panel-action-bind.ts │ │ │ │ │ ├── project-panel-action.ts │ │ │ │ │ ├── project-panel-data-middleware-service.ts │ │ │ │ │ ├── project-panel-run-middleware-service.ts │ │ │ │ │ └── project-panel.ts │ │ │ │ ├── project-tree-picker/ │ │ │ │ │ └── project-tree-picker-actions.ts │ │ │ │ ├── projects/ │ │ │ │ │ ├── project-create-actions.ts │ │ │ │ │ ├── project-lock-actions.ts │ │ │ │ │ ├── project-move-actions.ts │ │ │ │ │ └── project-update-actions.ts │ │ │ │ ├── properties/ │ │ │ │ │ ├── properties-actions.ts │ │ │ │ │ ├── properties-reducer.ts │ │ │ │ │ └── properties.ts │ │ │ │ ├── public-favorites/ │ │ │ │ │ ├── public-favorites-actions.ts │ │ │ │ │ ├── public-favorites-reducer.ts │ │ │ │ │ └── public-favorites.ts │ │ │ │ ├── public-favorites-panel/ │ │ │ │ │ ├── public-favorites-action.ts │ │ │ │ │ └── public-favorites-middleware-service.ts │ │ │ │ ├── recent-wf-runs/ │ │ │ │ │ ├── 
recent-wf-runs-action.ts │ │ │ │ │ └── recent-wf-runs-middleware-sevice.ts │ │ │ │ ├── recently-visited/ │ │ │ │ │ ├── recently-visited-actions.tsx │ │ │ │ │ └── recently-visited-middleware-services.ts │ │ │ │ ├── redux-saga.ts │ │ │ │ ├── repositories/ │ │ │ │ │ ├── repositories-actions.ts │ │ │ │ │ └── repositories-reducer.ts │ │ │ │ ├── resource-type-filters/ │ │ │ │ │ ├── resource-type-filters.cy.js │ │ │ │ │ └── resource-type-filters.ts │ │ │ │ ├── resources/ │ │ │ │ │ ├── resources-actions.ts │ │ │ │ │ ├── resources-reducer.ts │ │ │ │ │ └── resources.ts │ │ │ │ ├── rich-text-editor-dialog/ │ │ │ │ │ └── rich-text-editor-dialog-actions.tsx │ │ │ │ ├── run-process-panel/ │ │ │ │ │ ├── run-process-panel-actions.cy.js │ │ │ │ │ ├── run-process-panel-actions.ts │ │ │ │ │ └── run-process-panel-reducer.ts │ │ │ │ ├── search-bar/ │ │ │ │ │ ├── search-bar-actions.cy.js │ │ │ │ │ ├── search-bar-actions.ts │ │ │ │ │ ├── search-bar-reducer.ts │ │ │ │ │ ├── search-bar-tree-actions.ts │ │ │ │ │ └── search-query/ │ │ │ │ │ ├── arv-parser.ts │ │ │ │ │ └── parser.ts │ │ │ │ ├── search-results-panel/ │ │ │ │ │ ├── search-results-middleware-service.cy.js │ │ │ │ │ ├── search-results-middleware-service.ts │ │ │ │ │ └── search-results-panel-actions.ts │ │ │ │ ├── selected-resource/ │ │ │ │ │ ├── selected-resource-actions.ts │ │ │ │ │ └── selected-resource-reducer.ts │ │ │ │ ├── shared-with-me-panel/ │ │ │ │ │ ├── shared-with-me-middleware-service.ts │ │ │ │ │ └── shared-with-me-panel-actions.ts │ │ │ │ ├── sharing-dialog/ │ │ │ │ │ ├── sharing-dialog-actions.ts │ │ │ │ │ └── sharing-dialog-types.ts │ │ │ │ ├── side-panel/ │ │ │ │ │ ├── side-panel-action.ts │ │ │ │ │ └── side-panel-reducer.tsx │ │ │ │ ├── side-panel-tree/ │ │ │ │ │ └── side-panel-tree-actions.ts │ │ │ │ ├── snackbar/ │ │ │ │ │ ├── snackbar-actions.ts │ │ │ │ │ └── snackbar-reducer.ts │ │ │ │ ├── store.ts │ │ │ │ ├── subprocess-panel/ │ │ │ │ │ ├── subprocess-panel-actions.ts │ │ │ │ │ └── subprocess-panel-middleware-service.ts │ │ │ │ ├── token-dialog/ │ │ │ │ │ └── token-dialog-actions.tsx │ │ │ │ ├── tooltips/ │ │ │ │ │ └── tooltips-middleware.ts │ │ │ │ ├── trash/ │ │ │ │ │ └── trash-actions.ts │ │ │ │ ├── trash-panel/ │ │ │ │ │ ├── trash-panel-action.ts │ │ │ │ │ └── trash-panel-middleware-service.ts │ │ │ │ ├── tree-picker/ │ │ │ │ │ ├── picker-id.tsx │ │ │ │ │ ├── tree-picker-actions.cy.js │ │ │ │ │ ├── tree-picker-actions.ts │ │ │ │ │ ├── tree-picker-middleware.ts │ │ │ │ │ ├── tree-picker-reducer.cy.js │ │ │ │ │ ├── tree-picker-reducer.ts │ │ │ │ │ └── tree-picker.ts │ │ │ │ ├── user-preferences/ │ │ │ │ │ └── user-preferences-actions.ts │ │ │ │ ├── user-profile/ │ │ │ │ │ ├── user-profile-actions.ts │ │ │ │ │ └── user-profile-groups-middleware-service.ts │ │ │ │ ├── users/ │ │ │ │ │ ├── user-panel-middleware-service.ts │ │ │ │ │ └── users-actions.ts │ │ │ │ ├── virtual-machines/ │ │ │ │ │ ├── virtual-machines-actions.ts │ │ │ │ │ └── virtual-machines-reducer.ts │ │ │ │ ├── vocabulary/ │ │ │ │ │ ├── vocabulary-actions.ts │ │ │ │ │ └── vocabulary-selectors.ts │ │ │ │ ├── workbench/ │ │ │ │ │ └── workbench-actions.ts │ │ │ │ └── workflow-panel/ │ │ │ │ ├── workflow-middleware-service.ts │ │ │ │ ├── workflow-panel-actions.cy.js │ │ │ │ └── workflow-panel-actions.ts │ │ │ ├── validators/ │ │ │ │ ├── is-float.tsx │ │ │ │ ├── is-integer.tsx │ │ │ │ ├── is-number.tsx │ │ │ │ ├── is-remote-host.tsx │ │ │ │ ├── is-rsa-key.cy.js │ │ │ │ ├── is-rsa-key.tsx │ │ │ │ ├── is-valid-file-ops-location.ts │ │ │ │ ├── is-valid-future-date.tsx │ │ │ │ 
├── is-zip-filename.tsx │ │ │ │ ├── max-length.tsx │ │ │ │ ├── min-length.tsx │ │ │ │ ├── min.tsx │ │ │ │ ├── optional.tsx │ │ │ │ ├── require.tsx │ │ │ │ ├── valid-name.tsx │ │ │ │ └── validators.tsx │ │ │ ├── views/ │ │ │ │ ├── all-processes-panel/ │ │ │ │ │ ├── all-processes-panel-columns.tsx │ │ │ │ │ └── all-processes-panel.tsx │ │ │ │ ├── api-client-authorization-panel/ │ │ │ │ │ ├── api-client-authorization-panel-columns.tsx │ │ │ │ │ ├── api-client-authorization-panel-root.tsx │ │ │ │ │ └── api-client-authorization-panel.tsx │ │ │ │ ├── collection-content-address-panel/ │ │ │ │ │ ├── collection-content-address-panel-columns.tsx │ │ │ │ │ └── collection-content-address-panel.tsx │ │ │ │ ├── collection-panel/ │ │ │ │ │ ├── collection-attributes.tsx │ │ │ │ │ └── collection-panel.tsx │ │ │ │ ├── external-credentials-panel/ │ │ │ │ │ ├── external-credentials-panel-columns.tsx │ │ │ │ │ └── external-credentials-panel.tsx │ │ │ │ ├── favorite-panel/ │ │ │ │ │ ├── favorite-panel-columns.tsx │ │ │ │ │ └── favorite-panel.tsx │ │ │ │ ├── group-details-panel/ │ │ │ │ │ ├── group-details-panel-columns.tsx │ │ │ │ │ └── group-details-panel.tsx │ │ │ │ ├── groups-panel/ │ │ │ │ │ ├── groups-panel-columns.tsx │ │ │ │ │ └── groups-panel.tsx │ │ │ │ ├── inactive-panel/ │ │ │ │ │ ├── inactive-panel.cy.js │ │ │ │ │ └── inactive-panel.tsx │ │ │ │ ├── instance-types-panel/ │ │ │ │ │ ├── instance-types-panel.cy.js │ │ │ │ │ └── instance-types-panel.tsx │ │ │ │ ├── keep-service-panel/ │ │ │ │ │ ├── keep-service-panel-root.tsx │ │ │ │ │ └── keep-service-panel.tsx │ │ │ │ ├── link-account-panel/ │ │ │ │ │ ├── link-account-panel-root.tsx │ │ │ │ │ └── link-account-panel.tsx │ │ │ │ ├── link-panel/ │ │ │ │ │ ├── link-panel-columns.tsx │ │ │ │ │ ├── link-panel-root.tsx │ │ │ │ │ └── link-panel.tsx │ │ │ │ ├── login-panel/ │ │ │ │ │ ├── login-panel.cy.js │ │ │ │ │ └── login-panel.tsx │ │ │ │ ├── main-panel/ │ │ │ │ │ ├── main-panel-root.tsx │ │ │ │ │ └── main-panel.tsx │ │ │ │ ├── not-found-panel/ │ │ │ │ │ ├── not-found-panel-root.cy.js │ │ │ │ │ ├── not-found-panel-root.tsx │ │ │ │ │ └── not-found-panel.tsx │ │ │ │ ├── process-panel/ │ │ │ │ │ ├── process-attributes.tsx │ │ │ │ │ ├── process-cmd-card.tsx │ │ │ │ │ ├── process-details-attributes.tsx │ │ │ │ │ ├── process-io-card.cy.js │ │ │ │ │ ├── process-io-card.tsx │ │ │ │ │ ├── process-log-card.tsx │ │ │ │ │ ├── process-log-code-snippet.tsx │ │ │ │ │ ├── process-log-form.tsx │ │ │ │ │ ├── process-output-collection-files.ts │ │ │ │ │ ├── process-panel-root.tsx │ │ │ │ │ ├── process-panel.tsx │ │ │ │ │ └── process-resource-card.tsx │ │ │ │ ├── project-panel/ │ │ │ │ │ ├── project-attributes.tsx │ │ │ │ │ ├── project-panel-columns.tsx │ │ │ │ │ ├── project-panel-data.tsx │ │ │ │ │ ├── project-panel-run.tsx │ │ │ │ │ └── project-panel.tsx │ │ │ │ ├── public-favorites-panel/ │ │ │ │ │ ├── public-favorites-panel-columns.tsx │ │ │ │ │ └── public-favorites-panel.tsx │ │ │ │ ├── repositories-panel/ │ │ │ │ │ └── repositories-panel.tsx │ │ │ │ ├── run-process-panel/ │ │ │ │ │ ├── inputs/ │ │ │ │ │ │ ├── boolean-input.tsx │ │ │ │ │ │ ├── directory-array-input.tsx │ │ │ │ │ │ ├── directory-input.tsx │ │ │ │ │ │ ├── enum-input.tsx │ │ │ │ │ │ ├── file-array-input.tsx │ │ │ │ │ │ ├── file-input.tsx │ │ │ │ │ │ ├── float-array-input.tsx │ │ │ │ │ │ ├── float-input.tsx │ │ │ │ │ │ ├── generic-input.tsx │ │ │ │ │ │ ├── int-array-input.tsx │ │ │ │ │ │ ├── int-input.tsx │ │ │ │ │ │ ├── run-wf-project-input.tsx │ │ │ │ │ │ ├── search-project-input.tsx │ │ │ │ │ │ ├── 
string-array-input.tsx │ │ │ │ │ │ └── string-input.tsx │ │ │ │ │ ├── run-process-advanced-form.tsx │ │ │ │ │ ├── run-process-basic-form.tsx │ │ │ │ │ ├── run-process-first-step.tsx │ │ │ │ │ ├── run-process-inputs-form.tsx │ │ │ │ │ ├── run-process-panel-root.tsx │ │ │ │ │ ├── run-process-panel.tsx │ │ │ │ │ ├── run-process-second-step.tsx │ │ │ │ │ └── workflow-preset-select.tsx │ │ │ │ ├── search-results-panel/ │ │ │ │ │ ├── search-results-panel-columns.tsx │ │ │ │ │ ├── search-results-panel-view.tsx │ │ │ │ │ └── search-results-panel.tsx │ │ │ │ ├── shared-with-me-panel/ │ │ │ │ │ ├── shared-with-me-columns.tsx │ │ │ │ │ └── shared-with-me-panel.tsx │ │ │ │ ├── site-manager-panel/ │ │ │ │ │ ├── site-manager-panel-root.tsx │ │ │ │ │ └── site-manager-panel.tsx │ │ │ │ ├── ssh-key-panel/ │ │ │ │ │ ├── ssh-key-admin-panel.tsx │ │ │ │ │ ├── ssh-key-panel-root.tsx │ │ │ │ │ └── ssh-key-panel.tsx │ │ │ │ ├── subprocess-panel/ │ │ │ │ │ ├── subprocess-panel-columns.tsx │ │ │ │ │ ├── subprocess-panel-root.tsx │ │ │ │ │ └── subprocess-panel.tsx │ │ │ │ ├── trash-panel/ │ │ │ │ │ ├── trash-panel-columns.tsx │ │ │ │ │ └── trash-panel.tsx │ │ │ │ ├── user-panel/ │ │ │ │ │ ├── user-panel-columns.tsx │ │ │ │ │ └── user-panel.tsx │ │ │ │ ├── user-preferences-panel/ │ │ │ │ │ ├── user-preferences-panel-root.tsx │ │ │ │ │ └── user-preferences-panel.tsx │ │ │ │ ├── user-profile-panel/ │ │ │ │ │ ├── user-profile-panel-columns.tsx │ │ │ │ │ ├── user-profile-panel-root.tsx │ │ │ │ │ └── user-profile-panel.tsx │ │ │ │ ├── virtual-machine-panel/ │ │ │ │ │ ├── virtual-machine-admin-panel.tsx │ │ │ │ │ └── virtual-machine-user-panel.tsx │ │ │ │ ├── workbench/ │ │ │ │ │ ├── fed-login.tsx │ │ │ │ │ ├── workbench-loading-screen.tsx │ │ │ │ │ ├── workbench.cy.js │ │ │ │ │ └── workbench.tsx │ │ │ │ └── workflow-panel/ │ │ │ │ ├── registered-workflow-panel.tsx │ │ │ │ ├── workflow-description-card.tsx │ │ │ │ ├── workflow-graph.tsx │ │ │ │ ├── workflow-panel-columns.tsx │ │ │ │ ├── workflow-panel-view.tsx │ │ │ │ ├── workflow-panel.tsx │ │ │ │ ├── workflow-processes-panel-columns.tsx │ │ │ │ ├── workflow-processes-panel-root.tsx │ │ │ │ └── workflow-processes-panel.tsx │ │ │ ├── views-components/ │ │ │ │ ├── add-session/ │ │ │ │ │ └── add-session.tsx │ │ │ │ ├── advanced-tab-dialog/ │ │ │ │ │ ├── advanced-tab-dialog.tsx │ │ │ │ │ └── metadataTab.tsx │ │ │ │ ├── api-client-authorizations-dialog/ │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ ├── help-dialog.tsx │ │ │ │ │ └── remove-dialog.tsx │ │ │ │ ├── api-token/ │ │ │ │ │ └── api-token.tsx │ │ │ │ ├── auto-logout/ │ │ │ │ │ ├── auto-logout.cy.js │ │ │ │ │ └── auto-logout.tsx │ │ │ │ ├── baner/ │ │ │ │ │ ├── banner.cy.js │ │ │ │ │ └── banner.tsx │ │ │ │ ├── breadcrumbs/ │ │ │ │ │ └── breadcrumbs.ts │ │ │ │ ├── collection-panel-files/ │ │ │ │ │ └── collection-panel-files.ts │ │ │ │ ├── collections-dialog/ │ │ │ │ │ └── restore-version-dialog.ts │ │ │ │ ├── context-menu/ │ │ │ │ │ ├── action-sets/ │ │ │ │ │ │ ├── api-client-authorization-action-set.ts │ │ │ │ │ │ ├── collection-action-set.cy.js │ │ │ │ │ │ ├── collection-action-set.ts │ │ │ │ │ │ ├── collection-files-action-set.ts │ │ │ │ │ │ ├── collection-files-item-action-set.ts │ │ │ │ │ │ ├── collection-files-not-selected-action-set.ts │ │ │ │ │ │ ├── external-credential-action-set.ts │ │ │ │ │ │ ├── favorite-action-set.ts │ │ │ │ │ │ ├── group-action-set.ts │ │ │ │ │ │ ├── group-member-action-set.ts │ │ │ │ │ │ ├── keep-service-action-set.ts │ │ │ │ │ │ ├── link-action-set.ts │ │ │ │ │ │ ├── 
permission-edit-action-set.ts │ │ │ │ │ │ ├── process-resource-action-set.ts │ │ │ │ │ │ ├── project-action-set.cy.js │ │ │ │ │ │ ├── project-action-set.ts │ │ │ │ │ │ ├── project-admin-action-set.ts │ │ │ │ │ │ ├── repository-action-set.ts │ │ │ │ │ │ ├── resource-action-set.ts │ │ │ │ │ │ ├── root-project-action-set.ts │ │ │ │ │ │ ├── search-results-action-set.ts │ │ │ │ │ │ ├── ssh-key-action-set.ts │ │ │ │ │ │ ├── trash-action-set.ts │ │ │ │ │ │ ├── trashed-collection-action-set.ts │ │ │ │ │ │ ├── user-action-set.ts │ │ │ │ │ │ ├── user-details-action-set.ts │ │ │ │ │ │ ├── virtual-machine-action-set.ts │ │ │ │ │ │ └── workflow-action-set.ts │ │ │ │ │ ├── actions/ │ │ │ │ │ │ ├── collection-copy-to-clipboard-action.tsx │ │ │ │ │ │ ├── collection-file-viewer-action.cy.js │ │ │ │ │ │ ├── collection-file-viewer-action.tsx │ │ │ │ │ │ ├── context-menu-divider.tsx │ │ │ │ │ │ ├── copy-to-clipboard-action.cy.js │ │ │ │ │ │ ├── copy-to-clipboard-action.tsx │ │ │ │ │ │ ├── download-action.cy.js │ │ │ │ │ │ ├── download-action.tsx │ │ │ │ │ │ ├── download-collection-file-action.tsx │ │ │ │ │ │ ├── favorite-action.tsx │ │ │ │ │ │ ├── file-viewer-action.cy.js │ │ │ │ │ │ ├── file-viewer-action.tsx │ │ │ │ │ │ ├── file-viewer-actions.tsx │ │ │ │ │ │ ├── helpers.cy.js │ │ │ │ │ │ ├── helpers.ts │ │ │ │ │ │ ├── lock-action.tsx │ │ │ │ │ │ ├── public-favorite-action.tsx │ │ │ │ │ │ └── trash-action.tsx │ │ │ │ │ ├── component-item-styles.ts │ │ │ │ │ ├── context-menu-action-set.ts │ │ │ │ │ ├── context-menu.tsx │ │ │ │ │ └── menu-item-sort.ts │ │ │ │ ├── data-explorer/ │ │ │ │ │ ├── data-explorer.tsx │ │ │ │ │ ├── renderers.cy.js │ │ │ │ │ ├── renderers.tsx │ │ │ │ │ └── with-resources.tsx │ │ │ │ ├── description-dialog/ │ │ │ │ │ └── description-dialog.tsx │ │ │ │ ├── details-card/ │ │ │ │ │ ├── collection-details-card.tsx │ │ │ │ │ ├── description-preview.tsx │ │ │ │ │ ├── details-card-root.tsx │ │ │ │ │ ├── process-details-card.tsx │ │ │ │ │ ├── project-details-card.tsx │ │ │ │ │ ├── service-menu.cy.js │ │ │ │ │ ├── service-menu.tsx │ │ │ │ │ ├── user-details-card.tsx │ │ │ │ │ └── workflow-details-card.tsx │ │ │ │ ├── details-panel/ │ │ │ │ │ ├── collection-details.tsx │ │ │ │ │ ├── details-data.tsx │ │ │ │ │ ├── details-panel.tsx │ │ │ │ │ ├── empty-details.tsx │ │ │ │ │ ├── file-details.tsx │ │ │ │ │ ├── process-details.tsx │ │ │ │ │ ├── project-details.tsx │ │ │ │ │ ├── root-project-details.tsx │ │ │ │ │ └── workflow-details.tsx │ │ │ │ ├── dialog-copy/ │ │ │ │ │ ├── dialog-collection-partial-copy-to-existing-collection.tsx │ │ │ │ │ ├── dialog-collection-partial-copy-to-new-collection.tsx │ │ │ │ │ ├── dialog-collection-partial-copy-to-separate-collections.tsx │ │ │ │ │ ├── dialog-copy.tsx │ │ │ │ │ └── dialog-process-rerun.tsx │ │ │ │ ├── dialog-create/ │ │ │ │ │ ├── create-external-credential-dialog.ts │ │ │ │ │ ├── dialog-collection-create.tsx │ │ │ │ │ ├── dialog-external-credential-create.tsx │ │ │ │ │ ├── dialog-project-create.tsx │ │ │ │ │ ├── dialog-repository-create.tsx │ │ │ │ │ ├── dialog-ssh-key-create.tsx │ │ │ │ │ └── dialog-user-create.tsx │ │ │ │ ├── dialog-forms/ │ │ │ │ │ ├── create-repository-dialog.ts │ │ │ │ │ ├── create-ssh-key-dialog.ts │ │ │ │ │ ├── create-user-dialog.ts │ │ │ │ │ ├── update-external-credential-dialog.ts │ │ │ │ │ └── update-process-dialog.ts │ │ │ │ ├── dialog-move/ │ │ │ │ │ ├── dialog-collection-partial-move-to-existing-collection.tsx │ │ │ │ │ ├── dialog-collection-partial-move-to-new-collection.tsx │ │ │ │ │ ├── 
dialog-collection-partial-move-to-separate-collections.tsx │ │ │ │ │ ├── dialog-move-collection.tsx │ │ │ │ │ └── dialog-move-project.tsx │ │ │ │ ├── dialog-remove/ │ │ │ │ │ └── external-credential-remove-dialog.tsx │ │ │ │ ├── dialog-update/ │ │ │ │ │ ├── dialog-collection-update.tsx │ │ │ │ │ ├── dialog-external-credential-update.tsx │ │ │ │ │ ├── dialog-process-update.tsx │ │ │ │ │ └── dialog-project-update.tsx │ │ │ │ ├── dialog-upload/ │ │ │ │ │ └── dialog-collection-files-upload.tsx │ │ │ │ ├── download-files-as-zip/ │ │ │ │ │ └── download-files-as-zip.tsx │ │ │ │ ├── favorite-star/ │ │ │ │ │ └── favorite-star.tsx │ │ │ │ ├── file-remove-dialog/ │ │ │ │ │ ├── file-remove-dialog.ts │ │ │ │ │ └── multiple-files-remove-dialog.ts │ │ │ │ ├── file-uploader/ │ │ │ │ │ └── file-uploader.tsx │ │ │ │ ├── form-fields/ │ │ │ │ │ ├── collection-form-fields.tsx │ │ │ │ │ ├── external-credential-form-fields.tsx │ │ │ │ │ ├── process-form-fields.tsx │ │ │ │ │ ├── project-form-fields.tsx │ │ │ │ │ ├── repository-form-fields.tsx │ │ │ │ │ ├── resource-form-fields.tsx │ │ │ │ │ ├── search-bar-form-fields.tsx │ │ │ │ │ ├── ssh-key-form-fields.tsx │ │ │ │ │ └── user-form-fields.tsx │ │ │ │ ├── groups-dialog/ │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ ├── member-attributes-dialog.tsx │ │ │ │ │ ├── member-remove-dialog.ts │ │ │ │ │ └── remove-dialog.ts │ │ │ │ ├── keep-services-dialog/ │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ └── remove-dialog.tsx │ │ │ │ ├── links-dialog/ │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ └── remove-dialog.tsx │ │ │ │ ├── login-form/ │ │ │ │ │ └── login-form.tsx │ │ │ │ ├── main-app-bar/ │ │ │ │ │ ├── account-menu.cy.js │ │ │ │ │ ├── account-menu.tsx │ │ │ │ │ ├── admin-menu.tsx │ │ │ │ │ ├── anonymous-menu.tsx │ │ │ │ │ ├── help-menu.tsx │ │ │ │ │ ├── main-app-bar.tsx │ │ │ │ │ └── notifications-menu.tsx │ │ │ │ ├── main-content-bar/ │ │ │ │ │ └── main-content-bar.tsx │ │ │ │ ├── not-found-dialog/ │ │ │ │ │ └── not-found-dialog.tsx │ │ │ │ ├── process-cancel-dialog/ │ │ │ │ │ └── process-cancel-dialog.tsx │ │ │ │ ├── process-input-dialog/ │ │ │ │ │ └── process-input-dialog.tsx │ │ │ │ ├── process-remove-dialog/ │ │ │ │ │ └── process-remove-dialog.tsx │ │ │ │ ├── process-runtime-status/ │ │ │ │ │ └── process-runtime-status.tsx │ │ │ │ ├── projects-tree-picker/ │ │ │ │ │ ├── favorites-tree-picker.tsx │ │ │ │ │ ├── generic-projects-tree-picker.tsx │ │ │ │ │ ├── home-tree-picker.tsx │ │ │ │ │ ├── projects-tree-picker.tsx │ │ │ │ │ ├── public-favorites-tree-picker.tsx │ │ │ │ │ ├── search-projects-picker.tsx │ │ │ │ │ ├── shared-tree-picker.tsx │ │ │ │ │ └── tree-picker-field.tsx │ │ │ │ ├── property-chips/ │ │ │ │ │ ├── get-property-chips.cy.js │ │ │ │ │ └── get-property-chips.tsx │ │ │ │ ├── remove-dialog/ │ │ │ │ │ └── remove-dialog.tsx │ │ │ │ ├── rename-file-dialog/ │ │ │ │ │ └── rename-file-dialog.tsx │ │ │ │ ├── repositories-sample-git-dialog/ │ │ │ │ │ └── repositories-sample-git-dialog.tsx │ │ │ │ ├── repository-attributes-dialog/ │ │ │ │ │ └── repository-attributes-dialog.tsx │ │ │ │ ├── repository-remove-dialog/ │ │ │ │ │ └── repository-remove-dialog.ts │ │ │ │ ├── resource-properties-form/ │ │ │ │ │ ├── property-chip.tsx │ │ │ │ │ ├── property-field-common.tsx │ │ │ │ │ ├── property-key-field.tsx │ │ │ │ │ ├── property-value-field.tsx │ │ │ │ │ └── resource-properties-form.tsx │ │ │ │ ├── rich-text-editor-dialog/ │ │ │ │ │ └── rich-text-editor-dialog.tsx │ │ │ │ ├── run-process-dialog/ │ │ │ │ │ └── change-workflow-dialog.ts │ │ │ │ ├── search-bar/ │ │ │ │ │ 
├── search-bar-advanced-properties-view.tsx │ │ │ │ │ ├── search-bar-advanced-view.tsx │ │ │ │ │ ├── search-bar-autocomplete-view.tsx │ │ │ │ │ ├── search-bar-basic-view.tsx │ │ │ │ │ ├── search-bar-recent-queries.tsx │ │ │ │ │ ├── search-bar-save-queries.tsx │ │ │ │ │ ├── search-bar-view.cy.js │ │ │ │ │ ├── search-bar-view.tsx │ │ │ │ │ └── search-bar.tsx │ │ │ │ ├── sharing-dialog/ │ │ │ │ │ ├── participant-select.tsx │ │ │ │ │ ├── permission-select.tsx │ │ │ │ │ ├── select-item.tsx │ │ │ │ │ ├── sharing-dialog-component.cy.js │ │ │ │ │ ├── sharing-dialog-component.tsx │ │ │ │ │ ├── sharing-dialog.tsx │ │ │ │ │ ├── sharing-invitation-form-component.tsx │ │ │ │ │ ├── sharing-invitation-form.tsx │ │ │ │ │ ├── sharing-management-form-component.tsx │ │ │ │ │ ├── sharing-management-form.tsx │ │ │ │ │ ├── sharing-public-access-form-component.tsx │ │ │ │ │ ├── sharing-public-access-form.tsx │ │ │ │ │ ├── sharing-urls-component.cy.js │ │ │ │ │ ├── sharing-urls-component.tsx │ │ │ │ │ ├── sharing-urls.tsx │ │ │ │ │ └── visibility-level-select.tsx │ │ │ │ ├── side-panel/ │ │ │ │ │ ├── side-panel-collapsed.tsx │ │ │ │ │ └── side-panel.tsx │ │ │ │ ├── side-panel-button/ │ │ │ │ │ ├── side-panel-button.cy.js │ │ │ │ │ └── side-panel-button.tsx │ │ │ │ ├── side-panel-toggle/ │ │ │ │ │ └── side-panel-toggle.tsx │ │ │ │ ├── side-panel-tree/ │ │ │ │ │ └── side-panel-tree.tsx │ │ │ │ ├── snackbar/ │ │ │ │ │ └── snackbar.tsx │ │ │ │ ├── ssh-keys-dialog/ │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ ├── public-key-dialog.tsx │ │ │ │ │ └── remove-dialog.tsx │ │ │ │ ├── token-dialog/ │ │ │ │ │ ├── token-dialog.cy.js │ │ │ │ │ └── token-dialog.tsx │ │ │ │ ├── tree-picker/ │ │ │ │ │ ├── tree-picker.cy.js │ │ │ │ │ └── tree-picker.ts │ │ │ │ ├── user-dialog/ │ │ │ │ │ ├── activate-dialog.tsx │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ ├── deactivate-dialog.tsx │ │ │ │ │ └── setup-dialog.tsx │ │ │ │ ├── virtual-machines-dialog/ │ │ │ │ │ ├── add-login-dialog.tsx │ │ │ │ │ ├── attributes-dialog.tsx │ │ │ │ │ ├── group-array-input.tsx │ │ │ │ │ ├── remove-dialog.tsx │ │ │ │ │ └── remove-login-dialog.tsx │ │ │ │ ├── webdav-s3-dialog/ │ │ │ │ │ ├── webdav-s3-dialog.cy.js │ │ │ │ │ └── webdav-s3-dialog.tsx │ │ │ │ └── workflow-remove-dialog/ │ │ │ │ └── workflow-remove-dialog.tsx │ │ │ └── websocket/ │ │ │ ├── resource-event-message.ts │ │ │ ├── websocket-service.cy.js │ │ │ ├── websocket-service.ts │ │ │ └── websocket.ts │ │ ├── tools/ │ │ │ ├── arvados_config.yml │ │ │ ├── example-vocabulary.json │ │ │ ├── run-integration-tests.sh │ │ │ └── setup-docker-volume.sh │ │ ├── tsconfig.json │ │ ├── tsconfig.prod.json │ │ ├── tsconfig.test.json │ │ ├── tslint.json │ │ ├── typings/ │ │ │ ├── global.d.ts │ │ │ └── images.d.ts │ │ └── version-at-commit.sh │ └── ws/ │ ├── doc.go │ ├── event.go │ ├── event_source.go │ ├── event_source_test.go │ ├── event_test.go │ ├── gocheck_test.go │ ├── handler.go │ ├── permission.go │ ├── permission_test.go │ ├── router.go │ ├── service.go │ ├── service_test.go │ ├── session.go │ ├── session_v0.go │ ├── session_v0_test.go │ └── session_v1.go └── tools/ ├── ansible/ │ ├── README.md │ ├── build-compute-image.yml │ ├── build-debian-nspawn-vm.yml │ ├── build-docker-image.yml │ ├── examples/ │ │ ├── full-cluster-inventory.yml │ │ ├── simple-cluster-config.yml │ │ └── simple-cluster-inventory.yml │ ├── files/ │ │ ├── default-test-config.yml │ │ └── development-docker-images.yml │ ├── filter_plugins/ │ │ └── arvados.py │ ├── group_vars/ │ │ ├── all/ │ │ │ └── vars.yml │ │ └── arvados_postgresql/ 
│ │ └── database.yml │ ├── install-ansible.sh │ ├── install-arvados-cluster.yml │ ├── install-dev-tools.yml │ ├── privilege-nspawn-vm.yml │ ├── requirements.txt │ ├── requirements.yml │ ├── roles/ │ │ ├── arvados_alloy/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_ansible/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_api/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ └── cluster.conf.j2 │ │ ├── arvados_apt/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ └── arvados.pref.j2 │ │ ├── arvados_aws_secret/ │ │ │ ├── files/ │ │ │ │ └── arvados-aws-secret.sh │ │ │ ├── tasks/ │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ └── arvados_aws_secret.service.j2 │ │ ├── arvados_compute/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_controller/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_database/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_dispatch_cloud/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_dispatch_local/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_docker/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── files/ │ │ │ │ └── arvados-docker.pref │ │ │ └── tasks/ │ │ │ ├── apt.yml │ │ │ ├── dnf.yml │ │ │ └── main.yml │ │ ├── arvados_go/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_grafana/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_keep_web/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_keepbalance/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_keepproxy/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_keepstore/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_loki/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_nginx_base/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── handlers/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ ├── arvados-nginx-http.conf.j2 │ │ │ └── nginx-core.conf.j2 │ │ ├── arvados_nginx_frontend/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── files/ │ │ │ │ └── aws_secret.conf │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ ├── install_cert_file.yml │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ ├── arvados-nginx-controller.conf.j2 │ │ │ ├── arvados-nginx-keepproxy.conf.j2 │ │ │ ├── arvados-nginx-site.conf.j2 │ │ │ ├── arvados-nginx-webdav.conf.j2 │ │ │ ├── arvados-nginx-websocket.conf.j2 │ │ │ └── arvados-nginx-workbench2.conf.j2 │ │ ├── arvados_nodejs/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_postgresql/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ ├── main.yml │ │ │ │ └── setup_service.yml │ │ │ └── templates/ │ │ │ └── 
arvados-ansible.conf.j2 │ │ ├── arvados_prometheus/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_python/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ ├── install_from_source.yml │ │ │ └── main.yml │ │ ├── arvados_ruby/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ ├── install_from_source.yml │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ └── bundler.sh.j2 │ │ ├── arvados_service/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_shell/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── files/ │ │ │ │ ├── arvados-login-sync.service │ │ │ │ └── arvados-login-sync.timer │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ ├── login_sync.yml │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ └── login-sync.env.j2 │ │ ├── arvados_websocket/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── arvados_workbench/ │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── compute_amd_rocm/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── files/ │ │ │ │ └── arvados-amd-rocm.pref │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── compute_docker/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── handlers/ │ │ │ │ └── main.yml │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── compute_encrypt_tmp/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── files/ │ │ │ │ ├── arvados-ensure-encrypted-partitions.service │ │ │ │ ├── ebs-autoscale.conf │ │ │ │ └── ensure-encrypted-partitions.sh │ │ │ └── tasks/ │ │ │ ├── aws_ebs.yml │ │ │ └── main.yml │ │ ├── compute_nvidia/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── files/ │ │ │ │ └── arvados-nvidia.pref │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── compute_singularity/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── compute_user/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── distro_apt/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ ├── handlers/ │ │ │ │ └── main.yml │ │ │ ├── tasks/ │ │ │ │ └── main.yml │ │ │ └── templates/ │ │ │ └── 65arvados-ansible-unattended-upgrades.j2 │ │ ├── distro_bootstrap/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── distro_dnf/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── distro_grafana/ │ │ │ ├── files/ │ │ │ │ └── arvados-grafana.pref │ │ │ ├── meta/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ ├── distro_packages/ │ │ │ ├── defaults/ │ │ │ │ └── main.yml │ │ │ └── tasks/ │ │ │ └── main.yml │ │ └── distro_postgresql/ │ │ ├── defaults/ │ │ │ └── main.yml │ │ ├── meta/ │ │ │ └── main.yml │ │ └── tasks/ │ │ └── main.yml │ └── setup-package-tests.yml ├── cluster-activity/ │ ├── MANIFEST.in │ ├── README.rst │ ├── agpl-3.0.txt │ ├── arvados_cluster_activity/ │ │ ├── __init__.py │ │ ├── dygraphs.js │ │ ├── main.py │ │ ├── prometheus.py │ │ ├── report.py │ │ ├── reportchart.py │ │ ├── sortable.js │ │ └── synchronizer.js │ ├── arvados_version.py │ ├── cluster-activity.cwl │ ├── fpm-info.sh │ ├── pyproject.toml │ ├── pytest.ini │ ├── setup.py │ └── tests/ │ ├── test_prometheus.py │ ├── test_report.csv │ ├── test_report.html │ └── test_report.py ├── compute-images/ │ ├── .gitignore │ ├── README.md │ ├── 
aws_config.example.json │ ├── aws_template.json │ ├── azure_config.example.json │ ├── azure_template.json │ └── host_config.example.yml ├── copy-tutorial/ │ └── copy-tutorial.sh ├── crunchstat-summary/ │ ├── MANIFEST.in │ ├── README.rst │ ├── agpl-3.0.txt │ ├── arvados_version.py │ ├── bin/ │ │ └── crunchstat-summary │ ├── crunchstat_summary/ │ │ ├── __init__.py │ │ ├── command.py │ │ ├── dygraphs.js │ │ ├── dygraphs.py │ │ ├── reader.py │ │ ├── summarizer.py │ │ └── synchronizer.js │ ├── fpm-info.sh │ ├── pyproject.toml │ ├── setup.py │ └── tests/ │ ├── __init__.py │ ├── container_9tee4-dz642-lymtndkpy39eibk.txt.gz.report │ ├── container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt.gz.report │ ├── container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt.gz.report │ ├── container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y.txt.gz.report │ ├── crunchstat_error_messages.txt │ └── test_examples.py ├── jenkins/ │ └── submit-ci-dev.sh ├── keep-xref/ │ └── keep-xref.py ├── python-metapackage/ │ ├── LICENSE-2.0.txt │ ├── README.md │ ├── agpl-3.0.txt │ ├── arvados_version.py │ ├── pyproject.toml │ └── setup.py ├── salt-install/ │ ├── .gitignore │ ├── README.md │ ├── Vagrantfile │ ├── common.sh │ ├── config_examples/ │ │ ├── multi_host/ │ │ │ └── aws/ │ │ │ ├── README.md │ │ │ ├── certs/ │ │ │ │ └── README.md │ │ │ ├── dashboards/ │ │ │ │ ├── arvados_logs.json │ │ │ │ ├── arvados_overview.json │ │ │ │ ├── node-exporter-full_rev30.json │ │ │ │ ├── postgresql_exporter.json │ │ │ │ └── ssl-certificate-monitor.json │ │ │ ├── pillars/ │ │ │ │ ├── alloy.sls │ │ │ │ ├── arvados.sls │ │ │ │ ├── aws_credentials.sls │ │ │ │ ├── docker.sls │ │ │ │ ├── grafana.sls │ │ │ │ ├── letsencrypt.sls │ │ │ │ ├── letsencrypt_balancer_configuration.sls │ │ │ │ ├── letsencrypt_controller_configuration.sls │ │ │ │ ├── letsencrypt_grafana_configuration.sls │ │ │ │ ├── letsencrypt_keepproxy_configuration.sls │ │ │ │ ├── letsencrypt_keepweb_configuration.sls │ │ │ │ ├── letsencrypt_loki_configuration.sls │ │ │ │ ├── letsencrypt_prometheus_configuration.sls │ │ │ │ ├── letsencrypt_webshell_configuration.sls │ │ │ │ ├── letsencrypt_websocket_configuration.sls │ │ │ │ ├── letsencrypt_workbench2_configuration.sls │ │ │ │ ├── letsencrypt_workbench_configuration.sls │ │ │ │ ├── locale.sls │ │ │ │ ├── logrotate.sls │ │ │ │ ├── logrotate_api.sls │ │ │ │ ├── logrotate_wb1.sls │ │ │ │ ├── loki.sls │ │ │ │ ├── nginx.sls │ │ │ │ ├── nginx_api_configuration.sls │ │ │ │ ├── nginx_balancer_configuration.sls │ │ │ │ ├── nginx_collections_configuration.sls │ │ │ │ ├── nginx_controller_configuration.sls │ │ │ │ ├── nginx_download_configuration.sls │ │ │ │ ├── nginx_grafana_configuration.sls │ │ │ │ ├── nginx_keepproxy_configuration.sls │ │ │ │ ├── nginx_keepweb_configuration.sls │ │ │ │ ├── nginx_loki_configuration.sls │ │ │ │ ├── nginx_prometheus_configuration.sls │ │ │ │ ├── nginx_snippets.sls │ │ │ │ ├── nginx_webshell_configuration.sls │ │ │ │ ├── nginx_websocket_configuration.sls │ │ │ │ ├── nginx_workbench2_configuration.sls │ │ │ │ ├── nginx_workbench_configuration.sls │ │ │ │ ├── postgresql.sls │ │ │ │ ├── postgresql_external.sls │ │ │ │ ├── prometheus_node_exporter.sls │ │ │ │ ├── prometheus_pg_exporter.sls │ │ │ │ ├── prometheus_server.sls │ │ │ │ └── ssl_key_encrypted.sls │ │ │ ├── states/ │ │ │ │ ├── alloy_install.sls │ │ │ │ ├── aws_credentials.sls │ │ │ │ ├── custom_certs.sls │ │ │ │ ├── grafana_admin_user.sls │ │ │ │ ├── grafana_dashboards.sls │ │ │ │ ├── grafana_datasource.sls │ │ │ │ ├── host_entries.sls │ │ │ │ ├── 
loki_install.sls │ │ │ │ ├── nginx_prometheus_configuration.sls │ │ │ │ ├── passenger_rvm.sls │ │ │ │ ├── postgresql_external.sls │ │ │ │ ├── prometheus_pg_exporter.sls │ │ │ │ ├── railsapi_passenger_configs.sls │ │ │ │ ├── shell_cron_add_login_sync.sls │ │ │ │ ├── shell_sudo_passwordless.sls │ │ │ │ ├── ssl_key_encrypted.sls │ │ │ │ └── workbench1_uninstall.sls │ │ │ └── tofs/ │ │ │ └── arvados/ │ │ │ └── shell/ │ │ │ └── config/ │ │ │ └── files/ │ │ │ └── default/ │ │ │ ├── shell-pam-shellinabox.tmpl.jinja │ │ │ └── shell-shellinabox.tmpl.jinja │ │ └── single_host/ │ │ ├── multiple_hostnames/ │ │ │ ├── README.md │ │ │ ├── pillars/ │ │ │ │ ├── arvados.sls │ │ │ │ ├── docker.sls │ │ │ │ ├── locale.sls │ │ │ │ ├── logrotate.sls │ │ │ │ ├── logrotate_api.sls │ │ │ │ ├── logrotate_wb1.sls │ │ │ │ ├── nginx.sls │ │ │ │ ├── nginx_api_configuration.sls │ │ │ │ ├── nginx_controller_configuration.sls │ │ │ │ ├── nginx_keepproxy_configuration.sls │ │ │ │ ├── nginx_keepweb_configuration.sls │ │ │ │ ├── nginx_webshell_configuration.sls │ │ │ │ ├── nginx_websocket_configuration.sls │ │ │ │ ├── nginx_workbench2_configuration.sls │ │ │ │ ├── nginx_workbench_configuration.sls │ │ │ │ └── postgresql.sls │ │ │ └── states/ │ │ │ ├── custom_certs.sls │ │ │ ├── dns.sls │ │ │ ├── host_entries.sls │ │ │ ├── keep_volume.sls │ │ │ ├── passenger_rvm.sls │ │ │ ├── railsapi_passenger_configs.sls │ │ │ ├── snakeoil_certs.sls │ │ │ └── workbench1_uninstall.sls │ │ └── single_hostname/ │ │ ├── README.md │ │ ├── pillars/ │ │ │ ├── arvados.sls │ │ │ ├── aws_credentials.sls │ │ │ ├── docker.sls │ │ │ ├── letsencrypt.sls │ │ │ ├── locale.sls │ │ │ ├── logrotate.sls │ │ │ ├── logrotate_api.sls │ │ │ ├── logrotate_wb1.sls │ │ │ ├── nginx.sls │ │ │ ├── nginx_api_configuration.sls │ │ │ ├── nginx_controller_configuration.sls │ │ │ ├── nginx_keepproxy_configuration.sls │ │ │ ├── nginx_keepweb_configuration.sls │ │ │ ├── nginx_webshell_configuration.sls │ │ │ ├── nginx_websocket_configuration.sls │ │ │ ├── nginx_workbench2_configuration.sls │ │ │ ├── nginx_workbench_configuration.sls │ │ │ └── postgresql.sls │ │ └── states/ │ │ ├── custom_certs.sls │ │ ├── dns.sls │ │ ├── host_entries.sls │ │ ├── keep_volume.sls │ │ ├── passenger_rvm.sls │ │ ├── railsapi_passenger_configs.sls │ │ ├── shell_cron_add_login_sync.sls │ │ ├── shell_sudo_passwordless.sls │ │ ├── snakeoil_certs.sls │ │ └── workbench1_uninstall.sls │ ├── installer.sh │ ├── local.params.example.multiple_hosts │ ├── local.params.example.single_host_multiple_hostnames │ ├── local.params.example.single_host_single_hostname │ ├── local.params.secrets.example │ ├── provision.sh │ ├── terraform/ │ │ └── aws/ │ │ ├── .gitignore │ │ ├── assumerolepolicy.json │ │ ├── data-storage/ │ │ │ ├── .terraform.lock.hcl │ │ │ ├── data.tf │ │ │ ├── locals.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── terraform.tfvars │ │ │ └── variables.tf │ │ ├── services/ │ │ │ ├── .terraform.lock.hcl │ │ │ ├── data.tf │ │ │ ├── locals.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ ├── terraform.tfvars │ │ │ ├── user_data.sh │ │ │ └── variables.tf │ │ └── vpc/ │ │ ├── .terraform.lock.hcl │ │ ├── data.tf │ │ ├── locals.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── terraform.tfvars │ │ └── variables.tf │ └── tests/ │ ├── hasher-workflow-job.yml │ ├── hasher-workflow.cwl │ ├── hasher.cwl │ ├── run-test.sh │ └── test.txt ├── terraform/ │ └── .gitignore ├── test-collection-create/ │ └── test-collection-create.py ├── user-activity/ │ ├── MANIFEST.in │ ├── README.rst │ ├── agpl-3.0.txt │ ├── 
arvados_user_activity/ │ │ ├── __init__.py │ │ └── main.py │ ├── arvados_version.py │ ├── bin/ │ │ └── arv-user-activity │ ├── fpm-info.sh │ ├── pyproject.toml │ └── setup.py └── vocabulary-migrate/ └── vocabulary-migrate.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ .bundle .rvmrc *~ *.pyc *.egg *.egg-info .eggs *.pid *.pid.lock *.gem *.rpm *.deb docker/*/generated docker/config.yml /doc/.site /doc/sdk/python/arvados /doc/sdk/python/arvados.html /doc/sdk/python/index.html /doc/sdk/python/search.js /doc/sdk/R/arvados /doc/sdk/java-v2/javadoc *.class /sdk/cli/binstubs/ /sdk/ruby/binstubs/ /services/api/binstubs/ /services/login-sync/binstubs/ /services/api/config/arvados-clients.yml /contrib/arvados-bootstrap/build/ /contrib/arvados-bootstrap/dist/ /sdk/cwl/build/ /sdk/cwl/dist/ /sdk/python/build/ /sdk/python/dist/ /services/dockercleaner/build/ /services/dockercleaner/dist/ /services/fuse/build/ /services/fuse/dist/ /tools/cluster-activity/build/ /tools/cluster-activity/dist/ /tools/crunchstat-summary/build/ /tools/crunchstat-summary/dist/ /tools/python-metapackage/build/ /tools/python-metapackage/dist/ /tools/user-activity/build/ /tools/user-activity/dist/ *#* vendor/ tmp/ .DS_Store/ .vscode .Rproj.user *.bak *.log arvados-snakeoil-ca.pem .vagrant /packages/ .eslintcache ================================================ FILE: .licenseignore ================================================ *agpl-3.0.html *agpl-3.0.txt apache-2.0.txt AUTHORS */bootstrap.css */bootstrap.js *bootstrap-theme.css *by-sa-3.0.html *by-sa-3.0.txt *COPYING doc/fonts/* doc/_includes/_config_default_yml.liquid doc/_includes/_terraform_*_tfvars.liquid doc/user/cwl/federated/* doc/_includes/_federated_cwl.liquid */docker_image docker/jobs/apt.arvados.org*.list docker/jobs/1078ECD7.key */en.bootstrap.yml *font-awesome.css *.gif .gitignore */.gitignore */.gitkeep */.gitstub *.gz *.gz.report *.ico *.jpg *.svg *.odg *.json *LICENSE*.html .licenseignore *LICENSE*.txt *.lock *.log *.map *.min.css *.min.js *.png */proc_stat */pytest.ini */README */robots.txt */runit-docker/* */sb-admin.css.scss */script/rails sdk/cwl/tests/input/blorp.txt sdk/cwl/tests/tool/blub.txt sdk/cwl/tests/19109-upload-secondary/* sdk/cwl/tests/federation/data/* sdk/cwl/tests/fake-keep-mount/fake_collection_dir/.arvados#collection sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt sdk/go/manifest/testdata/*_manifest sdk/java/.classpath sdk/java/pom.xml sdk/java/.project sdk/java/.settings/org.eclipse.jdt.core.prefs sdk/java/src/main/resources/log4j.properties sdk/pam/examples/shellinabox sdk/pam/pam-configs/arvados sdk/python/tests/data/* services/api/config/unbound.template services/api/config/config.default.yml services/arv-web/sample-cgi-app/public/.htaccess services/arv-web/sample-cgi-app/public/index.cgi services/keepproxy/pkg-extras/etc/default/keepproxy *.tar tools/crunchstat-summary/tests/crunchstat_error_messages.txt tools/crunchstat-summary/crunchstat_summary/synchronizer.js tools/cluster-activity/tests/*.html tools/cluster-activity/tests/*.csv contrib/R-sdk/DESCRIPTION contrib/R-sdk/NAMESPACE contrib/R-sdk/.Rbuildignore contrib/R-sdk/ArvadosR.Rproj *.Rd lib/dispatchcloud/test/sshkey_* *.asc contrib/java-sdk-v2/build.gradle contrib/java-sdk-v2/settings.gradle 
contrib/java-sdk-v2/src/test/resources/* sdk/cwl/tests/wf/feddemo go.mod go.sum doc/install/*.xlsx sdk/cwl/tests/wf/hello.txt sdk/cwl/tests/wf/indir1/hello2.txt sdk/cwl/tests/chipseq/data/Genomes/* CITATION.cff SECURITY.md lib/crunchstat/testdata/* lib/controller/localdb/testdata/*.pub sdk/ruby-google-api-client/* services/api/bin/rails services/api/bin/rake services/api/bin/setup services/api/bin/yarn services/api/storage.yml services/api/test.rb.example services/api/config/boot.rb services/api/config/environment.rb services/api/config/initializers/application_controller_renderer.rb services/api/config/initializers/assets.rb services/api/config/initializers/backtrace_silencers.rb services/api/config/initializers/content_security_policy.rb services/api/config/initializers/cookies_serializer.rb services/api/config/initializers/filter_parameter_logging.rb services/api/config/initializers/mime_types.rb services/api/config/initializers/new_framework_defaults_*.rb services/api/config/initializers/permissions_policy.rb services/api/config/initializers/wrap_parameters.rb services/api/config/locales/en.yml services/api/config.ru services/workbench2/*.d.ts services/workbench2/*.css services/workbench2/*.scss services/workbench2/README.md services/workbench2/public/* services/workbench2/.yarnrc services/workbench2/.npmrc services/workbench2/src/lib/cwl-svg/* services/workbench2/tools/arvados_config.yml services/workbench2/cypress/fixtures/files/5mb.bin services/workbench2/cypress/fixtures/files/15mb.bin services/workbench2/cypress/fixtures/files/cat.png services/workbench2/cypress/fixtures/files/banner.html services/workbench2/cypress/fixtures/files/tooltips.txt services/workbench2/cypress/fixtures/webdav-propfind-outputs.xml services/workbench2/.yarn/releases/* services/workbench2/package.json services/workbench2/yarn.lock ================================================ FILE: AUTHORS ================================================ # Names should be added to this file with this pattern: # # For individuals: # Name # # For organizations: # Organization # # See python fnmatch module documentation for more information. Curoverse, Inc. <*@curoverse.com> Adam Savitzky Colin Nolan David Guillermo Carrasco Joshua Randall President and Fellows of Harvard College <*@harvard.edu> Thomas Mooney Chen Chen Veritas Genetics, Inc. <*@veritasgenetics.com> Curii Corporation <*@curii.com> Dante Tsang Codex Genetics Ltd Bruno P. Kinoshita George Chlipala ================================================ FILE: CITATION.cff ================================================ cff-version: 1.2.0 message: "If you use this software, please cite it as below." 
authors:
  - name: "The Arvados Authors"
  - family-names: "Amstutz"
    given-names: "Peter"
    orcid: "https://orcid.org/0000-0003-3566-7705"
  - family-names: "Bértoli"
    given-names: "Javier"
  - family-names: "César"
    given-names: "Nico"
  - family-names: "Clegg"
    given-names: "Tom"
    orcid: "https://orcid.org/0000-0001-6751-2930"
  - family-names: "Di Pentima"
    given-names: "Lucas"
    orcid: "https://orcid.org/0000-0002-2807-6854"
  - family-names: "Kutyła"
    given-names: "Daniel"
  - family-names: "Li"
    given-names: "Jiayong"
  - family-names: "Smith"
    given-names: "Stephen"
  - family-names: "Vandewege"
    given-names: "Ward"
    orcid: "https://orcid.org/0000-0002-2527-6949"
  - family-names: "Wait Zaranek"
    given-names: "Alexander"
    orcid: "https://orcid.org/0000-0002-0415-9655"
  - family-names: "Wait Zaranek"
    given-names: "Sarah"
    orcid: "https://orcid.org/0000-0003-4716-9121"
title: "Arvados"
abstract: "Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data."
type: software
url: "https://github.com/arvados/arvados/"
doi: 10.5281/zenodo.6382942

================================================
FILE: CODE_OF_CONDUCT.md
================================================

Arvados Code of Conduct
=======================

The Arvados Project is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form.

This code of conduct applies to all Arvados Project spaces both online and off: Gitter chat, Redmine issues, wiki, mailing lists, forums, video chats, and any other Arvados spaces. Anyone who violates this code of conduct may be sanctioned or expelled from these spaces at the discretion of the Arvados Team.

Some Arvados Project spaces may have additional rules in place, which will be made clearly available to participants. Participants are responsible for knowing and abiding by these rules.

Harassment includes, but is not limited to:

- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, age, race, or religion.
- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment.
- Deliberate misgendering or use of [dead](https://www.quora.com/What-is-deadnaming/answer/Nancy-C-Walker) or rejected names.
- Gratuitous or off-topic sexual images or behaviour in spaces where they’re not appropriate.
- Physical contact and simulated physical contact (eg, textual descriptions like “\*hug\*” or “\*backrub\*”) without consent or after a request to stop.
- Threats of violence.
- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm.
- Deliberate intimidation.
- Stalking or following.
- Harassing photography or recording, including logging online activity for harassment purposes.
- Sustained disruption of discussion.
- Unwelcome sexual attention.
- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others.
- Continued one-on-one communication after requests to cease.
- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect vulnerable people from intentional abuse.
- Publication of non-harassing private communication.

The Arvados Project prioritizes marginalized people’s safety over privileged people’s comfort.
The Arvados Leadership Team will not act on complaints regarding:

- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you.”
- Communicating in a [tone](http://geekfeminism.wikia.com/wiki/Tone_argument) you don’t find congenial

Reporting
---------

If you are being harassed by a member of the Arvados Project, notice that someone else is being harassed, or have any other concerns, please contact the Arvados Project Team at contact@arvados.org. If the person who is harassing you is on the team, they will recuse themselves from handling your incident. We will respond as promptly as we can.

This code of conduct applies to Arvados Project spaces, but if you are being harassed by a member of the Arvados Project outside our spaces, we still want to know about it. We will take all good-faith reports of harassment by Arvados Project members, especially the Arvados Team, seriously. This includes harassment outside our spaces and harassment that took place at any point in time. The abuse team reserves the right to exclude people from the Arvados Project based on their past behavior, including behavior outside Arvados Project spaces and behavior towards people who are not in the Arvados Project.

In order to protect volunteers from abuse and burnout, we reserve the right to reject any report we believe to have been made in bad faith. Reports intended to silence legitimate criticism may be deleted without response.

We will respect confidentiality requests for the purpose of protecting victims of abuse. At our discretion, we may publicly name a person about whom we’ve received harassment complaints, or privately warn third parties about them, if we believe that doing so will increase the safety of Arvados Project members or the general public. We will not name harassment victims without their affirmative consent.

Consequences
------------

Participants asked to stop any harassing behavior are expected to comply immediately. If a participant engages in harassing behavior, the Arvados Team may take any action they deem appropriate, up to and including expulsion from all Arvados Project spaces and identification of the participant as a harasser to other Arvados Project members or the general public.

This anti-harassment policy is based on the [example policy from the Geek Feminism wiki](http://geekfeminism.wikia.com/wiki/Community_anti-harassment/Policy), created by the Geek Feminism community.

================================================
FILE: CONTRIBUTING.md
================================================

[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Contributing to Arvados

Arvados is free software, which means it is free for all to use, learn from, and improve. We encourage contributions from the community that improve Arvados for everyone. Some examples of contributions are bug reports, bug fixes, new features, and scripts or documentation that help with using, administering, or installing Arvados. We also love to hear about Arvados success stories.

## Reporting Issues

Arvados uses [GitHub Issues](https://github.com/arvados/arvados/issues). You can file issues against any Arvados component there. Even if you're not sure which component causes the issue, you can still file problem reports and we'll work with you to address them.

## Contributing Code

The preferred method for making contributions is through GitHub pull requests. The rest of this guide helps orient you with the code and discusses requirements for all contributions, from the smallest typo fix to entire new components.

If you're interested in developing a large new feature for Arvados, please file an issue to discuss it with us first. We can give you guidance on how to best organize the work before you start it.

## Setting Up Your Development Environment

The [Arvados source code is hosted on GitHub](https://github.com/arvados/arvados). Once you clone it, you'll find guides for specific topics under the `doc/development` directory. You'll probably want to [install a development environment](doc/development/Prerequisites.md) and [learn how to run tests](doc/development/RunningTests.md). There are also some component-specific guides.

### Setting Up Git

We provide Git configuration and hooks to help you follow project conventions. `doc/development/git.conf` includes a block of Git configuration settings. You can set it up for your checkout by running `git config edit --local`: insert the contents of `doc/development/git.conf`, edit them following the comments, then save and exit.

Install our `prepare-commit-msg` hook:

```sh
$ install -b -m 755 doc/development/prepare-commit-msg.sh .git/hooks/prepare-commit-msg
```

## Prepare a Development Branch

If you haven't before, fork the Arvados repository using the GitHub "Fork" button. If you have, make sure your fork's `main` branch is up-to-date with Arvados'. Then start a new branch for your development named like `1234-your-work`. The number at the start should match the GitHub issue this request is associated with, and the rest of the name should briefly describe the main change your branch makes.

### Coding Standards

Please familiarize yourself with our [coding standards](doc/development/CodingStandards.md) for the component(s) you're working on and follow them in your work.

### Sign Off Your Commits

Contributions must be signed off. The sign-off is a simple line at the end of each commit message which certifies that you wrote it or otherwise have the right to contribute it under the license listed in the file(s) modified. Make sure each commit message contains the following line with your real name and email (sorry, no pseudonymous or anonymous contributions):

    Arvados-DCO-1.1-Signed-off-by: Alex Doe

When you add this, you certify the below:

> Developer Certificate of Origin
> Version 1.1
>
> Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
>
> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
>
> Developer's Certificate of Origin 1.1
>
> By making a contribution to this project, I certify that:
>
> (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
>
> (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
>
> (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
>
> (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
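If you prefer not to type the sign-off line by hand, one option is Git's `--trailer` flag (a minimal sketch, assuming Git 2.32 or later; the name and address are placeholders):

```sh
# Placeholder name and address; substitute your own real name and email.
$ git commit --trailer "Arvados-DCO-1.1-Signed-off-by: Alex Doe <alex.doe@example.com>"
```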
### Add License Headers

The comments at the top of each file must contain this copyright notice:

> Copyright © The Arvados Authors. All rights reserved.

They must also contain an `SPDX-License-Identifier` to identify the license of this component. In most cases you can copy this header from another file in the component. If you need more guidance, refer to [the COPYING file](COPYING). If it is not technically possible to add these comments to a file (for example, because it's a binary test file), you may add its path to the `.licenseignore` file instead.
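For example (an illustrative sketch, not a mandate), a shell script in an AGPL-licensed component would start with the notice plus one of the SPDX tags listed in [the COPYING file](COPYING):

```sh
#!/bin/sh
# Copyright © The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
# (The license tag varies by component; see COPYING for which one applies.)
```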
### Add Your Authorship

If you are not already listed in [the AUTHORS file](AUTHORS), please add yourself in the branch, following the documented format.

## Create Your Pull Request

Once you've finished pushing changes to your branch, create a pull request against `arvados:main` with the following checklist filled out:

* All agreed upon points are implemented / addressed. Describe changes from pre-implementation design.
** _comments_
* Anything not implemented (discovered or discussed during work) has a follow-up story.
** _comments_
* Code is tested and passing, both automated and manual; what manual testing was done is described.
** _comments_
* The tested code incorporates recent main branch changes.
** _confirm_
* New or changed UI/UX has gotten feedback from stakeholders.
** _comments_
* Documentation has been updated.
** _comments_
* Behaves appropriately at the intended scale (describe intended scale).
** _comments_
* Considered backwards and forwards compatibility issues between client and server.
** _comments_
* Follows our coding standards, including GUI style guidelines.
** _comments_

"Incorporates recent main branch changes" means that the branch is either based on, or has merged, the `main` branch within the last week. The more active development on a component is, the more important it is to be up-to-date with main to avoid surprising test failures post-merge.

UI/UX stands for “User Interface / User Experience”. This includes new or modified GUI elements in Workbench as well as usability elements of command line tools. Stakeholders typically include the product manager and may include designers, salespeople, customers, and other end users as appropriate. In this process, the assigned developer demos the new feature, makes note of any feedback, and then based on their judgement either: implements the changes, provides a reason why the feedback cannot be acted on, or discusses how to handle the feedback with the product manager and/or assigned reviewer. This feedback is typically obtained in earlier drafts of the pull request before it is submitted for final review.

A member of the core team will review the pull request. They may have questions or comments through the pull request interface. Once all issues have been resolved, your branch will be merged.

## Continuous Integration

Continuous integration is hosted by the Arvados project. Currently, external contributors cannot trigger test runs. Trusted contributors may be given permission to do so.

## Community Chat

You can chat with other members of the [Arvados community on Gitter](https://gitter.im/arvados/community). Come say hi!

================================================
FILE: COPYING
================================================

Unless indicated otherwise in the header of the file, the files in this repository are distributed under one of three different licenses: AGPL-3.0, Apache-2.0, or CC-BY-SA-3.0.

Individual files contain an SPDX tag that indicates the license for the file. These are the three tags in use:

    SPDX-License-Identifier: AGPL-3.0
    SPDX-License-Identifier: Apache-2.0
    SPDX-License-Identifier: CC-BY-SA-3.0

This enables machine processing of license information based on the SPDX License Identifiers that are available here: http://spdx.org/licenses/

The full license text for each license is appended below, and is also available in this directory:

    AGPL-3.0: agpl-3.0.txt
    Apache-2.0: apache-2.0.txt
    CC-BY-SA-3.0: cc-by-sa-3.0.txt

As a general rule, code in the sdk/ directory is licensed Apache-2.0, documentation in the doc/ directory is licensed CC-BY-SA-3.0, and everything else is licensed AGPL-3.0.

###############################################################################

Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner.
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ############################################################################### GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. 
This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.

If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Remote Network Interaction; Use with the GNU General Public License.

Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
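In practice, projects often condense these notices into a short machine-readable header plus a pointer to the full license text. Arvados itself does this: its source files carry an SPDX header of the following form (this is the actual header from the repository's Makefile, shown here only as a worked example of applying the terms above):

    # Copyright (C) The Arvados Authors. All rights reserved.
    #
    # SPDX-License-Identifier: AGPL-3.0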
###############################################################################

Attribution-ShareAlike 3.0 Unported

CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.

License

THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.

BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.

1. Definitions

a. "Adaptation" means a work based upon the Work, or upon the Work and other pre-existing works, such as a translation, adaptation, derivative work, arrangement of music or other alterations of a literary or artistic work, or phonogram or performance and includes cinematographic adaptations or any other form in which the Work may be recast, transformed, or adapted including in any form recognizably derived from the original, except that a work that constitutes a Collection will not be considered an Adaptation for the purpose of this License. For the avoidance of doubt, where the Work is a musical work, performance or phonogram, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered an Adaptation for the purpose of this License.

b. "Collection" means a collection of literary or artistic works, such as encyclopedias and anthologies, or performances, phonograms or broadcasts, or other works or subject matter other than works listed in Section 1(f) below, which, by reason of the selection and arrangement of their contents, constitute intellectual creations, in which the Work is included in its entirety in unmodified form along with one or more other contributions, each constituting separate and independent works in themselves, which together are assembled into a collective whole. A work that constitutes a Collection will not be considered an Adaptation (as defined below) for the purposes of this License.

c. "Creative Commons Compatible License" means a license that is listed at https://creativecommons.org/compatiblelicenses that has been approved by Creative Commons as being essentially equivalent to this License, including, at a minimum, because that license: (i) contains terms that have the same purpose, meaning and effect as the License Elements of this License; and, (ii) explicitly permits the relicensing of adaptations of works made available under that license under this License or a Creative Commons jurisdiction license with the same License Elements as this License.

d. "Distribute" means to make available to the public the original and copies of the Work or Adaptation, as appropriate, through sale or other transfer of ownership.

e. "License Elements" means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, ShareAlike.
"Licensor" means the individual, individuals, entity or entities that offer(s) the Work under the terms of this License. "Original Author" means, in the case of a literary or artistic work, the individual, individuals, entity or entities who created the Work or if no individual or entity can be identified, the publisher; and in addition (i) in the case of a performance the actors, singers, musicians, dancers, and other persons who act, sing, deliver, declaim, play in, interpret or otherwise perform literary or artistic works or expressions of folklore; (ii) in the case of a phonogram the producer being the person or legal entity who first fixes the sounds of a performance or other sounds; and, (iii) in the case of broadcasts, the organization that transmits the broadcast. "Work" means the literary and/or artistic work offered under the terms of this License including without limitation any production in the literary, scientific and artistic domain, whatever may be the mode or form of its expression including digital form, such as a book, pamphlet and other writing; a lecture, address, sermon or other work of the same nature; a dramatic or dramatico-musical work; a choreographic work or entertainment in dumb show; a musical composition with or without words; a cinematographic work to which are assimilated works expressed by a process analogous to cinematography; a work of drawing, painting, architecture, sculpture, engraving or lithography; a photographic work to which are assimilated works expressed by a process analogous to photography; a work of applied art; an illustration, map, plan, sketch or three-dimensional work relative to geography, topography, architecture or science; a performance; a broadcast; a phonogram; a compilation of data to the extent it is protected as a copyrightable work; or a work performed by a variety or circus performer to the extent it is not otherwise considered a literary or artistic work. "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. "Publicly Perform" means to perform public recitations of the Work and to communicate to the public those public recitations, by any means or process, including by wire or wireless means or public digital performances; to make available to the public Works in such a way that members of the public may access these Works from a place and at a place individually chosen by them; to perform the Work to the public by any means or process and the communication to the public of the performances of the Work, including by public digital performance; to broadcast and rebroadcast the Work by any means including signs, sounds or images. "Reproduce" means to make copies of the Work by any means including without limitation by sound or visual recordings and the right of fixation and reproducing fixations of the Work, including storage of a protected performance or phonogram in digital form or other electronic medium. 2. Fair Dealing Rights. Nothing in this License is intended to reduce, limit, or restrict any uses free from copyright or rights arising from limitations or exceptions that are provided for in connection with the copyright protection under copyright law or other applicable laws. 3. License Grant. 
Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:

a. to Reproduce the Work, to incorporate the Work into one or more Collections, and to Reproduce the Work as incorporated in the Collections;

b. to create and Reproduce Adaptations provided that any such Adaptation, including any translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise identify that changes were made to the original Work. For example, a translation could be marked "The original work was translated from English to Spanish," or a modification could indicate "The original work has been modified.";

c. to Distribute and Publicly Perform the Work including as incorporated in Collections; and,

d. to Distribute and Publicly Perform Adaptations.

e. For the avoidance of doubt:

i. Non-waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme cannot be waived, the Licensor reserves the exclusive right to collect such royalties for any exercise by You of the rights granted under this License;

ii. Waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme can be waived, the Licensor waives the exclusive right to collect such royalties for any exercise by You of the rights granted under this License; and,

iii. Voluntary License Schemes. The Licensor waives the right to collect royalties, whether individually or, in the event that the Licensor is a member of a collecting society that administers voluntary licensing schemes, via that society, from any exercise by You of the rights granted under this License.

The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. Subject to Section 8(f), all rights not expressly granted by Licensor are hereby reserved.

4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:

a. You may Distribute or Publicly Perform the Work only under the terms of this License. You must include a copy of, or the Uniform Resource Identifier (URI) for, this License with every copy of the Work You Distribute or Publicly Perform. You may not offer or impose any terms on the Work that restrict the terms of this License or the ability of the recipient of the Work to exercise the rights granted to that recipient under the terms of the License. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties with every copy of the Work You Distribute or Publicly Perform. When You Distribute or Publicly Perform the Work, You may not impose any effective technological measures on the Work that restrict the ability of a recipient of the Work from You to exercise the rights granted to that recipient under the terms of the License. This Section 4(a) applies to the Work as incorporated in a Collection, but this does not require the Collection apart from the Work itself to be made subject to the terms of this License.
If You create a Collection, upon notice from any Licensor You must, to the extent practicable, remove from the Collection any credit as required by Section 4(c), as requested. If You create an Adaptation, upon notice from any Licensor You must, to the extent practicable, remove from the Adaptation any credit as required by Section 4(c), as requested.

b. You may Distribute or Publicly Perform an Adaptation only under the terms of: (i) this License; (ii) a later version of this License with the same License Elements as this License; (iii) a Creative Commons jurisdiction license (either this or a later license version) that contains the same License Elements as this License (e.g., Attribution-ShareAlike 3.0 US); (iv) a Creative Commons Compatible License. If you license the Adaptation under one of the licenses mentioned in (iv), you must comply with the terms of that license. If you license the Adaptation under the terms of any of the licenses mentioned in (i), (ii) or (iii) (the "Applicable License"), you must comply with the terms of the Applicable License generally and the following provisions: (I) You must include a copy of, or the URI for, the Applicable License with every copy of each Adaptation You Distribute or Publicly Perform; (II) You may not offer or impose any terms on the Adaptation that restrict the terms of the Applicable License or the ability of the recipient of the Adaptation to exercise the rights granted to that recipient under the terms of the Applicable License; (III) You must keep intact all notices that refer to the Applicable License and to the disclaimer of warranties with every copy of the Work as included in the Adaptation You Distribute or Publicly Perform; (IV) when You Distribute or Publicly Perform the Adaptation, You may not impose any effective technological measures on the Adaptation that restrict the ability of a recipient of the Adaptation from You to exercise the rights granted to that recipient under the terms of the Applicable License. This Section 4(b) applies to the Adaptation as incorporated in a Collection, but this does not require the Collection apart from the Adaptation itself to be made subject to the terms of the Applicable License.

c. If You Distribute, or Publicly Perform the Work or any Adaptations or Collections, You must, unless a request has been made pursuant to Section 4(a), keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or if the Original Author and/or Licensor designate another party or parties (e.g., a sponsor institute, publishing entity, journal) for attribution ("Attribution Parties") in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; (ii) the title of the Work if supplied; (iii) to the extent reasonably practicable, the URI, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and (iv), consistent with Section 3(b), in the case of an Adaptation, a credit identifying the use of the Work in the Adaptation (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author").
The credit required by this Section 4(c) may be implemented in any reasonable manner; provided, however, that in the case of an Adaptation or Collection, at a minimum such credit will appear, if a credit for all contributing authors of the Adaptation or Collection appears, then as part of these credits and in a manner at least as prominent as the credits for the other contributing authors. For the avoidance of doubt, You may only use the credit required by this Section for the purpose of attribution in the manner set out above and, by exercising Your rights under this License, You may not implicitly or explicitly assert or imply any connection with, sponsorship or endorsement by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or Your use of the Work, without the separate, express prior written permission of the Original Author, Licensor and/or Attribution Parties.

d. Except as otherwise agreed in writing by the Licensor or as may be otherwise permitted by applicable law, if You Reproduce, Distribute or Publicly Perform the Work either by itself or as part of any Adaptations or Collections, You must not distort, mutilate, modify or take other derogatory action in relation to the Work which would be prejudicial to the Original Author's honor or reputation. Licensor agrees that in those jurisdictions (e.g. Japan), in which any exercise of the right granted in Section 3(b) of this License (the right to make Adaptations) would be deemed to be a distortion, mutilation, modification or other derogatory action prejudicial to the Original Author's honor and reputation, the Licensor will waive or not assert, as appropriate, this Section, to the fullest extent permitted by the applicable national law, to enable You to reasonably exercise Your right under Section 3(b) of this License (right to make Adaptations) but not otherwise.

5. Representations, Warranties and Disclaimer

UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.

6. Limitation on Liability.

EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

7. Termination

a. This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Adaptations or Collections from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.

b. Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work).
Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.

8. Miscellaneous

a. Each time You Distribute or Publicly Perform the Work or a Collection, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.

b. Each time You Distribute or Publicly Perform an Adaptation, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.

c. If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.

d. No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.

e. This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You.

f. The rights granted under, and the subject matter referenced, in this License were drafted utilizing the terminology of the Berne Convention for the Protection of Literary and Artistic Works (as amended on September 28, 1979), the Rome Convention of 1961, the WIPO Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and the Universal Copyright Convention (as revised on July 24, 1971). These rights and subject matter take effect in the relevant jurisdiction in which the License terms are sought to be enforced according to the corresponding provisions of the implementation of those treaty provisions in the applicable national law. If the standard suite of rights granted under applicable copyright law includes additional rights not granted under this License, such additional rights are deemed to be included in the License; this License is not intended to restrict the license of any rights under applicable law.

Creative Commons Notice

Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.
Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, Creative Commons does not authorize the use by either party of the trademark "Creative Commons" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time. For the avoidance of doubt, this trademark restriction does not form part of the License.

Creative Commons may be contacted at https://creativecommons.org/.

================================================
FILE: Makefile
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

export WORKSPACE?=$(shell pwd)

help:
	@echo >&2
	@echo >&2 "There is no default make target here. Did you mean 'make test'?"
	@echo >&2
	@echo >&2 "More info:"
	@echo >&2 " Installing --> http://doc.arvados.org/install"
	@echo >&2 " Developing/contributing --> https://github.com/arvados/arvados"
	@echo >&2 " Project home --> https://arvados.org"
	@echo >&2
	@false

test:
	build/run-tests.sh ${TEST_FLAGS}

packages:
	build/run-build-packages-all-targets.sh ${PACKAGES_FLAGS}

test-packages:
	build/run-build-packages-all-targets.sh --test-packages ${PACKAGES_FLAGS}

================================================
FILE: README.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

[![Join the chat at https://gitter.im/arvados/community](https://badges.gitter.im/arvados/community.svg)](https://gitter.im/arvados/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

| [Installing Arvados](https://doc.arvados.org/install/index.html) | [Installing Client SDKs](https://doc.arvados.org/sdk/index.html) | [Report a bug](https://github.com/arvados/arvados/issues/new) | [Development and Contributing](CONTRIBUTING.md)

[Arvados](https://arvados.org) is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data. With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources.

The key components of Arvados are:

* *Keep*: Keep is the Arvados storage system for managing and storing large collections of files. Keep combines content addressing and a distributed storage architecture resulting in both high reliability and high throughput. Every file stored in Keep can be accurately verified every time it is retrieved. Keep supports the creation of collections as a flexible way to define data sets without having to re-organize or needlessly copy data. Keep works on a wide range of underlying filesystems and object stores.

* *Crunch*: Crunch is the orchestration system for running [Common Workflow Language](https://www.commonwl.org) workflows. It is designed to maintain data provenance and workflow reproducibility. Crunch automatically tracks data inputs and outputs through Keep and executes workflow processes in Docker containers. In a cloud environment, Crunch optimizes costs by scaling compute on demand.

* *Workbench*: The Workbench web application allows users to interactively access Arvados functionality. It is especially helpful for querying and browsing data, visualizing provenance, and tracking the progress of workflows.

* *Command Line tools*: The command line interface (CLI) provides convenient access to Arvados functionality from the command line.

* *API and SDKs*: Arvados is designed to be integrated with existing infrastructure. All the services in Arvados are accessed through a RESTful API. SDKs are available for Python, Go, R, Perl, Ruby, and Java.
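As a quick illustration of the API-first design, here is a minimal sketch using the Python SDK. It assumes the `arvados-python-client` package is installed and that `ARVADOS_API_HOST` and `ARVADOS_API_TOKEN` are set in the environment:

```python
import arvados

# Build an API client from ARVADOS_API_HOST / ARVADOS_API_TOKEN.
api = arvados.api('v1')

# List a few collections visible to this token.
for item in api.collections().list(limit=5).execute()['items']:
    print(item['uuid'], item['name'])
```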
# Documentation

Complete documentation, including the [User Guide](https://doc.arvados.org/user/index.html), [Installation documentation](https://doc.arvados.org/install/index.html), [Administrator documentation](https://doc.arvados.org/admin/index.html) and [API documentation](https://doc.arvados.org/api/index.html), is available at http://doc.arvados.org/

If you wish to build the Arvados documentation from a local git clone, see [doc/README.textile](doc/README.textile) for instructions.

# Community

[![Join the chat at https://gitter.im/arvados/community](https://badges.gitter.im/arvados/community.svg)](https://gitter.im/arvados/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

The [Arvados community channel](https://gitter.im/arvados/community) at [gitter.im](https://gitter.im) is available for live discussion and support.

The [Arvados development channel](https://gitter.im/arvados/development) at [gitter.im](https://gitter.im) is used to coordinate development.

The [Arvados user mailing list](http://lists.arvados.org/mailman/listinfo/arvados) is used to announce new versions and other news.

All participants are expected to abide by the [Arvados Code of Conduct](CODE_OF_CONDUCT.md).

# Reporting bugs

[Report an issue on GitHub](https://github.com/arvados/arvados/issues/new)

# Development and Contributing

See [CONTRIBUTING](CONTRIBUTING.md) for information about Arvados development and how to contribute to the Arvados project.

# Licensing

Arvados is Free Software. See [COPYING](COPYING) for information about the open source licenses used in Arvados.

================================================
FILE: SECURITY.md
================================================
# Arvados Project Security Policy

## Supported Versions

The Arvados project will issue security fixes by making point releases on the current stable release series (X.Y.0, X.Y.1, X.Y.2, etc). The most recent stable release version, along with release notes and upgrade notes documenting security fixes, can be found at these locations:

https://arvados.org/releases/
https://doc.arvados.org/admin/upgrading.html

The Arvados project does not support versions older than the current stable release except by special arrangement (contact info@curii.com). Release announcements, including notification of security fixes, are sent to the Arvados announcement list:

https://lists.arvados.org//mailman/listinfo/arvados

## Reporting Security Issues

If you believe you have found a security vulnerability in any Arvados-owned repository, please report it to us through coordinated disclosure.

**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** Instead, please send an email to dev@curii.com.
Please include as much of the information listed below as you can to help us better understand and resolve the issue:

* The type of issue (e.g., remote code execution, SQL injection, or cross-site scripting)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

================================================
FILE: agpl-3.0.txt
================================================
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software.

A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public.

The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version.

An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license.

The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU Affero General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based on the Program.

To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work.

A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work.
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.

The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.

The Corresponding Source for a work in source code form is that same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.

When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices".
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.

A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code.
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.

"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.

If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).

The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.
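In practice, this repository abbreviates the per-file boilerplate described above to a short copyright-plus-SPDX header, which the `build/check-copyright-notices` script later in this document checks and can fix. As a sketch of that convention (the header text is copied from the build scripts below), a shell source file here opens like this:

    #!/bin/sh
    # Copyright (C) The Arvados Authors. All rights reserved.
    #
    # SPDX-License-Identifier: AGPL-3.0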
================================================ FILE: apache-2.0.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: build/README ================================================ Prerequisites ============= In order to build packages, you will need: * Ansible installed following the instructions in `tools/ansible/README.md` * `ansible-galaxy` and `ansible-playbook` in `$PATH` (e.g., by activating your Ansible virtualenv, or having symlinks to those commands inside it) * Docker installed * permission to run Docker commands * the `WORKSPACE` environment variable set to the absolute path of an Arvados Git work tree The Ansible playbook `tools/ansible/install-dev-tools.yml` can install all of these prerequisites except the last. Quickstart ========== Build and test all the packages for a distribution on your architecture by running: ./run-build-test-packages-one-target.sh --target DISTRO This will build the package build and package test Docker images for the named target distribution, build all packages in a build container, and then test all packages in a test container. Limit the build to a single package by adding the `--only-build PACKAGE_NAME` option. This is helpful when a build is mostly in good shape and you're tracking down the last bugs in one or two packages. Get more verbose output by adding the `--debug` option. By default the script avoids rebuilding or retesting packages that it detects were already built or tested in past runs. You can force the script to rebuild or retest package(s) with the `--force-build` and `--force-test` options, respectively. Run the script with `--help` for more information about other options. Scripts in this directory ========================= run-tests.sh Run the unit and integration test suite. run-build-test-packages-one-target.sh Entry point, wraps run-build-packages-one-target.sh to perform package building and testing inside Docker. run-build-packages-one-target.sh Build packages for one target inside Docker. run-build-packages-all-targets.sh Run run-build-packages-one-target.sh for every target. run-build-packages.sh Actually build packages. Intended to run inside a Docker container with a proper build environment. run-build-packages-python-and-ruby.sh Build Python and Ruby packages suitable for upload to PyPI and RubyGems. run-library.sh A library of functions shared by the various scripts in this directory. build_docker_image.py Build a Docker image from Arvados source components. Adding a new target =================== In order to build packages on a new distribution, you MUST: * Define containers to build the package build and test Docker images in `tools/ansible/files/development-docker-images.yml`. * Create `package-testing/test-packages-TARGET.sh`, ideally by making it a symlink to `FORMAT-common-test-packages.sh` (as sketched below). * Update the package download code near the bottom of `test_package_presence` in `run-library.sh` so it can download packages for the new distribution. Of course, any part of our package build or test infrastructure may need to be updated to accommodate the process for new distributions.
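As a concrete sketch of the symlink step above (the target name `debian13` is hypothetical; substitute the distribution you are adding, and pick the matching FORMAT prefix, `deb` or `rpm`):

    cd "$WORKSPACE/build/package-testing"
    ln -s deb-common-test-packages.sh test-packages-debian13.sh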
If you're having trouble building lots of packages, consider grepping these build scripts for the identifier of the closest working target, and see if you may need to add branches or similar hooks for your target. If you're having trouble building specific packages, consider doing the same for those packages' `fpm-info.sh` files. ================================================ FILE: build/build_docker_image.py ================================================ #!/usr/bin/env python3 # build_docker_image.py - Build a Docker image with Python source packages # # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 # # Requires you have requirements.build.txt installed import argparse import logging import os import re import runpy import shlex import shutil import subprocess import sys import tempfile from pathlib import Path logger = logging.getLogger('build_docker_image') _null_loghandler = logging.NullHandler() logger.addHandler(_null_loghandler) def _log_cmd(level, msg, *args): *args, cmd = args if logger.isEnabledFor(level): logger.log(level, f'{msg}: %s', *args, ' '.join(shlex.quote(s) for s in cmd)) def _log_and_run(cmd, *, level=logging.DEBUG, check=True, **kwargs): _log_cmd(level, "running command", cmd) return subprocess.run(cmd, check=check, **kwargs) class OptionError(ValueError): pass class DockerImage: _BUILD_ARGS = {} _REGISTRY = {} @classmethod def register(cls, subcls): cls._REGISTRY[subcls.NAME] = subcls pre_name, _, shortname = subcls.NAME.rpartition('/') if pre_name == 'arvados': cls._REGISTRY[shortname] = subcls return subcls @classmethod def build_from_args(cls, args): try: subcls = cls._REGISTRY[args.docker_image] except KeyError: raise OptionError(f"unrecognized Docker image {args.docker_image!r}") from None else: return subcls(args) def __init__(self, args): self.extra_args = args.extra_args self.workspace = args.workspace if args.tag is not None: self.tag = args.tag elif version := (args.version or self.dev_version()): self.tag = f'{self.NAME}:{version}' else: self.tag = None def __enter__(self): tmpname = self.NAME.replace('/', '-') self.context_dir = Path(tempfile.mkdtemp(prefix=f'{tmpname}.')) return self def __exit__(self, exc_type, exc_value, exc_tb): shutil.rmtree(self.context_dir, ignore_errors=True) del self.context_dir def build_docker_image(self): logger.info("building Docker image %s", self.tag or self.NAME) cmd = ['docker', 'image', 'build'] cmd.extend( f'--build-arg={key}={val}' for key, val in self._BUILD_ARGS.items() ) cmd.append(f'--file={self.workspace / self.DOCKERFILE_PATH}') if self.tag is not None: cmd.append(f'--tag={self.tag}') cmd.append(str(self.context_dir)) return _log_and_run(cmd) def dev_version(self): return None class PythonVenvImage(DockerImage): DOCKERFILE_PATH = 'build/docker/python-venv.Dockerfile' _EXTRAS = {} _TEST_COMMAND = None def __init__(self, args): arv_vars = runpy.run_path(args.workspace / 'sdk/python/arvados_version.py') self.arv_pymod = arv_vars['ARVADOS_PYTHON_MODULES'][self._PACKAGE_NAME] super().__init__(args) def dev_version(self): return self.arv_pymod.get_version(self.workspace / self.arv_pymod.src_path) def build_python_wheel(self, src_dir): logger.info("building Python wheel at %s", src_dir) cmd = [sys.executable, '-m', 'build', '--outdir', str(self.context_dir)] return _log_and_run(cmd, cwd=src_dir, umask=0o022) def build_requirements(self): with (self.context_dir / 'requirements.txt').open('w') as requirements_file: for whl_path in self.context_dir.glob('*.whl'): name, 
_, _ = whl_path.stem.partition('-') try: name += f' [{self._EXTRAS[name]}]' except KeyError: pass whl_uri = Path('/usr/local/src', whl_path.name).as_uri() print(name, '@', whl_uri, file=requirements_file) def build_docker_image(self): for path in self.extra_args: self.build_python_wheel(path) for dep in self.arv_pymod.dependencies: self.build_python_wheel(self.workspace / dep.src_path) self.build_python_wheel(self.workspace / self.arv_pymod.src_path) self.build_requirements() result = super().build_docker_image() if self.tag and self._TEST_COMMAND: _log_and_run( ['docker', 'run', '--rm', '--tty', self.tag] + self._TEST_COMMAND, stdout=subprocess.DEVNULL, ) return result @DockerImage.register class ClusterActivityImage(PythonVenvImage): NAME = 'arvados/cluster-activity' _BUILD_ARGS = { 'APT_PKGLIST': 'libcurl4', 'OLD_PKGNAME': 'python3-arvados-cluster-activity', } _EXTRAS = { 'arvados_cluster_activity': 'prometheus', } _PACKAGE_NAME = 'arvados-cluster-activity' _TEST_COMMAND = ['arv-cluster-activity', '--version'] @DockerImage.register class JobsImage(PythonVenvImage): NAME = 'arvados/jobs' _BUILD_ARGS = { 'APT_PKGLIST': 'libcurl4 nodejs', 'OLD_PKGNAME': 'python3-arvados-cwl-runner', } _PACKAGE_NAME = 'arvados-cwl-runner' _TEST_COMMAND = ['arvados-cwl-runner', '--version'] class Environments: @staticmethod def production(args): if args.version is None: raise OptionError( "$ARVADOS_BUILDING_VERSION must be set to build production images" ) @staticmethod def development(args): return _ARG_MAP = { 'dev': development, 'devel': development, 'development': development, 'prod': production, 'production': production, } @classmethod def parse_argument(cls, s): try: return cls._ARG_MAP[s.lower()] except KeyError: raise ValueError(f"unrecognized environment {s!r}") class UploadActions: @staticmethod def to_arvados(tag): logger.info("uploading Docker image %s to Arvados", tag) name, _, version = tag.rpartition(':') if name: cmd = ['arv-keepdocker', name, version] else: cmd = ['arv-keepdocker', tag] return _log_and_run(cmd) @staticmethod def to_docker_hub(tag): logger.info("uploading Docker image %s to Docker Hub", tag) cmd = ['docker', 'push', tag] for tries_left in range(4, -1, -1): try: docker_push = _log_and_run(cmd) except subprocess.CalledProcessError: if tries_left == 0: raise else: break return docker_push _ARG_MAP = { 'arv-keepdocker': to_arvados, 'arvados': to_arvados, 'docker': to_docker_hub, 'docker_hub': to_docker_hub, 'dockerhub': to_docker_hub, 'keepdocker': to_arvados, } @classmethod def parse_argument(cls, s): try: return cls._ARG_MAP[s.lower()] except KeyError: raise ValueError(f"unrecognized upload method {s!r}") class ArgumentParser(argparse.ArgumentParser): def __init__(self): super().__init__( prog='build_docker_image.py', usage='%(prog)s [options ...] IMAGE_NAME [source directory ...]', ) # We put environment variables for the tool in the args so the rest # of the program has a single place to access parameters. env_workspace = os.environ.get('WORKSPACE') if version := os.environ.get('ARVADOS_BUILDING_VERSION'): version = re.sub(r'~(dev[0-9])', r'.\1', version, 1) version = re.sub(r'~(a|b|rc)([0-9])', r'\1\2', version, 1) self.set_defaults( version=version, workspace=Path(env_workspace) if env_workspace else None, ) self.add_argument( '--environment', type=Environments.parse_argument, default=Environments.production, help="""One of `development` or `production`. Your build settings will use defaults and be validated based on this setting. 
Default is `production` because it's the strictest. """) self.add_argument( '--loglevel', type=self._parse_loglevel, default=logging.WARNING, help="""Log level to use, like `debug`, `info`, `warning`, or `error` """) self.add_argument( '--tag', '-t', help="""Tag for the built Docker image. Default is generated from the image name and build version. """) self.add_argument( '--upload-to', type=UploadActions.parse_argument, help="""After successfully building the Docker image, upload it to this destination. Choices are `arvados` or `docker_hub`. Both require credentials in place to work. """) self.add_argument( 'docker_image', metavar='IMAGE_NAME', choices=sorted(DockerImage._REGISTRY), help="""Docker image to build. Supported images are: %(choices)s. """) self.add_argument( 'extra_args', metavar='SOURCE_DIR', type=Path, nargs=argparse.ZERO_OR_MORE, default=[], help="""Before building the Docker image, the tool will build a Python wheel from each source directory and add it to the Docker build context. You can use this during testing to install specific development versions of dependencies. """) def _parse_loglevel(self, s): try: return logging.getLevelNamesMapping()[s.upper()] except KeyError: raise ValueError(f"unrecognized logging level {s!r}") def main(args): if not isinstance(args, argparse.Namespace): args = ArgumentParser().parse_args(args) if args.workspace is None: raise OptionError("$WORKSPACE must be set to the Arvados source directory") args.environment(args) docker_image = DockerImage.build_from_args(args) if args.upload_to and not docker_image.tag: raise OptionError("cannot upload a Docker image without a tag") with docker_image: docker_image.build_docker_image() if args.upload_to: args.upload_to(docker_image.tag) return os.EX_OK if __name__ == '__main__': argparser = ArgumentParser() _args = argparser.parse_args() logging.basicConfig( format=f'{logger.name}: %(levelname)s: %(message)s', level=_args.loglevel, ) try: returncode = main(_args) except OptionError as err: argparser.error(err.args[0]) returncode = 2 except subprocess.CalledProcessError as err: _log_cmd( logging.ERROR, "command failed with exit code %s", err.returncode, err.cmd, ) returncode = err.returncode exit(returncode) ================================================ FILE: build/check-copyright-notices ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e fix=false while [[ "${@}" != "" ]] do arg=${1} shift case ${arg} in --help) cat <&2 "Unrecognized argument '${arg}'. Try $0 --help" exit 2 ;; esac done fixer() { want="${want}" perl -pi~ - "${1}" <<'EOF' BEGIN { undef $/ } s{^((\#\!.*?\n|\n*---\n.*?\n\.\.\.\n|<\?xml.*?>\n)\n?)?}{${2}$ENV{want}\n\n}ms EOF } IFS=$'\n' read -a ignores -r -d $'\000' <.licenseignore || true result=0 coproc git ls-files -z ${@} $outputdir/$cleaned_test-$build.txt result=$? 
if [ $result -eq 0 ] then echo processing $outputdir/$cleaned_test-$build.txt creating $outputdir/$cleaned_test.csv echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed (.*) in [0-9]+ms.*$/;print "".++$line."-$1,";' | perl -p -e 's/,$//g'|tr " " "_" ) > $outputdir/$cleaned_test.csv echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed.*in ([0-9]+)ms.*$/;print "$1,";' | perl -p -e 's/,$//g' ) >> $outputdir/$cleaned_test.csv else echo "$test wasn't found in $file" cleaned_test=$(echo $test | tr -d ",.:;/") > $outputdir/$cleaned_test.csv fi done ================================================ FILE: build/docker/python-venv.Dockerfile ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 # # Build this with `build/build_docker_image.py` FROM debian:bookworm-slim RUN apt-get update -q \ && DEBIAN_FRONTEND=noninteractive apt-get install -qy python3-venv \ && python3 -m venv /opt/arvados-py # The build script sets up our build context with all the Python source # packages to install. COPY . /usr/local/src/ RUN /opt/arvados-py/bin/pip install -qq --no-cache-dir --no-input \ -r /usr/local/src/requirements.txt ### Stage 2 FROM debian:bookworm-slim MAINTAINER Arvados Package Maintainers ARG APT_PKGLIST ARG OLD_PKGNAME=python3-arvados-python-client RUN apt-get update -q \ && DEBIAN_FRONTEND=noninteractive apt-get install -qy python3 $APT_PKGLIST # The symlinks provide path compatibility with old package-based images. RUN adduser --disabled-password --gecos 'Crunch execution user' crunch \ && install --directory --owner=crunch --group=crunch --mode=0700 \ /keep /tmp/crunch-src /tmp/crunch-job \ && ln -s /opt/arvados-py "/usr/lib/$OLD_PKGNAME" USER crunch ENV PATH=/opt/arvados-py/bin:/usr/local/bin:/usr/bin:/bin COPY --from=0 /opt/arvados-py/ /opt/arvados-py/ ================================================ FILE: build/go-python-package-scripts/postinst ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e # Detect rpm-based systems: the exit code of the following command is zero # on rpm-based systems if /usr/bin/rpm -q -f /usr/bin/rpm >/dev/null 2>&1; then # Red Hat ("%{...}" is interpolated at package build time) pkg="%{name}" pkgtype=rpm prefix="${RPM_INSTALL_PREFIX}" else # Debian script="$(basename "${0}")" pkg="${script%.postinst}" pkgtype=deb prefix=/usr fi case "${pkgtype}-${1}" in deb-configure | rpm-1) dest_dir="/lib/systemd/system" if ! [ -d "${dest_dir}" ]; then exit 0 fi # Find the unit file we need to install. unit_file="${pkg}.service" for dir in \ "${prefix}/share/doc/${pkg}" \ "${dest_dir}"; do if [ -e "${dir}/${unit_file}" ]; then src_dir="${dir}" break fi done if [ -z "${src_dir}" ]; then echo >&2 "WARNING: postinst script did not find ${unit_file} anywhere." exit 0 fi # Install/update the unit file if necessary. if [ "${src_dir}" != "${dest_dir}" ]; then cp "${src_dir}/${unit_file}" "${dest_dir}/" || exit 0 fi # Enable service, and make sure systemd re-reads the unit # file, in case we changed it. if [ -e /run/systemd/system ]; then systemctl daemon-reload || true eval "$(systemctl -p UnitFileState show "${pkg}")" case "${UnitFileState}" in disabled) # Failing to enable or start the service is not a # package error, so don't let errors here # propagate up.
systemctl enable "${pkg}" || true systemctl start "${pkg}" || true ;; enabled) systemctl reload-or-try-restart "${pkg}" || true ;; esac fi ;; esac ================================================ FILE: build/go-python-package-scripts/prerm ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e # Detect rpm-based systems: the exit code of the following command is zero # on rpm-based systems if /usr/bin/rpm -q -f /usr/bin/rpm >/dev/null 2>&1; then # Red Hat ("%{...}" is interpolated at package build time) pkg="%{name}" pkgtype=rpm prefix="${RPM_INSTALL_PREFIX}" else # Debian script="$(basename "${0}")" pkg="${script%.prerm}" pkgtype=deb prefix=/usr fi case "${pkgtype}-${1}" in deb-remove | rpm-0) if [ -e /run/systemd/system ]; then systemctl stop "${pkg}" || true systemctl disable "${pkg}" || true fi if [ -e "${prefix}/share/doc/${pkg}/${pkg}.service" ]; then # Unit files from Python packages get installed by # postinst so we have to remove them explicitly here. rm "/lib/systemd/system/${pkg}.service" || true fi ;; esac ================================================ FILE: build/package-testing/common-test-packages.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -eu FAIL=0 echo while read so && [ -n "$so" ]; do if ldd "$so" | grep "not found" ; then echo "^^^ Missing while scanning $so ^^^" FAIL=1 fi done ================================================ FILE: build/package-testing/deb-common-test-packages.sh ================================================ dpkg-query --show > "$ARV_PACKAGES_DIR/$1.before" apt-get $DASHQQ_UNLESS_DEBUG --allow-insecure-repositories update apt-get $DASHQQ_UNLESS_DEBUG -y --allow-unauthenticated install "$1" >"$STDOUT_IF_DEBUG" 2>"$STDERR_IF_DEBUG" || install_status=$? dpkg-query --show > "$ARV_PACKAGES_DIR/$1.after" diff "$ARV_PACKAGES_DIR/$1.before" "$ARV_PACKAGES_DIR/$1.after" > "$ARV_PACKAGES_DIR/$1.diff" || true mkdir -p /tmp/opts cd /tmp/opts export ARV_PACKAGES_DIR="/arvados/packages/$target" if [[ -f $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb 2>/dev/null | head -n1) ]] ; then debpkg=$(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1) else debpkg=$(ls -t "$ARV_PACKAGES_DIR/processed/$1"_*.deb | head -n1) fi dpkg-deb -x $debpkg . if [[ "$DEBUG" != "0" ]]; then find -type f -name '*.so' | while read so; do printf "\n== Package dependencies for %s ==\n" "$so" # dpkg is not fully aware of merged-/usr systems: ldd may list a library # under /lib where dpkg thinks it's under /usr/lib, or vice versa. # awk constructs globs that we pass to `dpkg --search` to be flexible # about which version we find. This could potentially return multiple # results, but doing better probably requires restructuring this whole # code to find and report the best match across multiple dpkg queries. ldd "$so" \ | awk 'BEGIN { ORS="\0" } ($3 ~ /^\//) {print "*" $3}' \ | sort --unique --zero-terminated \ | xargs -0 --no-run-if-empty dpkg --search \ | cut -d: -f1 \ | sort --unique done fi case "${install_status:-0}-$1" in 0-* | 100-arvados-api-server ) exec /jenkins/package-testing/common-test-packages.sh "$1" ;; *) exit "$install_status" ;; esac ================================================ FILE: build/package-testing/rpm-common-test-packages.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved.
# # SPDX-License-Identifier: AGPL-3.0 set -eu # Set up DEBUG=${ARVADOS_DEBUG:-0} STDOUT_IF_DEBUG=/dev/null STDERR_IF_DEBUG=/dev/null if [[ "$DEBUG" != "0" ]]; then STDOUT_IF_DEBUG=/dev/stdout STDERR_IF_DEBUG=/dev/stderr fi target="$(basename "$0" .sh)" target="${target##*-}" microdnf --assumeyes clean all touch /var/lib/rpm/* export ARV_PACKAGES_DIR="/arvados/packages/$target" rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.before" microdnf --assumeyes install "$1" || install_status="$?" rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.after" diff "$ARV_PACKAGES_DIR/$1".{before,after} >"$ARV_PACKAGES_DIR/$1.diff" || true mkdir -p /tmp/opts cd /tmp/opts rpm2cpio $(ls -t "$ARV_PACKAGES_DIR/$1"-*.rpm | head -n1) | cpio -idm 2>/dev/null if [[ "$DEBUG" != "0" ]]; then find -name '*.so' | while read so; do echo -e "\n== Package dependencies for $so ==" ldd "$so" \ | awk '($3 ~ /^\//){print $3}' | sort -u | xargs rpm -qf | sort -u done fi case "${install_status:-0}-$1" in 0-* ) # Install other packages alongside to test for build id conflicts. # This can be removed after we have test-provision-rocky8, #21426. microdnf --assumeyes install arvados-client arvados-server python3-arvados-python-client ;; 1-arvados-api-server ) ;; *) exit "$install_status" ;; esac exec /jenkins/package-testing/common-test-packages.sh "$1" ================================================ FILE: build/package-testing/test-package-arvados-api-server.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e PACKAGE_NAME=arvados-api-server API_GEMS_LS="$(mktemp --tmpdir api-gems-XXXXXX.list)" trap 'rm -f "$API_GEMS_LS"' EXIT INT TERM QUIT cd "/var/www/${PACKAGE_NAME%-server}" cat_dropins() { cat /lib/systemd/system/arvados-railsapi.service.d/*.conf } check_gem_dirs() { local when="$1"; shift env -C shared/vendor_bundle/ruby ls -1 >"$API_GEMS_LS" local ls_count="$(wc -l <"$API_GEMS_LS")" if [ "$ls_count" = 1 ]; then return 0 fi echo "Package $PACKAGE_NAME FAILED: $ls_count gem directories created after $when:" >&2 case "${ARVADOS_DEBUG:-0}" in 0) cat "$API_GEMS_LS" >&2 ;; *) env -C shared/vendor_bundle/ruby find -maxdepth 3 -type d -ls >&2 ;; esac return 11 } expect_grep() { local expect_exit="$1"; shift local actual_exit=0 grep "$@" >/dev/null || actual_exit=$? if [ "$actual_exit" -eq "$expect_exit" ]; then return 0 fi echo "Package $PACKAGE_NAME FAILED: \`grep" "$@" "\` returned exit code $actual_exit" >&2 case "$actual_exit" in 0) return 1 ;; *) return "$actual_exit" ;; esac } # We intentionally don't hardcode a Bundler path here because other parts of our # infrastructure expect `bundle` to be available system-wide after installation. # After that infrastructure is fixed, this test can invoke `bundle` the same # way the postinst script does. env -C current bundle list >"$ARV_PACKAGES_DIR/$PACKAGE_NAME.gems" check_gem_dirs "initial install" case "$TARGET" in debian*|ubuntu*) cat_dropins | expect_grep 0 -x SupplementaryGroups=www-data ;; rocky*) cat_dropins | expect_grep 1 "^SupplementaryGroups=" microdnf --assumeyes install nginx microdnf --assumeyes reinstall "$PACKAGE_NAME" || test $? -eq 1 check_gem_dirs "package reinstall" cat_dropins | expect_grep 0 -x SupplementaryGroups=nginx ;; *) echo "$0: WARNING: Unknown target '$TARGET'."
>&2 ;; esac ================================================ FILE: build/package-testing/test-package-arvados-client.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e arvados-client -version >/dev/null ================================================ FILE: build/package-testing/test-package-arvados-docker-cleaner.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e arvados-docker-cleaner -h >/dev/null ================================================ FILE: build/package-testing/test-package-python3-arvados-cwl-runner.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e arvados-cwl-runner --version >/dev/null ================================================ FILE: build/package-testing/test-package-python3-arvados-python-client.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 arv-put --version >/dev/null || exit . /usr/lib/python3-arvados-python-client/bin/activate python </dev/null ================================================ FILE: build/package-testing/test-package-python3-python-arvados-fuse.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e arv-mount --version ================================================ FILE: build/pypkg_info.py ================================================ #!/usr/bin/env python3 # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 """pypkg_info.py - Introspect installed Python packages This tool can read metadata about any Python package installed in the current environment and report it out in various formats. We use this mainly to pass information through when building distribution packages. 
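As a hypothetical example (the package name is illustrative and assumed to be
installed in the current environment), a packaging script might run:

    pypkg_info.py --format=fpm metadata arvados-python-client summary license

to emit --description=... and --license=... switches ready to splice into an
fpm command line.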
""" import argparse import enum import importlib.metadata import os import sys from pathlib import PurePath class RawFormat: def format_metadata(self, key, value): return value def format_path(self, path): return str(path) class FPMFormat(RawFormat): PYTHON_METADATA_MAP = { 'summary': 'description', } def format_metadata(self, key, value): key = key.lower() key = self.PYTHON_METADATA_MAP.get(key, key) return f'--{key}={value}' class Formats(enum.Enum): RAW = RawFormat FPM = FPMFormat @classmethod def from_arg(cls, arg): try: return cls[arg.upper()] except KeyError: raise ValueError(f"unknown format {arg!r}") from None def report_binfiles(args): bin_names = [ PurePath('bin', path.name) for pkg_name in args.package_names for path in importlib.metadata.distribution(pkg_name).files if path.parts[-3:-1] == ('..', 'bin') ] fmt = args.format.value().format_path return (fmt(path) for path in bin_names) def report_metadata(args): dist = importlib.metadata.distribution(args.package_name) fmt = args.format.value().format_metadata for key in args.metadata_key: yield fmt(key, dist.metadata.get(key, '')) def unescape_str(arg): arg = arg.replace('\'', '\\\'') return eval(f"'''{arg}'''", {}) def parse_arguments(arglist=None): parser = argparse.ArgumentParser() parser.set_defaults(action=None) format_names = ', '.join(fmt.name.lower() for fmt in Formats) parser.add_argument( '--format', '-f', choices=list(Formats), default=Formats.RAW, type=Formats.from_arg, help=f"Output format. Choices are: {format_names}", ) parser.add_argument( '--delimiter', '-d', default='\n', type=unescape_str, help="Line ending. Python backslash escapes are supported. Default newline.", ) subparsers = parser.add_subparsers() binfiles = subparsers.add_parser('binfiles') binfiles.set_defaults(action=report_binfiles) binfiles.add_argument( 'package_names', nargs=argparse.ONE_OR_MORE, ) metadata = subparsers.add_parser('metadata') metadata.set_defaults(action=report_metadata) metadata.add_argument( 'package_name', ) metadata.add_argument( 'metadata_key', nargs=argparse.ONE_OR_MORE, ) args = parser.parse_args() if args.action is None: parser.error("subcommand is required") return args def main(arglist=None): args = parse_arguments(arglist) try: for line in args.action(args): print(line, end=args.delimiter) except importlib.metadata.PackageNotFoundError as error: print(f"error: package not found: {error.args[0]}", file=sys.stderr) return os.EX_NOTFOUND else: return os.EX_OK if __name__ == '__main__': exit(main()) ================================================ FILE: build/rails-package-scripts/README.md ================================================ [//]: # Copyright (C) The Arvados Authors. All rights reserved. [//]: # [//]: # SPDX-License-Identifier: AGPL-3.0 When run-build-packages.sh builds a Rails package, it generates the package's pre/post-inst/rm scripts by concatenating `arvados-api-server.sh` to define common variables, then the actual step script. Especially when this infrastructure was shared with the old Rails Workbench, this seemed like the least worst option to share code between these files and packages. More advanced code generation would've been too much trouble to integrate into our build process at this time. Trying to inject portions of files into other files seemed error-prone and likely to introduce bugs to the end result. 
================================================ FILE: build/rails-package-scripts/arvados-api-server.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 # This file declares variables common to all scripts for one Rails package. PACKAGE_NAME=arvados-api-server INSTALL_PATH=/var/www/arvados-api CONFIG_PATH=/etc/arvados/api DOC_URL="http://doc.arvados.org/install/install-api-server.html#configure" RELEASE_PATH=$INSTALL_PATH/current RELEASE_CONFIG_PATH=$RELEASE_PATH/config SHARED_PATH=$INSTALL_PATH/shared ================================================ FILE: build/rails-package-scripts/postinst.sh ================================================ #!/bin/sh # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 # This code runs after package variable definitions. set -e for DISTRO_FAMILY in $(. /etc/os-release && echo "${ID:-} ${ID_LIKE:-}"); do case "$DISTRO_FAMILY" in debian) RESETUP_CMD="dpkg-reconfigure $PACKAGE_NAME" break ;; rhel) RESETUP_CMD="dnf reinstall $PACKAGE_NAME" break ;; esac done if [ -z "$RESETUP_CMD" ]; then echo "$PACKAGE_NAME postinst skipped: don't recognize the distribution from /etc/os-release" >&2 exit 0 fi # This will be set to a command path after we install the version we need. BUNDLE= # systemd_ctl is just "systemctl if we booted with systemd, otherwise a noop." # This makes the package installable in Docker containers, albeit without any # service deployment. if [ -d /run/systemd/system ]; then systemd_ctl() { systemctl "$@"; } else systemd_ctl() { true; } fi systemd_quote() { if [ $# -ne 1 ]; then echo "error: systemd_quote requires exactly one argument" >&2 return 2 fi # See systemd.syntax(7) - Use double quotes with backslash escapes echo "$1" | sed -re 's/[\\"]/\\\0/g; s/^/"/; s/$/"/' } run_and_report() { # Usage: run_and_report ACTION_MSG CMD # This is the usual wrapper that prints ACTION_MSG, runs CMD, then writes # a message about whether CMD succeeded or failed. Returns the exit code # of CMD. local action_message="$1"; shift local retcode=0 echo -n "$action_message..." if "$@"; then echo " done." else retcode=$? echo " failed." fi return $retcode } report_not_ready() { local exitcode="$1"; shift local reason="$1"; shift local doc_url="${1:-}" case "$doc_url" in http://* | https://* ) ;; /*) doc_url="https://doc.arvados.org${doc_url}" ;; \#*) doc_url="https://doc.arvados.org/install/install-api-server.html${doc_url}" ;; *) doc_url="https://doc.arvados.org/install/${doc_url}" ;; esac cat >&2 <<EOF NOTE: The $PACKAGE_NAME package was not configured completely because $reason. Please see the following documentation to finish configuring it: $doc_url After you do that, resume $PACKAGE_NAME setup by running: $RESETUP_CMD EOF exit "${exitcode:-20}" } setup_confdirs() { local confdir confgrp case "$WWW_OWNER" in "") confgrp=root ;; *) confgrp="$WWW_OWNER" ;; esac for confdir in "$@"; do if [ ! -d "$confdir" ]; then install -d -g "$confgrp" -m 0750 "$confdir" fi done } setup_conffile() { # Usage: setup_conffile CONFFILE_PATH [SOURCE_PATH] # Both paths are relative to RELEASE_CONFIG_PATH. # This function will try to safely ensure that a symbolic link for # the configuration file points from RELEASE_CONFIG_PATH to CONFIG_PATH. # If SOURCE_PATH is given, this function will try to install that file as # the configuration file in CONFIG_PATH, and return 1 if the file in # CONFIG_PATH is unmodified from the source.
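# As a concrete illustration of the two-argument form, later in this script
# the production environment config is wired up with:
#     setup_conffile environments/production.rb environments/production.rb.example || true
# where the `|| true` swallows the return code of 1 that signals the file in
# CONFIG_PATH is still unmodified from the shipped example.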
local conffile_relpath="$1"; shift local conffile_source="$1" local release_conffile="$RELEASE_CONFIG_PATH/$conffile_relpath" local etc_conffile="$CONFIG_PATH/$(basename "$conffile_relpath")" # Note that -h can return true and -e will return false simultaneously # when the target is a dangling symlink. We're okay with that outcome, # so check -h first. if [ ! -h "$release_conffile" ]; then if [ ! -e "$release_conffile" ]; then ln -s "$etc_conffile" "$release_conffile" # If there's a config file in /var/www identical to the one in /etc, # overwrite it with a symlink after porting its permissions. elif cmp --quiet "$release_conffile" "$etc_conffile"; then local ownership="$(stat -c "%u:%g" "$release_conffile")" local owning_group="${ownership#*:}" if [ 0 != "$owning_group" ]; then chgrp "$owning_group" "$CONFIG_PATH" /etc/arvados fi chown "$ownership" "$etc_conffile" chmod --reference="$release_conffile" "$etc_conffile" ln --force -s "$etc_conffile" "$release_conffile" fi fi if [ -n "$conffile_source" ]; then if [ ! -e "$etc_conffile" ]; then install -g "$WWW_OWNER" -m 0640 \ "$RELEASE_CONFIG_PATH/$conffile_source" "$etc_conffile" return 1 # Even if $etc_conffile already existed, it might be unmodified from # the source. This is especially likely when a user installs, updates # database.yml, then reconfigures before they update application.yml. # Use cmp to be sure whether $etc_conffile is modified. elif cmp --quiet "$RELEASE_CONFIG_PATH/$conffile_source" "$etc_conffile"; then return 1 fi fi } prepare_database() { # Prevent PostgreSQL from trying to page output unset PAGER DB_MIGRATE_STATUS=`"$BUNDLE" exec bin/rake db:migrate:status 2>&1 || true` if echo "$DB_MIGRATE_STATUS" | grep -qF 'Schema migrations table does not exist yet.'; then # The database exists, but the migrations table doesn't. run_and_report "Setting up database" "$BUNDLE" exec bin/rake db:schema:load db:seed elif echo "$DB_MIGRATE_STATUS" | grep -q '^database: '; then run_and_report "Running db:migrate" "$BUNDLE" exec bin/rake db:migrate db:seed elif echo "$DB_MIGRATE_STATUS" | grep -q 'database .* does not exist'; then run_and_report "Running db:setup" "$BUNDLE" exec bin/rake db:setup else # We don't have enough configuration to even check the database. return 1 fi } case "$DISTRO_FAMILY" in debian) WWW_OWNER=www-data ;; rhel) WWW_OWNER="$(id --group --name nginx || true)" ;; esac # Before we do anything else, make sure some directories and files are in place if [ ! -e $SHARED_PATH/log ]; then mkdir -p $SHARED_PATH/log; fi if [ ! -e $RELEASE_PATH/tmp ]; then mkdir -p $RELEASE_PATH/tmp; fi if [ ! -e $RELEASE_PATH/log ]; then ln -s $SHARED_PATH/log $RELEASE_PATH/log; fi if [ ! -e $SHARED_PATH/log/production.log ]; then touch $SHARED_PATH/log/production.log; fi cd "$RELEASE_PATH" export RAILS_ENV=production # Bundler behaves inconsistently when gems are available system-wide. # Avoid those bugs by starting with a GEM_HOME that *only* contains Bundler. export GEM_HOME="$SHARED_PATH/bundler" export GEM_PATH="$GEM_HOME" # We still need to set directory switches because RHEL configures `gem` with # built-in options that override the environment variables. 
run_and_report "Installing bundler" gem install \ --bindir "$GEM_HOME/bin" \ --install-dir "$GEM_HOME" \ --version "~> 2.5.0" \ bundler BUNDLE="$GEM_HOME/bin/bundle" run_and_report "Running bundle install" "$BUNDLE" install --prefer-local --quiet run_and_report "Verifying bundle is complete" "$BUNDLE" exec true # Some of our infrastructure expects `bundler` to be available system-wide # after installing arvados-api-server. Ensure that's the case. # TODO: Make the other infrastructure stop doing that, then delete this code. for bcmd in bundle bundler; do if ! command -v "$bcmd" >/dev/null 2>&1; then cat >"/usr/local/bin/$bcmd" <&2 exit 12 fi "$BUNDLE" exec "$passenger-config" build-native-support # `passenger-config install-standalone-runtime` downloads an agent, but at # least with Passenger 6.0.23 (late 2024), that version tends to segfault. # Compiling our own is safer. "$BUNDLE" exec "$passenger-config" compile-agent --auto --optimize "$BUNDLE" exec "$passenger-config" install-standalone-runtime --auto --brief echo -n "Creating symlinks to configuration in $CONFIG_PATH ..." setup_confdirs /etc/arvados "$CONFIG_PATH" setup_conffile environments/production.rb environments/production.rb.example \ || true # Rails 5.2 does not tolerate dangling symlinks in the initializers # directory, and this one can still be there, left over from a previous # version of the API server package. rm -f $RELEASE_PATH/config/initializers/omniauth.rb echo "... done." echo -n "Extending systemd unit configuration ..." if [ -z "$WWW_OWNER" ]; then systemd_group="%N" else systemd_group="$(systemd_quote "$WWW_OWNER")" fi install -d /lib/systemd/system/arvados-railsapi.service.d # The 20 prefix is chosen so most user overrides should come after, which # is what most admins will expect, but there's still space to put drop-ins # earlier. cat >/lib/systemd/system/arvados-railsapi.service.d/20-postinst.conf <= 20.3 # Technically this shouldn't be required: the build process should get the # build requirements listed in pyproject.toml. But it's nice to have it # cached. setuptools ~= 80.9 wheel ================================================ FILE: build/requirements.tests.txt ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 # # Python requirements for run-tests ### Requirements for run-tests.sh itself -r requirements.build.txt # Required by sdk/python/tests/run_test_server.py # which is run directly by run-tests.sh PyYAML # yq is used by run-tests.sh directly and controller tests yq ~= 3.4 ### Requirements for Python tests generally # Required by older, unittest-style Python tests # Prefer using pytest.mark.parametrize in new tests parameterized # Our chosen Python testing tool pytest ### Requirements for individual tests # Run by CWL integration tests cwltest >= 2.5.20241122133319, < 3.0 # Required to build Python SDK documentation pdoc ~= 16.0 # Used by controller and keep-web tests s3cmd ================================================ FILE: build/run-build-packages-all-targets.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 read -rd "\000" helpmessage < Version to build (default: \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or 0.1.timestamp.commithash) WORKSPACE=path Path to the Arvados source tree to build packages from EOF if ! 
[[ -n "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: WORKSPACE environment variable not set" echo >&2 exit 1 fi if ! [[ -d "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: $WORKSPACE is not a directory" echo >&2 exit 1 fi set -e PARSEDOPTS=$(getopt --name "$0" --longoptions \ help,test-packages,debug,command:,only-test:,build-version: \ -- "" "$@") if [ $? -ne 0 ]; then exit 1 fi COMMAND= DEBUG= TEST_PACKAGES= ONLY_TEST= eval set -- "$PARSEDOPTS" while [ $# -gt 0 ]; do case "$1" in --help) echo >&2 "$helpmessage" echo >&2 exit 1 ;; --debug) DEBUG="--debug" ;; --command) COMMAND="$2"; shift ;; --test-packages) TEST_PACKAGES="--test-packages" ;; --only-test) ONLY_TEST="$1 $2"; shift ;; --build-version) ARVADOS_BUILDING_VERSION="$2"; shift ;; --) if [ $# -gt 1 ]; then echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help" exit 1 fi ;; esac shift done cd $(dirname $0) FINAL_EXITCODE=0 for pkgtest_path in package-testing/test-packages-*.sh; do target="$(basename "${pkgtest_path##*-}" .sh)" if ./run-build-packages-one-target.sh --target "$target" --command "$COMMAND" --build-version "$ARVADOS_BUILDING_VERSION" $DEBUG $TEST_PACKAGES $ONLY_TEST ; then true else FINAL_EXITCODE=$? echo echo "Build packages failed for $(basename $(dirname "$dockerfile_path"))" echo fi done if test $FINAL_EXITCODE != 0 ; then echo echo "Build packages failed with code $FINAL_EXITCODE" >&2 echo fi exit $FINAL_EXITCODE ================================================ FILE: build/run-build-packages-one-target.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 read -rd "\000" helpmessage < [options] --target Distribution to build packages for --command Build command to execute (default: use built-in Docker image command) --test-packages Run package install test script "test-packages-[target].sh" --debug Output debug information (default: false) --only-build Build only a specific package --only-test Test only a specific package --force-build Build even if the package exists upstream or if it has already been built locally --force-test Test even if there is no new untested package --build-version Version to build (default: \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or 0.1.timestamp.commithash) --skip-docker-build Don't try to build Docker images WORKSPACE=path Path to the Arvados source tree to build packages from EOF set -e if ! [[ -n "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: WORKSPACE environment variable not set" echo >&2 exit 1 fi if ! [[ -d "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: $WORKSPACE is not a directory" echo >&2 exit 1 fi PARSEDOPTS=$(getopt --name "$0" --longoptions \ help,debug,test-packages,target:,command:,only-test:,force-test,only-build:,force-build,arch:,build-version:,skip-docker-build \ -- "" "$@") if [ $? 
-ne 0 ]; then exit 1 fi FORCE_BUILD=0 COMMAND=run-build-packages.sh DEBUG= TARGET= eval set -- "$PARSEDOPTS" while [ $# -gt 0 ]; do case "$1" in --help) echo >&2 "$helpmessage" echo >&2 exit 1 ;; --target) TARGET="$2"; shift ;; --only-test) test_packages=1 testing_one_package=1 packages="$2"; shift ;; --force-test) FORCE_TEST=true ;; --force-build) FORCE_BUILD=1 ;; --only-build) ONLY_BUILD="$2"; shift ;; --arch) case "$2" in amd64) ;; *) printf "FATAL: --arch '%s' is not supported" "$2" >&2 exit 2 ;; esac ARCH="$2"; shift ;; --debug) DEBUG=" --debug" ARVADOS_DEBUG="1" ;; --command) COMMAND="$2"; shift ;; --test-packages) test_packages=1 ;; --build-version) if [[ -z "$2" ]]; then : elif ! [[ "$2" =~ (.*)-(.*) ]]; then echo >&2 "FATAL: --build-version '$2' does not include an iteration. Try '${2}-1'?" exit 1 elif ! [[ "$2" =~ ^[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+|)(~rc[0-9]+|~dev[0-9]+|)-[0-9]+$ ]]; then echo >&2 "FATAL: --build-version '$2' is invalid, must match pattern ^[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+|)(~rc[0-9]+|~dev[0-9]+|)-[0-9]+$" exit 1 else [[ "$2" =~ (.*)-(.*) ]] ARVADOS_BUILDING_VERSION="${BASH_REMATCH[1]}" ARVADOS_BUILDING_ITERATION="${BASH_REMATCH[2]}" fi shift ;; --skip-docker-build) SKIP_DOCKER_BUILD=1 ;; --) if [ $# -gt 1 ]; then echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help" exit 1 fi ;; esac shift done set -e orig_umask="$(umask)" if [[ -z "$TARGET" ]]; then echo "FATAL: --target must be specified" >&2 exit 2 elif [[ ! -e "$WORKSPACE/build/package-testing/test-packages-$TARGET.sh" ]]; then echo "FATAL: unknown build target '$TARGET'" >&2 exit 2 fi if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then echo "build version='$ARVADOS_BUILDING_VERSION', package iteration='$ARVADOS_BUILDING_ITERATION'" fi if [[ -n "$test_packages" ]]; then # Packages are built world-readable, so package indexes should be too, # especially because since 2022 apt uses an unprivileged user `_apt` to # retrieve everything. Ensure it has permissions to read the packages # when mounted as a volume inside the Docker container. chmod a+rx "$WORKSPACE" "$WORKSPACE/packages" "$WORKSPACE/packages/$TARGET" umask 022 if [[ -n "$(find $WORKSPACE/packages/$TARGET -name '*.rpm')" ]] ; then CREATEREPO="$(command -v createrepo createrepo_c | tail -n1)" if [[ -z "$CREATEREPO" ]]; then echo >&2 echo >&2 "Error: please install createrepo. E.g. sudo apt install createrepo-c" echo >&2 exit 1 fi "$CREATEREPO" $WORKSPACE/packages/$TARGET fi if [[ -n "$(find $WORKSPACE/packages/$TARGET -name '*.deb')" ]] ; then set +e /usr/bin/which dpkg-scanpackages >/dev/null if [[ "$?" != "0" ]]; then echo >&2 echo >&2 "Error: please install dpkg-dev. E.g. sudo apt-get install dpkg-dev" echo >&2 exit 1 fi /usr/bin/which apt-ftparchive >/dev/null if [[ "$?" != "0" ]]; then echo >&2 echo >&2 "Error: please install apt-utils. E.g. sudo apt-get install apt-utils" echo >&2 exit 1 fi set -e (cd $WORKSPACE/packages/$TARGET dpkg-scanpackages --multiversion . 2> >(grep -v 'warning' 1>&2) | tee Packages | gzip -c > Packages.gz apt-ftparchive -o APT::FTPArchive::Release::Origin=Arvados release . 
> Release ) fi COMMAND="/jenkins/package-testing/test-packages-$TARGET.sh" IMAGE="arvados/package-test:$TARGET" umask "$orig_umask" else IMAGE="arvados/build:$TARGET" COMMAND="bash /jenkins/$COMMAND --target $TARGET$DEBUG" fi JENKINS_DIR=$(dirname "$(readlink -e "$0")") if [[ "$SKIP_DOCKER_BUILD" != 1 ]] ; then env -C "$WORKSPACE/tools/ansible" ansible-galaxy install -r requirements.yml declare -a ansible_opts=() if [[ -n "$test_packages" ]]; then ansible_opts+=( --extra-vars=arvados_build_playbook=setup-package-tests.yml --limit="arvados_pkgtest_$TARGET" ) else ansible_opts+=( --extra-vars=arvados_build_playbook=install-dev-tools.yml --limit="arvados_pkgbuild_$TARGET" ) fi env -C "$WORKSPACE/tools/ansible" ansible-playbook \ --inventory=files/development-docker-images.yml \ "${ansible_opts[@]}" build-docker-image.yml unset ansible_opts fi if test -z "$packages" ; then packages="arvados-api-server arvados-client arvados-controller arvados-dispatch-cloud arvados-dispatch-lsf arvados-docker-cleaner arvados-health arvados-server arvados-src arvados-sync-groups arvados-sync-users arvados-workbench2 arvados-ws crunch-dispatch-local crunch-dispatch-slurm crunch-run keep-balance keep-block-check keep-exercise keep-rsync keep-web keepproxy keepstore libpam-arvados-go python3-arvados-cwl-runner python3-arvados-fuse python3-arvados-python-client python3-arvados-user-activity python3-arvados-cluster-activity python3-crunchstat-summary" fi FINAL_EXITCODE=0 package_fails="" mkdir -p "$WORKSPACE/services/api/vendor/cache-$TARGET" docker_volume_args=( --mount "type=bind,src=$JENKINS_DIR,dst=/jenkins" --mount "type=bind,src=$WORKSPACE,dst=/arvados" --tmpfs /arvados/services/api/.bundle:rw,noexec,nosuid,size=1m --tmpfs /arvados/services/api/vendor:rw,exec,nosuid,size=1g --mount "type=bind,src=$WORKSPACE/services/api/vendor/cache-$TARGET,dst=/arvados/services/api/vendor/cache" ) if [[ -n "$test_packages" ]]; then for p in $packages ; do if [[ -n "$ONLY_BUILD" ]] && [[ "$p" != "$ONLY_BUILD" ]]; then continue fi if [[ -e "${WORKSPACE}/packages/.last_test_${TARGET}" ]] && [[ -z "$FORCE_TEST" ]]; then MATCH=`find ${WORKSPACE}/packages/ -newer ${WORKSPACE}/packages/.last_test_${TARGET} -regex .*${TARGET}/$p.*` if [[ "$MATCH" == "" ]]; then # No new package has been built that needs testing echo "Skipping $p test because no new package was built since the last test." continue fi fi # If we're testing all packages, we should not error out on packages that don't exist. # If we are testing one specific package only (i.e. --only-test was given), we should # error out if that package does not exist. if [[ -z "$testing_one_package" ]]; then MATCH=`find ${WORKSPACE}/packages/ -regextype posix-extended -regex .*${TARGET}/$p.*\\(deb\\|rpm\\)` if [[ "$MATCH" == "" ]]; then # No new package has been built that needs testing echo "Skipping $p test because no package file is available to test." continue fi fi echo echo "START: $p test on $IMAGE" >&2 if docker run \ --rm \ "${docker_volume_args[@]}" \ --env ARVADOS_DEBUG=$ARVADOS_DEBUG \ --env "TARGET=$TARGET" \ --env "WORKSPACE=/arvados" \ "$IMAGE" $COMMAND $p then echo "OK: $p test on $IMAGE succeeded" >&2 else FINAL_EXITCODE=$? package_fails="$package_fails $p" echo "ERROR: $p test on $IMAGE failed with exit status $FINAL_EXITCODE" >&2 fi done if [[ "$FINAL_EXITCODE" == "0" ]]; then touch ${WORKSPACE}/packages/.last_test_${TARGET} fi else echo echo "START: build packages on $IMAGE" >&2 # Move existing packages and other files into the processed/ subdirectory if [[ ! 
-e "${WORKSPACE}/packages/${TARGET}/processed" ]]; then mkdir -p "${WORKSPACE}/packages/${TARGET}/processed" fi set +e mv -f ${WORKSPACE}/packages/${TARGET}/* ${WORKSPACE}/packages/${TARGET}/processed/ 2>/dev/null set -e # give bundle (almost) all the cores. See also the MAKE env var that is passed into the # docker run command below. # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ tmpfile=$(mktemp /tmp/run-build-packages-one-target.XXXXXX) cores=$(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a) printf -- "---\nBUNDLE_JOBS: \"$cores\"" > $tmpfile # Build packages. if docker run \ --rm \ "${docker_volume_args[@]}" \ --mount "type=bind,src=$tmpfile,dst=/root/.bundle/config" \ --env ARVADOS_BUILDING_VERSION="$ARVADOS_BUILDING_VERSION" \ --env ARVADOS_BUILDING_ITERATION="$ARVADOS_BUILDING_ITERATION" \ --env ARVADOS_DEBUG=$ARVADOS_DEBUG \ --env "ONLY_BUILD=$ONLY_BUILD" \ --env "FORCE_BUILD=$FORCE_BUILD" \ --env "ARCH=$ARCH" \ --env "MAKE=make --jobs $cores" \ "$IMAGE" $COMMAND then echo echo "OK: build packages on $IMAGE succeeded" >&2 else FINAL_EXITCODE=$? echo "ERROR: build packages on $IMAGE failed with exit status $FINAL_EXITCODE" >&2 fi # Clean up the bundle config file rm -f $tmpfile fi if test -n "$package_fails" ; then echo "Failed package tests:$package_fails" >&2 fi exit $FINAL_EXITCODE ================================================ FILE: build/run-build-packages-python-and-ruby.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 COLUMNS=80 . `dirname "$(readlink -f "$0")"`/run-library.sh read -rd "\000" helpmessage < Build ruby gems (default: true) --python Build python packages (default: true) WORKSPACE=path Path to the Arvados source tree to build packages from EOF exit_cleanly() { trap - INT report_outcomes exit ${#failures[@]} } gem_wrapper() { local gem_name="$1"; shift local gem_directory="$1"; shift title "Start $gem_name gem build" timer_reset cd "$gem_directory" handle_ruby_gem $gem_name checkexit $? "$gem_name gem build" title "End of $gem_name gem build (`timer`)" } python_wrapper() { local package_name="$1"; shift local package_directory="$1"; shift title "Start $package_name python package build" timer_reset python3 -m build "$package_directory" checkexit $? "$package_name python package build" title "End of $package_name python package build (`timer`)" } TARGET= UPLOAD=0 RUBY=1 PYTHON=1 DEBUG=${ARVADOS_DEBUG:-0} PARSEDOPTS=$(getopt --name "$0" --longoptions \ help,debug,ruby:,python:,upload,target: \ -- "" "$@") if [ $? -ne 0 ]; then exit 1 fi eval set -- "$PARSEDOPTS" while [ $# -gt 0 ]; do case "$1" in --help) echo >&2 "$helpmessage" echo >&2 exit 1 ;; --target) TARGET="$2"; shift ;; --ruby) RUBY="$2"; shift if [ "$RUBY" != "true" ] && [ "$RUBY" != "1" ]; then RUBY=0 else RUBY=1 fi ;; --python) PYTHON="$2"; shift if [ "$PYTHON" != "true" ] && [ "$PYTHON" != "1" ]; then PYTHON=0 else PYTHON=1 fi ;; --upload) UPLOAD=1 ;; --debug) DEBUG=1 ;; --) if [ $# -gt 1 ]; then echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help" exit 1 fi ;; esac shift done if ! 
[[ -n "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: WORKSPACE environment variable not set" echo >&2 exit 1 fi STDOUT_IF_DEBUG=/dev/null STDERR_IF_DEBUG=/dev/null DASHQ_UNLESS_DEBUG=-q if [[ "$DEBUG" != 0 ]]; then STDOUT_IF_DEBUG=/dev/stdout STDERR_IF_DEBUG=/dev/stderr DASHQ_UNLESS_DEBUG= fi RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`" RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`" # absolutized and normalized if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then # error; for some reason, the path is not accessible # to the script (e.g. permissions re-evaled after suid) exit 1 # fail fi debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH" debug_echo "Workspace is $WORKSPACE" if [ $RUBY -eq 0 ] && [ $PYTHON -eq 0 ]; then echo "Nothing to do!" exit 0 fi # Make all files world-readable -- jenkins runs with umask 027, and has checked # out our git tree here chmod o+r "$WORKSPACE" -R # More cleanup - make sure all executables that we'll package are 755 cd "$WORKSPACE" find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {} # Now fix our umask to something better suited to building and publishing # gems and packages umask 0022 debug_echo "umask is" `umask` GEM_BUILD_FAILURES=0 if [ $RUBY -eq 1 ]; then debug_echo "Building Ruby gems" gem_wrapper arvados "$WORKSPACE/sdk/ruby" gem_wrapper arvados-cli "$WORKSPACE/sdk/cli" gem_wrapper arvados-login-sync "$WORKSPACE/services/login-sync" if [ ${#failures[@]} -ne 0 ]; then GEM_BUILD_FAILURES=${#failures[@]} fi fi PYTHON_BUILD_FAILURES=0 if [ $PYTHON -eq 1 ]; then debug_echo "Building Python packages" python_wrapper arvados-python-client "$WORKSPACE/sdk/python" python_wrapper arvados-cwl-runner "$WORKSPACE/sdk/cwl" python_wrapper arvados_fuse "$WORKSPACE/services/fuse" python_wrapper crunchstat_summary "$WORKSPACE/tools/crunchstat-summary" python_wrapper arvados-user-activity "$WORKSPACE/tools/user-activity" python_wrapper arvados-cluster-activity "$WORKSPACE/tools/cluster-activity" if [ $((${#failures[@]} - $GEM_BUILD_FAILURES)) -ne 0 ]; then PYTHON_BUILD_FAILURES=$((${#failures[@]} - $GEM_BUILD_FAILURES)) fi fi if [ $UPLOAD -ne 0 ]; then if get_ci_scripts then checkexit $? "get CI scripts" else checkexit $? "get CI scripts" UPLOAD=0 fi fi if [ $UPLOAD -ne 0 ]; then echo "Uploading" if [ $DEBUG > 0 ]; then EXTRA_UPLOAD_FLAGS=" --verbose" else EXTRA_UPLOAD_FLAGS="" fi if [ ! -e "$WORKSPACE/packages" ]; then mkdir -p "$WORKSPACE/packages" fi if [ $PYTHON -eq 1 ]; then title "Start upload python packages" timer_reset if [ $PYTHON_BUILD_FAILURES -eq 0 ]; then "$CI_DIR/run_upload_packages.py" $EXTRA_UPLOAD_FLAGS --workspace $WORKSPACE python else echo "Skipping python packages upload, there were errors building the packages" fi checkexit $? "upload python packages" title "End of upload python packages (`timer`)" fi if [ $RUBY -eq 1 ]; then title "Start upload ruby gems" timer_reset if [ $GEM_BUILD_FAILURES -eq 0 ]; then "$CI_DIR/run_upload_packages.py" $EXTRA_UPLOAD_FLAGS --workspace $WORKSPACE gems else echo "Skipping ruby gem upload, there were errors building the packages" fi checkexit $? "upload ruby gems" title "End of upload ruby gems (`timer`)" fi fi exit_cleanly ================================================ FILE: build/run-build-packages.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 . 
"$(dirname "$(readlink -f "$0")")"/run-library.sh || exit 1 read -rd "\000" helpmessage < [options] Options: --build-bundle-packages (default: false) Build api server package with vendor/bundle included --debug Output debug information (default: false) --target Distribution to build packages for --only-build Build only a specific package (or ONLY_BUILD from environment) --force-build Build even if the package exists upstream or if it has already been built locally --command Build command to execute (defaults to the run command defined in the Docker image) WORKSPACE=path Path to the Arvados source tree to build packages from EOF # Begin of user configuration # set to --no-cache-dir to disable pip caching CACHE_FLAG= MAINTAINER="Arvados Package Maintainers " VENDOR="The Arvados Project" # End of user configuration DEBUG=${ARVADOS_DEBUG:-0} FORCE_BUILD=${FORCE_BUILD:-0} EXITCODE=0 COMMAND= TARGET= PARSEDOPTS=$(getopt --name "$0" --longoptions \ help,build-bundle-packages,debug,target:,only-build:,arch:,force-build \ -- "" "$@") if [ $? -ne 0 ]; then exit 1 fi eval set -- "$PARSEDOPTS" while [ $# -gt 0 ]; do case "$1" in --help) echo >&2 "$helpmessage" echo >&2 exit 1 ;; --target) TARGET="$2"; shift ;; --only-build) ONLY_BUILD="$2"; shift ;; --force-build) FORCE_BUILD=1 ;; --arch) case "$2" in amd64) ;; *) printf "FATAL: --arch '%s' is not supported" "$2" >&2 exit 2 ;; esac ARCH="$2"; shift ;; --debug) DEBUG=1 ;; --command) COMMAND="$2"; shift ;; --) if [ $# -gt 1 ]; then echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help" exit 1 fi ;; esac shift done if [[ -z "$TARGET" ]]; then echo "FATAL: --target must be specified" >&2 exit 2 elif [[ ! -e "$WORKSPACE/build/package-testing/test-packages-$TARGET.sh" ]]; then echo "FATAL: unknown build target '$TARGET'" >&2 exit 2 fi if [[ "$COMMAND" != "" ]]; then COMMAND="bash /jenkins/$COMMAND --target $TARGET" fi STDOUT_IF_DEBUG=/dev/null STDERR_IF_DEBUG=/dev/null DASHQ_UNLESS_DEBUG=-q if [[ "$DEBUG" != 0 ]]; then STDOUT_IF_DEBUG=/dev/stdout STDERR_IF_DEBUG=/dev/stderr DASHQ_UNLESS_DEBUG= fi # The next section defines a bunch of constants used to build distro packages # for our Python tools. Because those packages include C extensions, they need # to depend on and refer to a specific minor version of Python 3. The logic # below should Just Work for most cases, but you can override variables for a # specific distro if you need to to do something weird. # * PYTHON3_VERSION: The major+minor version of Python we build against # (e.g., "3.11") # * PYTHON3_EXECUTABLE: The command to run that version of Python, # either a full path or something in $PATH (e.g., "python3.11") # * PYTHON3_PACKAGE: The name of the distro package that provides # $PYTHON3_EXECUTABLE. Our Python packages will all depend on this. # * PYTHON3_PKG_PREFIX: The prefix used in the names of all of our Python # packages. This should match distro convention. 
PYTHON3_PKG_PREFIX=python3 case "$TARGET" in rocky9) FORMAT=rpm PYTHON3_VERSION=3.11 ;; centos*|rocky*) FORMAT=rpm ;; debian*|ubuntu*) FORMAT=deb ;; *) echo -e "$0: Unknown target '$TARGET'.\n" >&2 exit 1 ;; esac : "${PYTHON3_VERSION:=$("${PYTHON3_EXECUTABLE:-python3}" -c 'import sys; print("{v.major}.{v.minor}".format(v=sys.version_info))')}" : "${PYTHON3_EXECUTABLE:=python$PYTHON3_VERSION}" case "$FORMAT" in deb) : "${PYTHON3_PACKAGE:=python$PYTHON3_VERSION}" ;; rpm) : "${PYTHON3_PACKAGE:=$(rpm -qf "$(command -v "$PYTHON3_EXECUTABLE")" --queryformat '%{NAME}\n')}" ;; esac if [[ -z "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: WORKSPACE environment variable not set" echo >&2 exit 1 fi # Test for fpm fpm --version >/dev/null 2>&1 if [[ $? -ne 0 ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: fpm not found" echo >&2 exit 1 fi RUN_BUILD_PACKAGES_PATH="$(dirname "$0")" RUN_BUILD_PACKAGES_PATH="$(cd "$RUN_BUILD_PACKAGES_PATH" && pwd)" # absolutized and normalized if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then # error; for some reason, the path is not accessible # to the script (e.g. permissions re-evaled after suid) exit 1 # fail fi debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH" debug_echo "Workspace is $WORKSPACE" # Make all files world-readable -- jenkins runs with umask 027, and has checked # out our git tree here chmod o+r "$WORKSPACE" -R # More cleanup - make sure all executables that we'll package are 755 cd "$WORKSPACE" || exit 1 find . -type d -name 'bin' -print0 |xargs -0 -I {} find {} -type f -print0 |xargs -0 -I {} chmod 755 {} # Now fix our umask to something better suited to building and publishing # gems and packages umask 0022 debug_echo "umask is" "$(umask)" if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then mkdir -p "$WORKSPACE/packages/$TARGET" chown --reference="$WORKSPACE" "$WORKSPACE/packages/$TARGET" fi # Required due to CVE-2022-24765 git config --global --add safe.directory /arvados # Ruby gems debug_echo -e "\nRuby gems\n" FPM_GEM_PREFIX=$(gem environment gemdir) cd "$WORKSPACE/sdk/ruby" || exit 1 handle_ruby_gem arvados cd "$WORKSPACE/sdk/cli" || exit 1 handle_ruby_gem arvados-cli cd "$WORKSPACE/services/login-sync" || exit 1 handle_ruby_gem arvados-login-sync # arvados-src handle_arvados_src # Go packages debug_echo -e "\nGo packages\n" # Go binaries export GOPATH=~/go package_go_binary cmd/arvados-client arvados-client "$FORMAT" "$ARCH" \ "Arvados command line tool (beta)" package_go_binary cmd/arvados-server arvados-server "$FORMAT" "$ARCH" \ "Arvados server daemons" package_go_binary cmd/arvados-server arvados-controller "$FORMAT" "$ARCH" \ "Arvados cluster controller daemon" package_go_binary cmd/arvados-server arvados-dispatch-cloud "$FORMAT" "$ARCH" \ "Arvados cluster cloud dispatch" package_go_binary cmd/arvados-server arvados-dispatch-lsf "$FORMAT" "$ARCH" \ "Dispatch Arvados containers to an LSF cluster" package_go_binary services/crunch-dispatch-local crunch-dispatch-local "$FORMAT" "$ARCH" \ "Dispatch Crunch containers on the local system" package_go_binary cmd/arvados-server crunch-dispatch-slurm "$FORMAT" "$ARCH" \ "Dispatch Crunch containers to a SLURM cluster" package_go_binary cmd/arvados-server crunch-run "$FORMAT" "$ARCH" \ "Supervise a single Crunch container" package_go_binary cmd/arvados-server arvados-health "$FORMAT" "$ARCH" \ "Check health of all Arvados cluster services" package_go_binary cmd/arvados-server keep-balance "$FORMAT" "$ARCH" \ "Rebalance and garbage-collect data blocks stored in Arvados 
Keep" package_go_binary cmd/arvados-server keepproxy "$FORMAT" "$ARCH" \ "Make a Keep cluster accessible to clients that are not on the LAN" package_go_binary cmd/arvados-server keepstore "$FORMAT" "$ARCH" \ "Keep storage daemon, accessible to clients on the LAN" package_go_binary cmd/arvados-server keep-web "$FORMAT" "$ARCH" \ "Static web hosting service for user data stored in Arvados Keep" package_go_binary cmd/arvados-server arvados-ws "$FORMAT" "$ARCH" \ "Arvados Websocket server" package_go_binary tools/sync-groups arvados-sync-groups "$FORMAT" "$ARCH" \ "Synchronize remote groups into Arvados from an external source" package_go_binary tools/sync-users arvados-sync-users "$FORMAT" "$ARCH" \ "Synchronize remote users into Arvados from an external source" package_go_binary tools/keep-block-check keep-block-check "$FORMAT" "$ARCH" \ "Verify that all data from one set of Keep servers to another was copied" package_go_binary tools/keep-rsync keep-rsync "$FORMAT" "$ARCH" \ "Copy all data from one set of Keep servers to another" package_go_binary tools/keep-exercise keep-exercise "$FORMAT" "$ARCH" \ "Performance testing tool for Arvados Keep" package_go_so lib/pam pam_arvados.so libpam-arvados-go "$FORMAT" "$ARCH" \ "Arvados PAM authentication module" # Python packages debug_echo -e "\nPython packages\n" # Before a Python package can be built, its dependencies must already be built. # This list is ordered accordingly. setup_build_virtualenv fpm_build_virtualenv "arvados-python-client" "sdk/python" "$FORMAT" "$ARCH" fpm_build_virtualenv "crunchstat-summary" "tools/crunchstat-summary" "$FORMAT" "$ARCH" fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl" "$FORMAT" "$ARCH" fpm_build_virtualenv "arvados-docker-cleaner" "services/dockercleaner" "$FORMAT" "$ARCH" fpm_build_virtualenv "arvados-fuse" "services/fuse" "$FORMAT" "$ARCH" fpm_build_virtualenv "arvados-user-activity" "tools/user-activity" "$FORMAT" "$ARCH" fpm_build_virtualenv "arvados-cluster-activity" "tools/cluster-activity" "$FORMAT" "$ARCH" # Workbench2 package_workbench2 # Rails packages debug_echo -e "\nRails packages\n" # The rails api server package handle_api_server "$ARCH" # clean up temporary GOPATH rm -rf "$GOPATH" exit $EXITCODE ================================================ FILE: build/run-build-test-packages-one-target.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 read -rd "\000" helpmessage < [options] --target Distribution to build packages for --only-build Build only a specific package (or ONLY_BUILD from environment) --arch Build a specific architecture (or ARCH from environment, defaults to native architecture) --force-build Build even if the package exists upstream or if it has already been built locally --force-test Test even if there is no new untested package --upload If the build and test steps are successful, upload the packages to a remote apt repository (default: false) --debug Output debug information (default: false) --rc Optional Parameter to build Release Candidate --build-version Version to build (default: \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or 0.1.timestamp.commithash) --skip-docker-build Don't try to build Docker images WORKSPACE=path Path to the Arvados source tree to build packages from EOF if ! [[ -n "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: WORKSPACE environment variable not set" echo >&2 exit 1 fi if ! 
[[ -d "$WORKSPACE" ]]; then echo >&2 "$helpmessage" echo >&2 echo >&2 "Error: $WORKSPACE is not a directory" echo >&2 exit 1 fi PARSEDOPTS=$(getopt --name "$0" --longoptions \ help,debug,upload,rc,target:,force-test,only-build:,force-build,arch:,build-version:,skip-docker-build \ -- "" "$@") if [ $? -ne 0 ]; then exit 1 fi UPLOAD=0 UPLOAD_REPO=dev DEBUG= TARGET= declare -a build_args=() eval set -- "$PARSEDOPTS" while [ $# -gt 0 ]; do case "$1" in --help) echo >&2 "$helpmessage" echo >&2 exit 1 ;; --target) TARGET="$2"; shift ;; --force-test) FORCE_TEST=1 ;; --force-build) FORCE_BUILD=1 ;; --only-build) ONLY_BUILD="$2"; shift ;; --arch) ARCH="$2"; shift ;; --debug) DEBUG=" --debug" ;; --upload) UPLOAD=1 ;; --rc) UPLOAD_REPO=testing ;; --build-version) build_args+=("$1" "$2") shift ;; --skip-docker-build) SKIP_DOCKER_BUILD=1 ;; --) if [ $# -gt 1 ]; then echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help" exit 1 fi ;; esac shift done if [[ -z "$TARGET" ]]; then echo "FATAL: --target must be specified" >&2 exit 2 elif [[ ! -e "$WORKSPACE/build/package-testing/test-packages-$TARGET.sh" ]]; then echo "FATAL: unknown build target '$TARGET'" >&2 exit 2 fi build_args+=(--target "$TARGET") if [[ -n "$ONLY_BUILD" ]]; then build_args+=(--only-build "$ONLY_BUILD") fi if [[ -n "$FORCE_BUILD" ]]; then build_args+=(--force-build) fi if [[ -n "$FORCE_TEST" ]]; then build_args+=(--force-test) fi if [[ "$SKIP_DOCKER_BUILD" = 1 ]]; then build_args+=(--skip-docker-build) fi if [[ -n "$ARCH" ]]; then build_args+=(--arch "$ARCH") fi exit_cleanly() { trap - INT report_outcomes exit ${#failures} } COLUMNS=80 . $WORKSPACE/build/run-library.sh title "Start build packages" timer_reset $WORKSPACE/build/run-build-packages-one-target.sh "${build_args[@]}"$DEBUG checkexit $? "build packages" title "End of build packages (`timer`)" title "Start test packages" timer_reset if [ ${#failures[@]} -eq 0 ]; then $WORKSPACE/build/run-build-packages-one-target.sh "${build_args[@]}" --test-packages$DEBUG else echo "Skipping package upload, there were errors building the packages" fi checkexit $? "test packages" title "End of test packages (`timer`)" if [[ "$UPLOAD" != 0 ]]; then title "Start upload packages" timer_reset get_ci_scripts checkexit $? "get CI scripts" if [ ${#failures[@]} -eq 0 ]; then "$CI_DIR/run_upload_packages.py" \ --repo="$UPLOAD_REPO" \ -H jenkinsapt@apt.arvados.org \ --workspace="$WORKSPACE" \ "$TARGET" checkexit $? "upload packages" else echo "Skipping package upload, there were errors building and/or testing the packages" fi title "End of upload packages (`timer`)" fi exit_cleanly ================================================ FILE: build/run-library.sh ================================================ #!/bin/bash -xe # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 # A library of functions shared by the various scripts in this directory. # This is the timestamp about when we merged changed to include licenses # with Arvados packages. We use it as a heuristic to add revisions for # older packages. 
LICENSE_PACKAGE_TS=20151208015500

RAILS_PACKAGE_ITERATION="${ARVADOS_BUILDING_ITERATION:-1}"

declare -A LICENSE_FILE_NAME_MAP=(
    [agpl-3.0.txt]="GNU Affero General Public License version 3.0"
    [LICENSE-2.0.txt]="Apache 2.0"
)

debug_echo () {
    echo "$@" >"$STDOUT_IF_DEBUG"
}

find_python_program() {
    prog="$1"
    shift
    for prog in "$@"; do
        if "$prog" --version >/dev/null 2>&1; then
            echo "$prog"
            return 0
        fi
    done
    cat >&2 <<EOF
Error: $prog (from the list above) not found
EOF
    exit 1
}

handle_ruby_gem() {
    local gem_name="$1"; shift
    local gem_version="$(version_from_git)"

    if [[ -n "$ONLY_BUILD" ]] && [[ "$gem_name" != "$ONLY_BUILD" ]] ; then
        return 0
    fi

    if ! [[ -e "${gem_name}-${gem_version}.gem" ]]; then
        find -maxdepth 1 -name "${gem_name}-*.gem" -delete

        gem build "$gem_name.gemspec" $DASHQ_UNLESS_DEBUG >"$STDOUT_IF_DEBUG" 2>"$STDERR_IF_DEBUG"
    fi
}

# Usage: package_workbench2
package_workbench2() {
    local pkgname=arvados-workbench2
    local src=services/workbench2
    local dst=/var/www/arvados-workbench2/workbench2
    local description="Arvados Workbench 2"
    if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then
        return 0
    fi
    cd "$WORKSPACE/$src"
    local version="$(version_from_git)"
    rm -rf ./build
    NODE_ENV=production yarn install
    VERSION="$version" BUILD_NUMBER="$(default_iteration "$pkgname" "$version" yarn)" GIT_COMMIT="$(git rev-parse HEAD | head -c9)" yarn build
    cd "$WORKSPACE/packages/$TARGET"
    fpm_build "${WORKSPACE}/$src" "${WORKSPACE}/$src/build/=$dst" "$pkgname" dir "$version" \
        --license="GNU Affero General Public License, version 3.0" \
        --description="${description}" \
        --config-files="/etc/arvados/$pkgname/workbench2.example.json" \
        "$WORKSPACE/services/workbench2/etc/arvados/workbench2/workbench2.example.json=/etc/arvados/$pkgname/workbench2.example.json"
}

calculate_go_package_version() {
  # $__returnvar has the nameref attribute set, which means it is a reference
  # to another variable that is passed in as the first argument to this function.
  # see https://www.gnu.org/software/bash/manual/html_node/Shell-Parameters.html
  local -n __returnvar="$1"; shift
  local oldpwd="$PWD"

  cd "$WORKSPACE"
  go mod download

  # Update the version number and build a new package if the vendor
  # bundle has changed, or the command imports anything from the
  # Arvados SDK and the SDK has changed.
  declare -a checkdirs=(go.mod go.sum)
  while [ -n "$1" ]; do
      checkdirs+=("$1")
      shift
  done
  # Even our rails packages (version calculation happens here!) depend on a go component (arvados-server)
  # Everything depends on the build directory.
  checkdirs+=(sdk/go lib build)
  local timestamp=0
  for dir in ${checkdirs[@]}; do
      cd "$WORKSPACE"
      ts="$(timestamp_from_git "$dir")"
      if [[ "$ts" -gt "$timestamp" ]]; then
          version=$(version_from_git "$dir")
          timestamp="$ts"
      fi
  done
  cd "$oldpwd"
  __returnvar="$version"
}

# Usage: package_go_binary services/foo arvados-foo [deb|rpm] [amd64] "Compute foo to arbitrary precision" [apache-2.0.txt]
package_go_binary() {
    local src_path="$1"; shift
    local prog="$1"; shift
    local package_format="$1"; shift
    local target_arch="$1"; shift
    local description="$1"; shift
    local license_file="${1:-agpl-3.0.txt}"; shift

    if [[ -n "$ONLY_BUILD" ]] && [[ "$prog" != "$ONLY_BUILD" ]]; then
        debug_echo -e "Skipping build of $prog package."
        return 0
    fi

    native_arch=$(get_native_arch)
    if [[ "$native_arch" != "amd64" ]] && [[ -n "$target_arch" ]] && [[ "$native_arch" != "$target_arch" ]]; then
        echo "Error: no cross compilation support for Go on $native_arch, can not build $prog for $target_arch"
        return 1
    fi

    case "$package_format-$TARGET" in
        # Red Hat-based distributions do not support native cross compilation at
        # all (they use a qemu-based solution we haven't implemented yet).
        rpm-*)
            cross_compilation=0
            if [[ "$native_arch" == "amd64" ]] && [[ -n "$target_arch" ]] && [[ "$native_arch" != "$target_arch" ]]; then
                echo "Error: no cross compilation support for Go on $native_arch for $TARGET, can not build $prog for $target_arch"
                return 1
            fi
            ;;
        *)
            cross_compilation=1
            ;;
    esac

    if [[ -n "$target_arch" ]]; then
        archs=($target_arch)
    else
        # No target architecture specified, default to native target.
        archs=($native_arch)
    fi

    for ta in ${archs[@]}; do
        package_go_binary_worker "$src_path" "$prog" "$package_format" "$description" "$native_arch" "$ta" "$license_file"
        retval=$?
        if [[ $retval -ne 0 ]]; then
            return $retval
        fi
    done
}

# Usage: package_go_binary_worker services/foo arvados-foo deb "Compute foo to arbitrary precision" [amd64] [amd64] [apache-2.0.txt]
package_go_binary_worker() {
    local src_path="$1"; shift
    local prog="$1"; shift
    local package_format="$1"; shift
    local description="$1"; shift
    local native_arch="${1:-amd64}"; shift
    local target_arch="${1:-amd64}"; shift
    local license_file="${1:-agpl-3.0.txt}"; shift

    debug_echo "package_go_binary $src_path as $prog (native arch: $native_arch, target arch: $target_arch)"
    local basename="${src_path##*/}"
    calculate_go_package_version go_package_version $src_path

    cd $WORKSPACE/packages/$TARGET
    test_package_presence "$prog" "$go_package_version" "go" "" "$target_arch"
    if [[ $? -ne 0 ]]; then
        return 0
    fi

    echo "Building $package_format ($target_arch) package for $prog from $src_path"

    GOARCH=${target_arch} go install -ldflags "-X git.arvados.org/arvados.git/lib/cmd.version=${go_package_version} -X main.version=${go_package_version}" "git.arvados.org/arvados.git/$src_path"

    local -a switches=()

    binpath=$GOPATH/bin/${basename}
    if [[ "${target_arch}" != "${native_arch}" ]]; then
        switches+=("-a${target_arch}")
        binpath="$GOPATH/bin/linux_${target_arch}/${basename}"
    fi

    case "$package_format" in
        # As of April 2024 we package identical Go binaries under different
        # packages and names. This upsets the build id database, so don't
        # register ourselves there.
        rpm) switches+=(--rpm-rpmbuild-define="_build_id_links none") ;;
    esac

    systemd_unit="$WORKSPACE/${src_path}/${prog}.service"
    if [[ -e "${systemd_unit}" ]]; then
        switches+=(
            --after-install "${WORKSPACE}/build/go-python-package-scripts/postinst"
            --before-remove "${WORKSPACE}/build/go-python-package-scripts/prerm"
            "${systemd_unit}=/lib/systemd/system/${prog}.service")
    fi
    switches+=("$WORKSPACE/${license_file}=/usr/share/doc/$prog/${license_file}")

    fpm_build "${WORKSPACE}/${src_path}" "$binpath=/usr/bin/${prog}" "${prog}" dir "${go_package_version}" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=${description}" "${switches[@]}"
}

# Usage: package_go_so lib/foo arvados_foo.so arvados-foo deb amd64 "Arvados foo library"
package_go_so() {
    local src_path="$1"; shift
    local sofile="$1"; shift
    local pkg="$1"; shift
    local package_format="$1"; shift
    local target_arch="$1"; shift # supported: amd64
    local description="$1"; shift

    if [[ -n "$ONLY_BUILD" ]] && [[ "$pkg" != "$ONLY_BUILD" ]]; then
        debug_echo -e "Skipping build of $pkg package."
return 0 fi debug_echo "package_go_so $src_path as $pkg" calculate_go_package_version go_package_version $src_path cd $WORKSPACE/packages/$TARGET test_package_presence $pkg $go_package_version go || return 1 cd $WORKSPACE/$src_path go build -buildmode=c-shared -o ${GOPATH}/bin/${sofile} cd $WORKSPACE/packages/$TARGET local -a fpmargs=( "--url=https://arvados.org" "--license=Apache License, Version 2.0" "--description=${description}" "$WORKSPACE/apache-2.0.txt=/usr/share/doc/$pkg/apache-2.0.txt" ) if [[ -e "$WORKSPACE/$src_path/pam-configs-arvados" ]]; then fpmargs+=("$WORKSPACE/$src_path/pam-configs-arvados=/usr/share/doc/$pkg/pam-configs-arvados-go") fi if [[ -e "$WORKSPACE/$src_path/README" ]]; then fpmargs+=("$WORKSPACE/$src_path/README=/usr/share/doc/$pkg/README") fi fpm_build "${WORKSPACE}/${src_path}" "$GOPATH/bin/${sofile}=/usr/lib/${sofile}" "${pkg}" dir "${go_package_version}" "${fpmargs[@]}" } default_iteration() { if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then echo "$ARVADOS_BUILDING_ITERATION" return fi local package_name="$1"; shift local package_version="$1"; shift local package_type="$1"; shift local iteration=1 if [[ $package_version =~ ^0\.1\.([0-9]{14})(\.|$) ]] && \ [[ ${BASH_REMATCH[1]} -le $LICENSE_PACKAGE_TS ]]; then iteration=2 fi echo $iteration } _build_rails_package_scripts() { local pkgname="$1"; shift local destdir="$1"; shift local srcdir="$RUN_BUILD_PACKAGES_PATH/rails-package-scripts" for scriptname in postinst prerm postrm; do cat "$srcdir/$pkgname.sh" "$srcdir/$scriptname.sh" \ >"$destdir/$scriptname" || return $? done } rails_package_version() { local pkgname="$1"; shift local srcdir="$1"; shift if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then echo "$ARVADOS_BUILDING_VERSION" return fi local version="$(version_from_git)" if [ $pkgname = "arvados-api-server" ] ; then calculate_go_package_version version cmd/arvados-server "$srcdir" fi echo $version } test_rails_package_presence() { local pkgname="$1"; shift local srcdir="$1"; shift if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then return 1 fi tmppwd=`pwd` cd $srcdir local version="$(rails_package_version "$pkgname" "$srcdir")" cd $tmppwd test_package_presence $pkgname $version rails "$RAILS_PACKAGE_ITERATION" } get_complete_package_name() { # if the errexit flag is set, unset it until this function returns # otherwise, the shift calls below will abort the program if optional arguments are not supplied if [ -o errexit ]; then set +e trap 'set -e' RETURN fi # $__returnvar has the nameref attribute set, which means it is a reference # to another variable that is passed in as the first argument to this function. 
# see https://www.gnu.org/software/bash/manual/html_node/Shell-Parameters.html local -n __returnvar="$1"; shift local pkgname="$1"; shift local version="$1"; shift local pkgtype="$1"; shift local iteration="$1"; shift local arch="$1"; shift if [[ "$iteration" == "" ]]; then iteration="$(default_iteration "$pkgname" "$version" "$pkgtype")" fi if [[ "$arch" == "" ]]; then native_arch=$(get_native_arch) rpm_native_arch="x86_64" rpm_architecture="$rpm_native_arch" deb_architecture="$native_arch" if [[ "$pkgtype" =~ ^(src)$ ]]; then rpm_architecture="noarch" deb_architecture="all" fi else rpm_architecture=$arch deb_architecture=$arch fi local complete_pkgname="${pkgname}_$version${iteration:+-$iteration}_$deb_architecture.deb" if [[ "$FORMAT" == "rpm" ]]; then # rpm packages get iteration 1 if we don't supply one iteration=${iteration:-1} complete_pkgname="$pkgname-$version-${iteration}.$rpm_architecture.rpm" fi __returnvar=${complete_pkgname} } # Test if the package already exists, if not return 0, if it does return 1 test_package_presence() { local pkgname="$1"; shift local version="$1"; shift local pkgtype="$1"; shift local iteration="$1"; shift local arch="$1"; shift if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then return 1 fi local full_pkgname get_complete_package_name full_pkgname "$pkgname" "$version" "$pkgtype" "$iteration" "$arch" # See if we can skip building the package, only if it already exists in the # processed/ directory. If so, move it back to the packages directory to make # sure it gets picked up by the test and/or upload steps. # Get the list of packages from the repos local pkg_url if [[ "$FORCE_BUILD" == "1" ]]; then echo "Package $full_pkgname build forced with --force-build, building" return 0 elif [[ "$FORMAT" == "deb" ]]; then local codename case "$TARGET" in debian12) codename=bookworm ;; ubuntu2204) codename=jammy ;; ubuntu2404) codename=noble ;; *) echo "FIXME: Don't know deb URL path for $TARGET, building" return 0 ;; esac local repo_subdir if [ ${pkgname:0:3} = "lib" ]; then repo_subdir=${pkgname:0:4} else repo_subdir=${pkgname:0:1} fi pkg_url="http://apt.arvados.org/$codename/pool/main/$repo_subdir/$pkgname/$full_pkgname" else local rpm_root case "$TARGET" in rocky8 | rocky9 | rocky10 ) rpm_root="RHEL/${TARGET#rocky}/dev" ;; *) echo "FIXME: Don't know RPM URL path for $TARGET, building" return 0 ;; esac pkg_url="https://rpm.arvados.org/$rpm_root/$arch/$full_pkgname" fi if curl -fs -o "$WORKSPACE/packages/$TARGET/$full_pkgname" "$pkg_url"; then echo "Package $full_pkgname exists upstream, not rebuilding, downloading instead!" return 1 elif [[ -f "$WORKSPACE/packages/$TARGET/processed/$full_pkgname" ]]; then echo "Package $full_pkgname exists, not rebuilding!" return 1 else echo "Package $full_pkgname not found, building" return 0 fi } handle_rails_package() { local pkgname="$1"; shift if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then return 0 fi local srcdir="$1"; shift cd "$srcdir" local license_path="$1"; shift local version="$(rails_package_version "$pkgname" "$srcdir")" echo "$version" >package-build.version local scripts_dir="$(mktemp --tmpdir -d "$pkgname-XXXXXXXX.scripts")" && \ ( set -e _build_rails_package_scripts "$pkgname" "$scripts_dir" cd "$srcdir" mkdir -p tmp git rev-parse HEAD >git-commit.version # Prevent `bundle cache` from seeing system-wide gems and skipping # their download. This depends on the Bundler install set up # in the arvados_ruby Ansible role. See there for more background. 
export GEM_HOME=/opt/arvados-bundler export GEM_PATH="$GEM_HOME" # Please make sure you read `bundle help config` carefully before you # modify any of these settings. Some of their names are not intuitive. # # `bundle cache` caches from Git and paths, not just rubygems.org. bundle config set cache_all true # `bundle cache` caches for all platforms listed in `Gemfile.lock`. bundle config set cache_all_platforms true # Avoid loading system-wide gems (although this seems to not work 100%). bundle config set disable_shared_gems true # `bundle cache` only downloads gems, doesn't install them. # Our Rails postinst script does the install step. bundle config set no_install true # Do not install gem sets unnecessary for production. bundle config set without development:test bundle cache # Configuration after this point is for the installed package but only # makes sense to set *after* running `bundle cache`. # # Install with deployment settings. bundle config set deployment true # Install gems to a dedicated path that is only used by RailsAPI # (but shared across versions for efficiency). bundle config set path /var/www/arvados-api/shared/vendor_bundle ) if [[ 0 != "$?" ]] || ! cd "$WORKSPACE/packages/$TARGET"; then echo "ERROR: $pkgname package prep failed" >&2 rm -rf "$scripts_dir" EXITCODE=1 return 1 fi local railsdir="/var/www/${pkgname%-server}/current" local -a pos_args=("$srcdir/=$railsdir" "$pkgname" dir "$version") local -a switches=(--after-install "$scripts_dir/postinst" --before-remove "$scripts_dir/prerm" --after-remove "$scripts_dir/postrm") if [[ -z "$ARVADOS_BUILDING_VERSION" ]]; then switches+=(--iteration $RAILS_PACKAGE_ITERATION) fi # For some reason fpm excludes need to not start with /. local exclude_root="${railsdir#/}" for exclude in tmp log coverage Capfile\* \ config/deploy\* \ config/application.yml \ config/database.yml \ \*.service; do switches+=(-x "$exclude_root/$exclude") done fpm_build "${srcdir}" "${pos_args[@]}" "${switches[@]}" \ -x "$exclude_root/vendor/cache-*" \ -x "$exclude_root/vendor/bundle" "$@" \ "$license_path=$railsdir/$(basename "$license_path")" \ "$srcdir/arvados-railsapi.service=/lib/systemd/system/arvados-railsapi.service" rm -rf "$scripts_dir" } # Usage: handle_api_server [amd64] handle_api_server () { local target_arch="${1:-amd64}"; shift if [[ -n "$ONLY_BUILD" ]] && [[ "$ONLY_BUILD" != "arvados-api-server" ]] ; then debug_echo -e "Skipping build of arvados-api-server package." return 0 fi native_arch=$(get_native_arch) if [[ "$target_arch" != "$native_arch" ]]; then echo "Error: no cross compilation support for Rails yet, can not build arvados-api-server for $ARCH" echo exit 1 fi # Build the API server package test_rails_package_presence arvados-api-server "$WORKSPACE/services/api" if [[ "$?" == "0" ]]; then calculate_go_package_version arvados_server_version cmd/arvados-server arvados_server_iteration=$(default_iteration "arvados-server" "$arvados_server_version" "go") handle_rails_package arvados-api-server "$WORKSPACE/services/api" \ "$WORKSPACE/agpl-3.0.txt" --url="https://arvados.org" \ --description="Arvados API server - Arvados is a free and open source platform for big data science." \ --license="GNU Affero General Public License, version 3.0" --depends "arvados-server = ${arvados_server_version}-${arvados_server_iteration}" fi } # Usage: handle_arvados_src handle_arvados_src () { if [[ -n "$ONLY_BUILD" ]] && [[ "$ONLY_BUILD" != "arvados-src" ]] ; then debug_echo -e "Skipping build of arvados-src package." 
return 0 fi # arvados-src ( cd "$WORKSPACE" COMMIT_HASH=$(format_last_commit_here "%H") arvados_src_version="$(version_from_git)" cd $WORKSPACE/packages/$TARGET test_package_presence arvados-src "$arvados_src_version" src "" if [[ "$?" == "0" ]]; then cd "$WORKSPACE" SRC_BUILD_DIR=$(mktemp -d) # mktemp creates the directory with 0700 permissions by default chmod 755 $SRC_BUILD_DIR git clone $DASHQ_UNLESS_DEBUG "$WORKSPACE/.git" "$SRC_BUILD_DIR" cd "$SRC_BUILD_DIR" # go into detached-head state git checkout $DASHQ_UNLESS_DEBUG "$COMMIT_HASH" echo "$COMMIT_HASH" >git-commit.version cd $WORKSPACE/packages/$TARGET fpm_build "$WORKSPACE" $SRC_BUILD_DIR/=/usr/local/arvados/src arvados-src 'dir' "$arvados_src_version" "--exclude=usr/local/arvados/src/.git" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=The Arvados source code" "--architecture=all" rm -rf "$SRC_BUILD_DIR" fi ) } setup_build_virtualenv() { PYTHON_BUILDROOT="$(mktemp --directory --tmpdir pybuild.XXXXXXXX)" "$PYTHON3_EXECUTABLE" -m venv "$PYTHON_BUILDROOT/venv" "$PYTHON_BUILDROOT/venv/bin/pip" install -r "$WORKSPACE/build/requirements.build-packages.txt" mkdir "$PYTHON_BUILDROOT/wheelhouse" } # Build python packages with a virtualenv built-in # Usage: fpm_build_virtualenv arvados-python-client sdk/python [deb|rpm] [amd64] fpm_build_virtualenv () { local pkg=$1; shift local pkg_dir=$1; shift local package_format="$1"; shift local target_arch="${1:-amd64}"; shift fpm_build_virtualenv_worker "$pkg" "$pkg_dir" "$package_format" amd64 amd64 } # Build python packages with a virtualenv built-in # Usage: fpm_build_virtualenv_worker arvados-python-client sdk/python python3 [deb|rpm] [amd64] [amd64] fpm_build_virtualenv_worker () { PKG=$1; shift PKG_DIR=$1; shift local package_format="$1"; shift local native_arch="${1:-amd64}"; shift local target_arch=${1:-amd64}; shift # Set up STDOUT_IF_DEBUG=/dev/null STDERR_IF_DEBUG=/dev/null DASHQ_UNLESS_DEBUG=-q if [[ "$DEBUG" != "0" ]]; then STDOUT_IF_DEBUG=/dev/stdout STDERR_IF_DEBUG=/dev/stderr DASHQ_UNLESS_DEBUG= fi if [[ "$ARVADOS_BUILDING_ITERATION" == "" ]]; then ARVADOS_BUILDING_ITERATION=1 fi PACKAGE="$PKG_DIR" PACKAGE_PREFIX=$PYTHON3_PKG_PREFIX if [[ "$PKG" != "arvados-docker-cleaner" ]]; then PYTHON_PKG=$PACKAGE_PREFIX-$PKG else # Exception to our package naming convention PYTHON_PKG=$PKG fi # We must always add a wheel to our repository, even if we're not building # this distro package, because it might be a dependency for a later # package we do build. if [[ "$PKG_DIR" =~ ^.=[0-9]+\. ]]; then # Not source to build, but a version to download. # The rest of the function expects a filesystem path, so set one afterwards. "$PYTHON_BUILDROOT/venv/bin/pip" download --dest="$PYTHON_BUILDROOT/wheelhouse" "$PKG$PKG_DIR" \ && PKG_DIR="$PYTHON_BUILDROOT/nonexistent" else # Make PKG_DIR absolute. PKG_DIR="$(env -C "$WORKSPACE" readlink -e "$PKG_DIR")" "$PYTHON_BUILDROOT/venv/bin/python" -m build --outdir="$PYTHON_BUILDROOT/wheelhouse" "$PKG_DIR" fi if [[ $? -ne 0 ]]; then printf "Error, unable to download/build wheel for %s @ %s\n" "$PKG" "$PKG_DIR" exit 1 fi if [[ -n "$ONLY_BUILD" ]] && [[ "$PYTHON_PKG" != "$ONLY_BUILD" ]] && [[ "$PKG" != "$ONLY_BUILD" ]]; then return 0 elif ! "$PYTHON_BUILDROOT/venv/bin/piprepo" build "$PYTHON_BUILDROOT/wheelhouse"; then printf "Error, unable to update local wheel repository\n" exit 1 fi local venv_dir="/usr/lib/$PYTHON_PKG" echo "Creating virtualenv..." if ! 
"$PYTHON3_EXECUTABLE" -m venv "$venv_dir"; then printf "Error, unable to run\n %s -m venv %s\n" "$PYTHON3_EXECUTABLE" "$venv_dir" exit 1 # We must have the dependency resolver introduced in late 2020 for the rest # of our install process to work. # elif ! "$venv_dir/bin/pip" install "pip>=20.3"; then printf "Error, unable to run\n %s/bin/pip install 'pip>=20.3'\n" "$venv_dir" exit 1 fi local pip_wheel="$(ls --sort=time --reverse "$PYTHON_BUILDROOT/wheelhouse/$(echo "$PKG" | sed s/-/_/g)-"*.whl | tail -n1)" if [[ -z "$pip_wheel" ]]; then printf "Error, unable to find built wheel for $PKG\n" exit 1 elif ! "$venv_dir/bin/pip" install $DASHQ_UNLESS_DEBUG $CACHE_FLAG --extra-index-url="file://$PYTHON_BUILDROOT/wheelhouse/simple" "$pip_wheel"; then printf "Error, unable to run %s/bin/pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG --extra-index-url=file://%s %s " "$venv_dir" "$PYTHON_BUILDROOT/wheelhouse/simple" "$pip_wheel" exit 1 fi # Determine the package version from the wheel PYTHON_VERSION="$("$venv_dir/bin/python" "$WORKSPACE/build/pypkg_info.py" metadata "$PKG" Version)" UNFILTERED_PYTHON_VERSION="$(echo "$PYTHON_VERSION" | sed 's/\.dev/~dev/; s/\([0-9]\)rc/\1~rc/')" # See if we actually need to build this package; does it exist already? # We can't do this earlier than here, because we need PYTHON_VERSION. if ! test_package_presence "$PYTHON_PKG" "$UNFILTERED_PYTHON_VERSION" python3 "$ARVADOS_BUILDING_ITERATION" "$target_arch"; then return 0 fi echo "Building $package_format ($target_arch) package for $PKG from $PKG_DIR" local lic_key="$("$venv_dir/bin/python3" "$WORKSPACE/build/pypkg_info.py" metadata "$PKG" License-File)" local lic_desc="${LICENSE_FILE_NAME_MAP[$lic_key]}" if [[ -z "$lic_desc" ]]; then echo "Error, unable to determine license metadata for $PKG" >&2 exit 1 fi # Using `env -C` sets the directory where the package is built. # Using `fpm --chdir` sets the root directory for source arguments. declare -a COMMAND_ARR=( env -C "$PYTHON_BUILDROOT" fpm --chdir="$venv_dir" --name="$PYTHON_PKG" --version="$UNFILTERED_PYTHON_VERSION" --input-type=dir --output-type="$package_format" --depends="$PYTHON3_PACKAGE" --iteration="$ARVADOS_BUILDING_ITERATION" --replaces="python-$PKG" --url="https://arvados.org" --license="$lic_desc" ) # Append fpm flags corresponding to Python package metadata. readarray -d "" -O "${#COMMAND_ARR[@]}" -t COMMAND_ARR < \ <("$venv_dir/bin/python3" "$WORKSPACE/build/pypkg_info.py" \ --delimiter=\\0 --format=fpm \ metadata "$PKG" Summary) if [[ -n "$target_arch" ]] && [[ "$target_arch" != "amd64" ]]; then COMMAND_ARR+=("-a$target_arch") fi if [[ "$MAINTAINER" != "" ]]; then COMMAND_ARR+=('--maintainer' "$MAINTAINER") fi if [[ "$VENDOR" != "" ]]; then COMMAND_ARR+=('--vendor' "$VENDOR") fi if [[ "$DEBUG" != "0" ]]; then COMMAND_ARR+=('--verbose' '--log' 'info') fi systemd_unit="$PKG_DIR/$PKG.service" if [[ -e "${systemd_unit}" ]]; then COMMAND_ARR+=('--after-install' "${WORKSPACE}/build/go-python-package-scripts/postinst") COMMAND_ARR+=('--before-remove' "${WORKSPACE}/build/go-python-package-scripts/prerm") fi case "$package_format" in deb) COMMAND_ARR+=( # Avoid warning --deb-no-default-config-files ) ;; rpm) COMMAND_ARR+=( # Conflict with older packages we used to publish --conflicts "rh-python36-python-$PKG" # Do not generate /usr/lib/.build-id links on RH8+ # (otherwise our packages conflict with platform-python) --rpm-rpmbuild-define "_build_id_links none" ) ;; esac # Append --depends X and other arguments specified by fpm-info.sh in # the package source dir. 
# Append --depends X and other arguments specified by fpm-info.sh in # the package source dir. These are added last so they can override # the arguments added by this script. declare -a fpm_args=() declare -a fpm_depends=() fpminfo="$PKG_DIR/fpm-info.sh" if [[ -e "$fpminfo" ]]; then echo "Loading fpm overrides from $fpminfo" if ! source "$fpminfo"; then echo "Error, unable to source $fpminfo for $PKG" exit 1 fi fi for i in "${fpm_depends[@]}"; do COMMAND_ARR+=('--depends' "$i") done # make sure the systemd service file ends up in the right place # used by arvados-docker-cleaner if [[ -e "${systemd_unit}" ]]; then COMMAND_ARR+=("share/doc/$PKG/$PKG.service=/lib/systemd/system/$PKG.service") fi COMMAND_ARR+=("${fpm_args[@]}") while read -d "" binpath; do COMMAND_ARR+=("$binpath=/usr/$binpath") done < <("$venv_dir/bin/python3" "$WORKSPACE/build/pypkg_info.py" --delimiter=\\0 binfiles "$PKG") # the python3-arvados-cwl-runner package comes with cwltool, expose that version if [[ "$PKG" == arvados-cwl-runner ]]; then COMMAND_ARR+=("bin/cwltool=/usr/bin/cwltool") fi COMMAND_ARR+=(".=$venv_dir") debug_echo -e "\n${COMMAND_ARR[@]}\n" FPM_RESULTS=$("${COMMAND_ARR[@]}") FPM_EXIT_CODE=$? # if something went wrong and debug is off, print out the fpm command that errored if ! fpm_verify $FPM_EXIT_CODE $FPM_RESULTS && [[ "$STDOUT_IF_DEBUG" == "/dev/null" ]]; then echo "fpm returned an error executing the command:" echo echo -e "\n${COMMAND_ARR[@]}\n" else ls "$PYTHON_BUILDROOT"/*."$package_format" mv "$PYTHON_BUILDROOT"/*."$package_format" "$WORKSPACE/packages/$TARGET/" fi echo }
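The fpm-info.sh hook sourced above (and again by fpm_build below) can append to these arrays before the fpm command line is assembled. A minimal sketch of such a hook, under the assumption of a package with an illustrative libcurl runtime dependency; the array names come from the declare -a statements in this script, while the dependency names are examples only:

# fpm-info.sh -- lives in the package source dir and is sourced by
# run-library.sh; anything appended to fpm_depends/fpm_args ends up on
# the fpm command line.
case "$TARGET" in
    debian* | ubuntu*)
        fpm_depends+=(libcurl4)   # example Debian-family runtime dependency
        ;;
    *)
        fpm_depends+=(libcurl)    # example RPM-family runtime dependency
        ;;
esac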
-d "$SRC_DIR" ]]; then echo >&2 "BUG: looking in wrong dir for fpm-info.sh: $pkgdir" exit 1 fi fpminfo="${SRC_DIR}/fpm-info.sh" if [[ -e "$fpminfo" ]]; then debug_echo "Loading fpm overrides from $fpminfo" source "$fpminfo" fi for pkg in "${build_depends[@]}"; do if [[ $TARGET =~ debian|ubuntu ]]; then pkg_deb=$(ls "$WORKSPACE/packages/$TARGET/$pkg_"*.deb | sort -rg | awk 'NR==1') if [[ -e $pkg_deb ]]; then echo "Installing build_dep $pkg from $pkg_deb" dpkg -i "$pkg_deb" else echo "Attemping to install build_dep $pkg using apt-get" apt-get install -y "$pkg" fi apt-get -y -f install else pkg_rpm=$(ls "$WORKSPACE/packages/$TARGET/$pkg"-[0-9]*.rpm | sort -rg | awk 'NR==1') if [[ -e $pkg_rpm ]]; then echo "Installing build_dep $pkg from $pkg_rpm" rpm -i "$pkg_rpm" else echo "Attemping to install build_dep $pkg" rpm -i "$pkg" fi fi done for i in "${fpm_depends[@]}"; do COMMAND_ARR+=('--depends' "$i") done for i in "${fpm_conflicts[@]}"; do COMMAND_ARR+=('--conflicts' "$i") done for i in "${fpm_exclude[@]}"; do COMMAND_ARR+=('--exclude' "$i") done COMMAND_ARR+=("${fpm_args[@]}") # Append remaining function arguments directly to fpm's command line. for i; do COMMAND_ARR+=("$i") done COMMAND_ARR+=("$PACKAGE") debug_echo -e "\n${COMMAND_ARR[@]}\n" FPM_RESULTS=$("${COMMAND_ARR[@]}") FPM_EXIT_CODE=$? echo "fpm: exit code $FPM_EXIT_CODE" >>$STDOUT_IF_DEBUG echo "$FPM_RESULTS" >>$STDOUT_IF_DEBUG fpm_verify $FPM_EXIT_CODE $FPM_RESULTS # if something went wrong and debug is off, print out the fpm command that errored if [[ 0 -ne $? ]] && [[ "$STDOUT_IF_DEBUG" == "/dev/null" ]]; then echo -e "\n${COMMAND_ARR[@]}\n" fi } # verify build results fpm_verify () { FPM_EXIT_CODE=$1 shift FPM_RESULTS=$@ FPM_PACKAGE_NAME='' if [[ $FPM_RESULTS =~ ([A-Za-z0-9_\.~-]*\.)(deb|rpm) ]]; then FPM_PACKAGE_NAME=${BASH_REMATCH[1]}${BASH_REMATCH[2]} fi if [[ "$FPM_PACKAGE_NAME" == "" ]]; then EXITCODE=1 echo echo "Error: $PACKAGE: Unable to figure out package name from fpm results:" echo echo $FPM_RESULTS echo return 1 elif [[ "$FPM_RESULTS" =~ "File already exists" ]]; then echo "Package $FPM_PACKAGE_NAME exists, not rebuilding" return 0 elif [[ 0 -ne "$FPM_EXIT_CODE" ]]; then EXITCODE=1 echo "Error building package for $1:\n $FPM_RESULTS" return 1 fi } install_package() { PACKAGES=$@ if [[ "$FORMAT" == "deb" ]]; then $SUDO apt-get install $PACKAGES --yes elif [[ "$FORMAT" == "rpm" ]]; then $SUDO yum -q -y install $PACKAGES fi } title() { printf '%s %s\n' "=======" "$1" } checkexit() { if [[ "$1" != "0" ]]; then title "$2 -- FAILED" failures+=("$2 (`timer`)") else successes+=("$2 (`timer`)") fi } timer_reset() { t0=$SECONDS } timer() { if [[ -n "$t0" ]]; then echo -n "$(($SECONDS - $t0))s" fi } report_outcomes() { for x in "${successes[@]}" do echo "Pass: $x" done if [[ ${#failures[@]} == 0 ]] then if [[ ${#successes[@]} != 0 ]]; then echo "All test suites passed." fi else echo "Failures (${#failures[@]}):" for x in "${failures[@]}" do echo "Fail: $x" done fi } ================================================ FILE: build/run-tests.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 COLUMNS=80 . 
`dirname "$(readlink -f "$0")"`/run-library.sh read -rd "\000" helpmessage < export PYTHONWARNINGS="ignore::FutureWarning:google.api_core._python_version_support,${PYTHONWARNINGS:-\ default::DeprecationWarning:__main__\ ,ignore::DeprecationWarning\ ,ignore::PendingDeprecationWarning\ ,ignore::ImportWarning\ ,ignore::ResourceWarning\ }" # setup_ruby_environment will set this to the path of the `bundle` executable # it installs. This stub will cause commands to fail if they try to run before # that. BUNDLE=false short= only_install= temp= temp_preserve= ignore_sigint= clear_temp() { if [[ -z "$temp" ]]; then # we did not even get as far as making a temp dir : elif [[ -z "$temp_preserve" ]]; then # Go creates readonly dirs in the module cache, which cause # "rm -rf" to fail unless we chmod first. chmod -R u+w "$temp" rm -rf "$temp" else echo "Leaving behind temp dirs in $temp" fi } fatal() { clear_temp echo >&2 "Fatal: $* (encountered in ${FUNCNAME[1]} at ${BASH_SOURCE[1]} line ${BASH_LINENO[0]})" exit 1 } exit_cleanly() { trap - INT stop_services rotate_logfile "$WORKSPACE/services/api/log/" "test.log" report_outcomes clear_temp exit ${#failures} } sanity_checks() { [[ -n "${skip[sanity]}" ]] && return 0 ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \ || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)" [[ -z "$CONFIGSRC" ]] || [[ -s "$CONFIGSRC/config.yml" ]] \ || fatal "CONFIGSRC is $CONFIGSRC but '$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)" echo Checking dependencies: echo "locale: ${LANG}" [[ "$(locale charmap)" = "UTF-8" ]] \ || fatal "Locale '${LANG}' is broken/missing. Try: echo ${LANG} | sudo tee -a /etc/locale.gen && sudo locale-gen" echo -n 'ruby: ' ruby -v \ || fatal "No ruby. Install >=2.7 from package or source" echo -n 'go: ' go version \ || fatal "No go binary. See http://golang.org/doc/install" [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 12 ]] \ || fatal "Go >= 1.12 required. See http://golang.org/doc/install" echo -n 'gcc: ' gcc --version | egrep ^gcc \ || fatal "No gcc. Try: apt-get install build-essential" echo -n 'fuse.h: ' find /usr/include -path '*fuse/fuse.h' | egrep --max-count=1 . \ || fatal "No fuse/fuse.h. Try: apt-get install libfuse-dev" echo -n 'virtualenv: ' python3 -m venv --help | grep -q '^usage: venv ' \ && echo "venv module found" \ || fatal "No virtualenv. Try: apt-get install python3-venv" which netstat \ || fatal "No netstat. Try: apt-get install net-tools" echo -n 'nginx: ' PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin" nginx -v \ || fatal "No nginx. Try: apt-get install nginx" echo -n 'npm: ' npm --version \ || fatal "No npm. Try: wget -O- https://nodejs.org/dist/v14.21.3/node-v14.21.3-linux-x64.tar.xz | sudo tar -C /usr/local -xJf - && sudo ln -s ../node-v14.21.3-linux-x64/bin/{node,npm} /usr/local/bin/" echo -n 'cadaver: ' cadaver --version | grep -w cadaver \ || fatal "No cadaver. Try: apt-get install cadaver" echo -n "jq: " jq --version || fatal "No jq. Try: apt-get install jq" echo -n 'libcurl curl.h: ' find /usr/include -path '*/curl/curl.h' | egrep --max-count=1 . \ || fatal "No libcurl curl.h. Try: apt-get install libcurl4-gnutls-dev" echo -n 'libpq libpq-fe.h: ' find /usr/include -path '*/postgresql/libpq-fe.h' | egrep --max-count=1 . \ || fatal "No libpq libpq-fe.h. Try: apt-get install libpq-dev" echo -n 'libpam pam_appl.h: ' find /usr/include -path '*/security/pam_appl.h' | egrep --max-count=1 . \ || fatal "No libpam pam_appl.h. 
Try: apt-get install libpam0g-dev" echo -n 'postgresql: ' psql --version || fatal "No postgresql. Try: apt-get install postgresql postgresql-client-common" echo -n 'xvfb: ' which Xvfb || fatal "No xvfb. Try: apt-get install xvfb" echo -n 'singularity: ' singularity --version || fatal "No singularity." echo -n 'docker client: ' docker --version || echo "WARNING: No docker client." echo -n 'docker server: ' docker info --format='{{.ServerVersion}}' || echo "WARNING: No docker server." if [[ "$NEED_SDK_R" = true ]]; then # R SDK stuff echo -n 'R: ' which Rscript || fatal "No Rscript. Try: apt-get install r-base" echo -n 'testthat: ' Rscript -e "library('testthat')" || fatal "No testthat. Try: apt-get install r-cran-testthat" # needed for roxygen2, needed for devtools, needed for R sdk pkg-config --exists libxml-2.0 || fatal "No libxml2. Try: apt-get install libxml2-dev" fi echo 'procs with /dev/fuse open:' find /proc/*/fd -lname /dev/fuse 2>/dev/null | cut -d/ -f3 | xargs --no-run-if-empty ps -lywww echo 'grep fuse /proc/self/mountinfo:' grep fuse /proc/self/mountinfo } rotate_logfile() { # i.e. rotate_logfile "$WORKSPACE/services/api/log/" "test.log" # $BUILD_NUMBER is set by Jenkins if this script is being called as part of a Jenkins run if [[ -f "$1/$2" ]]; then THEDATE=`date +%Y%m%d%H%M%S` mv "$1/$2" "$1/$THEDATE-$BUILD_NUMBER-$2" gzip "$1/$THEDATE-$BUILD_NUMBER-$2" fi } checkpidfile() { svc="$1" pid="$(cat "$WORKSPACE/tmp/${svc}.pid")" if [[ -z "$pid" ]] || ! kill -0 "$pid"; then tail $WORKSPACE/tmp/${1}*.log echo "${svc} pid ${pid} not running" return 1 fi echo "${svc} pid ${pid} ok" } checkhealth() { svc="$1" base="$(yq -r "(.Clusters.zzzzz.Services.$svc.InternalURLs | keys)[0]" "$ARVADOS_CONFIG")" url="$base/_health/ping" if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then echo "${url} failed" return 1 fi } checkdiscoverydoc() { dd="https://${1}/discovery/v1/apis/arvados/v1/rest" if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then echo >&2 "ERROR: could not retrieve discovery doc from RailsAPI at $dd" tail -v $WORKSPACE/tmp/railsapi.log return 1 fi echo "${dd} ok" } start_services() { if [[ -n "$ARVADOS_TEST_API_HOST" ]]; then return 0 fi echo 'Starting API, controller, keepproxy, keep-web, ws, and nginx ssl proxy...' if [[ ! -d "$WORKSPACE/services/api/log" ]]; then mkdir -p "$WORKSPACE/services/api/log" fi # Remove empty api.pid file if it exists if [[ -f "$WORKSPACE/tmp/api.pid" && ! 
-s "$WORKSPACE/tmp/api.pid" ]]; then rm -f "$WORKSPACE/tmp/api.pid" fi all_services_stopped= fail=1 cd "$WORKSPACE" \ && eval $(python3 sdk/python/tests/run_test_server.py start --auth admin) \ && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \ && export ARVADOS_TEST_API_INSTALLED="$$" \ && checkpidfile api \ && checkdiscoverydoc $ARVADOS_API_HOST \ && eval $(python3 sdk/python/tests/run_test_server.py start_nginx) \ && checkpidfile nginx \ && python3 sdk/python/tests/run_test_server.py start_controller \ && checkpidfile controller \ && checkhealth Controller \ && checkdiscoverydoc $ARVADOS_API_HOST \ && python3 sdk/python/tests/run_test_server.py start_keep_proxy \ && checkpidfile keepproxy \ && python3 sdk/python/tests/run_test_server.py start_keep-web \ && checkpidfile keep-web \ && checkhealth WebDAV \ && python3 sdk/python/tests/run_test_server.py start_ws \ && checkpidfile ws \ && export ARVADOS_TEST_PROXY_SERVICES=1 \ && (env | egrep ^ARVADOS) \ && fail=0 if [[ $fail != 0 ]]; then unset ARVADOS_TEST_API_HOST fi return $fail } stop_services() { if [[ -n "$all_services_stopped" ]]; then return fi unset ARVADOS_TEST_API_HOST ARVADOS_TEST_PROXY_SERVICES cd "$WORKSPACE" \ && python3 sdk/python/tests/run_test_server.py stop_nginx \ && python3 sdk/python/tests/run_test_server.py stop_ws \ && python3 sdk/python/tests/run_test_server.py stop_keep-web \ && python3 sdk/python/tests/run_test_server.py stop_keep_proxy \ && python3 sdk/python/tests/run_test_server.py stop_controller \ && python3 sdk/python/tests/run_test_server.py stop \ && all_services_stopped=1 unset ARVADOS_CONFIG } interrupt() { if [[ -n "$ignore_sigint" ]]; then echo >&2 "ignored SIGINT" return fi failures+=("($(basename $0) interrupted)") exit_cleanly } trap interrupt INT setup_ruby_environment() { # When our "bundle install"s need to install new gems to # satisfy dependencies, we want them to go where "gem install # --user-install" would put them. (However, if the caller has # already set GEM_HOME, we assume that's where dependencies # should be installed, and we should leave it alone.) if [ -z "$GEM_HOME" ]; then user_gempath="$(gem env gempath)" export GEM_HOME="${user_gempath%%:*}" fi PATH="$(gem env gemdir)/bin:$PATH" # When we build and install our own gems, we install them in our # $GEMHOME tmpdir, and we want them to be at the front of GEM_PATH and # PATH so integration tests prefer them over other versions that # happen to be installed in $user_gempath, system dirs, etc. tmpdir_gem_home="$(env - PATH="$PATH" HOME="$GEMHOME" gem env gempath | cut -f1 -d:)" PATH="$tmpdir_gem_home/bin:$PATH" export GEM_PATH="$tmpdir_gem_home:$(gem env gempath)" echo "Will install dependencies to $(gem env gemdir)" echo "Will install bundler and arvados gems to $tmpdir_gem_home" echo "Gem search path is GEM_PATH=$GEM_PATH" gem install --user --no-document --conservative --version '~> 2.5.0' bundler \ || fatal 'install bundler' BUNDLE="$(gem contents --version '~> 2.5.0' bundler | grep -E '/(bin|exe)/bundle$' | tail -n1)" if [[ ! -x "$BUNDLE" ]]; then BUNDLE=false fatal "could not find 'bundle' executable after installation" fi } with_test_gemset() { GEM_HOME="$tmpdir_gem_home" GEM_PATH="$tmpdir_gem_home" "$@" } setup_virtualenv() { if [[ -z "${VENV3DIR:-}" ]]; then fatal "setup_virtualenv called before \$VENV3DIR was set" elif ! [[ -e "$VENV3DIR/bin/activate" ]]; then python3 -m venv "$VENV3DIR" || fatal "virtualenv creation failed" # Configure pip options we always want to use. 
"$VENV3DIR/bin/pip" config --quiet --site set global.disable-pip-version-check true "$VENV3DIR/bin/pip" config --quiet --site set global.no-input true "$VENV3DIR/bin/pip" config --quiet --site set global.no-python-version-warning true "$VENV3DIR/bin/pip" config --quiet --site set install.progress-bar off # If we didn't have a virtualenv before, we couldn't have started any # services. Set the flag used by stop_services to indicate that. all_services_stopped=1 fi . "$VENV3DIR/bin/activate" || fatal "virtualenv activation failed" # We must have these in place *before* we install the PySDK below. pip install -r "$WORKSPACE/build/requirements.tests.txt" || fatal "failed to install Python requirements in virtualenv" # run-tests.sh uses run_test_server.py from the Python SDK. do_install_once sdk/python pip || fatal "failed to install PySDK in virtualenv" } initialize() { sanity_checks echo "WORKSPACE=$WORKSPACE" cd "$WORKSPACE" if [[ -z "$temp" ]]; then temp="$(mktemp -d)" fi # Set up temporary install dirs (unless existing dirs were supplied) for tmpdir in VENV3DIR GOPATH GEMHOME R_LIBS do if [[ -z "${!tmpdir}" ]]; then eval "$tmpdir"="$temp/$tmpdir" fi if ! [[ -d "${!tmpdir}" ]]; then mkdir "${!tmpdir}" || fatal "can't create ${!tmpdir} (does $temp exist?)" fi done rm -vf "${WORKSPACE}/tmp/*.log" export R_LIBS export GOPATH # Make sure our compiled binaries under test override anything # else that might be in the environment. export PATH=$GOPATH/bin:$PATH # Jenkins config requires that glob tmp/*.log match something. Ensure # that happens even if we don't end up running services that set up # logging. mkdir -p "${WORKSPACE}/tmp/" || fatal "could not mkdir ${WORKSPACE}/tmp" touch "${WORKSPACE}/tmp/controller.log" || fatal "could not touch ${WORKSPACE}/tmp/controller.log" unset http_proxy https_proxy no_proxy setup_ruby_environment setup_virtualenv echo "PATH is $PATH" } install_env() { go mod download || fatal "Go deps failed" which goimports >/dev/null || go install golang.org/x/tools/cmd/goimports@latest || fatal "Go setup failed" } retry() { remain="${repeat}" while : do if ${@}; then if [[ "$remain" -gt 1 ]]; then remain=$((${remain}-1)) title "(repeating ${remain} more times)" else break fi elif [[ "$retry" == 1 ]]; then read -p 'Try again? [Y/n] ' x if [[ "$x" != "y" ]] && [[ "$x" != "" ]] then break fi else break fi done } do_test() { case "${1}" in services/workbench2_units | services/workbench2_integration) suite=services/workbench2 ;; *) suite="${1}" ;; esac if [[ -n "${skip[$suite]}" || \ -n "${skip[$1]}" || \ (${#only[@]} -ne 0 && ${only[$suite]} -eq 0 && ${only[$1]} -eq 0) ]]; then return 0 fi case "${1}" in services/api) stop_services check_arvados_config "$1" ;; gofmt \ | arvados_version.py \ | doc \ | lib/boot \ | lib/cli \ | lib/cloud/azure \ | lib/cloud/cloudtest \ | lib/cloud/ec2 \ | lib/cmd \ | lib/dispatchcloud/sshexecutor \ | lib/dispatchcloud/worker \ | lib/install \ | services/workbench2_integration \ | services/workbench2_units \ ) check_arvados_config "$1" # don't care whether services are running ;; *) check_arvados_config "$1" if ! 
start_services; then checkexit 1 "$1 tests" title "test $1 -- failed to start services" return 1 fi ;; esac retry do_test_once ${@} } go_ldflags() { version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev} echo "-X git.arvados.org/arvados.git/lib/cmd.version=${version} -X main.version=${version} -s -w" } do_test_once() { unset result if [[ "$2" == pip && -n "$interactive" ]]; then # We test out of the virtualenv to test with full build artifacts. # We do this by setting --import-mode=append in pytest.ini. # Install the developer's latest changes to the virtualenv. # We need to do this before we start the test header+timer. do_install_once "$1" "$2" || return fi local -a targs=() case "$1" in sdk/cwl ) # The CWL conformance/integration tests each take ~30 # minutes. Before July 2025 they were outside the standard test # suite, so we deselect them by default for consistency. targs+=(-m "not integration") # The CWL conformance/integration tests expect keep # servers and crunch-dispatch-local. if ! ( env -C "$WORKSPACE" python3 sdk/python/tests/run_test_server.py start_keep \ && env -C "$WORKSPACE" python3 sdk/python/tests/run_test_server.py start_dispatch); then checkexit 1 "$1 tests" return 1 fi ;; esac # Append the user's arguments to targs, respecting quoted strings. eval "targs+=(${testargs[$1]})" title "test $1" timer_reset result= if [[ "$2" == "go" ]] then covername="coverage-$(echo "$1" | sed -e 's/\//_/g')" coverflags=("-covermode=count" "-coverprofile=$WORKSPACE/tmp/.$covername.tmp") if ! compgen -G "$WORKSPACE/$1/*_test.go" >/dev/null; then # Go 1.25, when invoked by Go 1.24 via "toolchain go1.25" # directive, fails with 'go: no such tool "covdata"' when # using $coverflags in a directory that has no tests. See # https://github.com/golang/go/issues/75031 # # Workaround: skip coverflags when 'go test' is a no-op # anyway. coverflags=() fi testflags=() # We do "go install" here to catch compilation errors # before trying "go test". Otherwise, coverage-reporting # mode makes Go show the wrong line numbers when reporting # compilation errors. go install -ldflags "$(go_ldflags)" "$WORKSPACE/$1" && \ cd "$WORKSPACE/$1" && \ if [[ "${#targs}" -gt 0 ]] then # "go test -check.vv giturl" doesn't work, but this # does: go test ${short:+-short} ${testflags[@]} "${targs[@]}" else # The above form gets verbose even when testargs is # empty, so use this form in such cases: go test ${short:+-short} ${testflags[@]} ${coverflags[@]} "git.arvados.org/arvados.git/$1" fi result=${result:-$?} if [[ -f "$WORKSPACE/tmp/.$covername.tmp" ]] then go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html" rm "$WORKSPACE/tmp/.$covername.tmp" fi [[ $result = 0 ]] && gofmt -e -d *.go elif [[ "$2" == "pip" ]] then tries=0 while : do tries=$((${tries}+1)) env -C "$WORKSPACE/$1" pytest "${targs[@]}" result=$? # pytest uses exit code 2 to mean "test collection failed." # See discussion in FUSE's IntegrationTest and MountTestBase. 
if [[ ${tries} < 3 && ${result} == 2 ]] then printf '\n*****\n%s tests exited with code 2 -- retrying\n*****\n\n' "$1" continue else break fi done elif [[ "$2" != "" ]] then "test_$2" else "test_$1" fi result=${result:-$?} checkexit $result "$1 tests" title "test $1 -- `timer`" if [[ "$1" == "sdk/cwl" ]]; then env -C "$WORKSPACE" python3 sdk/python/tests/run_test_server.py stop_keep env -C "$WORKSPACE" python3 sdk/python/tests/run_test_server.py stop_dispatch # Also reset test fixtures that were modified by the dispatcher env -C "$WORKSPACE" python3 sdk/python/tests/run_test_server.py database_reset fi return $result } check_arvados_config() { if [[ "$1" = "env" ]] ; then return fi if [[ -z "$ARVADOS_CONFIG" ]] ; then cd "$WORKSPACE" eval $(python3 sdk/python/tests/run_test_server.py setup_config) fi # Set all PostgreSQL connection variables, and write a .pgpass, to connect # to the test database, so test scripts can write `psql` commands with no # additional configuration. export PGPASSFILE="$WORKSPACE/tmp/.pgpass" export PGDATABASE="$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.dbname "$ARVADOS_CONFIG")" export PGHOST="$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.host "$ARVADOS_CONFIG")" export PGPORT="$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.port "$ARVADOS_CONFIG")" export PGUSER="$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.user "$ARVADOS_CONFIG")" local pgpassword="$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.password "$ARVADOS_CONFIG")" echo "$PGHOST:$PGPORT:$PGDATABASE:$PGUSER:$pgpassword" >"$PGPASSFILE" chmod 0600 "$PGPASSFILE" } do_install() { if [[ -n ${skip["install_$1"]} || -n "${skip[install]}" || ( -n "${only_install}" && "${only_install}" != "${1}" && "${only_install}" != "${2}" ) ]]; then return 0 fi check_arvados_config "$1" retry do_install_once ${@} } do_install_once() { title "install $1" timer_reset result= if [[ "$2" == "go" ]] then go install -ldflags "$(go_ldflags)" "$WORKSPACE/$1" elif [[ "$2" == "pip" ]] then pip install "$WORKSPACE/$1" elif [[ "$2" != "" ]] then "install_$2" else "install_$1" fi result=${result:-$?} checkexit $result "$1 install" title "install $1 -- `timer`" return $result } bundle_install_trylocal() { ( set -e echo "(Running bundle install --local. 'could not find package' messages are OK.)" if ! "$BUNDLE" install --local --no-deployment; then echo "(Running bundle install again, without --local.)" "$BUNDLE" install --no-deployment fi "$BUNDLE" package ) } install_doc() { cd "$WORKSPACE/doc" \ && bundle_install_trylocal \ && rm -rf .site } install_gem() { gemname=$1 srcpath=$2 cd "$WORKSPACE/$srcpath" \ && bundle_install_trylocal \ && gem build "$gemname.gemspec" \ && with_test_gemset gem install --no-document $(ls -t "$gemname"-*.gem|head -n1) } install_sdk/ruby() { install_gem arvados sdk/ruby } install_sdk/ruby-google-api-client() { install_gem arvados-google-api-client sdk/ruby-google-api-client } install_contrib/R-sdk() { if [[ "$NEED_SDK_R" = true ]]; then env -C "$WORKSPACE/contrib/R-sdk" Rscript --vanilla install_deps.R fi } install_sdk/cli() { install_gem arvados-cli sdk/cli } install_services/login-sync() { install_gem arvados-login-sync services/login-sync } install_services/api() { stop_services check_arvados_config "services/api" cd "$WORKSPACE/services/api" \ && RAILS_ENV=test bundle_install_trylocal \ || return 1 rm -f config/environments/test.rb cp config/environments/test.rb.example config/environments/test.rb # Clear out any lingering postgresql connections to the test # database, so that we can drop it. 
This assumes the current user # is a postgresql superuser. psql -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$PGDATABASE';" 2>/dev/null mkdir -p "$WORKSPACE/services/api/tmp/pids" cert="$WORKSPACE/services/api/tmp/self-signed" if [[ ! -e "$cert.pem" || "$(date -r "$cert.pem" +%s)" -lt 1512659226 ]]; then ( dir="$WORKSPACE/services/api/tmp" set -e openssl req -newkey rsa:2048 -nodes -subj '/C=US/ST=State/L=City/CN=localhost' -out "$cert.csr" -keyout "$cert.key" VERSION=n (revert and/or run a single migration; is up|down|redo)" echo "reset (...services used by integration tests)" echo "exit" echo "== Test targets:" printf "%s\n" "${!testfuncargs[@]}" | sort | column } declare -a failures declare -A skip declare -A only declare -A testargs declare -a pythonstuff pythonstuff=( # The ordering of sdk/python, tools/crunchstat-summary, and # sdk/cwl here is significant. See # https://dev.arvados.org/issues/19744#note-26 sdk/python tools/crunchstat-summary sdk/cwl services/dockercleaner services/fuse tools/cluster-activity ) declare -a gostuff if [[ -n "$WORKSPACE" ]]; then readarray -d "" -t gostuff < <( git -C "$WORKSPACE" ls-files -z | grep -z '\.go$' | xargs -0r dirname -z | sort -zu ) fi declare -A testfuncargs=() for testfuncname in $(declare -F | awk ' ($3 ~ /^test_/ && $3 !~ /_package_presence$/) { print substr($3, 6); } '); do testfuncargs[$testfuncname]="$testfuncname" done for g in "${gostuff[@]}"; do testfuncargs[$g]="$g go" done for p in "${pythonstuff[@]}"; do testfuncargs[$p]="$p pip" done while [[ -n "$1" ]] do arg="$1"; shift case "$arg" in --help) exec 1>&2 echo "$helpmessage" if [[ ${#gostuff} -gt 0 ]]; then printf "\nAvailable targets:\n\n" printf "%s\n" "${!testfuncargs[@]}" | sort | column fi exit 1 ;; --skip) skip["${1%:py3}"]=1; shift ;; --only) only["${1%:py3}"]=1; skip["${1%:py3}"]=""; shift ;; --short) short=1 ;; --interactive) interactive=1 ;; --skip-install) skip[install]=1 ;; --only-install) only_install="$1"; shift ;; --temp) temp="$1"; shift temp_preserve=1 ;; --leave-temp) temp_preserve=1 ;; --repeat) repeat=$((${1}+0)); shift ;; --retry) retry=1 ;; *_test=*) suite="${arg%%_test=*}" args="${arg#*=}" testargs["${suite%:py3}"]="$args" ;; ARVADOS_*=*) eval export $(echo $arg | cut -d= -f1)=\"$(echo $arg | cut -d= -f2-)\" ;; *) echo >&2 "$0: Unrecognized option: '$arg'. Try: $0 --help" exit 1 ;; esac done # R SDK installation is very slow (~360s in a clean environment) and only # required when testing it. Skip that step if it is not needed. NEED_SDK_R=true if [[ ${#only[@]} -ne 0 ]] && [[ -z "${only['contrib/R-sdk']}" && -z "${only['doc']}" ]]; then NEED_SDK_R=false fi if [[ ${skip["contrib/R-sdk"]} == 1 && ${skip["doc"]} == 1 ]]; then NEED_SDK_R=false fi if [[ $NEED_SDK_R == false ]]; then echo "R SDK not needed, it will not be installed." fi initialize if [[ -z ${interactive} ]]; then install_all test_all else skip=() only=() only_install="" stop_services setnextcmd() { if [[ "$TERM" = dumb ]]; then # assume emacs, or something, is offering a history buffer # and pre-populating the command will only cause trouble nextcmd= elif [[ ! -e "$GOPATH/bin/arvados-server" ]]; then nextcmd="install deps" else nextcmd="" fi } echo help_interactive setnextcmd HISTFILE="$WORKSPACE/tmp/.history" history -r ignore_sigint=1 while read -p 'What next? 
' -e -i "$nextcmd" nextcmd; do history -s "$nextcmd" history -w count=1 if [[ "${nextcmd}" =~ ^[0-9] ]]; then read count nextcmd <<<"${nextcmd}" fi read verb target opts <<<"${nextcmd}" target="${target%/}" target="${target/\/:/:}" # Remove old Python version suffix for backwards compatibility target="${target%:py3}" case "${verb}" in "exit" | "quit") exit_cleanly ;; "reset") stop_services ;; "migrate") do_migrate ${target} ${opts} ;; "bundle") do_bundle ${target} ${opts} ;; "test" | "install") case "$target" in "") help_interactive ;; all | deps) ${verb}_${target} ;; *) testargs["$target"]="${opts}" while [ $count -gt 0 ]; do do_$verb ${testfuncargs[${target}]} let "count=count-1" done ;; esac ;; "" | "help" | *) help_interactive ;; esac if [[ ${#successes[@]} -gt 0 || ${#failures[@]} -gt 0 ]]; then report_outcomes successes=() failures=() fi cd "$WORKSPACE" setnextcmd done echo fi exit_cleanly ================================================ FILE: build/version-at-commit.sh ================================================ #!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 set -e -o pipefail commit="$1" devsuffix="~dev" # automatically assign *development* version # # handles the following cases: # # * commit is on main or a development branch, the nearest tag is older # than commit where this branch joins main. # -> take greatest version tag in repo X.Y.Z and assign X.(Y+1).0 # # * commit is on a release branch, the nearest tag is newer # than the commit where this branch joins main. # -> take nearest tag X.Y.Z and assign X.Y.(Z+1) # X.Y.Z releases where Z=0 are called major # releases and X.Y.Z releases where Z>1 are called point releases. # # The development process distinction is that X.Y.0 releases are # branched from main and then subsequent X.Y.Z releases cherry-pick # individual features from main onto the "X.Y-staging" branch. # # In semantic versioning terminology an "X.Y.0" release which only # increments Y is called a "minor" release but typically these # releases have significant changes that calling them "minor" in # communications with users feels misleading. # # Incrementing X is reserved for times when a release has significant # backwards-incompatible changes, which we don't do very often and try # to avoid. # # In order to assign a useful development version, we need to # determine if we're on the main branch (or a development branch off # main) or on a release branch. We do this by looking at the point # where the current commit history branched from main. # # If the tag for a new X+1 version appears on a release branch and not # directly in the history of main, the merge-base between main and the # release should be tagged as "development-X.Y.Z" so that # version-at-commit understands what version to assign to subsequent # commits on main. It is also helpful to assign development-X.Y.Z # tags to make git-describe provide better version strings. # 1. get the nearest tag with 'git describe' # 2. get the merge base between this commit and main # 3. if the tag is an ancestor of the merge base, # (tag is older than merge base) increment minor version # else, tag is newer than merge base, so increment point version nearest_tag=$(git describe --abbrev=0 "$commit") # We must use a remote branch here because Jenkins CI checkouts usually only # have the current work branch ref (and not even that if we're working by # commit hash). 
As of June 2025 everything uses origin, so: merge_base=$(git merge-base origin/main "$commit") if git merge-base --is-ancestor "$nearest_tag" "$merge_base" ; then # the nearest tag appears before the merge base with main (the # branch point), so assume this is a tag for the previous major # release (or a tag with the "development-" prefix indicating the # point where a major release branched off). Subsequent # development versions are given the anticipated version for the # next major release. # # x.(y+1).0~devTIMESTAMP, where x.y.z is the newest version that does not contain $commit # grep reads the list of tags (-f) that contain $commit and filters them out (-v) # this prevents a newer tag from retroactively changing the versions of everything before it v=$(git tag | grep -vFf <(git tag --contains "$merge_base") | sed -e 's/^development-//' | sort --version-sort | awk ' BEGIN { FS="."; OFS="."; } END { print $1, $2+1, 0; } ') else # the nearest tag comes after the merge base with main (the branch # point). Assume this means this is a point release branch, # following a major release. # # x.y.(z+1)~devTIMESTAMP, where x.y.z is the latest released ancestor of $commit v=$(awk ' BEGIN { FS="."; OFS="."; } { print $1, $2, $3+1; exit; } ' <
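To make the two branches above concrete, here is a small illustration with hypothetical tag names; the pipelines are the same ones used by the script, and in the real script the result then gets the "~dev" suffix plus a timestamp appended:

# Case 1 (commit on main): with hypothetical tags 2.6.3 and 2.7.0 already
# reachable from the merge base, the next development version becomes the
# anticipated next minor release:
printf '%s\n' 2.6.3 2.7.0 | sed -e 's/^development-//' | sort --version-sort |
    awk 'BEGIN { FS="."; OFS="."; } END { print $1, $2+1, 0; }'
# -> 2.8.0

# Case 2 (commit on a release branch): with nearest tag 2.7.1, the point
# version is incremented instead:
echo 2.7.1 | awk 'BEGIN { FS="."; OFS="."; } { print $1, $2, $3+1; exit; }'
# -> 2.7.2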
================================================ FILE: contrib/R-sdk/Makefile ================================================
ALL=R/Arvados.R man SDK_VERSION!=awk '($$1 == "Version:"){v=$$2} END {print v}' DESCRIPTION all: $(ALL) .PHONY: api api: R/Arvados.R R/Arvados.R: arvados-v1-discovery.json generateApi.R Rscript --vanilla generateApi.R # Used by arvados/doc/Rakefile. # Check whether we can load libraries necessary to build the package. .PHONY: can_run can_run: Rscript --vanilla -e "library(jsonlite); library(roxygen2);" .PHONY: clean clean: rm -rf $(ALL) "ArvadosR_$(SDK_VERSION).tar.gz" .PHONY: install install: R CMD INSTALL . man: R/Arvados.R R/*.R Rscript --vanilla -e "library(roxygen2); roxygen2::roxygenize(clean=TRUE)" .PHONY: package package: "ArvadosR_$(SDK_VERSION).tar.gz" "ArvadosR_$(SDK_VERSION).tar.gz": $(ALL) [A-Z]* *.R tests/*.R tests/testthat/*.R tests/testthat/fakes/*.R R CMD build . .PHONY: test test: $(ALL) Rscript --vanilla run_test.R
================================================ FILE: contrib/R-sdk/NAMESPACE ================================================
# Generated by roxygen2: do not edit by hand S3method(print,ArvadosFile) S3method(print,Collection) S3method(print,Subcollection) export(Arvados) export(ArvadosFile) export(Collection) export(Subcollection) export(listAll)
================================================ FILE: contrib/R-sdk/R/ArvadosFile.R ================================================
# Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 #' R6 Class Representing an ArvadosFile #' #' @description #' ArvadosFile class represents a file inside an Arvados collection. #' @export ArvadosFile <- R6::R6Class( "ArvadosFile", public = list( #' @description #' Initialize a new ArvadosFile object. #' @param name Name of the new file. #' @return A new `ArvadosFile` object. #' @examples #' \dontrun{ #' myFile <- ArvadosFile$new("myFile") #' } initialize = function(name) { if(name == "") stop("Invalid name.") private$name <- name }, #' @description #' Returns name of the file. #' @examples #' \dontrun{ #' arvadosFile$getName() #' } getName = function() private$name, #' @description #' Returns the file listing (for a single file, its own name) as a character vector. #' @param fullpath Currently unused for files; kept for interface compatibility with Subcollection. #' @examples #' \dontrun{ #' arvadosFile$getFileListing() #' } getFileListing = function(fullpath = TRUE) { self$getName() }, #' @description #' Returns the file content size in bytes. #' @examples #' \dontrun{ #' arvadosFile$getSizeInBytes() #' } getSizeInBytes = function() { if(is.null(private$collection)) return(0) REST <- private$collection$getRESTService() fileSize <- REST$getResourceSize(self$getRelativePath(), private$collection$uuid) fileSize }, get = function(fileLikeObjectName) { return(NULL) }, getFirst = function() { return(NULL) }, #' @description #' Returns the collection which this file belongs to. getCollection = function() private$collection, #' @description #' Sets the collection which this file belongs to. setCollection = function(collection, setRecursively = TRUE) { private$collection <- collection }, #' @description #' Returns file path relative to the root. getRelativePath = function() { relativePath <- c(private$name) parent <- private$parent while(!is.null(parent)) { relativePath <- c(parent$getName(), relativePath) parent <- parent$getParent() } relativePath <- relativePath[relativePath != ""] paste0(relativePath, collapse = "/") }, #' @description #' Returns the parent subcollection. getParent = function() private$parent, #' @description #' Sets the parent subcollection. setParent = function(newParent) private$parent <- newParent, #' @description #' Read file content. #' @param contentType Type of content. Possible values are "text" and "raw". #' @param offset Byte position in the file at which to start reading. #' @param length Number of bytes to read. #' @examples #' \dontrun{ #' collection <- Collection$new(arv, collectionUUID) #' arvadosFile <- collection$get(fileName) #' fileContent <- arvadosFile$read("text") #' } read = function(contentType = "raw", offset = 0, length = 0) { if(is.null(private$collection)) stop("ArvadosFile doesn't belong to any collection.") if(offset < 0 || length < 0) stop("Offset and length must be non-negative values.") REST <- private$collection$getRESTService() fileContent <- REST$read(self$getRelativePath(), private$collection$uuid, contentType, offset, length) fileContent }, #' @description #' Get connection opened in "read" or "write" mode. #' @param rw Connection mode: "r" or "rb" for reading, "w" for writing. #' @examples #' \dontrun{ #' collection <- Collection$new(arv, collectionUUID) #' arvadosFile <- collection$get(fileName) #' arvConnection <- arvadosFile$connection("w") #' } connection = function(rw) { if (rw == "r" || rw == "rb") { REST <- private$collection$getRESTService() return(REST$getConnection(self$getRelativePath(), private$collection$uuid, rw)) } else if (rw == "w") { private$buffer <- textConnection(NULL, "w") return(private$buffer) } }, #' @description #' Writes the connection's buffered content to the file, replacing the file's current content. #' @examples #' \dontrun{ #' collection <- Collection$new(arv, collectionUUID) #' arvadosFile <- collection$get(fileName) #' myFile$write("This is new file content") #' arvadosFile$flush() #' } flush = function() { v <- textConnectionValue(private$buffer) close(private$buffer) self$write(paste(v, collapse='\n')) }, #' @description #' Write to file or override current content of the file. #' @param content Content to write. #' @param contentType MIME type of the content; defaults to "text/html".
#' @examples #' \dontrun{ #' collection <- Collection$new(arv, collectionUUID) #' arvadosFile <- collection$get(fileName) #' myFile$write("This is new file content") #' } write = function(content, contentType = "text/html") { if(is.null(private$collection)) stop("ArvadosFile doesn't belong to any collection.") REST <- private$collection$getRESTService() writeResult <- REST$write(self$getRelativePath(), private$collection$uuid, content, contentType) writeResult }, #' @description #' Moves file to a new location inside the collection. #' @param destination Path to new folder. #' @examples #' \dontrun{ #' arvadosFile$move(newPath) #' } move = function(destination) { if(is.null(private$collection)) stop("ArvadosFile doesn't belong to any collection.") destination <- trimFromEnd(destination, "/") nameAndPath <- splitToPathAndName(destination) newParent <- private$collection$get(nameAndPath$path) if(is.null(newParent)) stop("Unable to get destination subcollection.") childWithSameName <- newParent$get(nameAndPath$name) if(!is.null(childWithSameName)) stop("Destination already contains content with same name.") REST <- private$collection$getRESTService() REST$move(self$getRelativePath(), paste0(newParent$getRelativePath(), "/", nameAndPath$name), private$collection$uuid) private$dettachFromCurrentParent() private$attachToNewParent(self, newParent) private$parent <- newParent private$name <- nameAndPath$name self }, #' @description #' Copies file to a new location inside the collection. #' @param destination Path to new folder. #' @examples #' \dontrun{ #' arvadosFile$copy("NewName.format") #' } copy = function(destination) { if(is.null(private$collection)) stop("ArvadosFile doesn't belong to any collection.") destination <- trimFromEnd(destination, "/") nameAndPath <- splitToPathAndName(destination) newParent <- private$collection$get(nameAndPath$path) if(is.null(newParent)) stop("Unable to get destination subcollection.") childWithSameName <- newParent$get(nameAndPath$name) if(!is.null(childWithSameName)) stop("Destination already contains content with same name.") REST <- private$collection$getRESTService() REST$copy(self$getRelativePath(), paste0(newParent$getRelativePath(), "/", nameAndPath$name), private$collection$uuid) newFile <- self$duplicate(nameAndPath$name) newFile$setCollection(self$getCollection()) private$attachToNewParent(newFile, newParent) newFile$setParent(newParent) newFile }, #' @description #' Duplicates the file and gives it a new name. #' @param newName New name for duplicated file. duplicate = function(newName = NULL) { name <- if(!is.null(newName)) newName else private$name newFile <- ArvadosFile$new(name) newFile } ), private = list( name = NULL, size = NULL, parent = NULL, collection = NULL, buffer = NULL, attachToNewParent = function(content, newParent) { # We temporarily set the parent's collection to NULL. This will ensure that # the add method doesn't post this file to REST. # We also need to set content's collection to NULL because # add method throws exception if we try to add content that already # belongs to a collection.
parentsCollection <- newParent$getCollection() #parent$.__enclos_env__$private$children <- c(parent$.__enclos_env__$private$children, self) #private$parent <- parent content$setCollection(NULL, setRecursively = FALSE) newParent$setCollection(NULL, setRecursively = FALSE) newParent$add(content) content$setCollection(parentsCollection, setRecursively = FALSE) newParent$setCollection(parentsCollection, setRecursively = FALSE) }, dettachFromCurrentParent = function() { # We temporarily set the parent's collection to NULL. This will ensure that # the remove method doesn't remove this file from REST. #private$parent$.__enclos_env__$private$removeChild(private$name) #private$parent <- NULL parent <- private$parent parentsCollection <- parent$getCollection() parent$setCollection(NULL, setRecursively = FALSE) parent$remove(private$name) parent$setCollection(parentsCollection, setRecursively = FALSE) } ), cloneable = FALSE )
#' print.ArvadosFile #' #' Custom print function for ArvadosFile class #' #' @param x Instance of ArvadosFile class #' @param ... Optional arguments. #' @export print.ArvadosFile = function(x, ...) { collection <- NULL relativePath <- x$getRelativePath() if(!is.null(x$getCollection())) { collection <- x$getCollection()$uuid relativePath <- paste0("/", relativePath) } cat(paste0("Type: ", "\"", "ArvadosFile", "\""), sep = "\n") cat(paste0("Name: ", "\"", x$getName(), "\""), sep = "\n") cat(paste0("Relative path: ", "\"", relativePath, "\""), sep = "\n") cat(paste0("Collection: ", "\"", collection, "\""), sep = "\n") }
================================================ FILE: contrib/R-sdk/R/ArvadosR.R ================================================
# Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 #' @title ArvadosR #' #' @description #' #' Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data. With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources. #' #' @author \itemize{ #' \item Lucas Di Pentima #' \item Ward Vandewege #' \item Fuad Muhic #' \item Peter Amstutz #' \item Aneta Stanczyk #' \item Piotr Nowosielski #' \item Brett Smith} #' #' @seealso \itemize{ #' \item https://arvados.org #' \item https://doc.arvados.org/sdk/R/index.html #' \item https://github.com/arvados/arvados/tree/main/contrib/R-sdk} #' #' @name ArvadosR NULL
================================================ FILE: contrib/R-sdk/R/Collection.R ================================================
# Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 #' R6 Class Representing an Arvados Collection #' #' @description #' Collection class provides an interface for working with Arvados collections; #' for example, actions like creating, updating, moving or removing are possible. #' #' @seealso #' https://github.com/arvados/arvados/tree/main/contrib/R-sdk #' #' @export Collection <- R6::R6Class( "Collection", public = list( #' @field uuid The UUID of the collection. uuid = NULL, #' @description #' Initialize a new Collection object. #' @param api Arvados environment (API client object). #' @param uuid The UUID of the collection. #' @return A new `Collection` object.
#' @examples #' \dontrun{ #' collection <- Collection$new(arv, CollectionUUID) #' } initialize = function(api, uuid) { private$REST <- api$getRESTService() self$uuid <- uuid }, #' @description #' Adds ArvadosFile or Subcollection specified by content to the collection. Used only with ArvadosFile or Subcollection. #' @param content Content to be added. #' @param relativePath Path to add content. add = function(content, relativePath = "") { if(is.null(private$tree)) private$generateCollectionTreeStructure() if(relativePath == "" || relativePath == "." || relativePath == "./") { subcollection <- private$tree$getTree() } else { relativePath <- trimFromEnd(relativePath, "/") subcollection <- self$get(relativePath) } if(is.null(subcollection)) stop(paste("Subcollection", relativePath, "doesn't exist.")) if("ArvadosFile" %in% class(content) || "Subcollection" %in% class(content)) { if(!is.null(content$getCollection())) stop("Content already belongs to a collection.") if(content$getName() == "") stop("Content has invalid name.") subcollection$add(content) content } else { stop(paste0("Expected ArvadosFile or Subcollection object, got ", paste0("(", paste0(class(content), collapse = ", "), ")"), ".")) } }, #' @description #' Read file content. #' @param file Name of the file. #' @param con Optional connection (currently unused). #' @param sep Separator used in reading tsv, csv file format. #' @param istable Used in reading txt file to check if the file is table or not. #' @param fileclass Used in reading fasta file to set file class. #' @param Ncol Used in reading binary file to set numbers of columns in data.frame. #' @param Nrow Used in reading binary file to set numbers of rows in data.frame size. #' @examples #' \dontrun{ #' collection <- Collection$new(arv, collectionUUID) #' readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table #' readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text #' readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata #' readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta #' readFile <- collection$readArvFile(arvadosFile, Ncol= 4, Nrow = 32) # binary, only numbers #' readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary with factor or text #' } readArvFile = function(file, con, sep = ',', istable = NULL, fileclass = "SeqFastadna", Ncol = NULL, Nrow = NULL, wantedFunction = NULL) { arvFile <- self$get(file) FileName <- arvFile$getName() FileName <- tolower(FileName) FileFormat <- gsub(".*\\.", "", FileName) # set environment ARVADOS_API_TOKEN <- Sys.getenv("ARVADOS_API_TOKEN") ARVADOS_API_HOST <- Sys.getenv("ARVADOS_API_HOST") my_collection <- self$uuid key <- gsub("/", "_", ARVADOS_API_TOKEN) Sys.setenv( "AWS_ACCESS_KEY_ID" = key, "AWS_SECRET_ACCESS_KEY" = key, "AWS_DEFAULT_REGION" = "collections", "AWS_S3_ENDPOINT" = gsub("api[.]", "", ARVADOS_API_HOST)) if (FileFormat == "txt") { if (is.null(istable)){ stop(paste('You need to specify whether it is a text or table file')) } else if (istable == 'no') { fileContent <- arvFile$read("text") # used to read fileContent <- gsub("[\r\n]", " ", fileContent) } else if (istable == 'yes') { arvConnection <- arvFile$connection("r") # used to make it possible to use a different function later fileContent <- read.table(arvConnection) } } else if (FileFormat == "xlsx") { fileContent <- aws.s3::s3read_using(FUN = openxlsx::read.xlsx, object = file, bucket = my_collection) } else if (FileFormat == "csv" || FileFormat ==
"tsv") { arvConnection <- arvFile$connection("r") if (FileFormat == "tsv"){ mytable <- read.table(arvConnection, sep = '\t') } else if (FileFormat == "csv" & sep == '\t') { mytable <- read.table(arvConnection, sep = '\t') } else if (FileFormat == "csv") { mytable <- read.table(arvConnection, sep = ',') } else { stop(paste('File format not supported, use arvadosFile$connection() and customise it')) } } else if (FileFormat == "fasta") { fastafile <- aws.s3::s3read_using(FUN = seqinr::read.fasta, as.string = TRUE, object = file, bucket = my_collection) } else if (FileFormat == "dat" || FileFormat == "bin") { fileContent <- gzcon(arvFile$connection("rb")) # function to precess data to binary format read_bin.file <- function(fileContent) { # read binfile column.names <- readBin(fileContent, character(), n = Ncol) bindata <- readBin(fileContent, numeric(), Nrow*Ncol+Ncol) # check res <- which(bindata < 0.0000001) if (is.list(res)) { bindata <- bindata[-res] } else { bindata <- bindata } # make a dataframe data <- data.frame(matrix(data = NA, nrow = Nrow, ncol = Ncol)) for (i in 1:Ncol) { data[,i] <- bindata[(1+Nrow*(i-1)):(Nrow*i)] } colnames(data) = column.names len <- which(is.na(data[,Ncol])) # error if sth went wrong if (length(len) == 0) { data } else { stop(paste("there is a factor or text in the table, customize the function by typing more arguments")) } } if (is.null(Nrow) | is.null(Ncol)){ stop(paste('You need to specify numbers of columns and rows')) } if (is.null(istable)) { fileContent <- read_bin.file(fileContent) # call a function } else if (istable == "factor") { # if there is a table with col name fileContent <- read_bin.file(fileContent) } } else if (FileFormat == "rds" || FileFormat == "rdata") { arvConnection <- arvFile$connection("rb") mytable <- readRDS(gzcon(arvConnection)) } else { stop(parse(('File format not supported, use arvadosFile$connection() and customise it'))) } }, #' @description #' Write file content #' @param name Name of the file. #' @param file File to be saved. #' @param istable Used in writing txt file to check if the file is table or not. 
#' @examples #' \dontrun{ #' collection <- Collection$new(arv, collectionUUID) #' writeFile <- collection$writeFile(name = "myoutput.csv", file = file, fileFormat = "csv", istable = NULL, collectionUUID = collectionUUID) # csv #' writeFile <- collection$writeFile(name = "myoutput.tsv", file = file, fileFormat = "tsv", istable = NULL, collectionUUID = collectionUUID) # tsv #' writeFile <- collection$writeFile(name = "myoutput.fasta", file = file, fileFormat = "fasta", istable = NULL, collectionUUID = collectionUUID) # fasta #' writeFile <- collection$writeFile(name = "myoutputtable.txt", file = file, fileFormat = "txt", istable = "yes", collectionUUID = collectionUUID) # txt table #' writeFile <- collection$writeFile(name = "myoutputtext.txt", file = file, fileFormat = "txt", istable = "no", collectionUUID = collectionUUID) # txt text #' writeFile <- collection$writeFile(name = "myoutputbinary.dat", file = file, fileFormat = "dat", collectionUUID = collectionUUID) # binary #' writeFile <- collection$writeFile(name = "myoutputxlsx.xlsx", file = file, fileFormat = "xlsx", collectionUUID = collectionUUID) # xlsx #' } writeFile = function(name, file, collectionUUID, fileFormat, istable = NULL, seqName = NULL) { # set environment ARVADOS_API_TOKEN <- Sys.getenv("ARVADOS_API_TOKEN") ARVADOS_API_HOST <- Sys.getenv("ARVADOS_API_HOST") my_collection <- self$uuid key <- gsub("/", "_", ARVADOS_API_TOKEN) Sys.setenv( "AWS_ACCESS_KEY_ID" = key, "AWS_SECRET_ACCESS_KEY" = key, "AWS_DEFAULT_REGION" = "collections", "AWS_S3_ENDPOINT" = gsub("api[.]", "", ARVADOS_API_HOST)) # save file if (fileFormat == "txt") { if (istable == "yes") { aws.s3::s3write_using(file, FUN = write.table, object = name, bucket = collectionUUID) } else if (istable == "no") { aws.s3::s3write_using(file, FUN = writeChar, object = name, bucket = collectionUUID) } else { stop(paste("Specify parameter istable")) } } else if (fileFormat == "csv") { aws.s3::s3write_using(file, FUN = write.csv, object = name, bucket = collectionUUID) } else if (fileFormat == "tsv") { aws.s3::s3write_using(file, FUN = write.table, row.names = FALSE, sep = "\t", object = name, bucket = collectionUUID) } else if (fileFormat == "fasta") { aws.s3::s3write_using(file, FUN = seqinr::write.fasta, name = seqName, object = name, bucket = collectionUUID) } else if (fileFormat == "xlsx") { aws.s3::s3write_using(file, FUN = openxlsx::write.xlsx, object = name, bucket = collectionUUID) } else if (fileFormat == "dat" || fileFormat == "bin") { aws.s3::s3write_using(file, FUN = writeBin, object = name, bucket = collectionUUID) } else { stop('File format not supported, use arvadosFile$connection() and customise it') } }, #' @description #' Creates one or more ArvadosFiles and adds them to the collection at specified path. #' @param files Content to be created.
#' @examples #' \dontrun{ #' collection$create(fileNames) #' } create = function(files) { if(is.null(private$tree)) private$generateCollectionTreeStructure() if(is.character(files)) { sapply(files, function(file) { childWithSameName <- self$get(file) if(!is.null(childWithSameName)) stop("Destination already contains file with same name.") newTreeBranch <- private$tree$createBranch(file) private$tree$addBranch(private$tree$getTree(), newTreeBranch) private$REST$create(file, self$uuid) newTreeBranch$setCollection(self) newTreeBranch }) } else { stop(paste0("Expected character vector, got ", paste0("(", paste0(class(files), collapse = ", "), ")"), ".")) } }, #' @description #' Remove one or more files from the collection. #' @param paths Content to be removed. #' @examples #' \dontrun{ #' collection$remove(fileName.format) #' } remove = function(paths) { if(is.null(private$tree)) private$generateCollectionTreeStructure() if(is.character(paths)) { sapply(paths, function(filePath) { filePath <- trimFromEnd(filePath, "/") file <- self$get(filePath) if(is.null(file)) stop(paste("File", filePath, "doesn't exist.")) parent <- file$getParent() if(is.null(parent)) stop("You can't delete root folder.") parent$remove(file$getName()) }) "Content removed" } else { stop(paste0("Expected character vector, got ", paste0("(", paste0(class(paths), collapse = ", "), ")"), ".")) } }, #' @description #' Moves ArvadosFile or Subcollection to another location in the collection. #' @param content Content to be moved. #' @param destination Path to move content to. #' @examples #' \dontrun{ #' collection$move("fileName.format", path) #' } move = function(content, destination) { if(is.null(private$tree)) private$generateCollectionTreeStructure() content <- trimFromEnd(content, "/") elementToMove <- self$get(content) if(is.null(elementToMove)) stop("Content you want to move doesn't exist in the collection.") elementToMove$move(destination) }, #' @description #' Copies ArvadosFile or Subcollection to another location in the collection. #' @param content Content to be copied. #' @param destination Path to copy content to. #' @examples #' \dontrun{ #' copied <- collection$copy("oldName.format", "newName.format") #' } copy = function(content, destination) { if(is.null(private$tree)) private$generateCollectionTreeStructure() content <- trimFromEnd(content, "/") elementToCopy <- self$get(content) if(is.null(elementToCopy)) stop("Content you want to copy doesn't exist in the collection.") elementToCopy$copy(destination) }, #' @description #' Refreshes the collection's tree structure. #' @examples #' \dontrun{ #' collection$refresh() #' } refresh = function() { if(!is.null(private$tree)) { private$tree$getTree()$setCollection(NULL, setRecursively = TRUE) private$tree <- NULL } }, #' @description #' Returns the collection's file listing as a character vector. #' @examples #' \dontrun{ #' list <- collection$getFileListing() #' } getFileListing = function() { if(is.null(private$tree)) private$generateCollectionTreeStructure() content <- private$REST$getCollectionContent(self$uuid) content[order(tolower(content))] }, #' @description #' If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL. #' @param relativePath Path from which the content is taken.
#' @examples
#' \dontrun{
#' arvadosFile <- collection$get(fileName)
#' }
get = function(relativePath) {
    if(is.null(private$tree))
        private$generateCollectionTreeStructure()

    private$tree$getElement(relativePath)
},

getRESTService = function() private$REST,
setRESTService = function(newRESTService) private$REST <- newRESTService
),

private = list(
    REST = NULL,
    # Cached CollectionTree built from the collection's content listing
    tree = NULL,
    fileContent = NULL,

    generateCollectionTreeStructure = function(relativePath = NULL) {
        if(is.null(self$uuid))
            stop("Collection uuid is not defined.")

        if(is.null(private$REST))
            stop("REST service is not defined.")

        private$fileContent <- private$REST$getCollectionContent(self$uuid, relativePath)
        private$tree <- CollectionTree$new(private$fileContent, self)
    }
),

cloneable = FALSE
)

#' print.Collection
#'
#' Custom print function for Collection class
#'
#' @param x Instance of Collection class
#' @param ... Optional arguments.
#' @export
print.Collection = function(x, ...) {
    cat(paste0("Type: ", "\"", "Arvados Collection", "\""), sep = "\n")
    cat(paste0("uuid: ", "\"", x$uuid, "\""), sep = "\n")
}

================================================
FILE: contrib/R-sdk/R/CollectionTree.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

CollectionTree <- R6::R6Class(
    "CollectionTree",
    public = list(
        pathsList = NULL,

        initialize = function(fileContent, collection) {
            self$pathsList <- fileContent
            treeBranches <- sapply(fileContent, function(filePath) self$createBranch(filePath))
            root <- Subcollection$new("")
            sapply(treeBranches, function(branch) self$addBranch(root, branch))
            root$setCollection(collection)
            private$tree <- root
        },

        createBranch = function(filePath) {
            splitPath <- unlist(strsplit(filePath, "/", fixed = TRUE))
            branch <- NULL
            lastElementIndex <- length(splitPath)

            for(elementIndex in lastElementIndex:1) {
                if(elementIndex == lastElementIndex) {
                    branch <- ArvadosFile$new(splitPath[[elementIndex]])
                } else {
                    newFolder <- Subcollection$new(splitPath[[elementIndex]])
                    newFolder$add(branch)
                    branch <- newFolder
                }
            }

            branch
        },

        addBranch = function(container, node) {
            child <- container$get(node$getName())

            if(is.null(child)) {
                # Make sure we don't make any REST calls while adding the child
                collection <- container$getCollection()
                container$setCollection(NULL, setRecursively = FALSE)
                container$add(node)
                container$setCollection(collection, setRecursively = FALSE)
            } else {
                # Note: REST always returns a folder's name alone before the rest of
                # the folder's content, so on the first iteration we don't know
                # whether it's a file or a folder, since it's just a name; we assume
                # it's a file. If we encounter that same name again we know it's a
                # folder, so we need to replace the ArvadosFile with a Subcollection.
if("ArvadosFile" %in% class(child)) child = private$replaceFileWithSubcollection(child) self$addBranch(child, node$getFirst()) } }, getElement = function(relativePath) { relativePath <- trimFromStart(relativePath, "./") relativePath <- trimFromEnd(relativePath, "/") if(endsWith(relativePath, "/")) relativePath <- substr(relativePath, 0, nchar(relativePath) - 1) splitPath <- unlist(strsplit(relativePath, "/", fixed = TRUE)) returnElement <- private$tree for(pathFragment in splitPath) { returnElement <- returnElement$get(pathFragment) if(is.null(returnElement)) return(NULL) } returnElement }, getTree = function() private$tree ), private = list( tree = NULL, replaceFileWithSubcollection = function(arvadosFile) { subcollection <- Subcollection$new(arvadosFile$getName()) fileParent <- arvadosFile$getParent() fileParent$remove(arvadosFile$getName()) fileParent$add(subcollection) arvadosFile$setParent(NULL) subcollection } ) ) ================================================ FILE: contrib/R-sdk/R/HttpParser.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 HttpParser <- R6::R6Class( "HttrParser", public = list( validContentTypes = NULL, initialize = function() { self$validContentTypes <- c("text", "raw") }, parseJSONResponse = function(serverResponse) { parsed_response <- httr::content(serverResponse, as = "parsed", type = "application/json") }, parseResponse = function(serverResponse, outputType) { parsed_response <- httr::content(serverResponse, as = outputType) }, getFileNamesFromResponse = function(response, uri) { text <- rawToChar(response$content) doc <- XML::xmlParse(text, asText=TRUE) base <- paste("/", strsplit(uri, "/")[[1]][4], "/", sep="") result <- unlist( XML::xpathApply(doc, "//D:response/D:href", function(node) { sub(base, "", URLdecode(XML::xmlValue(node)), fixed=TRUE) }) ) result[result != ""] }, getFileSizesFromResponse = function(response, uri) { text <- rawToChar(response$content) doc <- XML::xmlParse(text, asText=TRUE) base <- paste(paste("/", strsplit(uri, "/")[[1]][-1:-3], sep="", collapse=""), "/", sep="") result <- XML::xpathApply(doc, "//D:response/D:propstat/D:prop/D:getcontentlength", function(node) { XML::xmlValue(node) }) unlist(result) } ) ) ================================================ FILE: contrib/R-sdk/R/HttpRequest.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. 
#
# SPDX-License-Identifier: Apache-2.0

HttpRequest <- R6::R6Class(
    "HttpRequest",
    public = list(
        validContentTypes = NULL,
        validVerbs = NULL,

        initialize = function() {
            self$validContentTypes <- c("text", "raw")
            self$validVerbs <- c("GET", "POST", "PUT", "DELETE", "PROPFIND", "MOVE", "COPY")
        },

        exec = function(verb, url, headers = NULL, body = NULL, queryParams = NULL, retryTimes = 0) {
            if(!(verb %in% self$validVerbs))
                stop("Http verb is not valid.")

            urlQuery <- self$createQuery(queryParams)
            url <- paste0(url, urlQuery)

            config <- httr::add_headers(unlist(headers))
            if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
                config$options = list(ssl_verifypeer = 0L)

            response <- httr::RETRY(verb, url = url, body = body,
                                    config = config, times = retryTimes + 1)
        },

        createQuery = function(queryParams) {
            queryParams <- Filter(Negate(is.null), queryParams)

            query <- sapply(queryParams, function(param) {
                if(is.list(param) || length(param) > 1)
                    param <- RListToPythonList(param, ",")

                URLencode(as.character(param), reserved = TRUE, repeated = TRUE)
            }, USE.NAMES = TRUE)

            if(length(query) > 0) {
                query <- paste0(names(query), "=", query, collapse = "&")
                return(paste0("?", query))
            }

            return("")
        },

        getConnection = function(url, headers, openMode) {
            h <- curl::new_handle()
            curl::handle_setheaders(h, .list = headers)

            if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
                curl::handle_setopt(h, ssl_verifypeer = 0L)

            conn <- curl::curl(url = url, open = openMode, handle = h)
        }
    ),

    cloneable = FALSE
)

================================================
FILE: contrib/R-sdk/R/RESTService.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

RESTService <- R6::R6Class(
    "RESTService",
    public = list(
        token = NULL,
        http = NULL,
        httpParser = NULL,
        numRetries = NULL,

        initialize = function(token, rawHost, http, httpParser,
                              numRetries = 0, webDavHostName = NULL) {
            self$token <- token
            self$http <- http
            self$httpParser <- httpParser
            self$numRetries <- numRetries

            private$rawHostName <- rawHost
            private$webDavHostName <- webDavHostName
        },

        setNumConnRetries = function(newNumOfRetries) {
            self$numRetries <- newNumOfRetries
        },

        getWebDavHostName = function() {
            if(is.null(private$webDavHostName)) {
                publicConfigURL <- paste0("https://", private$rawHostName,
                                          "/arvados/v1/config")

                serverResponse <- self$http$exec("GET", publicConfigURL, retryTimes = self$numRetries)

                configDocument <- self$httpParser$parseJSONResponse(serverResponse)
                private$webDavHostName <- configDocument$Services$WebDAVDownload$ExternalURL

                if(is.null(private$webDavHostName))
                    stop("Unable to find WebDAV server.")
            }

            private$webDavHostName
        },

        create = function(files, uuid) {
            sapply(files, function(filePath) {
                private$createNewFile(filePath, uuid, "text/html")
            })
        },

        delete = function(relativePath, uuid) {
            fileURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/", relativePath)

            headers <- list(Authorization = paste("Bearer", self$token))

            serverResponse <- self$http$exec("DELETE", fileURL, headers,
                                             retryTimes = self$numRetries)

            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
                stop(paste("Server code:", serverResponse$status_code))

            serverResponse
        },

        move = function(from, to, uuid) {
            collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/")
            fromURL <- paste0(collectionURL, from)
            toURL <- paste0(collectionURL, trimFromStart(to, "/"))

            headers <- list("Authorization" = paste("Bearer", self$token),
                            "Destination" = toURL)

            serverResponse <-
self$http$exec("MOVE", fromURL, headers, retryTimes = self$numRetries) if(serverResponse$status_code < 200 || serverResponse$status_code >= 300) stop(paste("Server code:", serverResponse$status_code)) serverResponse }, copy = function(from, to, uuid) { collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/") fromURL <- paste0(collectionURL, from) toURL <- paste0(collectionURL, trimFromStart(to, "/")) headers <- list("Authorization" = paste("Bearer", self$token), "Destination" = toURL) serverResponse <- self$http$exec("COPY", fromURL, headers, retryTimes = self$numRetries) if(serverResponse$status_code < 200 || serverResponse$status_code >= 300) stop(paste("Server code:", serverResponse$status_code)) serverResponse }, getCollectionContent = function(uuid, relativePath = NULL) { collectionURL <- URLencode(paste0(self$getWebDavHostName(), "c=", uuid, "/", relativePath)) headers <- list("Authorization" = paste("Bearer", self$token)) response <- self$http$exec("PROPFIND", collectionURL, headers, retryTimes = self$numRetries) if(all(response == "")) stop("Response is empty, request may be misconfigured") if(response$status_code < 200 || response$status_code >= 300) stop(paste("Server code:", response$status_code)) self$httpParser$getFileNamesFromResponse(response, collectionURL) }, getResourceSize = function(relativePath, uuid) { collectionURL <- URLencode(paste0(self$getWebDavHostName(), "c=", uuid)) subcollectionURL <- paste0(collectionURL, "/", relativePath); headers <- list("Authorization" = paste("Bearer", self$token)) response <- self$http$exec("PROPFIND", subcollectionURL, headers, retryTimes = self$numRetries) if(all(response == "")) stop("Response is empty, request may be misconfigured") if(response$status_code < 200 || response$status_code >= 300) stop(paste("Server code:", response$status_code)) sizes <- self$httpParser$getFileSizesFromResponse(response, collectionURL) as.numeric(sizes) }, read = function(relativePath, uuid, contentType = "raw", offset = 0, length = 0) { fileURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/", relativePath); range <- paste0("bytes=", offset, "-") if(length > 0) range = paste0(range, offset + length - 1) if(offset == 0 && length == 0) { headers <- list(Authorization = paste("Bearer", self$token)) } else { headers <- list(Authorization = paste("Bearer", self$token), Range = range) } if(!(contentType %in% self$httpParser$validContentTypes)) stop("Invalid contentType. 
Please use text or raw.") serverResponse <- self$http$exec("GET", fileURL, headers, retryTimes = self$numRetries) if(serverResponse$status_code < 200 || serverResponse$status_code >= 300) stop(paste("Server code:", serverResponse$status_code)) self$httpParser$parseResponse(serverResponse, contentType) }, write = function(relativePath, uuid, content, contentType) { fileURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/", relativePath); headers <- list(Authorization = paste("Bearer", self$token), "Content-Type" = contentType) body <- content serverResponse <- self$http$exec("PUT", fileURL, headers, body, retryTimes = self$numRetries) if(serverResponse$status_code < 200 || serverResponse$status_code >= 300) stop(paste("Server code:", serverResponse$status_code)) self$httpParser$parseResponse(serverResponse, "text") }, getConnection = function(relativePath, uuid, openMode) { fileURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/", relativePath); headers <- list(Authorization = paste("Bearer", self$token)) conn <- self$http$getConnection(fileURL, headers, openMode) } ), private = list( webDavHostName = NULL, rawHostName = NULL, createNewFile = function(relativePath, uuid, contentType) { fileURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/", relativePath) headers <- list(Authorization = paste("Bearer", self$token), "Content-Type" = contentType) body <- NULL serverResponse <- self$http$exec("PUT", fileURL, headers, body, retryTimes = self$numRetries) if (serverResponse$status_code < 200){ # to wyrzuca błędy stop(paste("Server code:", serverResponse$status_code))} else if (serverResponse$status_code >= 300 & serverResponse$status_code < 422) { stop(paste("Server code:", serverResponse$status_code))} else if (serverResponse$status_code == 422 ) { stop(paste("Project of that name already exists. If you want to change it use project_update() instead"))} paste("File created:", relativePath) } ), cloneable = FALSE ) ================================================ FILE: contrib/R-sdk/R/Subcollection.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 #' R6 Class Representing a Subcollection #' #' @description #' Subcollection class represents a folder inside Arvados collection. #' It is essentially a composite of arvadosFiles and other subcollections. #' @export Subcollection <- R6::R6Class( "Subcollection", public = list( #' @description #' Initialize new enviroment. #' @param name Name of the new enviroment. #' @return A new `Subcollection` object. initialize = function(name) { private$name <- name }, #' @description #' Returns name of the file. getName = function() private$name, #' @description #' Returns Subcollection's path relative to the root. getRelativePath = function() { relativePath <- c(private$name) parent <- private$parent while(!is.null(parent)) { relativePath <- c(parent$getName(), relativePath) parent <- parent$getParent() } relativePath <- relativePath[relativePath != ""] paste0(relativePath, collapse = "/") }, #' @description #' Adds ArvadosFile or Subcollection specified by content to the Subcollection. #' @param content Content to be added. 
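        #' @examples
        #' \dontrun{
        #' # A minimal sketch (assumes `collection` is an existing Collection
        #' # and "myFolder" is a folder inside it):
        #' folder <- collection$get("myFolder/")
        #' folder$add(ArvadosFile$new("newFile.txt"))
        #' }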
        add = function(content) {
            if("ArvadosFile" %in% class(content) || "Subcollection" %in% class(content)) {
                if(!is.null(content$getCollection()))
                    stop("Content already belongs to a collection.")

                if(content$getName() == "")
                    stop("Content has invalid name.")

                childWithSameName <- self$get(content$getName())

                if(!is.null(childWithSameName))
                    stop(paste("Subcollection already contains ArvadosFile",
                               "or Subcollection with same name."))

                if(!is.null(private$collection)) {
                    if(self$getRelativePath() != "")
                        contentPath <- paste0(self$getRelativePath(),
                                              "/", content$getFileListing())
                    else
                        contentPath <- content$getFileListing()

                    REST <- private$collection$getRESTService()
                    REST$create(contentPath, private$collection$uuid)
                    content$setCollection(private$collection)
                }

                private$children <- c(private$children, content)
                content$setParent(self)

                "Content added successfully."
            } else {
                stop(paste0("Expected ArvadosFile or Subcollection object, got ",
                            paste0("(", paste0(class(content), collapse = ", "), ")"),
                            "."))
            }
        },

        #' @description
        #' Removes ArvadosFile or Subcollection specified by name from the Subcollection.
        #' @param name Name of the file to be removed.
        remove = function(name) {
            if(is.character(name)) {
                child <- self$get(name)

                if(is.null(child))
                    stop(paste("Subcollection doesn't contain ArvadosFile",
                               "or Subcollection with specified name."))

                if(!is.null(private$collection)) {
                    REST <- private$collection$getRESTService()
                    REST$delete(child$getRelativePath(), private$collection$uuid)

                    child$setCollection(NULL)
                }

                private$removeChild(name)
                child$setParent(NULL)

                "Content removed"
            } else {
                stop(paste0("Expected character, got ",
                            paste0("(", paste0(class(name), collapse = ", "), ")"),
                            "."))
            }
        },

        #' @description
        #' Returns the subcollection's file listing as a character vector.
        #' @param fullPath If TRUE, returns the full relative path of each file; otherwise only the file names.
        getFileListing = function(fullPath = TRUE) {
            content <- private$getContentAsCharVector(fullPath)
            content[order(tolower(content))]
        },

        #' @description
        #' Returns the subcollection's content size in bytes.
        getSizeInBytes = function() {
            if(is.null(private$collection))
                return(0)

            REST <- private$collection$getRESTService()

            fileSizes <- REST$getResourceSize(paste0(self$getRelativePath(), "/"),
                                              private$collection$uuid)
            return(sum(fileSizes))
        },

        #' @description
        #' Moves Subcollection to a new location inside the collection.
        #' @param destination Path to move the subcollection to.
        move = function(destination) {
            if(is.null(private$collection))
                stop("Subcollection doesn't belong to any collection.")

            destination <- trimFromEnd(destination, "/")
            nameAndPath <- splitToPathAndName(destination)

            newParent <- private$collection$get(nameAndPath$path)

            if(is.null(newParent))
                stop("Unable to get destination subcollection.")

            childWithSameName <- newParent$get(nameAndPath$name)

            if(!is.null(childWithSameName))
                stop("Destination already contains content with same name.")

            REST <- private$collection$getRESTService()
            REST$move(self$getRelativePath(),
                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
                      private$collection$uuid)

            private$dettachFromCurrentParent()
            private$attachToNewParent(self, newParent)

            private$parent <- newParent
            private$name <- nameAndPath$name

            self
        },

        #' @description
        #' Copies Subcollection to a new location inside the collection.
        #' @param destination Path to copy the subcollection to.
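        #' @examples
        #' \dontrun{
        #' # A minimal sketch (assumes "myFolder" exists inside `collection`):
        #' folder <- collection$get("myFolder/")
        #' folder$copy("destination/myFolder")
        #' }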
        copy = function(destination) {
            if(is.null(private$collection))
                stop("Subcollection doesn't belong to any collection.")

            destination <- trimFromEnd(destination, "/")
            nameAndPath <- splitToPathAndName(destination)

            newParent <- private$collection$get(nameAndPath$path)

            if(is.null(newParent) || !("Subcollection" %in% class(newParent)))
                stop("Unable to get destination subcollection.")

            childWithSameName <- newParent$get(nameAndPath$name)

            if(!is.null(childWithSameName))
                stop("Destination already contains content with same name.")

            REST <- private$collection$getRESTService()
            REST$copy(self$getRelativePath(),
                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
                      private$collection$uuid)

            newContent <- self$duplicate(nameAndPath$name)
            newContent$setCollection(self$getCollection(), setRecursively = TRUE)
            newContent$setParent(newParent)

            private$attachToNewParent(newContent, newParent)

            newContent
        },

        #' @description
        #' Duplicates the Subcollection, optionally giving it a new name.
        #' @param newName New name for the duplicated subcollection.
        duplicate = function(newName = NULL) {
            name <- if(!is.null(newName)) newName else private$name
            root <- Subcollection$new(name)
            for(child in private$children)
                root$add(child$duplicate())

            root
        },

        #' @description
        #' If name is valid, returns the ArvadosFile or Subcollection specified by name, else returns NULL.
        #' @param name Name of the file or subcollection.
        get = function(name) {
            for(child in private$children) {
                if(child$getName() == name)
                    return(child)
            }

            return(NULL)
        },

        #' @description
        #' Returns the first child of the Subcollection, or NULL if there are none.
        getFirst = function() {
            if(length(private$children) == 0)
                return(NULL)

            private$children[[1]]
        },

        #' @description
        #' Sets the collection this subcollection belongs to.
        setCollection = function(collection, setRecursively = TRUE) {
            private$collection = collection

            if(setRecursively) {
                for(child in private$children)
                    child$setCollection(collection)
            }
        },

        #' @description
        #' Returns the collection this subcollection belongs to.
        getCollection = function() private$collection,

        #' @description
        #' Returns the parent of this subcollection.
        getParent = function() private$parent,

        #' @description
        #' Sets a new parent for this subcollection.
        setParent = function(newParent) private$parent <- newParent
    ),

    private = list(
        name = NULL,
        children = NULL,
        parent = NULL,
        collection = NULL,

        removeChild = function(name) {
            numberOfChildren = length(private$children)
            if(numberOfChildren > 0) {
                for(childIndex in 1:numberOfChildren) {
                    if(private$children[[childIndex]]$getName() == name) {
                        private$children = private$children[-childIndex]
                        return()
                    }
                }
            }
        },

        attachToNewParent = function(content, newParent) {
            # We temporarily set the new parent's collection to NULL so the add
            # method doesn't post this subcollection to REST. We also set the
            # content's collection to NULL, because add throws an exception if
            # the content already belongs to a collection.
            parentsCollection <- newParent$getCollection()

            content$setCollection(NULL, setRecursively = FALSE)
            newParent$setCollection(NULL, setRecursively = FALSE)

            newParent$add(content)

            content$setCollection(parentsCollection, setRecursively = FALSE)
            newParent$setCollection(parentsCollection, setRecursively = FALSE)
        },

        dettachFromCurrentParent = function() {
            # We temporarily set the parent's collection to NULL so the remove
            # method doesn't issue a REST call for this subcollection.
            parent <- private$parent
            parentsCollection <- parent$getCollection()

            parent$setCollection(NULL, setRecursively = FALSE)
            parent$remove(private$name)
            parent$setCollection(parentsCollection, setRecursively = FALSE)
        },

        getContentAsCharVector = function(fullPath = TRUE) {
            content <- NULL

            if(fullPath) {
                for(child in private$children)
                    content <- c(content, child$getFileListing())

                if(private$name != "")
                    content <- unlist(paste0(private$name, "/", content))
            } else {
                for(child in private$children)
                    content <- c(content, child$getName())
            }

            content
        }
    ),

    cloneable = FALSE
)

#' print.Subcollection
#'
#' Custom print function for Subcollection class
#'
#' @param x Instance of Subcollection class
#' @param ... Optional arguments.
#' @export
print.Subcollection = function(x, ...) {
    collection <- NULL
    relativePath <- x$getRelativePath()

    if(!is.null(x$getCollection())) {
        collection <- x$getCollection()$uuid

        if(!x$getName() == "")
            relativePath <- paste0("/", relativePath)
    }

    cat(paste0("Type: ", "\"", "Arvados Subcollection", "\""), sep = "\n")
    cat(paste0("Name: ", "\"", x$getName(), "\""), sep = "\n")
    cat(paste0("Relative path: ", "\"", relativePath, "\""), sep = "\n")
    cat(paste0("Collection: ", "\"", collection, "\""), sep = "\n")
}

================================================
FILE: contrib/R-sdk/R/util.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

#' listAll
#'
#' List all resources even if the number of items is greater than maximum API limit.
#'
#' @param fn Arvados method used to retrieve items from REST service.
#' @param ... Optional arguments which will be passed to fn.
#' @examples
#' \dontrun{
#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
#' cl <- listAll(arv$collections_list, filters = list(list("name", "like", "test%")))
#' }
#' @export
listAll <- function(fn, ...)
{
    offset <- 0
    itemsAvailable <- .Machine$integer.max
    items <- c()

    while(length(items) < itemsAvailable) {
        serverResponse <- fn(offset = offset, ...)
        if(!is.null(serverResponse$errors))
            stop(serverResponse$errors)

        items <- c(items, serverResponse$items)
        offset <- length(items)
        itemsAvailable <- serverResponse$items_available
    }

    items
}

# NOTE: Package private functions

trimFromStart <- function(sample, trimCharacters)
{
    if(startsWith(sample, trimCharacters))
        sample <- substr(sample, nchar(trimCharacters) + 1, nchar(sample))

    sample
}

trimFromEnd <- function(sample, trimCharacters)
{
    if(endsWith(sample, trimCharacters))
        sample <- substr(sample, 0, nchar(sample) - nchar(trimCharacters))

    sample
}

RListToPythonList <- function(RList, separator = ", ")
{
    pythonArrayContent <- sapply(RList, function(elementInList) {
        if((is.vector(elementInList) || is.list(elementInList)) &&
           length(elementInList) > 1) {
            return(RListToPythonList(elementInList, separator))
        } else {
            return(paste0("\"", elementInList, "\""))
        }
    })

    pythonArray <- paste0("[", paste0(pythonArrayContent, collapse = separator), "]")
    pythonArray
}

appendToStartIfNotExist <- function(sample, characters)
{
    if(!startsWith(sample, characters))
        sample <- paste0(characters, sample)

    sample
}

splitToPathAndName = function(path)
{
    path <- appendToStartIfNotExist(path, "/")
    components <- unlist(stringr::str_split(path, "/"))

    nameAndPath <- list()
    nameAndPath$name <- components[length(components)]
    nameAndPath$path <- trimFromStart(paste0(components[-length(components)], collapse = "/"),
                                      "/")
    nameAndPath
}

================================================
FILE: contrib/R-sdk/R/zzz.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

.onLoad <- function(libName, pkgName) {
    minAllowedRVersion <- "3.3.0"
    currentRVersion <- getRversion()

    if(currentRVersion < minAllowedRVersion)
        print(paste0("Minimum R version required to run ", pkgName, " is ",
                     minAllowedRVersion, ". Your current version is ",
                     toString(currentRVersion), ". Please update R and try again."))
}

================================================
FILE: contrib/R-sdk/README.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# R SDK for Arvados

This SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections. The API is not final and feedback is solicited from users on ways in which it could be improved.

## Key Topics
* Installation
* Usage
* Initializing API
* Working with collections
* Manipulating collection content
* Working with Arvados projects
* Help
* Building the ArvadosR package

## Installation

Minimum R version required to run ArvadosR is 3.3.0.

```r
install.packages("ArvadosR", repos=c("https://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)
library('ArvadosR')
```

> **Note**
> On Linux, you may have to install supporting packages.
>
> On Red Hat, AlmaLinux, and Rocky Linux, this is:
> ```
> yum install libxml2-devel openssl-devel curl-devel
> ```
>
> On Debian and Ubuntu, this is:
> ```
> apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev
> ```

## Usage

### Initializing API

```r
# use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
arv <- Arvados$new()

# provide them explicitly
arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
```

Optionally, add `numRetries` parameter to specify number of times to retry failed service requests. Default is 0.
```r
arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
```

This parameter can be set at any time using `setNumRetries`

```r
arv$setNumRetries(5)
```

### Working with Arvados projects

##### Create project:

```r
newProject <- arv$project_create(name = "project name", description = "project description", owner_uuid = "project UUID", properties = NULL, ensureUniqueName = "false")
```

##### Update project:

```r
updatedProject <- arv$project_update(name = "new project name", properties = newProperties, uuid = "projectUUID")
```

##### Delete a project:

```r
deletedProject <- arv$project_delete("uuid")
```

#### Find a project:

##### Get a project:

```r
project <- arv$project_get("uuid")
```

##### List projects:

```r
# list subprojects of a project
projects <- arv$project_list(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))

# list projects which have names beginning with Example
examples <- arv$project_list(list(list("name", "like", "Example%")))
```

##### List all projects even if the number of items is greater than maximum API limit:

```r
projects <- listAll(arv$project_list, list(list("name", "like", "Example%")))
```

### Working with collections

#### Create a new collection:

```r
newCollection <- arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
```

#### Update a collection's metadata:

```r
collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
```

#### Delete a collection:

```r
deletedCollection <- arv$collections_delete("uuid")
```

#### Find a collection:

##### Get a collection:

```r
collection <- arv$collections_get("uuid")
```

Be aware that the result from `collections_get` is not a Collection class. The object returned from this method lets you access collection fields like "name" and "description". The Collection class lets you access the files in the collection for reading and writing, and is described in the next section.
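For example (a short sketch, assuming a valid collection UUID):

```r
# API record: metadata fields only
record <- arv$collections_get("uuid")
record$name
record$description

# Collection class: access to the files inside the collection
collection <- Collection$new(arv, "uuid")
files <- collection$getFileListing()
```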
##### List collections:

```r
# offset of 0 and default limit of 100
collectionList <- arv$collections_list(list(list("name", "like", "Test%")))
collectionList <- arv$collections_list(list(list("name", "like", "Test%")), limit = 10, offset = 2)

# count of total number of items (may be more than returned due to paging)
collectionList$items_available

# items which match the filter criteria
collectionList$items
```

##### List all collections even if the number of items is greater than maximum API limit:

```r
collectionList <- listAll(arv$collections_list, list(list("name", "like", "Test%")))
```

### Manipulating collection content

#### Initialize a collection object:

```r
collection <- Collection$new(arv, "uuid")
```

#### Get list of files:

```r
files <- collection$getFileListing()
```

#### Get ArvadosFile or Subcollection from internal tree-like structure:

```r
arvadosFile <- collection$get("location/to/my/file.cpp")
# or
arvadosSubcollection <- collection$get("location/to/my/directory/")
```

#### Read a table:

```r
arvadosFile <- collection$get("myinput.txt")
arvConnection <- arvadosFile$connection("r")
mytable <- read.table(arvConnection)
```

#### Write a table:

```r
arvadosFile <- collection$create("myoutput.txt")[[1]]
arvConnection <- arvadosFile$connection("w")
write.table(mytable, arvConnection)
arvadosFile$flush()
```

#### Read a table from a tab delimited file:

```r
arvadosFile <- collection$get("myinput.txt")
arvConnection <- arvadosFile$connection("r")
mytable <- read.delim(arvConnection)
```

#### Read a gzip compressed R object:

```r
obj <- readRDS(gzcon(coll$get("abc.RDS")$connection("rb")))
```

#### Write to existing file (overwrites current content of the file):

```r
arvadosFile <- collection$get("location/to/my/file.cpp")
arvadosFile$write("This is new file content")
```

#### Read whole file or just a portion of it:

```r
fileContent <- arvadosFile$read()
fileContent <- arvadosFile$read("text")
fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
```

#### Read various file types:

Chooses file type based on file name extension. Recognized file extensions: 'txt', 'xlsx', 'csv', 'tsv', 'fasta', 'dat', 'bin', 'rds', 'rdata'.

```r
collection <- Collection$new(arv, collectionUUID)
readFile <- collection$readArvFile(arvadosFile, istable = 'yes')                          # table
readFile <- collection$readArvFile(arvadosFile, istable = 'no')                           # text
readFile <- collection$readArvFile(arvadosFile)                                           # xlsx, csv, tsv, rds, rdata
readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta')                      # fasta
readFile <- collection$readArvFile(arvadosFile, Ncol = 4, Nrow = 32)                      # binary data.frame, only numbers
readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary data.frame with factor or text
```

#### Get ArvadosFile or Subcollection size:

```r
size <- arvadosFile$getSizeInBytes()
# or
size <- arvadosSubcollection$getSizeInBytes()
```

#### Create new file in a collection (returns a vector of one or more ArvadosFile objects):

```r
collection$create(files)
```

**Example**

```r
mainFile <- collection$create("cpp/src/main.cpp")[[1]]
fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
```

#### Delete file from a collection:

```r
collection$remove("location/to/my/file.cpp")
```

You can remove both Subcollection and ArvadosFile. If a subcollection contains other files or folders, they will be removed recursively.
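For example, removing a folder (hypothetical name) also removes everything inside it:

```r
collection$remove("mySubfolder/")
```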
> **Note**
> You can also remove multiple files at once:
> ```r
> collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
> ```

#### Delete file or folder from a Subcollection:

```r
subcollection <- collection$get("mySubcollection/")
subcollection$remove("fileInsideSubcollection.exe")
subcollection$remove("folderInsideSubcollection/")
```

#### Move or rename a file or folder within a collection (moving between collections is currently not supported):

##### Directly from collection

```r
collection$move("folder/file.cpp", "file.cpp")
```

##### Or from file

```r
file <- collection$get("location/to/my/file.cpp")
file$move("newDestination/file.cpp")
```

##### Or from subcollection

```r
subcollection <- collection$get("location/to/folder")
subcollection$move("newDestination/folder")
```

> **Note**
> Make sure to include the new file name in the destination. In the second example, `file$move("newDestination/")` will not work.

#### Copy file or folder within a collection (copying between collections is currently not supported):

##### Directly from collection

```r
collection$copy("folder/file.cpp", "file.cpp")
```

##### Or from file

```r
file <- collection$get("location/to/my/file.cpp")
file$copy("destination/file.cpp")
```

##### Or from subcollection

```r
subcollection <- collection$get("location/to/folder")
subcollection$copy("destination/folder")
```

### Help

#### View help page of Arvados classes by putting `?` before class name:

```r
?Arvados
?Collection
?Subcollection
?ArvadosFile
```

#### View help page of any method defined in Arvados class by putting `?` before method name:

```r
?collections_update
?workflows_get
```

## Building the ArvadosR package

```
make package
```

This will create a tarball of the ArvadosR package in the current directory.

## Documentation

Complete documentation, including the [User Guide](https://doc.arvados.org/user/index.html), [Installation documentation](https://doc.arvados.org/install/index.html), [Administrator documentation](https://doc.arvados.org/admin/index.html) and [API documentation](https://doc.arvados.org/api/index.html) is available at http://doc.arvados.org/

## Community

Visit [Arvados Community and Getting Help](https://doc.arvados.org/user/getting_started/community.html).

## Reporting bugs

[Report an issue on GitHub](https://github.com/arvados/arvados/issues/new)

## Licensing

Arvados is Free Software. See [Arvados Free Software Licenses](https://doc.arvados.org/user/copying/copying.html) for information about the open source licenses used in Arvados.
================================================ FILE: contrib/R-sdk/arvados-v1-discovery.json ================================================ { "auth": { "oauth2": { "scopes": { "https://api.arvados.org/auth/arvados": { "description": "View and manage objects" }, "https://api.arvados.org/auth/arvados.readonly": { "description": "View objects" } } } }, "basePath": "/arvados/v1/", "batchPath": "batch", "description": "The API to interact with Arvados.", "discoveryVersion": "v1", "documentationLink": "http://doc.arvados.org/api/index.html", "id": "arvados:v1", "kind": "discovery#restDescription", "name": "arvados", "parameters": { "alt": { "type": "string", "description": "Data format for the response.", "default": "json", "enum": [ "json" ], "enumDescriptions": [ "Responses with Content-Type of application/json" ], "location": "query" }, "fields": { "type": "string", "description": "Selector specifying which fields to include in a partial response.", "location": "query" }, "key": { "type": "string", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "location": "query" }, "oauth_token": { "type": "string", "description": "OAuth 2.0 token for the current user.", "location": "query" } }, "protocol": "rest", "resources": { "api_client_authorizations": { "methods": { "get": { "id": "arvados.api_client_authorizations.get", "path": "api_client_authorizations/{uuid}", "httpMethod": "GET", "description": "Get a ApiClientAuthorization record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the ApiClientAuthorization to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "ApiClientAuthorization" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.api_client_authorizations.list", "path": "api_client_authorizations", "httpMethod": "GET", "description": "Retrieve a ApiClientAuthorizationList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in 
the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "ApiClientAuthorizationList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.api_client_authorizations.create", "path": "api_client_authorizations", "httpMethod": "POST", "description": "Create a new ApiClientAuthorization.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "api_client_authorization": { "$ref": "ApiClientAuthorization" } } }, "response": { "$ref": "ApiClientAuthorization" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.api_client_authorizations.update", "path": "api_client_authorizations/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing ApiClientAuthorization.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the ApiClientAuthorization to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "api_client_authorization": { "$ref": "ApiClientAuthorization" } } }, "response": { "$ref": "ApiClientAuthorization" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": 
"arvados.api_client_authorizations.delete", "path": "api_client_authorizations/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing ApiClientAuthorization.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the ApiClientAuthorization to delete.", "required": true, "location": "path" } }, "response": { "$ref": "ApiClientAuthorization" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "create_system_auth": { "id": "arvados.api_client_authorizations.create_system_auth", "path": "api_client_authorizations/create_system_auth", "httpMethod": "POST", "description": "Create a token for the system (\"root\") user.", "parameters": { "scopes": { "type": "array", "required": false, "default": "[\"all\"]", "description": "An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n", "location": "query" } }, "response": { "$ref": "ApiClientAuthorization" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "current": { "id": "arvados.api_client_authorizations.current", "path": "api_client_authorizations/current", "httpMethod": "GET", "description": "Return all metadata for the token used to authorize this request.", "parameters": {}, "response": { "$ref": "ApiClientAuthorization" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "authorized_keys": { "methods": { "get": { "id": "arvados.authorized_keys.get", "path": "authorized_keys/{uuid}", "httpMethod": "GET", "description": "Get a AuthorizedKey record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the AuthorizedKey to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "AuthorizedKey" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.authorized_keys.list", "path": "authorized_keys", "httpMethod": "GET", "description": "Retrieve a AuthorizedKeyList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you 
specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "AuthorizedKeyList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.authorized_keys.create", "path": "authorized_keys", "httpMethod": "POST", "description": "Create a new AuthorizedKey.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "authorized_key": { "$ref": "AuthorizedKey" } } }, "response": { "$ref": "AuthorizedKey" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.authorized_keys.update", "path": "authorized_keys/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing AuthorizedKey.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the AuthorizedKey to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "authorized_key": { "$ref": "AuthorizedKey" } } }, "response": { "$ref": "AuthorizedKey" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.authorized_keys.delete", "path": "authorized_keys/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing 
AuthorizedKey.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the AuthorizedKey to delete.", "required": true, "location": "path" } }, "response": { "$ref": "AuthorizedKey" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "collections": { "methods": { "get": { "id": "arvados.collections.get", "path": "collections/{uuid}", "httpMethod": "GET", "description": "Get a Collection record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Show collection even if its `is_trashed` attribute is true.", "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.collections.list", "path": "collections", "httpMethod": "GET", "description": "Retrieve a CollectionList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. 
Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Include collections whose `is_trashed` attribute is true.", "location": "query" }, "include_old_versions": { "type": "boolean", "required": false, "default": "false", "description": "Include past collection versions.", "location": "query" } }, "response": { "$ref": "CollectionList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.collections.create", "path": "collections", "httpMethod": "POST", "description": "Create a new Collection.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false }, "replace_files": { "type": "object", "description": "Add, delete, and replace files and directories with new content\nand/or content from other collections. 
Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n", "required": false, "location": "query", "properties": {}, "additionalProperties": { "type": "string" } }, "replace_segments": { "type": "object", "description": "Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n", "required": false, "location": "query", "properties": {}, "additionalProperties": { "type": "string" } } }, "request": { "required": true, "properties": { "collection": { "$ref": "Collection" } } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.collections.update", "path": "collections/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Collection.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "replace_files": { "type": "object", "description": "Add, delete, and replace files and directories with new content\nand/or content from other collections. Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n", "required": false, "location": "query", "properties": {}, "additionalProperties": { "type": "string" } }, "replace_segments": { "type": "object", "description": "Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n", "required": false, "location": "query", "properties": {}, "additionalProperties": { "type": "string" } } }, "request": { "required": true, "properties": { "collection": { "$ref": "Collection" } } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.collections.delete", "path": "collections/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Collection.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "provenance": { "id": "arvados.collections.provenance", "path": "collections/{uuid}/provenance", "httpMethod": "GET", "description": "Detail the provenance of a given collection.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to query.", "required": true, "location": "path" } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "used_by": { "id": "arvados.collections.used_by", "path": "collections/{uuid}/used_by", "httpMethod": "GET", "description": "Detail where a given collection has been used.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to query.", "required": true, "location": "path" } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "trash": { "id": 
"arvados.collections.trash", "path": "collections/{uuid}/trash", "httpMethod": "POST", "description": "Trash a collection.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to update.", "required": true, "location": "path" } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "untrash": { "id": "arvados.collections.untrash", "path": "collections/{uuid}/untrash", "httpMethod": "POST", "description": "Untrash a collection.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Collection to update.", "required": true, "location": "path" } }, "response": { "$ref": "Collection" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "computed_permissions": { "methods": { "list": { "id": "arvados.computed_permissions.list", "path": "computed_permissions", "httpMethod": "GET", "description": "Retrieve a ComputedPermissionList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" } }, "response": { "$ref": "ComputedPermissionList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] } } }, "containers": { "methods": { "get": { "id": "arvados.containers.get", "path": "containers/{uuid}", "httpMethod": "GET", "description": "Get a Container record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.containers.list", "path": "containers", "httpMethod": "GET", "description": "Retrieve a ContainerList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched these search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "ContainerList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.containers.create", "path": "containers", "httpMethod": "POST", "description": "Create a new Container.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "container": { "$ref": "Container" } } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.containers.update", "path": "containers/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Container.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "container": { "$ref": "Container" } } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.containers.delete", "path": "containers/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Container.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "auth": { "id": "arvados.containers.auth", "path": "containers/{uuid}/auth", "httpMethod": "GET", "description": "Get the API client authorization token associated with this container.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to query.", "required": true, "location": "path" } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "lock": { "id": "arvados.containers.lock", "path": "containers/{uuid}/lock", "httpMethod": "POST", "description": "Lock a container (for a dispatcher to begin running it).", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to update.", "required": true, "location": "path" } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "unlock": { "id": "arvados.containers.unlock", "path": "containers/{uuid}/unlock", "httpMethod": 
"POST", "description": "Unlock a container (for a dispatcher to stop running it).", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to update.", "required": true, "location": "path" } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update_priority": { "id": "arvados.containers.update_priority", "path": "containers/{uuid}/update_priority", "httpMethod": "POST", "description": "Recalculate and return the priority of a given container.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to update.", "required": true, "location": "path" } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "secret_mounts": { "id": "arvados.containers.secret_mounts", "path": "containers/{uuid}/secret_mounts", "httpMethod": "GET", "description": "Return secret mount information for the container associated with the API token authorizing this request.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Container to query.", "required": true, "location": "path" } }, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "current": { "id": "arvados.containers.current", "path": "containers/current", "httpMethod": "GET", "description": "Return the container record associated with the API token authorizing this request.", "parameters": {}, "response": { "$ref": "Container" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "container_requests": { "methods": { "get": { "id": "arvados.container_requests.get", "path": "container_requests/{uuid}", "httpMethod": "GET", "description": "Get a ContainerRequest record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the ContainerRequest to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Show container request even if its owner project is trashed.", "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "ContainerRequest" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.container_requests.list", "path": "container_requests", "httpMethod": "GET", "description": "Retrieve a ContainerRequestList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": 
"array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Include container requests whose owner project is trashed.", "location": "query" } }, "response": { "$ref": "ContainerRequestList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.container_requests.create", "path": "container_requests", "httpMethod": "POST", "description": "Create a new ContainerRequest.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "container_request": { "$ref": "ContainerRequest" } } }, "response": { "$ref": "ContainerRequest" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.container_requests.update", "path": "container_requests/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing ContainerRequest.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the ContainerRequest to update.", "required": true, 
"location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "container_request": { "$ref": "ContainerRequest" } } }, "response": { "$ref": "ContainerRequest" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.container_requests.delete", "path": "container_requests/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing ContainerRequest.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the ContainerRequest to delete.", "required": true, "location": "path" } }, "response": { "$ref": "ContainerRequest" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "container_status": { "id": "arvados.container_requests.container_status", "path": "container_requests/{uuid}/container_status", "httpMethod": "GET", "description": "Return scheduling details for a container request.", "parameters": { "uuid": { "type": "string", "required": true, "description": "The UUID of the container request to query.", "location": "query" } }, "response": { "$ref": "ContainerRequest" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "credentials": { "methods": { "get": { "id": "arvados.credentials.get", "path": "credentials/{uuid}", "httpMethod": "GET", "description": "Get a Credential record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Credential to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Credential" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.credentials.list", "path": "credentials", "httpMethod": "GET", "description": "Retrieve a CredentialList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", 
"description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "CredentialList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.credentials.create", "path": "credentials", "httpMethod": "POST", "description": "Create a new Credential.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "credential": { "$ref": "Credential" } } }, "response": { "$ref": "Credential" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.credentials.update", "path": "credentials/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Credential.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Credential to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "credential": { "$ref": "Credential" } } }, "response": { "$ref": "Credential" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.credentials.delete", "path": "credentials/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Credential.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Credential to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Credential" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "secret": { "id": 
"arvados.credentials.secret", "path": "credentials/{uuid}/secret", "httpMethod": "GET", "description": "Fetch the secret part of the credential (can only be invoked by running containers).", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Credential to query.", "required": true, "location": "path" } }, "response": { "$ref": "Credential" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "groups": { "methods": { "get": { "id": "arvados.groups.get", "path": "groups/{uuid}", "httpMethod": "GET", "description": "Get a Group record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Group to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Return group/project even if its `is_trashed` attribute is true.", "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.groups.list", "path": "groups", "httpMethod": "GET", "description": "Retrieve a GroupList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. 
Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched these search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Include items whose `is_trashed` attribute is true.", "location": "query" } }, "response": { "$ref": "GroupList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.groups.create", "path": "groups", "httpMethod": "POST", "description": "Create a new Group.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false }, "async": { "required": false, "type": "boolean", "location": "query", "default": "false", "description": "If true, cluster permissions will not be updated immediately, but instead at the next configured update interval." } }, "request": { "required": true, "properties": { "group": { "$ref": "Group" } } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.groups.update", "path": "groups/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Group.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Group to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "async": { "required": false, "type": "boolean", "location": "query", "default": "false", "description": "If true, cluster permissions will not be updated immediately, but instead at the next configured update interval." 
} }, "request": { "required": true, "properties": { "group": { "$ref": "Group" } } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.groups.delete", "path": "groups/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Group.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Group to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "contents": { "id": "arvados.groups.contents", "path": "groups/contents", "httpMethod": "GET", "description": "List objects that belong to a group.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Include items whose `is_trashed` attribute is true.", "location": "query" }, "uuid": { "type": "string", "required": false, "default": "", "description": "If given, limit the listing to objects owned by the\nuser or group with this UUID.", "location": "query" }, "recursive": { "type": "boolean", "required": false, "default": "false", "description": "If true, include contents from child groups recursively.", "location": "query" }, "include": { "type": "array", "required": false, "description": "An array of referenced objects to include in the `included` field of the response. Supported values in the array are:\n\n * `\"container_uuid\"`\n * `\"owner_uuid\"`\n * `\"collection_uuid\"`\n\n", "location": "query" }, "include_old_versions": { "type": "boolean", "required": false, "default": "false", "description": "If true, include past versions of collections in the listing.", "location": "query" }, "exclude_home_project": { "type": "boolean", "required": false, "default": "false", "description": "If true, exclude contents of the user's home project from the listing.\nCalling this method with this flag set is how clients enumerate objects shared\nwith the current user.", "location": "query" } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "shared": { "id": "arvados.groups.shared", "path": "groups/shared", "httpMethod": "GET", "description": "List groups that the current user can access via permission links.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result 
set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" }, "include_trash": { "type": "boolean", "required": false, "default": "false", "description": "Include items whose `is_trashed` attribute is true.", "location": "query" }, "include": { "type": "string", "required": false, "description": "A string naming referenced objects to include in the `included` field of the response. 
Supported values are:\n\n * `\"owner_uuid\"`\n\n", "location": "query" } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "trash": { "id": "arvados.groups.trash", "path": "groups/{uuid}/trash", "httpMethod": "POST", "description": "Trash a group.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Group to update.", "required": true, "location": "path" } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "untrash": { "id": "arvados.groups.untrash", "path": "groups/{uuid}/untrash", "httpMethod": "POST", "description": "Untrash a group.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Group to update.", "required": true, "location": "path" } }, "response": { "$ref": "Group" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "keep_services": { "methods": { "get": { "id": "arvados.keep_services.get", "path": "keep_services/{uuid}", "httpMethod": "GET", "description": "Get a KeepService record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the KeepService to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "KeepService" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.keep_services.list", "path": "keep_services", "httpMethod": "GET", "description": "Retrieve a KeepServiceList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `ATTRIBUTE DIRECTION`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may 
change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched these search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "KeepServiceList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.keep_services.create", "path": "keep_services", "httpMethod": "POST", "description": "Create a new KeepService.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "keep_service": { "$ref": "KeepService" } } }, "response": { "$ref": "KeepService" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.keep_services.update", "path": "keep_services/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing KeepService.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the KeepService to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "keep_service": { "$ref": "KeepService" } } }, "response": { "$ref": "KeepService" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.keep_services.delete", "path": "keep_services/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing KeepService.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the KeepService to delete.", "required": true, "location": "path" } }, "response": { "$ref": "KeepService" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "accessible": { "id": "arvados.keep_services.accessible", "path": "keep_services/accessible", "httpMethod": "GET", "description": "List Keep services that the current client can access.", "parameters": {}, "response": { "$ref": "KeepService" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "links": { "methods": { "get": { "id": "arvados.links.get", "path": 
"links/{uuid}", "httpMethod": "GET", "description": "Get a Link record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Link to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Link" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.links.list", "path": "links", "httpMethod": "GET", "description": "Retrieve a LinkList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "LinkList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.links.create", "path": "links", "httpMethod": "POST", "description": "Create a new Link.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "link": { "$ref": "Link" } } }, "response": { "$ref": "Link" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.links.update", "path": "links/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Link.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Link to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "link": { "$ref": "Link" } } }, "response": { "$ref": "Link" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.links.delete", "path": "links/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Link.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Link to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Link" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "get_permissions": { "id": "arvados.links.get_permissions", "path": "permissions/{uuid}", "httpMethod": "GET", "description": "List permissions granted on an Arvados object.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Link to query.", "required": true, "location": "path" } }, "response": { "$ref": "Link" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "logs": { "methods": { "get": { "id": "arvados.logs.get", "path": "logs/{uuid}", "httpMethod": "GET", "description": "Get a Log record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Log to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Log" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": 
"arvados.logs.list", "path": "logs", "httpMethod": "GET", "description": "Retrieve a LogList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "LogList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.logs.create", "path": "logs", "httpMethod": "POST", "description": "Create a new Log.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "log": { "$ref": "Log" } } }, "response": { "$ref": "Log" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.logs.update", "path": "logs/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Log.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Log to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "log": { "$ref": "Log" } } }, "response": { "$ref": "Log" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.logs.delete", "path": "logs/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Log.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Log to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Log" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "users": { "methods": { "get": { "id": "arvados.users.get", "path": "users/{uuid}", "httpMethod": "GET", "description": "Get a User record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the User to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.users.list", "path": "users", "httpMethod": "GET", "description": "Retrieve a UserList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": 
"query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "UserList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.users.create", "path": "users", "httpMethod": "POST", "description": "Create a new User.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "user": { "$ref": "User" } } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.users.update", "path": "users/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing User.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the User to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not try to update the user on any other clusters in the federation,\nonly the cluster that received the request.\nYou must be an administrator to use this flag.", "location": "query" } }, "request": { "required": true, "properties": { "user": { "$ref": "User" } } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.users.delete", "path": "users/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing User.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the User to delete.", "required": true, "location": "path" } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "current": { "id": "arvados.users.current", "path": "users/current", "httpMethod": "GET", "description": "Return the user record associated with the API token authorizing this request.", "parameters": {}, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "system": { "id": "arvados.users.system", "path": "users/system", "httpMethod": "GET", "description": "Return this cluster's system (\"root\") user record.", "parameters": {}, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "activate": { "id": "arvados.users.activate", "path": "users/{uuid}/activate", "httpMethod": "POST", "description": "Set the `is_active` flag on a user record.", 
"parameters": { "uuid": { "type": "string", "description": "The UUID of the User to update.", "required": true, "location": "path" } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "setup": { "id": "arvados.users.setup", "path": "users/setup", "httpMethod": "POST", "description": "Convenience method to \"fully\" set up a user record with a virtual machine login and notification email.", "parameters": { "uuid": { "type": "string", "required": false, "description": "UUID of an existing user record to set up.", "location": "query" }, "user": { "type": "object", "required": false, "description": "Attributes of a new user record to set up.", "location": "query" }, "repo_name": { "type": "string", "required": false, "description": "This parameter is obsolete and ignored.", "location": "query" }, "vm_uuid": { "type": "string", "required": false, "description": "If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.", "location": "query" }, "send_notification_email": { "type": "boolean", "required": false, "default": "false", "description": "If true, send an email to the user notifying them they can now access this Arvados cluster.", "location": "query" } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "unsetup": { "id": "arvados.users.unsetup", "path": "users/{uuid}/unsetup", "httpMethod": "POST", "description": "Unset a user's active flag and delete associated records.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the User to update.", "required": true, "location": "path" } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "merge": { "id": "arvados.users.merge", "path": "users/merge", "httpMethod": "POST", "description": "Transfer ownership of one user's data to another.", "parameters": { "new_owner_uuid": { "type": "string", "required": true, "description": "UUID of the user or group that will take ownership of data owned by the old user.", "location": "query" }, "new_user_token": { "type": "string", "required": false, "description": "Valid API token for the user receiving ownership. If you use this option, it takes ownership of data owned by the user making the request.", "location": "query" }, "redirect_to_new_user": { "type": "boolean", "required": false, "default": "false", "description": "If true, authorization attempts for the old user will be redirected to the new user.", "location": "query" }, "old_user_uuid": { "type": "string", "required": false, "description": "UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.", "location": "query" }, "new_user_uuid": { "type": "string", "required": false, "description": "UUID of the user receiving ownership. 
You must be an admin to use this option.", "location": "query" } }, "response": { "$ref": "User" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "user_agreements": { "methods": { "get": { "id": "arvados.user_agreements.get", "path": "user_agreements/{uuid}", "httpMethod": "GET", "description": "Get a UserAgreement record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the UserAgreement to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "UserAgreement" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.user_agreements.list", "path": "user_agreements", "httpMethod": "GET", "description": "Retrieve a UserAgreementList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_available`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "UserAgreementList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.user_agreements.create", "path": "user_agreements", "httpMethod": "POST", "description": "Create a new UserAgreement.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "user_agreement": { "$ref": "UserAgreement" } } }, "response": { "$ref": "UserAgreement" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.user_agreements.update", "path": "user_agreements/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing UserAgreement.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the UserAgreement to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "user_agreement": { "$ref": "UserAgreement" } } }, "response": { "$ref": "UserAgreement" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.user_agreements.delete", "path": "user_agreements/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing UserAgreement.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the UserAgreement to delete.", "required": true, "location": "path" } }, "response": { "$ref": "UserAgreement" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "signatures": { "id": "arvados.user_agreements.signatures", "path": "user_agreements/signatures", "httpMethod": "GET", "description": "List all user agreement signature links from a user.", "parameters": {}, "response": { "$ref": "UserAgreement" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "sign": { "id": "arvados.user_agreements.sign", "path": "user_agreements/sign", "httpMethod": "POST", "description": "Create a signature link from the current user for a given user agreement.", "parameters": {}, "response": { "$ref": "UserAgreement" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "virtual_machines": { "methods": { "get": { "id": "arvados.virtual_machines.get", "path": "virtual_machines/{uuid}", "httpMethod": "GET", "description": "Get a VirtualMachine record by UUID.", "parameters": { "uuid": { "type": 
"string", "description": "The UUID of the VirtualMachine to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "VirtualMachine" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.virtual_machines.list", "path": "virtual_machines", "httpMethod": "GET", "description": "Retrieve a VirtualMachineList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "VirtualMachineList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.virtual_machines.create", "path": "virtual_machines", "httpMethod": "POST", "description": "Create a new VirtualMachine.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "virtual_machine": { "$ref": "VirtualMachine" } } }, "response": { "$ref": "VirtualMachine" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.virtual_machines.update", "path": "virtual_machines/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing VirtualMachine.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the VirtualMachine to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "virtual_machine": { "$ref": "VirtualMachine" } } }, "response": { "$ref": "VirtualMachine" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.virtual_machines.delete", "path": "virtual_machines/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing VirtualMachine.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the VirtualMachine to delete.", "required": true, "location": "path" } }, "response": { "$ref": "VirtualMachine" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "logins": { "id": "arvados.virtual_machines.logins", "path": "virtual_machines/{uuid}/logins", "httpMethod": "GET", "description": "List login permission links for a given virtual machine.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the VirtualMachine to query.", "required": true, "location": "path" } }, "response": { "$ref": "VirtualMachine" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "get_all_logins": { "id": "arvados.virtual_machines.get_all_logins", "path": "virtual_machines/get_all_logins", "httpMethod": "GET", "description": "List login permission links for all virtual machines.", "parameters": {}, "response": { "$ref": "VirtualMachine" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "workflows": { "methods": { "get": { "id": "arvados.workflows.get", 
"path": "workflows/{uuid}", "httpMethod": "GET", "description": "Get a Workflow record by UUID.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Workflow to return.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "parameterOrder": [ "uuid" ], "response": { "$ref": "Workflow" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "list": { "id": "arvados.workflows.list", "path": "workflows", "httpMethod": "GET", "description": "Retrieve a WorkflowList.", "parameters": { "filters": { "type": "array", "required": false, "description": "Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n", "location": "query" }, "where": { "type": "object", "required": false, "description": "An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n", "location": "query" }, "order": { "type": "array", "required": false, "description": "An array of strings to set the order in which matching objects are returned.\nEach string has the format ` `.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n", "location": "query" }, "select": { "type": "array", "description": "An array of names of attributes to return from each matching object.", "required": false, "location": "query" }, "distinct": { "type": "boolean", "required": false, "default": "false", "description": "If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n", "location": "query" }, "limit": { "type": "integer", "required": false, "default": "100", "description": "The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n", "location": "query" }, "offset": { "type": "integer", "required": false, "default": "0", "description": "Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n", "location": "query" }, "count": { "type": "string", "required": false, "default": "exact", "description": "A string to determine result counting behavior. Supported values are:\n\n * `\"exact\"`: The response will include an `items_available` field that\n counts the number of objects that matched this search criteria,\n including ones not included in `items`.\n\n * `\"none\"`: The response will not include an `items_avaliable`\n field. 
This improves performance by returning a result as soon as enough\n `items` have been loaded for this result.\n\n", "location": "query" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster to return objects from", "location": "query", "required": false }, "bypass_federation": { "type": "boolean", "required": false, "default": "false", "description": "If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n", "location": "query" } }, "response": { "$ref": "WorkflowList" }, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] }, "create": { "id": "arvados.workflows.create", "path": "workflows", "httpMethod": "POST", "description": "Create a new Workflow.", "parameters": { "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" }, "ensure_unique_name": { "type": "boolean", "description": "If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.", "location": "query", "required": false, "default": "false" }, "cluster_id": { "type": "string", "description": "Cluster ID of a federated cluster where this object should be created.", "location": "query", "required": false } }, "request": { "required": true, "properties": { "workflow": { "$ref": "Workflow" } } }, "response": { "$ref": "Workflow" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "update": { "id": "arvados.workflows.update", "path": "workflows/{uuid}", "httpMethod": "PUT", "description": "Update attributes of an existing Workflow.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Workflow to update.", "required": true, "location": "path" }, "select": { "type": "array", "description": "An array of names of attributes to return in the response.", "required": false, "location": "query" } }, "request": { "required": true, "properties": { "workflow": { "$ref": "Workflow" } } }, "response": { "$ref": "Workflow" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] }, "delete": { "id": "arvados.workflows.delete", "path": "workflows/{uuid}", "httpMethod": "DELETE", "description": "Delete an existing Workflow.", "parameters": { "uuid": { "type": "string", "description": "The UUID of the Workflow to delete.", "required": true, "location": "path" } }, "response": { "$ref": "Workflow" }, "scopes": [ "https://api.arvados.org/auth/arvados" ] } } }, "configs": { "methods": { "get": { "id": "arvados.configs.get", "path": "config", "httpMethod": "GET", "description": "Get this cluster's public configuration settings.", "parameters": {}, "parameterOrder": [], "response": {}, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] } } }, "vocabularies": { "methods": { "get": { "id": "arvados.vocabularies.get", "path": "vocabulary", "httpMethod": "GET", "description": "Get this cluster's configured vocabulary definition.\n\nRefer to [metadata vocabulary documentation][] for details.\n\n[metadata vocabulary documentation]: https://doc.arvados.org/admin/metadata-vocabulary.html\n\n", "parameters": {}, "parameterOrder": [], "response": {}, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] } } }, "sys": { "methods": { "get": { "id": "arvados.sys.trash_sweep", "path": "sys/trash_sweep", 
"httpMethod": "POST", "description": "Run scheduled data trash and sweep operations across this cluster's Keep services.", "parameters": {}, "parameterOrder": [], "response": {}, "scopes": [ "https://api.arvados.org/auth/arvados", "https://api.arvados.org/auth/arvados.readonly" ] } } } }, "revision": "20250402", "schemas": { "ApiClientAuthorizationList": { "id": "ApiClientAuthorizationList", "description": "A list of ApiClientAuthorization objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#apiClientAuthorizationList.", "default": "arvados#apiClientAuthorizationList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching ApiClientAuthorization objects.", "items": { "$ref": "ApiClientAuthorization" } } } }, "ApiClientAuthorization": { "id": "ApiClientAuthorization", "description": "Arvados API client authorization token\n\nThis resource represents an API token a user may use to authenticate an\nArvados API request.", "type": "object", "uuidPrefix": "gj3su", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "api_token": { "description": "The secret token that can be used to authorize Arvados API requests.", "type": "string" }, "created_by_ip_address": { "description": "The IP address of the client that created this token.", "type": "string" }, "last_used_by_ip_address": { "description": "The IP address of the client that last used this token.", "type": "string" }, "last_used_at": { "description": "The last time this token was used to authorize a request. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "expires_at": { "description": "The time after which this token is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "created_at": { "description": "The time this API client authorization was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "scopes": { "description": "An array of strings identifying HTTP methods and API paths this token is\nauthorized to use. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n\n", "type": "Array" }, "uuid": { "type": "string", "description": "This API client authorization's Arvados UUID, like `zzzzz-gj3su-12345abcde67890`." } } }, "AuthorizedKeyList": { "id": "AuthorizedKeyList", "description": "A list of AuthorizedKey objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#authorizedKeyList.", "default": "arvados#authorizedKeyList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching AuthorizedKey objects.", "items": { "$ref": "AuthorizedKey" } } } }, "AuthorizedKey": { "id": "AuthorizedKey", "description": "Arvados authorized public key\n\nThis resource represents a public key a user may use to authenticate themselves\nto services on the cluster. Its primary use today is to store SSH keys for\nvirtual machines (\"shell nodes\"). It may be extended to store other keys in\nthe future.", "type": "object", "uuidPrefix": "fngyi", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This authorized key's Arvados UUID, like `zzzzz-fngyi-12345abcde67890`." 
}, "owner_uuid": { "description": "The UUID of the user or group that owns this authorized key.", "type": "string" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this authorized key.", "type": "string" }, "modified_at": { "description": "The time this authorized key was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "name": { "description": "The name of this authorized key assigned by a user.", "type": "string" }, "key_type": { "description": "A string identifying what type of service uses this key. Supported values are:\n\n * `\"SSH\"`\n\n", "type": "string" }, "authorized_user_uuid": { "description": "The UUID of the Arvados user that is authorized by this key.", "type": "string" }, "public_key": { "description": "The full public key, in the format referenced by `key_type`.", "type": "text" }, "expires_at": { "description": "The time after which this key is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "created_at": { "description": "The time this authorized key was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" } } }, "CollectionList": { "id": "CollectionList", "description": "A list of Collection objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#collectionList.", "default": "arvados#collectionList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching Collection objects.", "items": { "$ref": "Collection" } } } }, "Collection": { "id": "Collection", "description": "Arvados data collection\n\nA collection describes how a set of files is stored in data blocks in Keep,\nalong with associated metadata.", "type": "object", "uuidPrefix": "4zz18", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "owner_uuid": { "description": "The UUID of the user or group that owns this collection.", "type": "string" }, "created_at": { "description": "The time this collection was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this collection.", "type": "string" }, "modified_at": { "description": "The time this collection was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "portable_data_hash": { "description": "The portable data hash of this collection. This string provides a unique\nand stable reference to these contents.", "type": "string" }, "replication_desired": { "description": "The number of copies that should be made for data in this collection.", "type": "integer" }, "replication_confirmed_at": { "description": "The last time the cluster confirmed that it met `replication_confirmed`\nfor this collection. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "replication_confirmed": { "description": "The number of copies of data in this collection that the cluster has confirmed\nexist in storage.", "type": "integer" }, "uuid": { "type": "string", "description": "This collection's Arvados UUID, like `zzzzz-4zz18-12345abcde67890`." }, "manifest_text": { "description": "The manifest text that describes how files are constructed from data blocks\nin this collection. 
Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n", "type": "text" }, "name": { "description": "The name of this collection assigned by a user.", "type": "string" }, "description": { "description": "A longer HTML description of this collection assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.", "type": "string" }, "properties": { "description": "A hash of arbitrary metadata for this collection.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "delete_at": { "description": "The time this collection will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "trash_at": { "description": "The time this collection will be trashed. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "is_trashed": { "description": "A boolean flag to indicate whether or not this collection is trashed.", "type": "boolean" }, "storage_classes_desired": { "description": "An array of strings identifying the storage class(es) that should be used\nfor data in this collection. Storage classes are configured by the cluster administrator.", "type": "Array" }, "storage_classes_confirmed": { "description": "An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this collection's data.", "type": "Array" }, "storage_classes_confirmed_at": { "description": "The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "current_version_uuid": { "description": "The UUID of the current version of this collection.", "type": "string" }, "version": { "description": "An integer that counts which version of a collection this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n", "type": "integer" }, "preserve_version": { "description": "A boolean flag to indicate whether this specific version of this collection\nshould be persisted in cluster storage.", "type": "boolean" }, "file_count": { "description": "The number of files represented in this collection's `manifest_text`.\nThis attribute is read-only.", "type": "integer" }, "file_size_total": { "description": "The total size in bytes of files represented in this collection's `manifest_text`.\nThis attribute is read-only.", "type": "integer" } } }, "ComputedPermissionList": { "id": "ComputedPermissionList", "description": "A list of ComputedPermission objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#computedPermissionList.", "default": "arvados#computedPermissionList" }, "etag": { "type": "string", "description": "List cache version." 
}, "items": { "type": "array", "description": "An array of matching ComputedPermission objects.", "items": { "$ref": "ComputedPermission" } } } }, "ComputedPermission": { "id": "ComputedPermission", "description": "Arvados computed permission\n\nComputed permissions do not correspond directly to any Arvados resource, but\nprovide a simple way to query the entire graph of permissions granted to\nusers and groups.", "type": "object", "properties": { "user_uuid": { "description": "The UUID of the Arvados user who has this permission.", "type": "string" }, "target_uuid": { "description": "The UUID of the Arvados object the user has access to.", "type": "string" }, "perm_level": { "description": "A string representing the user's level of access to the target object.\nPossible values are:\n\n * `\"can_read\"`\n * `\"can_write\"`\n * `\"can_manage\"`\n\n", "type": "string" } } }, "ContainerList": { "id": "ContainerList", "description": "A list of Container objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#containerList.", "default": "arvados#containerList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching Container objects.", "items": { "$ref": "Container" } } } }, "Container": { "id": "Container", "description": "Arvados container record\n\nA container represents compute work that has been or should be dispatched,\nalong with its results. A container can satisfy one or more container requests.", "type": "object", "uuidPrefix": "dz642", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This container's Arvados UUID, like `zzzzz-dz642-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this container.", "type": "string" }, "created_at": { "description": "The time this container was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_at": { "description": "The time this container was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this container.", "type": "string" }, "state": { "description": "A string representing the container's current execution status. 
Possible\nvalues are:\n\n * `\"Queued\"` --- This container has not been dispatched yet.\n * `\"Locked\"` --- A dispatcher has claimed this container in preparation to run it.\n * `\"Running\"` --- A dispatcher is running this container.\n * `\"Cancelled\"` --- Container execution has been cancelled by user request.\n * `\"Complete\"` --- A dispatcher ran this container to completion and recorded the results.\n\n", "type": "string" }, "started_at": { "description": "The time this container started running. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "finished_at": { "description": "The time this container finished running. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "log": { "description": "The portable data hash of the Arvados collection that contains this\ncontainer's logs.", "type": "string" }, "environment": { "description": "A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.", "type": "Hash" }, "cwd": { "description": "A string that defines the working directory that the dispatcher should\nuse when it executes the command inside this container.", "type": "string" }, "command": { "description": "An array of strings that defines the command that the dispatcher should\nexecute inside this container.", "type": "Array" }, "output_path": { "description": "A string that defines the file or directory path where the command\nwrites output that should be saved from this container.", "type": "string" }, "mounts": { "description": "A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n", "type": "Hash" }, "runtime_constraints": { "description": "A hash that identifies compute resources this container requires to run\nsuccessfully. See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n", "type": "Hash" }, "output": { "description": "The portable data hash of the Arvados collection that contains this\ncontainer's output file(s).", "type": "string" }, "container_image": { "description": "The portable data hash of the Arvados collection that contains the image\nto use for this container.", "type": "string" }, "progress": { "description": "A float between 0.0 and 1.0 (inclusive) that represents the container's\nexecution progress. This attribute is not implemented yet.", "type": "float" }, "priority": { "description": "An integer between 0 and 1000 (inclusive) that represents this container's\nscheduling priority. 0 represents a request to be cancelled. Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n", "type": "integer" }, "exit_code": { "description": "An integer that records the Unix exit code of the `command` from a\nfinished container.", "type": "integer" }, "auth_uuid": { "description": "The UUID of the Arvados API client authorization token that a dispatcher\nshould use to set up this container. 
This token is automatically created by\nArvados and this attribute automatically assigned unless a container is\ncreated with `runtime_token`.", "type": "string" }, "locked_by_uuid": { "description": "The UUID of the Arvados API client authorization token that successfully\nlocked this container in preparation to execute it.", "type": "string" }, "scheduling_parameters": { "description": "A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n", "type": "Hash" }, "runtime_status": { "description": "A hash with status updates from a running container.\nRefer to the [runtime status reference][] for details.\n\n[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status\n\n", "type": "Hash" }, "runtime_user_uuid": { "description": "The UUID of the Arvados user associated with the API client authorization\ntoken used to run this container.", "type": "text" }, "runtime_auth_scopes": { "description": "The `scopes` from the API client authorization token used to run this container.", "type": "Array" }, "lock_count": { "description": "The number of times this container has been locked by a dispatcher. This\nmay be greater than 1 if a dispatcher locks a container but then execution is\ninterrupted for any reason.", "type": "integer" }, "gateway_address": { "description": "A string with the address of the Arvados gateway server, in `HOST:PORT`\nformat. This is for internal use only.", "type": "string" }, "interactive_session_started": { "description": "This flag is set true if any user starts an interactive shell inside the\nrunning container.", "type": "boolean" }, "output_storage_classes": { "description": "An array of strings identifying the storage class(es) that should be set\non the output collection of this container. Storage classes are configured by\nthe cluster administrator.", "type": "Array" }, "output_properties": { "description": "A hash of arbitrary metadata to set on the output collection of this container.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "cost": { "description": "A float with the estimated cost of the cloud instance used to run this\ncontainer. The value is `0` if cost estimation is not available on this cluster.", "type": "float" }, "subrequests_cost": { "description": "A float with the estimated cost of all cloud instances used to run this\ncontainer and all its subrequests. The value is `0` if cost estimation is not\navailable on this cluster.", "type": "float" }, "output_glob": { "description": "An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n", "type": "Array" }, "service": { "description": "A boolean flag. 
If set, it informs the system that this is a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.", "type": "boolean" }, "published_ports": { "description": "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados\nwill proxy HTTP requests to these ports. Values are hashes with the following keys:\n\n * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n * `\"label\"` --- A human readable label describing the service, for display in Workbench.\n * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.", "type": "jsonb" } } }, "ContainerRequestList": { "id": "ContainerRequestList", "description": "A list of ContainerRequest objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#containerRequestList.", "default": "arvados#containerRequestList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching ContainerRequest objects.", "items": { "$ref": "ContainerRequest" } } } }, "ContainerRequest": { "id": "ContainerRequest", "description": "Arvados container request\n\nA container request represents a user's request that Arvados do some compute\nwork, along with full details about what work should be done. Arvados will\nattempt to fulfill the request by mapping it to a matching container record,\nrunning the work on demand if necessary.", "type": "object", "uuidPrefix": "xvhdp", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This container request's Arvados UUID, like `zzzzz-xvhdp-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this container request.", "type": "string" }, "created_at": { "description": "The time this container request was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_at": { "description": "The time this container request was last updated. 
The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this container request.", "type": "string" }, "name": { "description": "The name of this container request assigned by a user.", "type": "string" }, "description": { "description": "A longer HTML description of this container request assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.", "type": "text" }, "properties": { "description": "A hash of arbitrary metadata for this container request.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "state": { "description": "A string indicating where this container request is in its lifecycle.\nPossible values are:\n\n * `\"Uncommitted\"` --- The container request has not been finalized and can still be edited.\n * `\"Committed\"` --- The container request is ready to be fulfilled.\n * `\"Final\"` --- The container request has been fulfilled or cancelled.\n\n", "type": "string" }, "requesting_container_uuid": { "description": "The UUID of the container that created this container request, if any.", "type": "string" }, "container_uuid": { "description": "The UUID of the container that fulfills this container request, if any.", "type": "string" }, "container_count_max": { "description": "An integer that defines the maximum number of times Arvados should attempt\nto dispatch a container to fulfill this container request.", "type": "integer" }, "mounts": { "description": "A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n", "type": "Hash" }, "runtime_constraints": { "description": "A hash that identifies compute resources this container requires to run\nsuccessfully. See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n", "type": "Hash" }, "container_image": { "description": "The portable data hash of the Arvados collection that contains the image\nto use for this container.", "type": "string" }, "environment": { "description": "A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.", "type": "Hash" }, "cwd": { "description": "A string that defines the working directory that the dispatcher should\nuse when it executes the command inside this container.", "type": "string" }, "command": { "description": "An array of strings that defines the command that the dispatcher should\nexecute inside this container.", "type": "Array" }, "output_path": { "description": "A string that defines the file or directory path where the command\nwrites output that should be saved from this container.", "type": "string" }, "priority": { "description": "An integer between 0 and 1000 (inclusive) that represents this container request's\nscheduling priority. 0 represents a request to be cancelled. 
Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n", "type": "integer" }, "expires_at": { "description": "The time after which this container request will no longer be fulfilled. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "filters": { "description": "Filters that limit which existing containers are eligible to satisfy this\ncontainer request. This attribute is not implemented yet and should be null.", "type": "text" }, "container_count": { "description": "An integer that records how many times Arvados has attempted to dispatch\na container to fulfill this container request.", "type": "integer" }, "use_existing": { "description": "A boolean flag. If set, Arvados may choose to satisfy this container\nrequest with an eligible container that already exists. Otherwise, Arvados will\nsatisfy this container request with a newer container, which will usually result\nin the container running again.", "type": "boolean" }, "scheduling_parameters": { "description": "A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n", "type": "Hash" }, "output_uuid": { "description": "The UUID of the Arvados collection that contains output for all the\ncontainer(s) that were dispatched to fulfill this container request.", "type": "string" }, "log_uuid": { "description": "The UUID of the Arvados collection that contains logs for all the\ncontainer(s) that were dispatched to fulfill this container request.", "type": "string" }, "output_name": { "description": "The name to set on the output collection of this container request.", "type": "string" }, "output_ttl": { "description": "An integer in seconds. If greater than zero, when an output collection is\ncreated for this container request, its `expires_at` attribute will be set this\nfar in the future.", "type": "integer" }, "output_storage_classes": { "description": "An array of strings identifying the storage class(es) that should be set\non the output collection of this container request. Storage classes are configured by\nthe cluster administrator.", "type": "Array" }, "output_properties": { "description": "A hash of arbitrary metadata to set on the output collection of this container request.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "cumulative_cost": { "description": "A float with the estimated cost of all cloud instances used to run\ncontainer(s) to fulfill this container request and their subrequests.\nThe value is `0` if cost estimation is not available on this cluster.", "type": "float" }, "output_glob": { "description": "An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n", "type": "Array" }, "service": { "description": "A boolean flag. 
If set, it informs the system that this request is for a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.", "type": "boolean" }, "published_ports": { "description": "A hash where keys are numeric TCP ports on the container which expose HTTP services. Arvados\nwill proxy HTTP requests to these ports. Values are hashes with the following keys:\n\n * `\"access\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n * `\"label\"` --- A human readable label describing the service, for display in Workbench.\n * `\"initial_path\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.", "type": "Hash" } } }, "CredentialList": { "id": "CredentialList", "description": "A list of Credential objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#credentialList.", "default": "arvados#credentialList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching Credential objects.", "items": { "$ref": "Credential" } } } }, "Credential": { "id": "Credential", "description": "Arvados credential.", "type": "object", "uuidPrefix": "oss07", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This credential's Arvados UUID, like `zzzzz-oss07-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this credential.", "type": "string" }, "created_at": { "description": "The time this credential was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_at": { "description": "The time this credential was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this credential.", "type": "string" }, "name": { "description": "The name of this credential assigned by a user.", "type": "string" }, "description": { "description": "A longer HTML description of this credential assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.", "type": "text" }, "credential_class": { "description": "The type of credential being stored.", "type": "string" }, "scopes": { "description": "The resources the credential applies to or should be used with.", "type": "Array" }, "external_id": { "description": "The non-secret external identifier associated with a credential, e.g. a username.", "type": "string" }, "expires_at": { "description": "Date after which the credential_secret field is no longer valid. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" } } }, "GroupList": { "id": "GroupList", "description": "A list of Group objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#groupList.", "default": "arvados#groupList" }, "etag": { "type": "string", "description": "List cache version." 
}, "items": { "type": "array", "description": "An array of matching Group objects.", "items": { "$ref": "Group" } } } }, "Group": { "id": "Group", "description": "Arvados group\n\nGroups provide a way to organize users or data together, depending on their\n`group_class`.", "type": "object", "uuidPrefix": "j7d0g", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This group's Arvados UUID, like `zzzzz-j7d0g-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this group.", "type": "string" }, "created_at": { "description": "The time this group was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this group.", "type": "string" }, "modified_at": { "description": "The time this group was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "name": { "description": "The name of this group assigned by a user.", "type": "string" }, "description": { "description": "A longer HTML description of this group assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.", "type": "string" }, "group_class": { "description": "A string representing which type of group this is. One of:\n\n * `\"filter\"` --- A virtual project whose contents are selected dynamically by filters.\n * `\"project\"` --- An Arvados project that can contain collections,\n container records, workflows, and subprojects.\n * `\"role\"` --- A group of users that can be granted permissions in Arvados.\n\n", "type": "string" }, "trash_at": { "description": "The time this group will be trashed. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "is_trashed": { "description": "A boolean flag to indicate whether or not this group is trashed.", "type": "boolean" }, "delete_at": { "description": "The time this group will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "properties": { "description": "A hash of arbitrary metadata for this group.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "frozen_by_uuid": { "description": "The UUID of the user that has frozen this group, if any. Frozen projects\ncannot have their contents or metadata changed, even by admins.", "type": "string" } } }, "KeepServiceList": { "id": "KeepServiceList", "description": "A list of KeepService objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#keepServiceList.", "default": "arvados#keepServiceList" }, "etag": { "type": "string", "description": "List cache version." 
}, "items": { "type": "array", "description": "An array of matching KeepService objects.", "items": { "$ref": "KeepService" } } } }, "KeepService": { "id": "KeepService", "description": "Arvados Keep service\n\nThis resource stores information about a single Keep service in this Arvados\ncluster that clients can contact to retrieve and store data.", "type": "object", "uuidPrefix": "bi6l4", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This Keep service's Arvados UUID, like `zzzzz-bi6l4-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this Keep service.", "type": "string" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this Keep service.", "type": "string" }, "modified_at": { "description": "The time this Keep service was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "service_host": { "description": "The DNS hostname of this Keep service.", "type": "string" }, "service_port": { "description": "The TCP port where this Keep service listens.", "type": "integer" }, "service_ssl_flag": { "description": "A boolean flag that indicates whether or not this Keep service uses TLS/SSL.", "type": "boolean" }, "service_type": { "description": "A string that describes which type of Keep service this is. One of:\n\n * `\"disk\"` --- A service that stores blocks on a local filesystem.\n * `\"blob\"` --- A service that stores blocks in a cloud object store.\n * `\"proxy\"` --- A keepproxy service.\n\n", "type": "string" }, "created_at": { "description": "The time this Keep service was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "read_only": { "description": "A boolean flag. If set, this Keep service does not accept requests to write data\nblocks; it only serves blocks it already has.", "type": "boolean" } } }, "LinkList": { "id": "LinkList", "description": "A list of Link objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#linkList.", "default": "arvados#linkList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching Link objects.", "items": { "$ref": "Link" } } } }, "Link": { "id": "Link", "description": "Arvados object link\n\nA link provides a way to define relationships between Arvados objects,\ndepending on their `link_class`.", "type": "object", "uuidPrefix": "o0j2j", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This link's Arvados UUID, like `zzzzz-o0j2j-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this link.", "type": "string" }, "created_at": { "description": "The time this link was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this link.", "type": "string" }, "modified_at": { "description": "The time this link was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "tail_uuid": { "description": "The UUID of the Arvados object that is the target of this relationship.", "type": "string" }, "link_class": { "description": "A string that defines which kind of link this is. 
One of:\n\n * `\"permission\"` --- This link grants a permission to the user or group\n referenced by `tail_uuid` to the object referenced by `head_uuid`. The\n access level is set by `name`.\n * `\"star\"` --- This link represents a \"favorite.\" The user referenced\n by `tail_uuid` wants quick access to the object referenced by `head_uuid`.\n * `\"tag\"` --- This link represents an unstructured metadata tag. The object\n referenced by `head_uuid` has the tag defined by `name`.\n\n", "type": "string" }, "name": { "description": "The primary value of this link. For `\"permission\"` links, this is one of\n`\"can_read\"`, `\"can_write\"`, or `\"can_manage\"`.", "type": "string" }, "head_uuid": { "description": "The UUID of the Arvados object that is the target of this\nrelationship.", "type": "string" }, "properties": { "description": "A hash of arbitrary metadata for this link.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" } } }, "LogList": { "id": "LogList", "description": "A list of Log objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#logList.", "default": "arvados#logList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching Log objects.", "items": { "$ref": "Log" } } } }, "Log": { "id": "Log", "description": "Arvados log record\n\nThis resource represents a single log record about an event in this Arvados\ncluster. Some individual Arvados services create log records. Users can also\ncreate custom logs.", "type": "object", "uuidPrefix": "57u5n", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "id": { "description": "The serial number of this log. You can use this in filters to query logs\nthat were created before/after another.", "type": "integer" }, "uuid": { "type": "string", "description": "This log's Arvados UUID, like `zzzzz-57u5n-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this log.", "type": "string" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this log.", "type": "string" }, "object_uuid": { "description": "The UUID of the Arvados object that this log pertains to, such as a user\nor container.", "type": "string" }, "event_at": { "description": "The time the event described by this log occurred. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "event_type": { "description": "An arbitrary short string that classifies what type of log this is.", "type": "string" }, "summary": { "description": "A text string that describes the logged event. This is the primary\nattribute for simple logs.", "type": "text" }, "properties": { "description": "A hash of arbitrary metadata for this log.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "created_at": { "description": "The time this log was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_at": { "description": "The time this log was last updated. 
The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "object_owner_uuid": { "description": "The `owner_uuid` of the object referenced by `object_uuid` at the time\nthis log was created.", "type": "string" } } }, "UserList": { "id": "UserList", "description": "A list of User objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#userList.", "default": "arvados#userList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching User objects.", "items": { "$ref": "User" } } } }, "User": { "id": "User", "description": "Arvados user\n\nA user represents a single individual or role who may be authorized to access\nthis Arvados cluster.", "type": "object", "uuidPrefix": "tpzed", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This user's Arvados UUID, like `zzzzz-tpzed-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this user.", "type": "string" }, "created_at": { "description": "The time this user was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this user.", "type": "string" }, "modified_at": { "description": "The time this user was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "email": { "description": "This user's email address.", "type": "string" }, "first_name": { "description": "This user's first name.", "type": "string" }, "last_name": { "description": "This user's last name.", "type": "string" }, "identity_url": { "description": "A URL that represents this user with the cluster's identity provider.", "type": "string" }, "is_admin": { "description": "A boolean flag. If set, this user is an administrator of the Arvados\ncluster, and automatically passes most permissions checks.", "type": "boolean" }, "prefs": { "description": "A hash that stores cluster-wide user preferences.", "type": "Hash" }, "is_active": { "description": "A boolean flag. If unset, this user is not permitted to make any Arvados\nAPI requests.", "type": "boolean" }, "username": { "description": "This user's Unix username on virtual machines.", "type": "string" } } }, "UserAgreementList": { "id": "UserAgreementList", "description": "A list of UserAgreement objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#userAgreementList.", "default": "arvados#userAgreementList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching UserAgreement objects.", "items": { "$ref": "UserAgreement" } } } }, "UserAgreement": { "id": "UserAgreement", "description": "Arvados user agreement\n\nA user agreement is a collection with terms that users must agree to before\nthey can use this Arvados cluster.", "type": "object", "uuidPrefix": "gv0sa", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "owner_uuid": { "description": "The UUID of the user or group that owns this user agreement.", "type": "string" }, "created_at": { "description": "The time this user agreement was created. 
The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this user agreement.", "type": "string" }, "modified_at": { "description": "The time this user agreement was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "portable_data_hash": { "description": "The portable data hash of this user agreement. This string provides a unique\nand stable reference to these contents.", "type": "string" }, "replication_desired": { "description": "The number of copies that should be made for data in this user agreement.", "type": "integer" }, "replication_confirmed_at": { "description": "The last time the cluster confirmed that it met `replication_confirmed`\nfor this user agreement. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "replication_confirmed": { "description": "The number of copies of data in this user agreement that the cluster has confirmed\nexist in storage.", "type": "integer" }, "uuid": { "type": "string", "description": "This user agreement's Arvados UUID, like `zzzzz-gv0sa-12345abcde67890`." }, "manifest_text": { "description": "The manifest text that describes how files are constructed from data blocks\nin this user agreement. Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n", "type": "text" }, "name": { "description": "The name of this user agreement assigned by a user.", "type": "string" }, "description": { "description": "A longer HTML description of this user agreement assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.", "type": "string" }, "properties": { "description": "A hash of arbitrary metadata for this user agreement.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n", "type": "Hash" }, "delete_at": { "description": "The time this user agreement will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "trash_at": { "description": "The time this user agreement will be trashed. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "is_trashed": { "description": "A boolean flag to indicate whether or not this user agreement is trashed.", "type": "boolean" }, "storage_classes_desired": { "description": "An array of strings identifying the storage class(es) that should be used\nfor data in this user agreement. Storage classes are configured by the cluster administrator.", "type": "Array" }, "storage_classes_confirmed": { "description": "An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this user agreement's data.", "type": "Array" }, "storage_classes_confirmed_at": { "description": "The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`. 
The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "current_version_uuid": { "description": "The UUID of the current version of this user agreement.", "type": "string" }, "version": { "description": "An integer that counts which version of a user agreement this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n", "type": "integer" }, "preserve_version": { "description": "A boolean flag to indicate whether this specific version of this user agreement\nshould be persisted in cluster storage.", "type": "boolean" }, "file_count": { "description": "The number of files represented in this user agreement's `manifest_text`.\nThis attribute is read-only.", "type": "integer" }, "file_size_total": { "description": "The total size in bytes of files represented in this user agreement's `manifest_text`.\nThis attribute is read-only.", "type": "integer" } } }, "VirtualMachineList": { "id": "VirtualMachineList", "description": "A list of VirtualMachine objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#virtualMachineList.", "default": "arvados#virtualMachineList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching VirtualMachine objects.", "items": { "$ref": "VirtualMachine" } } } }, "VirtualMachine": { "id": "VirtualMachine", "description": "Arvados virtual machine (\"shell node\")\n\nThis resource stores information about a virtual machine or \"shell node\"\nhosted on this Arvados cluster where users can log in and use preconfigured\nArvados client tools.", "type": "object", "uuidPrefix": "2x53u", "properties": { "etag": { "type": "string", "description": "Object cache version." }, "uuid": { "type": "string", "description": "This virtual machine's Arvados UUID, like `zzzzz-2x53u-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this virtual machine.", "type": "string" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this virtual machine.", "type": "string" }, "modified_at": { "description": "The time this virtual machine was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "hostname": { "description": "The DNS hostname where users should access this virtual machine.", "type": "string" }, "created_at": { "description": "The time this virtual machine was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" } } }, "WorkflowList": { "id": "WorkflowList", "description": "A list of Workflow objects.", "type": "object", "properties": { "kind": { "type": "string", "description": "Object type. Always arvados#workflowList.", "default": "arvados#workflowList" }, "etag": { "type": "string", "description": "List cache version." }, "items": { "type": "array", "description": "An array of matching Workflow objects.", "items": { "$ref": "Workflow" } } } }, "Workflow": { "id": "Workflow", "description": "Arvados workflow\n\nA workflow contains workflow definition source code that Arvados can execute\nalong with associated metadata for users.", "type": "object", "uuidPrefix": "7fd4e", "properties": { "etag": { "type": "string", "description": "Object cache version." 
}, "uuid": { "type": "string", "description": "This workflow's Arvados UUID, like `zzzzz-7fd4e-12345abcde67890`." }, "owner_uuid": { "description": "The UUID of the user or group that owns this workflow.", "type": "string" }, "created_at": { "description": "The time this workflow was created. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_at": { "description": "The time this workflow was last updated. The string encodes a UTC date and time in ISO 8601 format.", "type": "datetime" }, "modified_by_user_uuid": { "description": "The UUID of the user that last updated this workflow.", "type": "string" }, "name": { "description": "The name of this workflow assigned by a user.", "type": "string" }, "description": { "description": "A longer HTML description of this workflow assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.", "type": "text" }, "definition": { "description": "A string with the CWL source of this workflow.", "type": "text" }, "collection_uuid": { "description": "The collection this workflow is linked to, containing the definition of the workflow.", "type": "string" } } } }, "servicePath": "arvados/v1/", "title": "Arvados API", "version": "v1" } ================================================ FILE: contrib/R-sdk/generateApi.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 library(jsonlite) getAPIDocument <- function(loc) { if (length(grep("^[a-z]+://", loc)) > 0) { library(httr) serverResponse <- httr::RETRY("GET", url = loc) httr::content(serverResponse, as = "parsed", type = "application/json") } else { jsonlite::read_json(loc) } } #' generateAPI #' #' Autogenerate classes to interact with Arvados from the Arvados discovery document. #' #' @export generateAPI <- function(discoveryDocument) { methodResources <- discoveryDocument$resources resourceNames <- names(methodResources) classDoc <- genAPIClassDoc(methodResources, resourceNames) arvadosAPIHeader <- genAPIClassHeader() arvadosClassMethods <- genClassContent(methodResources, resourceNames) arvadosProjectMethods <- genProjectMethods(methodResources) arvadosAPIFooter <- genAPIClassFooter() arvadosClass <- c(classDoc, arvadosAPIHeader, arvadosClassMethods, arvadosProjectMethods, arvadosAPIFooter) fileConn <- file("./R/Arvados.R", "w") writeLines(c( "# Copyright (C) The Arvados Authors. All rights reserved.", "#", "# SPDX-License-Identifier: Apache-2.0", "", "#' Arvados", "#'", "#' This class implements a full REST client to the Arvados API.", "#'"), fileConn) writeLines(unlist(arvadosClass), fileConn) close(fileConn) NULL } genAPIClassHeader <- function() { c("#' @export", "Arvados <- R6::R6Class(", "", "\t\"Arvados\",", "", "\tpublic = list(", "", "\t\t#' @description Create a new Arvados API client.", "\t\t#' @param authToken Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.", "\t\t#' @param hostName Host name. 
If not specified, the ARVADOS_API_HOST environment variable will be used.", "\t\t#' @param numRetries Number of times to retry failed service requests.", "\t\t#' @return A new `Arvados` object.", "\t\tinitialize = function(authToken = NULL, hostName = NULL, numRetries = 0)", "\t\t{", "\t\t\tif(!is.null(hostName))", "\t\t\t\tSys.setenv(ARVADOS_API_HOST = hostName)", "", "\t\t\tif(!is.null(authToken))", "\t\t\t\tSys.setenv(ARVADOS_API_TOKEN = authToken)", "", "\t\t\thostName <- Sys.getenv(\"ARVADOS_API_HOST\")", "\t\t\ttoken <- Sys.getenv(\"ARVADOS_API_TOKEN\")", "", "\t\t\tif(hostName == \"\" | token == \"\")", "\t\t\t\tstop(paste(\"Please provide host name and authentication token\",", "\t\t\t\t\t\t \"or set ARVADOS_API_HOST and ARVADOS_API_TOKEN\",", "\t\t\t\t\t\t \"environment variables.\"))", "", "\t\t\tprivate$token <- token", "\t\t\tprivate$host <- paste0(\"https://\", hostName, \"/arvados/v1/\")", "\t\t\tprivate$numRetries <- numRetries", "\t\t\tprivate$REST <- RESTService$new(token, hostName,", "\t\t\t HttpRequest$new(), HttpParser$new(),", "\t\t\t numRetries)", "", "\t\t},\n") } genProjectMethods <- function(methodResources) { toCallArg <- function(arg) { callArg <- strsplit(arg, " *=")[[1]][1] paste(callArg, callArg, sep=" = ") } toCallArgs <- function(argList) { paste0(Map(toCallArg, argList), collapse=", ") } groupsMethods <- methodResources[["groups"]][["methods"]] getArgs <- getMethodArguments(groupsMethods[["get"]]) createArgs <- getMethodArguments(groupsMethods[["create"]]) updateArgs <- getMethodArguments(groupsMethods[["update"]]) listArgs <- getMethodArguments(groupsMethods[["list"]]) deleteArgs <- getMethodArguments(groupsMethods[["delete"]]) c("\t\t#' @description An alias for `groups_get`.", getMethodParams(groupsMethods[["get"]]), "\t\t#' @return A Group object.", getMethodSignature("project_get", getArgs), "\t\t{", paste("\t\t\tself$groups_get(", toCallArgs(getArgs), ")", sep=""), "\t\t},", "", "\t\t#' @description A wrapper for `groups_create` that sets `group_class=\"project\"`.", getMethodParams(groupsMethods[["create"]]), "\t\t#' @return A Group object.", getMethodSignature("project_create", createArgs), "\t\t{", "\t\t\tgroup <- c(\"group_class\" = \"project\", group)", paste("\t\t\tself$groups_create(", toCallArgs(createArgs), ")", sep=""), "\t\t},", "", "\t\t#' @description A wrapper for `groups_update` that sets `group_class=\"project\"`.", getMethodParams(groupsMethods[["update"]]), "\t\t#' @return A Group object.", getMethodSignature("project_update", updateArgs), "\t\t{", "\t\t\tgroup <- c(\"group_class\" = \"project\", group)", paste("\t\t\tself$groups_update(", toCallArgs(updateArgs), ")", sep=""), "\t\t},", "", "\t\t#' @description A wrapper for `groups_list` that adds a filter for `group_class=\"project\"`.", getMethodParams(groupsMethods[["list"]]), "\t\t#' @return A GroupList object.", getMethodSignature("project_list", listArgs), "\t\t{", "\t\t\tfilters[[length(filters) + 1]] <- list(\"group_class\", \"=\", \"project\")", paste("\t\t\tself$groups_list(", toCallArgs(listArgs), ")", sep=""), "\t\t},", "", "\t\t#' @description An alias for `groups_delete`.", getMethodParams(groupsMethods[["delete"]]), "\t\t#' @return A Group object.", getMethodSignature("project_delete", deleteArgs), "\t\t{", paste("\t\t\tself$groups_delete(", toCallArgs(deleteArgs), ")", sep=""), "\t\t},", "", "\t\t#' @description Test whether or not a project exists.", getMethodParams(groupsMethods[["get"]]), getMethodSignature("project_exist", getArgs), "\t\t{", 
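# The generated `project_exist` prints TRUE/FALSE via cat() rather than
# returning a logical value; callers that need a usable result should wrap
# `project_get` in try() themselves.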
paste("\t\t\tresult <- try(self$groups_get(", toCallArgs(getArgs), "))", sep=""), "\t\t\tif(inherits(result, \"try-error\"))", "\t\t\t\texists <- FALSE", "\t\t\telse", "\t\t\t\texists <- result['group_class'] == \"project\"", "\t\t\tcat(format(exists))", "\t\t},", "", "\t\t#' @description A convenience wrapper for `project_update` to set project metadata properties.", "\t\t#' @param listProperties List of new properties.", "\t\t#' @param uuid UUID of the project to update.", "\t\t#' @return A Group object.", "\t\tproject_properties_set = function(listProperties, uuid)", "\t\t{", "\t\t\tself$project_update(list(\"properties\" = listProperties), uuid)", "\t\t},", "", "\t\t#' @description Get a project and update it with additional properties.", "\t\t#' @param properties List of new properties.", "\t\t#' @param uuid UUID of the project to update.", "\t\t#' @return A Group object.", "\t\tproject_properties_append = function(properties, uuid)", "\t\t{", "\t\t\tproj <- private$get_project_by_list(uuid, list('uuid', 'properties'))", "\t\t\tnewListOfProperties <- c(proj$properties, properties)", "\t\t\tuniqueProperties <- unique(unlist(newListOfProperties))", "\t\t\tnewProperties <- suppressWarnings(newListOfProperties[which(newListOfProperties == uniqueProperties)])", "\t\t\tself$project_properties_set(newProperties, proj$uuid)", "\t\t},", "", "\t\t#' @description Get properties of a project.", "\t\t#' @param uuid The UUID of the project to query.", "\t\tproject_properties_get = function(uuid)", "\t\t{", "\t\t\tprivate$get_project_by_list(uuid, list('uuid', 'properties'))$properties", "\t\t},", "", "\t\t#' @description Delete one property from a project by name.", "\t\t#' @param oneProp Name of the property to delete.", "\t\t#' @param uuid The UUID of the project to update.", "\t\t#' @return A Group object.", "\t\tproject_properties_delete = function(oneProp, uuid)", "\t\t{", "\t\t\tprojProp <- self$project_properties_get(uuid)", "\t\t\tprojProp[[oneProp]] <- NULL", "\t\t\tself$project_properties_set(projProp, uuid)", "\t\t},", "", "\t\t#' @description Convenience wrapper of `links_list` to create a permission link.", "\t\t#' @param type The type of permission: one of `'can_read'`, `'can_write'`, or `'can_manage'`.", "\t\t#' @param uuid The UUID of the object to grant permission to.", "\t\t#' @param user The UUID of the user or group who receives this permission.", "\t\t#' @return A Link object if one was updated, else NULL.", "\t\tproject_permission_give = function(type, uuid, user)", "\t\t{", "\t\t\tlink <- list(", "\t\t\t\t'link_class' = 'permission',", "\t\t\t\t'name' = type,", "\t\t\t\t'head_uuid' = uuid,", "\t\t\t\t'tail_uuid' = user)", "\t\t\tself$links_create(link)", "\t\t},", "", "\t\t#' @description Find an existing permission link and update its level.", "\t\t#' @param typeOld The type of permission to find: one of `'can_read'`, `'can_write'`, or `'can_manage'`.", "\t\t#' @param typeNew The type of permission to set: one of `'can_read'`, `'can_write'`, or `'can_manage'`.", "\t\t#' @param uuid The UUID of the object to grant permission to.", "\t\t#' @param user The UUID of the user or group who receives this permission.", "\t\t#' @return A Link object if one was updated, else NULL.", "\t\tproject_permission_update = function(typeOld, typeNew, uuid, user)", "\t\t{", "\t\t\tlinks <- self$links_list(filters = list(", "\t\t\t\t\tlist('link_class', '=', 'permission'),", "\t\t\t\t\tlist('name', '=', typeOld),", "\t\t\t\t\tlist('head_uuid', '=', uuid),", "\t\t\t\t\tlist('tail_uuid', '=', 
user)", "\t\t\t\t), select=list('uuid'), count = 'none')$items", "\t\t\tif (length(links) == 0) {", "\t\t\t\tcat(format('No permission granted'))", "\t\t\t} else {", "\t\t\t\tself$links_update(list('name' = typeNew), links[[1]]$uuid)", "\t\t\t}", "\t\t},", "", "\t\t#' @description Delete an existing permission link.", "\t\t#' @param type The type of permission to delete: one of `'can_read'`, `'can_write'`, or `'can_manage'`.", "\t\t#' @param uuid The UUID of the object to grant permission to.", "\t\t#' @param user The UUID of the user or group who receives this permission.", "\t\t#' @return A Link object if one was deleted, else NULL.", "\t\tproject_permission_delete = function(type, uuid, user)", "\t\t{", "\t\t\tlinks <- self$links_list(filters = list(", "\t\t\t\t\tlist('link_class', '=', 'permission'),", "\t\t\t\t\tlist('name', '=', type),", "\t\t\t\t\tlist('head_uuid', '=', uuid),", "\t\t\t\t\tlist('tail_uuid', '=', user)", "\t\t\t\t), select=list('uuid'), count = 'none')$items", "\t\t\tif (length(links) == 0) {", "\t\t\t\tcat(format('No permission granted'))", "\t\t\t} else {", "\t\t\t\tself$links_delete(links[[1]]$uuid)", "\t\t\t}", "\t\t},", "", "\t\t#' @description Check for an existing permission link.", "\t\t#' @param type The type of permission to check: one of `'can_read'`, `'can_write'`, `'can_manage'`, or `NULL` (the default).", "\t\t#' @param uuid The UUID of the object to check permission on.", "\t\t#' @param user The UUID of the user or group to check permission for.", "\t\t#' @return If `type` is `NULL`, the list of matching permission links.", "\t\t#' Otherwise, prints and invisibly returns the level of the found permission link.", "\t\tproject_permission_check = function(uuid, user, type = NULL)", "\t\t{", "\t\t\tfilters <- list(", "\t\t\t\tlist('link_class', '=', 'permission'),", "\t\t\t\tlist('head_uuid', '=', uuid),", "\t\t\t\tlist('tail_uuid', '=', user))", "\t\t\tif (!is.null(type)) {", "\t\t\t\tfilters <- c(filters, list(list('name', '=', type)))", "\t\t\t}", "\t\t\tlinks <- self$links_list(filters = filters, count='none')$items", "\t\t\tif (is.null(type)) {", "\t\t\t\tlinks", "\t\t\t} else {", "\t\t\t\tprint(links[[1]]$name)", "\t\t\t}", "\t\t},", "") } genClassContent <- function(methodResources, resourceNames) { arvadosMethods <- Map(function(resource, resourceName) { methodNames <- names(resource$methods) functions <- Map(function(methodMetaData, methodName) { #NOTE: Index, show and destroy are aliases for the preferred names # "list", "get" and "delete". Until they are removed from discovery # document we will filter them here. 
if(methodName %in% c("index", "show", "destroy")) return(NULL) methodName <- paste0(resourceName, "_", methodName) unlist(c( getMethodDoc(methodName, methodMetaData), createMethod(methodName, methodMetaData) )) }, resource$methods, methodNames) unlist(unname(functions)) }, methodResources, resourceNames) arvadosMethods } genAPIClassFooter <- function() { c("\t\t#' @description Return the host name of this client's Arvados API server.", "\t\t#' @return Hostname string.", "\t\tgetHostName = function() private$host,", "", "\t\t#' @description Return the Arvados API token used by this client.", "\t\t#' @return API token string.", "\t\tgetToken = function() private$token,", "", "\t\t#' @description Set the RESTService object used by this client.", "\t\tsetRESTService = function(newREST) private$REST <- newREST,", "", "\t\t#' @description Return the RESTService object used by this client.", "\t\t#' @return RESTService object.", "\t\tgetRESTService = function() private$REST", "\t),", "", "\tprivate = list(", "\t\ttoken = NULL,", "\t\thost = NULL,", "\t\tREST = NULL,", "\t\tnumRetries = NULL,", "\t\tget_project_by_list = function(uuid, select = NULL)", "\t\t{", "\t\t\tself$groups_list(", "\t\t\t\tfilters = list(list('uuid', '=', uuid), list('group_class', '=', 'project')),", "\t\t\t\tselect = select,", "\t\t\t\tcount = 'none'", "\t\t\t)$items[[1]]", "\t\t}", "\t),", "", "\tcloneable = FALSE", ")") } createMethod <- function(name, methodMetaData) { args <- getMethodArguments(methodMetaData) signature <- getMethodSignature(name, args) body <- getMethodBody(methodMetaData) c(signature, "\t\t{", body, "\t\t},\n") } normalizeParamName <- function(name) { # Downcase the first letter name <- sub("^(\\w)", "\\L\\1", name, perl=TRUE) # Convert snake_case to camelCase gsub("_(uuid\\b|id\\b|\\w)", "\\U\\1", name, perl=TRUE) } getMethodArguments <- function(methodMetaData) { request <- methodMetaData$request requestArgs <- NULL if(!is.null(request)) { resourceName <- normalizeParamName(request$properties[[1]][[1]]) if(request$required) requestArgs <- resourceName else requestArgs <- paste(resourceName, "=", "NULL") } argNames <- names(methodMetaData$parameters) args <- sapply(argNames, function(argName) { arg <- methodMetaData$parameters[[argName]] argName <- normalizeParamName(argName) if(!arg$required) { return(paste(argName, "=", "NULL")) } argName }) c(requestArgs, args) } getMethodSignature <- function(methodName, args) { collapsedArgs <- paste0(args, collapse = ", ") lineLengthLimit <- 40 if(nchar(collapsedArgs) > lineLengthLimit) { return(paste0("\t\t", formatArgs(paste(methodName, "= function("), "\t", args, ")", lineLengthLimit))) } else { return(paste0("\t\t", methodName, " = function(", collapsedArgs, ")")) } } getMethodBody <- function(methodMetaData) { url <- getRequestURL(methodMetaData) headers <- getRequestHeaders() requestQueryList <- getRequestQueryList(methodMetaData) requestBody <- getRequestBody(methodMetaData) request <- getRequest(methodMetaData) response <- getResponse(methodMetaData) errorCheck <- getErrorCheckingCode(methodMetaData) returnStatement <- getReturnObject() body <- c(url, headers, requestQueryList, "", requestBody, "", request, response, "", errorCheck, "", returnStatement) paste0("\t\t\t", body) } getRequestURL <- function(methodMetaData) { endPoint <- methodMetaData$path endPoint <- stringr::str_replace_all(endPoint, "\\{", "${") url <- c(paste0("endPoint <- stringr::str_interp(\"", endPoint, "\")"), paste0("url <- paste0(private$host, endPoint)")) url } 
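# A hedged sketch of the kind of method the helpers above and below emit,
# assuming a discovery entry with path "collections/{uuid}", httpMethod "GET",
# no request body, and no query parameters (the real output depends entirely
# on the discovery document):
#
#   collections_get = function(uuid)
#   {
#       endPoint <- stringr::str_interp("collections/${uuid}")
#       url <- paste0(private$host, endPoint)
#       headers <- list(Authorization = paste("Bearer", private$token),
#                       "Content-Type" = "application/json")
#       queryArgs <- NULL
#
#       body <- NULL
#
#       response <- private$REST$http$exec("GET", url, headers, body,
#                                          queryArgs, private$numRetries)
#       resource <- private$REST$httpParser$parseJSONResponse(response)
#
#       if(!is.null(resource$errors)) {
#           stop(resource$errors)
#       }
#
#       resource
#   }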
getRequestHeaders <- function() { c("headers <- list(Authorization = paste(\"Bearer\", private$token), ", " \"Content-Type\" = \"application/json\")") } getRequestQueryList <- function(methodMetaData) { queryArgs <- names(Filter(function(arg) arg$location == "query", methodMetaData$parameters)) if(length(queryArgs) == 0) return("queryArgs <- NULL") queryArgs <- sapply(queryArgs, function(arg) { arg <- normalizeParamName(arg) paste(arg, "=", arg) }) collapsedArgs <- paste0(queryArgs, collapse = ", ") lineLengthLimit <- 40 if(nchar(collapsedArgs) > lineLengthLimit) return(formatArgs("queryArgs <- list(", "\t\t\t\t ", queryArgs, ")", lineLengthLimit)) else return(paste0("queryArgs <- list(", collapsedArgs, ")")) } getRequestBody <- function(methodMetaData) { request <- methodMetaData$request if(is.null(request) || !request$required) return("body <- NULL") resourceName <- normalizeParamName(request$properties[[1]][[1]]) requestParameterName <- names(request$properties)[1] c(paste0("if(length(", resourceName, ") > 0)"), paste0("\tbody <- jsonlite::toJSON(list(", resourceName, " = ", resourceName, "), "), "\t auto_unbox = TRUE)", "else", "\tbody <- NULL") } getRequest <- function(methodMetaData) { method <- methodMetaData$httpMethod c(paste0("response <- private$REST$http$exec(\"", method, "\", url, headers, body,"), " queryArgs, private$numRetries)") } getResponse <- function(methodMetaData) { "resource <- private$REST$httpParser$parseJSONResponse(response)" } getErrorCheckingCode <- function(methodMetaData) { if ("ensure_unique_name" %in% names(methodMetaData$parameters)) { body <- c("\tif (identical(sub('Entity:.*', '', resource$errors), '//railsapi.internal/arvados/v1/collections: 422 Unprocessable ')) {", "\t\tresource <- cat(format('An object with the given name already exists with this owner. 
If you want to update it, use the update method instead'))", "\t} else {", "\t\tstop(resource$errors)", "\t}") } else { body <- "\tstop(resource$errors)" } c("if(!is.null(resource$errors)) {", body, "}") } getReturnObject <- function() { "resource" } genAPIClassDoc <- function(methodResources, resourceNames) { c("#' @examples", "#' \\dontrun{", "#' arv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")", "#'", "#' collection <- arv$collections_get(\"uuid\")", "#'", "#' collectionList <- arv$collections_list(list(list(\"name\", \"like\", \"Test%\")))", "#' collectionList <- listAll(arv$collections_list, list(list(\"name\", \"like\", \"Test%\")))", "#'", "#' deletedCollection <- arv$collections_delete(\"uuid\")", "#'", "#' updatedCollection <- arv$collections_update(list(name = \"New name\", description = \"New description\"),", "#' \"uuid\")", "#'", "#' createdCollection <- arv$collections_create(list(name = \"Example\",", "#' description = \"This is a test collection\"))", "#' }", "") } getAPIClassMethodList <- function(methodResources, resourceNames) { methodList <- unlist(unname(Map(function(resource, resourceName) { methodNames <- names(resource$methods) paste0(resourceName, "_", methodNames[!(methodNames %in% c("index", "show", "destroy"))]) }, methodResources, resourceNames))) hardcodedMethods <- c("project_create", "project_get", "project_list", "project_update", "project_delete") paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}") } getMethodDoc <- function(methodName, methodMetaData) { description <- paste("\t\t#' @description", gsub("\n", "\n\t\t#' ", methodMetaData$description)) params <- getMethodParams(methodMetaData) returnValue <- paste("\t\t#' @return", methodMetaData$response[["$ref"]], "object.") c(description, params, returnValue) } getMethodParams <- function(methodMetaData) { request <- methodMetaData$request requestDoc <- NULL if(!is.null(request)) { requestDoc <- unname(unlist(sapply(request$properties, function(prop) { className <- sapply(prop, function(ref) ref) objectName <- normalizeParamName(className) paste("\t\t#' @param", objectName, className, "object.") }))) } argNames <- names(methodMetaData$parameters) argsDoc <- unname(unlist(sapply(argNames, function(argName) { arg <- methodMetaData$parameters[[argName]] paste("\t\t#' @param", normalizeParamName(argName), gsub("\n", "\n\t\t#' ", arg$description) ) }))) c(requestDoc, argsDoc) } #NOTE: Utility functions: # This function is used to split very long lines of code into smaller chunks. # This is usually the case when we pass a lot of named arguments to a function. 
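# A hedged worked example: with a 20-character line limit,
#   formatArgs("f <- function(", "\t", c("a = NULL", "b = NULL", "c = NULL"), ")", 20)
# returns
#   c("f <- function(a = NULL,", "\tb = NULL, c = NULL)")
# i.e. the first argument stays on the opening line and the remaining
# arguments are packed onto indented continuation lines.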
formatArgs <- function(prependAtStart, prependToEachSplit, args, appendAtEnd, lineLength) { if(length(args) > 1) { args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",") } args[1] <- paste0(prependAtStart, args[1]) args[length(args)] <- paste0(args[length(args)], appendAtEnd) argsLength <- length(args) argLines <- list() index <- 1 while(index <= argsLength) { line <- args[index] index <- index + 1 while(nchar(line) < lineLength && index <= argsLength) { line <- paste(line, args[index]) index <- index + 1 } argLines <- c(argLines, line) } argLines <- unlist(argLines) argLinesLen <- length(argLines) if(argLinesLen > 1) argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen]) argLines } args <- commandArgs(TRUE) if (length(args) == 0) { loc <- "arvados-v1-discovery.json" } else { loc <- args[[1]] } discoveryDocument <- getAPIDocument(loc) generateAPI(discoveryDocument) ================================================ FILE: contrib/R-sdk/install_deps.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 options(repos=structure(c(CRAN="https://cloud.r-project.org/"))) if (!requireNamespace("devtools")) { install.packages("devtools") } if (!requireNamespace("roxygen2")) { install.packages("roxygen2") } if (!requireNamespace("knitr")) { install.packages("knitr") } if (!requireNamespace("markdown")) { install.packages("markdown") } if (!requireNamespace("XML")) { install.packages("XML") } devtools::install_dev_deps() ================================================ FILE: contrib/R-sdk/run_test.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 devtools::check() results <- devtools::test() any_error <- any(as.data.frame(results)$error) if (any_error) { q("no", 1) } else { q("no", 0) } ================================================ FILE: contrib/R-sdk/tests/testthat/fakes/FakeArvados.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 FakeArvados <- R6::R6Class( "FakeArvados", public = list( token = NULL, host = NULL, webdavHost = NULL, http = NULL, httpParser = NULL, REST = NULL, initialize = function(token = NULL, host = NULL, webdavHost = NULL, http = NULL, httpParser = NULL) { self$token <- token self$host <- host self$webdavHost <- webdavHost self$http <- http self$httpParser <- httpParser }, getToken = function() self$token, getHostName = function() self$host, getHttpClient = function() self$http, getHttpParser = function() self$httpParser, getWebDavHostName = function() self$webdavHost ), cloneable = FALSE ) ================================================ FILE: contrib/R-sdk/tests/testthat/fakes/FakeHttpParser.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. 
# # SPDX-License-Identifier: Apache-2.0 FakeHttpParser <- R6::R6Class( "FakeHttpParser", public = list( validContentTypes = NULL, parserCallCount = NULL, initialize = function() { self$parserCallCount <- 0 self$validContentTypes <- c("text", "raw") }, parseJSONResponse = function(serverResponse) { self$parserCallCount <- self$parserCallCount + 1 if(!is.null(serverResponse$content)) return(serverResponse$content) serverResponse }, parseResponse = function(serverResponse, outputType) { self$parserCallCount <- self$parserCallCount + 1 if(!is.null(serverResponse$content)) return(serverResponse$content) serverResponse }, getFileNamesFromResponse = function(serverResponse, uri) { self$parserCallCount <- self$parserCallCount + 1 if(!is.null(serverResponse$content)) return(serverResponse$content) serverResponse }, getFileSizesFromResponse = function(serverResponse, uri) { self$parserCallCount <- self$parserCallCount + 1 if(!is.null(serverResponse$content)) return(serverResponse$content) serverResponse } ) ) ================================================ FILE: contrib/R-sdk/tests/testthat/fakes/FakeHttpRequest.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 FakeHttpRequest <- R6::R6Class( "FakeHttpRequest", public = list( serverMaxElementsPerRequest = NULL, content = NULL, expectedURL = NULL, URLIsProperlyConfigured = NULL, expectedQueryFilters = NULL, queryFiltersAreCorrect = NULL, requestHeaderContainsAuthorizationField = NULL, requestHeaderContainsDestinationField = NULL, requestHeaderContainsRangeField = NULL, requestHeaderContainsContentTypeField = NULL, JSONEncodedBodyIsProvided = NULL, requestBodyIsProvided = NULL, numberOfGETRequests = NULL, numberOfDELETERequests = NULL, numberOfPUTRequests = NULL, numberOfPOSTRequests = NULL, numberOfMOVERequests = NULL, numberOfCOPYRequests = NULL, numberOfgetConnectionCalls = NULL, initialize = function(expectedURL = NULL, serverResponse = NULL, expectedFilters = NULL) { if(is.null(serverResponse)) { self$content <- list() self$content$status_code <- 200 } else self$content <- serverResponse self$expectedURL <- expectedURL self$URLIsProperlyConfigured <- FALSE self$expectedQueryFilters <- expectedFilters self$queryFiltersAreCorrect <- FALSE self$requestHeaderContainsAuthorizationField <- FALSE self$requestHeaderContainsDestinationField <- FALSE self$requestHeaderContainsRangeField <- FALSE self$requestHeaderContainsContentTypeField <- FALSE self$JSONEncodedBodyIsProvided <- FALSE self$requestBodyIsProvided <- FALSE self$numberOfGETRequests <- 0 self$numberOfDELETERequests <- 0 self$numberOfPUTRequests <- 0 self$numberOfPOSTRequests <- 0 self$numberOfMOVERequests <- 0 self$numberOfCOPYRequests <- 0 self$numberOfgetConnectionCalls <- 0 self$serverMaxElementsPerRequest <- 5 }, exec = function(verb, url, headers = NULL, body = NULL, queryFilters = NULL, limit = NULL, offset = NULL, retryTimes = 0) { private$validateURL(url) private$validateHeaders(headers) private$validateFilters(queryFilters) private$validateBody(body) if(verb == "GET") self$numberOfGETRequests <- self$numberOfGETRequests + 1 else if(verb == "POST") self$numberOfPOSTRequests <- self$numberOfPOSTRequests + 1 else if(verb == "PUT") self$numberOfPUTRequests <- self$numberOfPUTRequests + 1 else if(verb == "DELETE") self$numberOfDELETERequests <- self$numberOfDELETERequests + 1 else if(verb == "MOVE") self$numberOfMOVERequests <- self$numberOfMOVERequests + 1 else if(verb == "COPY") 
self$numberOfCOPYRequests <- self$numberOfCOPYRequests + 1 else if(verb == "PROPFIND") { return(self$content) } if(!is.null(self$content$items_available)) return(private$getElements(offset, limit)) else return(self$content) }, getConnection = function(url, headers, openMode) { self$numberOfgetConnectionCalls <- self$numberOfgetConnectionCalls + 1 c(url, headers, openMode) } ), private = list( validateURL = function(url) { if(!is.null(self$expectedURL) && url == self$expectedURL) self$URLIsProperlyConfigured <- TRUE }, validateHeaders = function(headers) { if(!is.null(headers$Authorization)) self$requestHeaderContainsAuthorizationField <- TRUE if(!is.null(headers$Destination)) self$requestHeaderContainsDestinationField <- TRUE if(!is.null(headers$Range)) self$requestHeaderContainsRangeField <- TRUE if(!is.null(headers[["Content-Type"]])) self$requestHeaderContainsContentTypeField <- TRUE }, validateBody = function(body) { if(!is.null(body)) { self$requestBodyIsProvided <- TRUE if(class(body) == "json") self$JSONEncodedBodyIsProvided <- TRUE } }, validateFilters = function(filters) { if(!is.null(self$expectedQueryFilters) && !is.null(filters) && all.equal(unname(filters), self$expectedQueryFilters)) { self$queryFiltersAreCorrect <- TRUE } }, getElements = function(offset, limit) { start <- 1 elementCount <- self$serverMaxElementsPerRequest if(!is.null(offset)) { if(offset > self$content$items_available) stop("Invalid offset") start <- offset + 1 } if(!is.null(limit)) if(limit < self$serverMaxElementsPerRequest) elementCount <- limit - 1 serverResponse <- list() serverResponse$items_available <- self$content$items_available serverResponse$items <- self$content$items[start:(start + elementCount - 1)] if(start + elementCount > self$content$items_available) { elementCount = self$content$items_available - start serverResponse$items <- self$content$items[start:(start + elementCount)] } serverResponse } ), cloneable = FALSE ) ================================================ FILE: contrib/R-sdk/tests/testthat/fakes/FakeRESTService.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. 

================================================
FILE: contrib/R-sdk/tests/testthat/fakes/FakeRESTService.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

FakeRESTService <- R6::R6Class(

    "FakeRESTService",

    public = list(

        getResourceCallCount = NULL,
        createResourceCallCount = NULL,
        listResourcesCallCount = NULL,
        deleteResourceCallCount = NULL,
        updateResourceCallCount = NULL,
        fetchAllItemsCallCount = NULL,

        createCallCount = NULL,
        deleteCallCount = NULL,
        moveCallCount = NULL,
        copyCallCount = NULL,
        getCollectionContentCallCount = NULL,
        getResourceSizeCallCount = NULL,
        readCallCount = NULL,
        writeCallCount = NULL,
        getConnectionCallCount = NULL,
        writeBuffer = NULL,
        filtersAreConfiguredCorrectly = NULL,
        bodyIsConfiguredCorrectly = NULL,
        expectedFilterContent = NULL,

        collectionContent = NULL,
        returnContent = NULL,

        initialize = function(collectionContent = NULL, returnContent = NULL,
                              expectedFilterContent = NULL)
        {
            self$getResourceCallCount <- 0
            self$createResourceCallCount <- 0
            self$listResourcesCallCount <- 0
            self$deleteResourceCallCount <- 0
            self$updateResourceCallCount <- 0
            self$fetchAllItemsCallCount <- 0

            self$createCallCount <- 0
            self$deleteCallCount <- 0
            self$moveCallCount <- 0
            self$copyCallCount <- 0
            self$getCollectionContentCallCount <- 0
            self$getResourceSizeCallCount <- 0
            self$readCallCount <- 0
            self$writeCallCount <- 0
            self$getConnectionCallCount <- 0

            self$filtersAreConfiguredCorrectly <- FALSE
            self$bodyIsConfiguredCorrectly <- FALSE

            self$collectionContent <- collectionContent
            self$returnContent <- returnContent
            self$expectedFilterContent <- expectedFilterContent
        },

        getWebDavHostName = function()
        {
        },

        getResource = function(resource, uuid)
        {
            self$getResourceCallCount <- self$getResourceCallCount + 1
            self$returnContent
        },

        listResources = function(resource, filters = NULL, limit = 100, offset = 0)
        {
            self$listResourcesCallCount <- self$listResourcesCallCount + 1

            if(!is.null(self$expectedFilterContent) && !is.null(filters))
                if(all.equal(filters, self$expectedFilterContent))
                    self$filtersAreConfiguredCorrectly <- TRUE

            self$returnContent
        },

        fetchAllItems = function(resourceURL, filters)
        {
            self$fetchAllItemsCallCount <- self$fetchAllItemsCallCount + 1

            if(!is.null(self$expectedFilterContent) && !is.null(filters))
                if(all.equal(filters, self$expectedFilterContent))
                    self$filtersAreConfiguredCorrectly <- TRUE

            self$returnContent
        },

        deleteResource = function(resource, uuid)
        {
            self$deleteResourceCallCount <- self$deleteResourceCallCount + 1
            self$returnContent
        },

        updateResource = function(resource, uuid, newContent)
        {
            self$updateResourceCallCount <- self$updateResourceCallCount + 1

            if(!is.null(self$returnContent) && !is.null(newContent))
                if(all.equal(newContent, self$returnContent))
                    self$bodyIsConfiguredCorrectly <- TRUE

            self$returnContent
        },

        createResource = function(resource, content)
        {
            self$createResourceCallCount <- self$createResourceCallCount + 1

            if(!is.null(self$returnContent) && !is.null(content))
                if(all.equal(content, self$returnContent))
                    self$bodyIsConfiguredCorrectly <- TRUE

            self$returnContent
        },

        create = function(files, uuid)
        {
            self$createCallCount <- self$createCallCount + 1
            self$returnContent
        },

        delete = function(relativePath, uuid)
        {
            self$deleteCallCount <- self$deleteCallCount + 1
            self$returnContent
        },

        move = function(from, to, uuid)
        {
            self$moveCallCount <- self$moveCallCount + 1
            self$returnContent
        },

        copy = function(from, to, uuid)
        {
            self$copyCallCount <- self$copyCallCount + 1
            self$returnContent
        },

        getCollectionContent = function(uuid, relativePath = NULL)
        {
            self$getCollectionContentCallCount <- self$getCollectionContentCallCount + 1
            if (!is.null(relativePath)) {
                self$collectionContent[startsWith(self$collectionContent, relativePath)]
            } else {
                self$collectionContent
            }
        },

        getResourceSize = function(uuid, relativePathToResource)
        {
            self$getResourceSizeCallCount <- self$getResourceSizeCallCount + 1
            self$returnContent
        },

        read = function(relativePath, uuid, contentType = "text", offset = 0, length = 0)
        {
            self$readCallCount <- self$readCallCount + 1
            self$returnContent
        },

        write = function(relativePath, uuid, content, contentType)
        {
            self$writeBuffer <- content
            self$writeCallCount <- self$writeCallCount + 1
            self$returnContent
        },

        getConnection = function(uuid, relativePath, openMode)
        {
            self$getConnectionCallCount <- self$getConnectionCallCount + 1
            self$returnContent
        }
    ),

    cloneable = FALSE
)
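
# How the test files below consume this fake: they build a real Arvados API
# object, then swap its REST layer for this stub before constructing a
# Collection. A minimal sketch (illustrative only, never executed):
if (FALSE) {
    fakeREST <- FakeRESTService$new(collectionContent = c("animal", "animal/fish"))
    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")  # now backed by the fake
}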

================================================
FILE: contrib/R-sdk/tests/testthat/test-ArvadosFile.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

source("fakes/FakeRESTService.R")

context("ArvadosFile")

test_that("constructor raises error if file name is empty string", {

    expect_that(ArvadosFile$new(""), throws_error("Invalid name."))
})

test_that("getFileListing always returns file name", {

    dog <- ArvadosFile$new("dog")
    expect_that(dog$getFileListing(), equals("dog"))
})

test_that("get always returns NULL", {

    dog <- ArvadosFile$new("dog")

    responseIsNull <- is.null(dog$get("something"))
    expect_true(responseIsNull)
})

test_that("getFirst always returns NULL", {

    dog <- ArvadosFile$new("dog")

    responseIsNull <- is.null(dog$getFirst())
    expect_true(responseIsNull)
})

test_that(paste("getSizeInBytes returns zero if arvadosFile",
                "is not part of a collection"), {

    dog <- ArvadosFile$new("dog")

    expect_that(dog$getSizeInBytes(), equals(0))
})

test_that(paste("getSizeInBytes delegates size calculation",
                "to REST service class"), {

    collectionContent <- c("animal", "animal/fish")
    returnSize <- 100
    fakeREST <- FakeRESTService$new(collectionContent, returnSize)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    resourceSize <- fish$getSizeInBytes()

    expect_that(resourceSize, equals(100))
})

test_that("getRelativePath returns path relative to the tree root", {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    shark <- ArvadosFile$new("shark")

    animal$add(fish)
    fish$add(shark)

    expect_that(shark$getRelativePath(), equals("animal/fish/shark"))
})

test_that("read raises exception if file doesn't belong to a collection", {

    dog <- ArvadosFile$new("dog")

    expect_that(dog$read(),
                throws_error("ArvadosFile doesn't belong to any collection."))
})

test_that("read raises exception if offset or length is a negative number", {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    expect_that(fish$read(contentType = "text", offset = -1),
                throws_error("Offset and length must be positive values."))
    expect_that(fish$read(contentType = "text", length = -1),
                throws_error("Offset and length must be positive values."))
    expect_that(fish$read(contentType = "text", offset = -1, length = -1),
                throws_error("Offset and length must be positive values."))
})

test_that("read delegates reading operation to REST service class", {

    collectionContent <- c("animal", "animal/fish")
    readContent <- "my file"
    fakeREST <- FakeRESTService$new(collectionContent, readContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    fileContent <- fish$read("text")

    expect_that(fileContent, equals("my file"))
    expect_that(fakeREST$readCallCount, equals(1))
})

test_that(paste("connection delegates connection creation to RESTService class",
                "which returns curl connection opened in read mode when",
                "'r' or 'rb' is passed as argument"), {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    connection <- fish$connection("r")

    expect_that(fakeREST$getConnectionCallCount, equals(1))
})

test_that(paste("connection returns textConnection opened",
                "in write mode when 'w' is passed as argument"), {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    connection <- fish$connection("w")

    writeLines("file", connection)
    writeLines("content", connection)

    writeResult <- textConnectionValue(connection)

    expect_that(writeResult[1], equals("file"))
    expect_that(writeResult[2], equals("content"))
})

test_that("flush sends data stored in a connection to a REST server", {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    connection <- fish$connection("w")

    writeLines("file content", connection)

    fish$flush()

    expect_that(fakeREST$writeBuffer, equals("file content"))
})

test_that("write raises exception if file doesn't belong to a collection", {

    dog <- ArvadosFile$new("dog")

    expect_that(dog$write(),
                throws_error("ArvadosFile doesn't belong to any collection."))
})

test_that("write delegates writing operation to REST service class", {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    fileContent <- fish$write("new file content")

    expect_that(fakeREST$writeBuffer, equals("new file content"))
})

test_that(paste("move raises exception if arvados file",
                "doesn't belong to any collection"), {

    animal <- ArvadosFile$new("animal")

    expect_that(animal$move("new/location"),
                throws_error("ArvadosFile doesn't belong to any collection."))
})

test_that(paste("move raises exception if newLocationInCollection",
                "parameter is invalid"), {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    dog <- collection$get("animal/dog")

    expect_that(dog$move("objects/dog"),
                throws_error("Unable to get destination subcollection."))
})

test_that("move raises exception if new location contains content with the same name", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "dog")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    dog <- collection$get("animal/dog")

    expect_that(dog$move("dog"),
                throws_error("Destination already contains content with same name."))
})

test_that("move moves arvados file inside collection tree", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    dog <- collection$get("animal/dog")

    dog$move("dog")

    dogIsNullOnOldLocation <- is.null(collection$get("animal/dog"))
    dogExistsOnNewLocation <- !is.null(collection$get("dog"))

    expect_true(dogIsNullOnOldLocation)
    expect_true(dogExistsOnNewLocation)
})

test_that(paste("copy raises exception if arvados file",
                "doesn't belong to any collection"), {

    animal <- ArvadosFile$new("animal")

    expect_that(animal$copy("new/location"),
                throws_error("ArvadosFile doesn't belong to any collection."))
})

test_that("copy raises exception if location parameter is invalid", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    dog <- collection$get("animal/dog")

    expect_that(dog$copy("objects/dog"),
                throws_error("Unable to get destination subcollection."))
})

test_that("copy raises exception if new location contains content with the same name", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "dog")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    dog <- collection$get("animal/dog")

    expect_that(dog$copy("dog"),
                throws_error("Destination already contains content with same name."))
})

test_that("copy copies arvados file inside collection tree", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    dog <- collection$get("animal/dog")

    dog$copy("dog")

    dogExistsOnOldLocation <- !is.null(collection$get("animal/dog"))
    dogExistsOnNewLocation <- !is.null(collection$get("dog"))

    expect_true(dogExistsOnOldLocation)
    expect_true(dogExistsOnNewLocation)
})

test_that("duplicate performs deep cloning of Arvados file", {

    arvFile <- ArvadosFile$new("foo")
    newFile1 <- arvFile$duplicate()
    newFile2 <- arvFile$duplicate("bar")

    expect_that(newFile1$getFileListing(), equals(arvFile$getFileListing()))
    expect_that(newFile2$getFileListing(), equals(c("bar")))
})

================================================
FILE: contrib/R-sdk/tests/testthat/test-Collection.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

source("fakes/FakeRESTService.R")

context("Collection")

test_that(paste("constructor creates file tree from text content",
                "retrieved from REST service"), {

    collectionContent <- c("animal", "animal/fish", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")

    root <- collection$get("")

    expect_that(fakeREST$getCollectionContentCallCount, equals(1))
    expect_that(root$getName(), equals(""))
})

test_that(paste("add raises exception if passed argument is not",
                "ArvadosFile or Subcollection"), {

    collectionContent <- c("animal", "animal/fish", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    newNumber <- 10

    # "AravodsFile" below is spelled exactly as in the error message the SDK throws.
    expect_that(collection$add(newNumber),
                throws_error(paste("Expected AravodsFile or Subcollection",
                                   "object, got (numeric)."), fixed = TRUE))
})

test_that("add raises exception if relative path is not valid", {

    collectionContent <- c("animal", "animal/fish", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    newPen <- ArvadosFile$new("pen")

    expect_that(collection$add(newPen, "objects"),
                throws_error("Subcollection objects doesn't exist.", fixed = TRUE))
})

test_that("add raises exception if content name is empty string", {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    rootFolder <- Subcollection$new("")

    expect_that(collection$add(rootFolder),
                throws_error("Content has invalid name.", fixed = TRUE))
})

test_that(paste("add adds ArvadosFile or Subcollection",
                "to local tree structure and remote REST service"), {

    collectionContent <- c("animal", "animal/fish", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    newDog <- ArvadosFile$new("dog")

    collection$add(newDog, "animal")

    dog <- collection$get("animal/dog")
    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"

    expect_true(dogExistsInCollection)
    expect_that(fakeREST$createCallCount, equals(1))
})

test_that("create raises exception if passed argument is not character vector", {

    collectionContent <- c("animal", "animal/fish", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")

    expect_that(collection$create(10),
                throws_error("Expected character vector, got (numeric).",
                             fixed = TRUE))
})

test_that(paste("create adds files specified by fileNames",
                "to local tree structure and remote REST service"), {

    fakeREST <- FakeRESTService$new()
    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")

    collection$create(c("animal/dog", "animal/cat"))

    dog <- collection$get("animal/dog")
    cat <- collection$get("animal/cat")
    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
    catExistsInCollection <- !is.null(cat) && cat$getName() == "cat"

    expect_true(dogExistsInCollection)
    expect_true(catExistsInCollection)
    expect_that(fakeREST$createCallCount, equals(2))
})
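
# Every test in this file repeats the same fake-wiring boilerplate. A
# hypothetical helper that could factor it out is sketched below; it is not
# part of the original suite and never executes:
if (FALSE) {
    makeFakeCollection <- function(content = NULL, returnContent = NULL) {
        fakeREST <- FakeRESTService$new(content, returnContent)
        api <- Arvados$new("myToken", "myHostName")
        api$setRESTService(fakeREST)
        list(collection = Collection$new(api, "myUUID"), fakeREST = fakeREST)
    }
}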
test_that("remove raises exception if passed argumet is not character vector", { collectionContent <- c("animal", "animal/fish", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") expect_that(collection$remove(10), throws_error("Expected character vector, got (numeric).", fixed = TRUE)) }) test_that("remove raises exception if user tries to remove root folder", { collectionContent <- c("animal", "animal/fish") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") expect_that(collection$remove(""), throws_error("You can't delete root folder.", fixed = TRUE)) }) test_that(paste("remove removes files specified by paths", "from local tree structure and from remote REST service"), { collectionContent <- c("animal", "animal/fish", "animal/dog", "animal/cat", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") collection$remove(c("animal/dog", "animal/cat")) dog <- collection$get("animal/dog") cat <- collection$get("animal/dog") dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog" catExistsInCollection <- !is.null(cat) && cat$getName() == "cat" expect_false(dogExistsInCollection) expect_false(catExistsInCollection) expect_that(fakeREST$deleteCallCount, equals(2)) }) test_that(paste("move moves content to a new location inside file tree", "and on REST service"), { collectionContent <- c("animal", "animal/dog", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") collection$move("animal/dog", "dog") dogIsNullOnOldLocation <- is.null(collection$get("animal/dog")) dogExistsOnNewLocation <- !is.null(collection$get("dog")) expect_true(dogIsNullOnOldLocation) expect_true(dogExistsOnNewLocation) expect_that(fakeREST$moveCallCount, equals(1)) }) test_that("move raises exception if new location is not valid", { collectionContent <- c("animal", "animal/fish", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") expect_that(collection$move("fish", "object"), throws_error("Content you want to move doesn't exist in the collection.", fixed = TRUE)) }) test_that("getFileListing returns sorted collection content received from REST service", { collectionContent <- c("animal", "animal/fish", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") contentMatchExpected <- all(collection$getFileListing() == c("animal", "animal/fish", "ball")) expect_true(contentMatchExpected) #2 calls because Collection$new calls getFileListing once expect_that(fakeREST$getCollectionContentCallCount, equals(2)) }) test_that("get returns arvados file or subcollection from internal tree structure", { collectionContent <- c("animal", "animal/fish", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") fish <- collection$get("animal/fish") fishIsNotNull <- !is.null(fish) 
expect_true(fishIsNotNull) expect_that(fish$getName(), equals("fish")) ball <- collection$get("ball") ballIsNotNull <- !is.null(ball) expect_true(ballIsNotNull) expect_that(ball$getName(), equals("ball")) }) test_that(paste("copy copies content to a new location inside file tree", "and on REST service"), { collectionContent <- c("animal", "animal/dog", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") collection$copy("animal/dog", "dog") dogExistsOnOldLocation <- !is.null(collection$get("animal/dog")) dogExistsOnNewLocation <- !is.null(collection$get("dog")) expect_true(dogExistsOnOldLocation) expect_true(dogExistsOnNewLocation) expect_that(fakeREST$copyCallCount, equals(1)) }) test_that("copy raises exception if new location is not valid", { collectionContent <- c("animal", "animal/fish", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "myUUID") expect_that(collection$copy("fish", "object"), throws_error("Content you want to copy doesn't exist in the collection.", fixed = TRUE)) }) test_that("refresh invalidates current tree structure", { collectionContent <- c("animal", "animal/fish", "ball") fakeREST <- FakeRESTService$new(collectionContent) api <- Arvados$new("myToken", "myHostName") api$setRESTService(fakeREST) collection <- Collection$new(api, "aaaaa-j7d0g-ccccccccccccccc") # Before refresh fish <- collection$get("animal/fish") expect_that(fish$getName(), equals("fish")) expect_that(fish$getCollection()$uuid, equals("aaaaa-j7d0g-ccccccccccccccc")) collection$refresh() # After refresh expect_that(fish$getName(), equals("fish")) expect_true(is.null(fish$getCollection())) }) ================================================ FILE: contrib/R-sdk/tests/testthat/test-CollectionTree.R ================================================ # Copyright (C) The Arvados Authors. All rights reserved. 
#
# SPDX-License-Identifier: Apache-2.0

context("CollectionTree")

test_that("constructor creates file tree from character array properly", {

    collection <- "myCollection"
    characterArray <- c("animal", "animal/dog", "boat")
    collectionTree <- CollectionTree$new(characterArray, collection)

    root <- collectionTree$getTree()
    animal <- collectionTree$getElement("animal")
    dog <- collectionTree$getElement("animal/dog")
    boat <- collectionTree$getElement("boat")

    rootHasNoParent <- is.null(root$getParent())
    rootIsOfTypeSubcollection <- "Subcollection" %in% class(root)
    animalIsOfTypeSubcollection <- "Subcollection" %in% class(animal)
    dogIsOfTypeArvadosFile <- "ArvadosFile" %in% class(dog)
    boatIsOfTypeArvadosFile <- "ArvadosFile" %in% class(boat)
    animalsParentIsRoot <- animal$getParent()$getName() == root$getName()
    animalContainsDog <- animal$getFirst()$getName() == dog$getName()
    dogsParentIsAnimal <- dog$getParent()$getName() == animal$getName()
    boatsParentIsRoot <- boat$getParent()$getName() == root$getName()

    allElementsBelongToSameCollection <- root$getCollection() == "myCollection" &&
                                         animal$getCollection() == "myCollection" &&
                                         dog$getCollection() == "myCollection" &&
                                         boat$getCollection() == "myCollection"

    expect_that(root$getName(), equals(""))
    expect_true(rootIsOfTypeSubcollection)
    expect_true(rootHasNoParent)
    expect_true(animalIsOfTypeSubcollection)
    expect_true(animalsParentIsRoot)
    expect_true(animalContainsDog)
    expect_true(dogIsOfTypeArvadosFile)
    expect_true(dogsParentIsAnimal)
    expect_true(boatIsOfTypeArvadosFile)
    expect_true(boatsParentIsRoot)
    expect_true(allElementsBelongToSameCollection)
})

test_that("getElement returns element from tree if element exists on specified path", {

    collection <- "myCollection"
    characterArray <- c("animal", "animal/dog", "boat")
    collectionTree <- CollectionTree$new(characterArray, collection)

    dog <- collectionTree$getElement("animal/dog")

    expect_that(dog$getName(), equals("dog"))
})

test_that("getElement returns NULL from tree if element doesn't exist on specified path", {

    collection <- "myCollection"
    characterArray <- c("animal", "animal/dog", "boat")
    collectionTree <- CollectionTree$new(characterArray, collection)

    fish <- collectionTree$getElement("animal/fish")
    fishIsNULL <- is.null(fish)

    expect_true(fishIsNULL)
})

test_that("getElement trims ./ from start of relativePath", {

    collection <- "myCollection"
    characterArray <- c("animal", "animal/dog", "boat")
    collectionTree <- CollectionTree$new(characterArray, collection)

    dog <- collectionTree$getElement("animal/dog")
    dogWithDotSlash <- collectionTree$getElement("./animal/dog")

    expect_that(dogWithDotSlash$getName(), equals(dog$getName()))
})

test_that("getElement trims / from end of relativePath", {

    collection <- "myCollection"
    characterArray <- c("animal", "animal/dog", "boat")
    collectionTree <- CollectionTree$new(characterArray, collection)

    animal <- collectionTree$getElement("animal")
    animalWithSlash <- collectionTree$getElement("animal/")

    expect_that(animalWithSlash$getName(), equals(animal$getName()))
})

================================================
FILE: contrib/R-sdk/tests/testthat/test-HttpParser.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

context("Http Parser")

test_that("parseJSONResponse generates and returns JSON object from server response", {

    JSONContent <- "{\"bar\":{\"foo\":[10]}}"
    serverResponse <- list()
    serverResponse$content <- charToRaw(JSONContent)
    serverResponse$headers[["Content-Type"]] <- "application/json; charset=utf-8"
    class(serverResponse) <- c("response")

    parser <- HttpParser$new()

    result <- parser$parseJSONResponse(serverResponse)
    barExists <- !is.null(result$bar)

    expect_true(barExists)
    expect_that(unlist(result$bar$foo), equals(10))
})

test_that(paste("parseResponse generates and returns character vector",
                "from server response if outputType is text"), {

    content <- "random text"
    serverResponse <- list()
    serverResponse$content <- charToRaw(content)
    serverResponse$headers[["Content-Type"]] <- "text/plain; charset=utf-8"
    class(serverResponse) <- c("response")

    parser <- HttpParser$new()
    parsedResponse <- parser$parseResponse(serverResponse, "text")

    expect_that(parsedResponse, equals("random text"))
})

webDAVResponseSample <-
    paste0("/c=aaaaa-bbbbb-ccccccccccccccc/Fri, 11 Jan 2018 1",
           "1:11:11 GMTHTTP/1.1 200 OK/c=aaaaa-bbb",
           "bb-ccccccccccccccc/myFile.exeFri, 12 Jan 2018",
           " 22:22:22 GMTtext/x-c++src",
           "; charset=utf-8myFile.exe25\"123b12dd1234567890\"",
           "HTTP/1.1 200 OK")

test_that(paste("getFileNamesFromResponse returns file names belonging to specific",
                "collection parsed from webDAV server response"), {

    serverResponse <- list()
    serverResponse$content <- charToRaw(webDAVResponseSample)
    serverResponse$headers[["Content-Type"]] <- "text/xml; charset=utf-8"
    class(serverResponse) <- c("response")
    url <- URLencode("https://webdav/c=aaaaa-bbbbb-ccccccccccccccc")

    parser <- HttpParser$new()
    result <- parser$getFileNamesFromResponse(serverResponse, url)

    expectedResult <- "myFile.exe"
    resultMatchExpected <- all.equal(result, expectedResult)

    expect_true(resultMatchExpected)
})

test_that(paste("getFileSizesFromResponse returns file sizes",
                "parsed from webDAV server response"), {

    serverResponse <- list()
    serverResponse$content <- charToRaw(webDAVResponseSample)
    serverResponse$headers[["Content-Type"]] <- "text/xml; charset=utf-8"
    class(serverResponse) <- c("response")
    url <- URLencode("https://webdav/c=aaaaa-bbbbb-ccccccccccccccc")

    parser <- HttpParser$new()
    expectedResult <- "25"
    result <- parser$getFileSizesFromResponse(serverResponse, url)
    resultMatchExpected <- result == expectedResult

    expect_true(resultMatchExpected)
})

================================================
FILE: contrib/R-sdk/tests/testthat/test-HttpRequest.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

context("Http Request")

test_that("execute raises exception if http verb is not valid", {

    http <- HttpRequest$new()

    expect_that(http$exec("FAKE VERB", "url"),
                throws_error("Http verb is not valid."))
})

test_that("createQuery generates and encodes query portion of an http request", {

    http <- HttpRequest$new()
    queryParams <- list()
    queryParams$filters <- list(list("color", "=", "red"))
    queryParams$limit <- 20
    queryParams$offset <- 50

    expect_that(http$createQuery(queryParams),
                equals(paste0("?filters=%5B%5B%22color%22%2C%22%3D%22%2C%22red",
                              "%22%5D%5D&limit=20&offset=50")))
})

test_that("createQuery generates an empty string when queryParams is an empty list", {

    http <- HttpRequest$new()

    expect_that(http$createQuery(list()), equals(""))
})

test_that("exec calls httr functions correctly", {

    httrNamespace <- getNamespace("httr")

    # Monkeypatch httr functions and assert that they are called later
    add_headersCalled <- FALSE
    unlockBinding("add_headers", httrNamespace)
    newAddHeaders <- function(h)
    {
        add_headersCalled <<- TRUE
        list()
    }
    httrNamespace$add_headers <- newAddHeaders
    lockBinding("add_headers", httrNamespace)

    expectedConfig <- list()
    retryCalled <- FALSE
    unlockBinding("RETRY", httrNamespace)
    newRETRY <- function(verb, url, body, config, times)
    {
        retryCalled <<- TRUE
        expectedConfig <<- config
    }
    httrNamespace$RETRY <- newRETRY
    lockBinding("RETRY", httrNamespace)

    Sys.setenv("ARVADOS_API_HOST_INSECURE" = TRUE)
    http <- HttpRequest$new()
    http$exec("GET", "url")

    expect_true(add_headersCalled)
    expect_true(retryCalled)
    expect_that(expectedConfig$options, equals(list(ssl_verifypeer = 0L)))
})

test_that("getConnection calls curl functions correctly", {

    curlNamespace <- getNamespace("curl")

    # Monkeypatch curl functions and assert that they are called later
    curlCalled <- FALSE
    unlockBinding("curl", curlNamespace)
    newCurl <- function(url, open, handle) curlCalled <<- TRUE
    curlNamespace$curl <- newCurl
    lockBinding("curl", curlNamespace)

    new_handleCalled <- FALSE
    unlockBinding("new_handle", curlNamespace)
    newHandleFun <- function()
    {
        new_handleCalled <<- TRUE
        list()
    }
    curlNamespace$new_handle <- newHandleFun
    lockBinding("new_handle", curlNamespace)

    handle_setheadersCalled <- FALSE
    unlockBinding("handle_setheaders", curlNamespace)
    newHandleSetHeaders <- function(h, .list) handle_setheadersCalled <<- TRUE
    curlNamespace$handle_setheaders <- newHandleSetHeaders
    lockBinding("handle_setheaders", curlNamespace)

    handle_setoptCalled <- FALSE
    unlockBinding("handle_setopt", curlNamespace)
    newHandleSetOpt <- function(h, ssl_verifypeer) handle_setoptCalled <<- TRUE
    curlNamespace$handle_setopt <- newHandleSetOpt
    lockBinding("handle_setopt", curlNamespace)

    Sys.setenv("ARVADOS_API_HOST_INSECURE" = TRUE)
    http <- HttpRequest$new()
    http$getConnection("location", list(), "r")

    expect_true(new_handleCalled)
    expect_true(handle_setheadersCalled)
    expect_true(handle_setoptCalled)
    expect_true(curlCalled)
})
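
# The two tests above hand-roll the same namespace surgery for each binding.
# A hypothetical helper capturing the unlock/replace/lock pattern is sketched
# below; like the tests themselves, it does not restore the old binding. It is
# not part of the original suite and never executes:
if (FALSE) {
    patchNamespaceBinding <- function(pkg, name, value) {
        ns <- getNamespace(pkg)
        unlockBinding(name, ns)
        assign(name, value, envir = ns)
        lockBinding(name, ns)
    }

    patchNamespaceBinding("httr", "RETRY", function(...) NULL)
}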

================================================
FILE: contrib/R-sdk/tests/testthat/test-RESTService.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

source("fakes/FakeArvados.R")
source("fakes/FakeHttpRequest.R")
source("fakes/FakeHttpParser.R")

context("REST service")

test_that("getWebDavHostName calls REST service properly", {

    expectedURL <- "https://host/arvados/v1/config"
    serverResponse <- list(Services = list(WebDAVDownload = list(
                               ExternalURL = "https://myWebDavServer.com")))
    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)

    REST <- RESTService$new("token", "host", httpRequest, FakeHttpParser$new())

    REST$getWebDavHostName()

    expect_true(httpRequest$URLIsProperlyConfigured)
    expect_false(httpRequest$requestHeaderContainsAuthorizationField)
    expect_that(httpRequest$numberOfGETRequests, equals(1))
})

test_that("getWebDavHostName returns webDAV host name properly", {

    serverResponse <- list(Services = list(WebDAVDownload = list(
                               ExternalURL = "https://myWebDavServer.com")))
    httpRequest <- FakeHttpRequest$new(expectedURL = NULL, serverResponse)

    REST <- RESTService$new("token", "host", httpRequest, FakeHttpParser$new())

    expect_that("https://myWebDavServer.com", equals(REST$getWebDavHostName()))
})

test_that("create calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    fakeHttp <- FakeHttpRequest$new(expectedURL)
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, fakeHttpParser,
                            0, "https://webDavHost/")

    REST$create("file", uuid)

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_that(fakeHttp$numberOfPUTRequests, equals(1))
})

test_that("create raises exception if server response code is not between 200 and 300", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$create("file", uuid),
                throws_error("Server code: 404"))
})

test_that("delete calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    fakeHttp <- FakeHttpRequest$new(expectedURL)
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, fakeHttpParser,
                            0, "https://webDavHost/")

    REST$delete("file", uuid)

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_that(fakeHttp$numberOfDELETERequests, equals(1))
})

test_that("delete raises exception if server response code is not between 200 and 300", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$delete("file", uuid),
                throws_error("Server code: 404"))
})

test_that("move calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    fakeHttp <- FakeHttpRequest$new(expectedURL)
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, fakeHttpParser,
                            0, "https://webDavHost/")

    REST$move("file", "newDestination/file", uuid)

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_true(fakeHttp$requestHeaderContainsDestinationField)
    expect_that(fakeHttp$numberOfMOVERequests, equals(1))
})

test_that("move raises exception if server response code is not between 200 and 300", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$move("file", "newDestination/file", uuid),
                throws_error("Server code: 404"))
})

test_that("copy calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    fakeHttp <- FakeHttpRequest$new(expectedURL)
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, fakeHttpParser,
                            0, "https://webDavHost/")

    REST$copy("file", "newDestination/file", uuid)

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_true(fakeHttp$requestHeaderContainsDestinationField)
    expect_that(fakeHttp$numberOfCOPYRequests, equals(1))
})

test_that("copy raises exception if server response code is not between 200 and 300", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$copy("file", "newDestination/file", uuid),
                throws_error("Server code: 404"))
})

test_that("getCollectionContent retrieves correct content from WebDAV server", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc"
    returnContent <- list()
    returnContent$status_code <- 200
    returnContent$content <- c("animal", "animal/dog", "ball")
    fakeHttp <- FakeHttpRequest$new(expectedURL, returnContent)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    returnResult <- REST$getCollectionContent(uuid)
    returnedContentMatchExpected <- all.equal(returnResult,
                                              c("animal", "animal/dog", "ball"))

    expect_true(returnedContentMatchExpected)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
})

test_that("getCollectionContent raises exception if server returns empty response", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- ""
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$getCollectionContent(uuid),
                throws_error("Response is empty, request may be misconfigured"))
})

test_that("getCollectionContent parses server response", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            FakeHttpRequest$new(), fakeHttpParser,
                            0, "https://webDavHost/")

    REST$getCollectionContent(uuid)

    expect_that(fakeHttpParser$parserCallCount, equals(1))
})

test_that(paste("getCollectionContent raises exception if server",
                "response code is not between 200 and 300"), {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$getCollectionContent(uuid),
                throws_error("Server code: 404"))
})

test_that("getResourceSize calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    response <- list()
    response$status_code <- 200
    response$content <- c(6, 2, 931, 12003)
    fakeHttp <- FakeHttpRequest$new(expectedURL, response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    returnResult <- REST$getResourceSize("file", uuid)
    returnedContentMatchExpected <- all.equal(returnResult, c(6, 2, 931, 12003))

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_true(returnedContentMatchExpected)
})

test_that("getResourceSize raises exception if server returns empty response", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- ""
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$getResourceSize("file", uuid),
                throws_error("Response is empty, request may be misconfigured"))
})

test_that(paste("getResourceSize raises exception if server",
                "response code is not between 200 and 300"), {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$getResourceSize("file", uuid),
                throws_error("Server code: 404"))
})

test_that("getResourceSize parses server response", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            FakeHttpRequest$new(), fakeHttpParser,
                            0, "https://webDavHost/")

    REST$getResourceSize("file", uuid)

    expect_that(fakeHttpParser$parserCallCount, equals(1))
})

test_that("read calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    serverResponse <- list()
    serverResponse$status_code <- 200
    serverResponse$content <- "file content"
    fakeHttp <- FakeHttpRequest$new(expectedURL, serverResponse)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    returnResult <- REST$read("file", uuid, "text", 1024, 512)

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_true(fakeHttp$requestHeaderContainsRangeField)
    expect_that(returnResult, equals("file content"))
})

test_that("read raises exception if server response code is not between 200 and 300", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$read("file", uuid),
                throws_error("Server code: 404"))
})

test_that("read raises exception if contentType is not valid", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    fakeHttp <- FakeHttpRequest$new()

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$read("file", uuid, "some invalid content type"),
                throws_error("Invalid contentType. Please use text or raw."))
})

test_that("read parses server response", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    fakeHttpParser <- FakeHttpParser$new()

    REST <- RESTService$new("token", "https://host/",
                            FakeHttpRequest$new(), fakeHttpParser,
                            0, "https://webDavHost/")

    REST$read("file", uuid, "text", 1024, 512)

    expect_that(fakeHttpParser$parserCallCount, equals(1))
})

test_that("write calls REST service properly", {

    fileContent <- "new file content"
    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
    fakeHttp <- FakeHttpRequest$new(expectedURL)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    REST$write("file", uuid, fileContent, "text/html")

    expect_true(fakeHttp$URLIsProperlyConfigured)
    expect_true(fakeHttp$requestBodyIsProvided)
    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)
    expect_true(fakeHttp$requestHeaderContainsContentTypeField)
})

test_that("write raises exception if server response code is not between 200 and 300", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    fileContent <- "new file content"
    response <- list()
    response$status_code <- 404
    fakeHttp <- FakeHttpRequest$new(serverResponse = response)

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, HttpParser$new(),
                            0, "https://webDavHost/")

    expect_that(REST$write("file", uuid, fileContent, "text/html"),
                throws_error("Server code: 404"))
})

test_that("getConnection calls REST service properly", {

    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
    fakeHttp <- FakeHttpRequest$new()

    REST <- RESTService$new("token", "https://host/",
                            fakeHttp, FakeHttpParser$new(),
                            0, "https://webDavHost/")

    REST$getConnection("file", uuid, "r")

    expect_that(fakeHttp$numberOfgetConnectionCalls, equals(1))
})

================================================
FILE: contrib/R-sdk/tests/testthat/test-Subcollection.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

source("fakes/FakeRESTService.R")

context("Subcollection")

test_that("getRelativePath returns path relative to the tree root", {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    animal$add(fish)

    expect_that(animal$getRelativePath(), equals("animal"))
    expect_that(fish$getRelativePath(), equals("animal/fish"))
})

test_that(paste("getFileListing by default returns sorted path of all files",
                "relative to the current subcollection"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    shark <- ArvadosFile$new("shark")
    blueFish <- ArvadosFile$new("blueFish")

    animal$add(fish)
    fish$add(shark)
    fish$add(blueFish)

    result <- animal$getFileListing()

    # expect sorted array
    expectedResult <- c("animal/fish/blueFish", "animal/fish/shark")

    resultsMatch <- length(expectedResult) == length(result) &&
                    all(expectedResult == result)

    expect_true(resultsMatch)
})

test_that(paste("getFileListing returns sorted names of all direct children",
                "if fullPath is set to FALSE"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    shark <- ArvadosFile$new("shark")
    dog <- ArvadosFile$new("dog")

    animal$add(fish)
    animal$add(dog)
    fish$add(shark)

    result <- animal$getFileListing(fullPath = FALSE)
    expectedResult <- c("dog", "fish")

    resultsMatch <- length(expectedResult) == length(result) &&
                    all(expectedResult == result)

    expect_true(resultsMatch)
})

test_that("add adds content inside collection tree", {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    dog <- ArvadosFile$new("dog")

    animal$add(fish)
    animal$add(dog)

    animalContainsFish <- animal$get("fish")$getName() == fish$getName()
    animalContainsDog <- animal$get("dog")$getName() == dog$getName()

    expect_true(animalContainsFish)
    expect_true(animalContainsDog)
})

test_that("add raises exception if content name is empty string", {

    animal <- Subcollection$new("animal")
    rootFolder <- Subcollection$new("")

    expect_that(animal$add(rootFolder),
                throws_error("Content has invalid name.", fixed = TRUE))
})

test_that(paste("add raises exception if ArvadosFile/Subcollection",
                "with same name already exists in the subcollection"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    secondFish <- Subcollection$new("fish")
    thirdFish <- ArvadosFile$new("fish")

    animal$add(fish)

    expect_that(animal$add(secondFish),
                throws_error(paste("Subcollection already contains ArvadosFile or",
                                   "Subcollection with same name."), fixed = TRUE))
    expect_that(animal$add(thirdFish),
                throws_error(paste("Subcollection already contains ArvadosFile or",
                                   "Subcollection with same name."), fixed = TRUE))
})

test_that(paste("add raises exception if passed argument is",
                "not ArvadosFile or Subcollection"), {

    animal <- Subcollection$new("animal")
    number <- 10

    expect_that(animal$add(number),
                throws_error(paste("Expected AravodsFile or Subcollection object,",
                                   "got (numeric)."), fixed = TRUE))
})

test_that(paste("add posts content to a REST service",
                "if subcollection belongs to a collection"), {

    collectionContent <- c("animal", "animal/fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")

    animal <- collection$get("animal")
    dog <- ArvadosFile$new("dog")
    animal$add(dog)

    expect_that(fakeREST$createCallCount, equals(1))
})

test_that("remove removes content from subcollection", {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    animal$add(fish)

    animal$remove("fish")

    returnValueAfterRemovalIsNull <- is.null(animal$get("fish"))
    expect_true(returnValueAfterRemovalIsNull)
})

test_that(paste("remove raises exception",
                "if content to remove doesn't exist in the subcollection"), {

    animal <- Subcollection$new("animal")

    expect_that(animal$remove("fish"),
                throws_error(paste("Subcollection doesn't contains ArvadosFile",
                                   "or Subcollection with specified name.")))
})

test_that("remove raises exception if passed argument is not character vector", {

    animal <- Subcollection$new("animal")
    number <- 10

    expect_that(animal$remove(number),
                throws_error(paste("Expected character,",
                                   "got (numeric)."), fixed = TRUE))
})

test_that(paste("remove removes content from REST service",
                "if subcollection belongs to a collection"), {

    collectionContent <- c("animal", "animal/fish", "animal/dog")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    animal <- collection$get("animal")

    animal$remove("fish")

    expect_that(fakeREST$deleteCallCount, equals(1))
})

test_that(paste("get returns ArvadosFile or Subcollection",
                "if file or folder with given name exists"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    dog <- ArvadosFile$new("dog")

    animal$add(fish)
    animal$add(dog)

    returnedFish <- animal$get("fish")
    returnedDog <- animal$get("dog")

    returnedFishIsSubcollection <- "Subcollection" %in% class(returnedFish)
    returnedDogIsArvadosFile <- "ArvadosFile" %in% class(returnedDog)

    expect_true(returnedFishIsSubcollection)
    expect_that(returnedFish$getName(), equals("fish"))

    expect_true(returnedDogIsArvadosFile)
    expect_that(returnedDog$getName(), equals("dog"))
})

test_that(paste("get returns NULL if file or folder",
                "with given name doesn't exist"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    animal$add(fish)

    returnedDogIsNull <- is.null(animal$get("dog"))
    expect_true(returnedDogIsNull)
})

test_that("getFirst returns first child in the subcollection", {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    animal$add(fish)

    expect_that(animal$getFirst()$getName(), equals("fish"))
})

test_that("getFirst returns NULL if subcollection contains no children", {

    animal <- Subcollection$new("animal")

    returnedElementIsNull <- is.null(animal$getFirst())
    expect_true(returnedElementIsNull)
})

test_that(paste("setCollection by default sets collection",
                "field of subcollection and all its children"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    animal$add(fish)

    animal$setCollection("myCollection")

    expect_that(animal$getCollection(), equals("myCollection"))
    expect_that(fish$getCollection(), equals("myCollection"))
})

test_that(paste("setCollection sets collection field of subcollection only",
                "if parameter setRecursively is set to FALSE"), {

    animal <- Subcollection$new("animal")
    fish <- Subcollection$new("fish")
    animal$add(fish)

    animal$setCollection("myCollection", setRecursively = FALSE)
    fishCollectionIsNull <- is.null(fish$getCollection())

    expect_that(animal$getCollection(), equals("myCollection"))
    expect_true(fishCollectionIsNull)
})

test_that(paste("move raises exception if subcollection",
                "doesn't belong to any collection"), {

    animal <- Subcollection$new("animal")

    expect_that(animal$move("new/location"),
                throws_error("Subcollection doesn't belong to any collection"))
})

test_that("move raises exception if new location contains content with the same name", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    expect_that(fish$move("fish"),
                throws_error("Destination already contains content with same name."))
})

test_that(paste("move raises exception if newLocationInCollection",
                "parameter is invalid"), {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    expect_that(fish$move("objects/dog"),
                throws_error("Unable to get destination subcollection."))
})

test_that("move moves subcollection inside collection tree", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    fish$move("fish")

    fishIsNullOnOldLocation <- is.null(collection$get("animal/fish"))
    fishExistsOnNewLocation <- !is.null(collection$get("fish"))

    expect_true(fishIsNullOnOldLocation)
    expect_true(fishExistsOnNewLocation)
})

test_that(paste("getSizeInBytes returns zero if subcollection",
                "is not part of a collection"), {

    animal <- Subcollection$new("animal")

    expect_that(animal$getSizeInBytes(), equals(0))
})

test_that(paste("getSizeInBytes delegates size calculation",
                "to REST service class"), {

    collectionContent <- c("animal", "animal/fish")
    returnSize <- 100
    fakeREST <- FakeRESTService$new(collectionContent, returnSize)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    animal <- collection$get("animal")

    resourceSize <- animal$getSizeInBytes()

    expect_that(resourceSize, equals(100))
})

#########################

test_that(paste("copy raises exception if subcollection",
                "doesn't belong to any collection"), {

    animal <- Subcollection$new("animal")

    expect_that(animal$copy("new/location"),
                throws_error("Subcollection doesn't belong to any collection."))
})

test_that("copy raises exception if new location contains content with the same name", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "fish")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    expect_that(fish$copy("fish"),
                throws_error("Destination already contains content with same name."))
})

test_that("copy raises exception if location parameter is invalid", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    expect_that(fish$copy("objects/dog"),
                throws_error("Unable to get destination subcollection."))
})

test_that("copy copies subcollection inside collection tree", {

    collectionContent <- c("animal", "animal/fish", "animal/dog",
                           "animal/fish/shark", "ball")
    fakeREST <- FakeRESTService$new(collectionContent)

    api <- Arvados$new("myToken", "myHostName")
    api$setRESTService(fakeREST)
    collection <- Collection$new(api, "myUUID")
    fish <- collection$get("animal/fish")

    fish$copy("fish")

    fishExistsOnOldLocation <- !is.null(collection$get("animal/fish"))
    fishExistsOnNewLocation <- !is.null(collection$get("fish"))

    expect_true(fishExistsOnOldLocation)
    expect_true(fishExistsOnNewLocation)
})

test_that("duplicate performs deep cloning of Subcollection", {

    foo <- ArvadosFile$new("foo")
    bar <- ArvadosFile$new("bar")
    sub <- Subcollection$new("qux")
    sub$add(foo)
    sub$add(bar)

    newSub1 <- sub$duplicate()
    newSub2 <- sub$duplicate("quux")

    expect_that(newSub1$getFileListing(), equals(sub$getFileListing()))
    expect_that(sort(newSub2$getFileListing()), equals(c("quux/bar", "quux/foo")))
})

================================================
FILE: contrib/R-sdk/tests/testthat/test-util.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

context("Utility function")

test_that("listAll always returns all resource items from server", {

    serverResponseLimit <- 3
    itemsAvailable <- 8
    items <- list("collection1", "collection2", "collection3", "collection4",
                  "collection5", "collection6", "collection7", "collection8")

    # Simulates a paged server endpoint: returns at most serverResponseLimit
    # items per call, starting at the requested offset.
    testFunction <- function(offset, ...)
    {
        response <- list()
        response$items_available <- itemsAvailable

        maxIndex <- offset + serverResponseLimit
        lastElementIndex <- if(maxIndex < itemsAvailable) maxIndex else itemsAvailable

        response$items <- items[(offset + 1):lastElementIndex]
        response
    }

    result <- listAll(testFunction)

    expect_that(length(result), equals(8))
})

test_that("trimFromStart trims string correctly if string starts with trimCharacters", {

    sample <- "./something/random"
    trimCharacters <- "./something/"

    result <- trimFromStart(sample, trimCharacters)

    expect_that(result, equals("random"))
})

test_that("trimFromStart returns original string if string doesn't start with trimCharacters", {

    sample <- "./something/random"
    trimCharacters <- "./nothing/"

    result <- trimFromStart(sample, trimCharacters)

    expect_that(result, equals("./something/random"))
})

test_that("trimFromEnd trims string correctly if string ends with trimCharacters", {

    sample <- "./something/random"
    trimCharacters <- "/random"

    result <- trimFromEnd(sample, trimCharacters)

    expect_that(result, equals("./something"))
})

test_that("trimFromEnd returns original string if string doesn't end with trimCharacters", {

    sample <- "./something/random"
    trimCharacters <- "specific"

    result <- trimFromEnd(sample, trimCharacters)

    expect_that(result, equals("./something/random"))
})

test_that("RListToPythonList converts nested R list to char representation of Python list", {

    sample <- list("insert", list("random", list("text")), list("here"))

    result <- RListToPythonList(sample)
    resultWithSeparator <- RListToPythonList(sample, separator = ",+")

    expect_that(result, equals("[\"insert\", [\"random\", \"text\"], \"here\"]"))
    expect_that(resultWithSeparator,
                equals("[\"insert\",+[\"random\",+\"text\"],+\"here\"]"))
})

test_that("appendToStartIfNotExist appends characters to beginning of a string", {

    sample <- "New Year"
    charactersToAppend <- "Happy "

    result <- appendToStartIfNotExist(sample, charactersToAppend)

    expect_that(result, equals("Happy New Year"))
})

test_that(paste("appendToStartIfNotExist returns original string if string",
                "already starts with specified characters"), {

    sample <- "Happy New Year"
    charactersToAppend <- "Happy"

    result <- appendToStartIfNotExist(sample, charactersToAppend)

    expect_that(result, equals("Happy New Year"))
})

test_that(paste("splitToPathAndName splits relative path to file/folder",
                "name and rest of the path"), {

    relativePath <- "path/to/my/file.exe"

    result <- splitToPathAndName(relativePath)

    expect_that(result$name, equals("file.exe"))
    expect_that(result$path, equals("path/to/my"))
})

================================================
FILE: contrib/R-sdk/tests/testthat.R
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

library(testthat)
library(ArvadosR)

test_check("ArvadosR")

================================================
FILE: contrib/README.md
================================================
## Arvados Client Contributions

This directory contains client libraries and tools that can be used with an
Arvados cluster.

Core components of Arvados are regularly tested together to ensure they work
in concert. These tools receive less frequent testing before release. The
Arvados team is happy to receive bug reports and contributions to help improve
them. However, bugs in these components will never be considered
release-critical.

* `arvados-bootstrap`: Scripts to initialize an Arvados cluster with data
* `arvbash`: Arvados utility functions for the bash shell
* `java-sdk-v2`: Java client SDK for Arvados
* `R-sdk`: R client SDK for Arvados

================================================
FILE: contrib/arvados-bootstrap/LICENSE-2.0.txt
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship.
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

================================================
FILE: contrib/arvados-bootstrap/README.md
================================================
# Arvados Bootstrap Tools

## Introduction

This package provides scripts to initialize an Arvados cluster with data, built on top of the Python SDK. From inside this directory, you can install it by running:

    pipx install .

or, if you're managing your own virtualenvs and have one activated:

    pip install .

## arv-export

`arv-export` saves records from a running Arvados cluster to the directory where you run it. It finds Arvados credentials the same way arv-copy does, by reading `~/.config/arvados/ZZZZZ.conf`, where `ZZZZZ` is a five-character alphanumeric cluster ID.

`cd` to a directory where you want to save data and run:

    arv-export [other options] OBJECT_UUID

This will create subdirectories inside the current directory with data from Arvados. You can load this data with `arv-import` as described in the next section.

## arv-import

`arv-import` creates records on an Arvados cluster from records previously saved by `arv-export`. It finds Arvados credentials the same way arv-copy does, by reading `~/.config/arvados/ZZZZZ.conf`, where `ZZZZZ` is a five-character alphanumeric cluster ID. Both tools can also be driven from Python, as sketched below.
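A minimal sketch of a scripted export/import round trip, using the `export_main` and `import_main` entry points defined in `export_import.py`; the directory path and UUIDs here are placeholders, and credentials for both clusters are assumed to already exist under `~/.config/arvados/`:

```python
# Sketch: run an export/import round trip programmatically.
# Both functions read and write records relative to the current
# working directory, just like the command-line tools.
import os

from arv_bootstrap import export_import

os.makedirs("/tmp/arv-export-data", exist_ok=True)
os.chdir("/tmp/arv-export-data")

# Save the collection's records into subdirectories of the cwd.
export_rc = export_import.export_main(["zzzzz-4zz18-aaaaaaaaaaaaaaa"])

# Later, from the same directory, load the saved records into a
# project on another cluster.
import_rc = export_import.import_main([
    "--project-uuid", "qqqqq-j7d0g-bbbbbbbbbbbbbbb",
    "zzzzz-4zz18-aaaaaaaaaaaaaaa",
])
```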
`cd` to a directory where you previously saved data with `arv-export` and run:

    arv-import [--project-uuid=UUID] [--no-block-copy] [other options] OBJECT_UUID

`OBJECT_UUID` should match a UUID you exported with `arv-export`.

### Using --no-block-copy

If you have administrator access to the destination cluster, you have the option to write Keep blocks directly to the underlying storage and skip the normal upload by using the `--no-block-copy` option. This is normally faster than uploading the blocks via HTTP, but you are entirely responsible for the separate data transfer. For example, if you use a standard filesystem-backed Keep volume, you might run:

    rsync -r arv-export-data/keep/ root@keep.xurid.example:/var/lib/arvados/keep-data/

The exact process will vary by Keep volume and system configuration. Documenting all the possibilities is outside the scope of this document.

## arv-seed

### Synopsis

arv-seed is a script to bulk-create Arvados objects from JSON files.

    arv-seed [options] DIRECTORY [directory ...]

### Configuration

By default, when running as root, this tool will read the cluster configuration file `$ARVADOS_CONFIG` (default `/etc/arvados/config.yml`), search for exactly one cluster configuration with a `Controller` endpoint and `SystemRootToken` configured, and use that. When running as a non-root user, this tool will search for user credentials the same way as other Arvados command-line tools. You can control how to load credentials using the `--client-from` option.

### Input

Each directory will be scanned for files named `NAME.TYPE.json`. `NAME` is any name you like. `TYPE` is the name of an Arvados API resource type, like `group`, `collection`, or `container_request`. `TYPE` may be spelled with or without punctuation, in CamelCase or lowercase, and as singular or plural.

Input can be further controlled with "base" JSON that sets attributes for all objects, as well as additional parameters for the Arvados create method. Refer to the `--help` output for `--object-base` and `--parameters` for details.

### Output

When finished, the tool writes JSON output like this to stdout:

    {
      "created": {"/path1": {… Arvados object…}, …},
      "failed": {"/path2": "error message", …}
    }

For both `created` and `failed`, each key is the absolute path of a JSON file that the tool read. For `created`, each value is the object that Arvados returned after creation. For `failed`, each value is an error message that describes why no object could be created.

### Logging

The tool always logs to syslog. It also logs to stderr if `$TERM` is set. Control what gets logged with the `--loglevel` option.
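The whole flow can also be exercised from Python via the module's `main` entry point; a minimal sketch, assuming the package is installed, valid Arvados credentials are available, and the owner UUID below is a placeholder:

```python
# Sketch: build a small seed directory, then run arv-seed on it.
import json
import pathlib
import sys

from arv_bootstrap import seed

seed_dir = pathlib.Path("seed-input")
seed_dir.mkdir(exist_ok=True)

# One object per file, named NAME.TYPE.json.
(seed_dir / "analyses.group.json").write_text(
    json.dumps({"name": "Analyses", "group_class": "project"}))
(seed_dir / "inputs.collection.json").write_text(
    json.dumps({"name": "Reference inputs"}))

# --base sets shared attributes, like arvados_seed_object.json would.
rc = seed.main([
    "--base", '{"owner_uuid": "zzzzz-j7d0g-12345abcde67890"}',
    str(seed_dir),
])
sys.exit(rc)  # exit codes are documented in the next section
```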
### Exit codes

arv-seed uses the following exit codes:

* 0: Created all objects successfully (at least one)
* 1: Early internal error
* 2: Incorrect command line arguments
* 11: Created no objects successfully (at least one attempted)
* 12: Mixed results: some objects were created, others failed
* 66: Did not find any JSON input files (`EX_NOINPUT`)
* 70: Internal error (`EX_SOFTWARE`)
* 78: Could not initialize from configuration (`EX_CONFIG`)

### Example

Read JSON files from `~/arv-seed` and create all the objects they describe in the given project:

    arv-seed --base='{"owner_uuid":"zzzzz-j7d0g-12345abcde67890"}' ~/arv-seed

### systemd service example

    [Unit]
    After=arvados-railsapi.service arvados-controller.service network-online.target

    [Service]
    Type=oneshot
    StandardOutput=file:%t/%N.json
    ExecStart=/opt/arvados-bootstrap/bin/arv-seed /usr/local/share/arv-seed

## arv-federation-migrate

### Introduction

Before clusters are joined in a federation, a user of multiple Arvados clusters has to create a separate account on each cluster. Because each account represents a separate "identity", permissions granted to a user on one cluster do not transfer to another cluster, even if the accounts are associated with the same user.

To address this, Arvados supports "federated user accounts". A federated user account is associated with a specific "home" cluster, and can be used to access other clusters in the federation that trust the home cluster. When a user arrives at another cluster's Workbench, they select and log in to their home cluster, and then are returned to the starting cluster logged in with the federated user account.

When setting up federation capabilities on existing clusters, some users might already have accounts on multiple clusters. In order to have a single federated identity, users should be assigned a "home" cluster, and accounts associated with that user on the other (non-home) clusters should be migrated to the new federated user account. The `arv-federation-migrate` tool assists with this.

This tool is designed to help an administrator who has access to all clusters in a federation to migrate users who have multiple accounts to a single federated account. As part of migrating a user, any data or permissions associated with old user accounts will be reassigned to the federated account.

### Step 1: Get a user report

#### With a LoginCluster

When using a centralized user database, as specified by `LoginCluster` in the config file, set the `ARVADOS_API_HOST` and `ARVADOS_API_TOKEN` environment variables to an admin user on the `LoginCluster` cluster. The tool will automatically determine the other clusters that are listed in the federation.

Next, run `arv-federation-migrate` with the `--report` flag:

    $ arv-federation-migrate --report users.csv
    Getting user list from x6b1s
    Getting user list from x3982
    Wrote users.csv

#### Without a LoginCluster

The first step is to create `tokens.csv`, listing each cluster and an API token to access it. API tokens must be trusted tokens with administrator access. This is a simple comma-separated values file and can be created in a text editor. It can also be generated with a short script, as sketched below.
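A minimal sketch of generating `tokens.csv` programmatically, reusing the example hosts and tokens shown below; `csv.writer` produces exactly the two-column `host,token` format that `arv-federation-migrate` reads:

```python
# Sketch: write tokens.csv in the host,token format read by
# arv-federation-migrate (one cluster per row).
import csv

clusters = [
    ("x3982.arvadosapi.com", "v2/x3982-gj3su-sb6meh2jf145s7x/98d40d70d8862e33d7398213435d1a71a96cf870"),
    ("x6b1s.arvadosapi.com", "v2/x6b1s-gj3su-dxc87btfv5kg91z/5575d980d3ff6231bb0c692281c42a7541c59417"),
]
with open("tokens.csv", "w", newline="") as tokens_file:
    csv.writer(tokens_file).writerows(clusters)
```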
Example: x3982.arvadosapi.com,v2/x3982-gj3su-sb6meh2jf145s7x/98d40d70d8862e33d7398213435d1a71a96cf870 x6b1s.arvadosapi.com,v2/x6b1s-gj3su-dxc87btfv5kg91z/5575d980d3ff6231bb0c692281c42a7541c59417 Next, run `arv-federation-migrate` with the `--tokens` and `--report` flags: $ arv-federation-migrate --tokens tokens.csv --report users.csv Reading tokens.csv Getting user list from x6b1s Getting user list from x3982 Wrote users.csv ### Step 2: Update the user report This will produce a report of users across all clusters listed in `tokens.csv`, sorted by email address. This file can be loaded into a text editor or spreadsheet program for ease of viewing and editing. email,username,user uuid,primary cluster/user person_a@example.com,person_a,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s person_b@example.com,person_b,x3982-tpzed-1vl3k7knf7qihbe, person_b@example.com,person_b,x6b1s-tpzed-w4nhkx2rmrhlr54, The fourth column describes that user's home cluster. If a user only has one account (identified by email address), the column will be filled in and there is nothing to do. If the column is blank, that means there is more than one Arvados account associated with the user. Edit the file and provide the desired home cluster for each user as necessary (note: if there is a LoginCluster, all users will be migrated to the LoginCluster). It is also possible to change the desired username for a user. In this example, `person_b@example.com` is assigned the home cluster `x3982`. email,username,user uuid,primary cluster/user person_a@example.com,person_a,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s person_b@example.com,person_b,x3982-tpzed-1vl3k7knf7qihbe,x3982 person_b@example.com,person_b,x6b1s-tpzed-w4nhkx2rmrhlr54,x3982 ### Step 3: Migrate users To avoid disruption, advise users to log out and avoid running workflows while performing the migration. After updating `users.csv`, you can preview the migration using the `--dry-run` option (add `--tokens tokens.csv` if not using LoginCluster). This will print out what actions the migration will take (as if it were happening) and report possible problems, but not make any actual changes on any cluster: $ arv-federation-migrate --dry-run users.csv (person_b@example.com) Migrating x6b1s-tpzed-w4nhkx2rmrhlr54 to x3982-tpzed-1vl3k7knf7qihbe Execute the migration using the `--migrate` option (add `--tokens tokens.csv` if not using LoginCluster): $ arv-federation-migrate --migrate users.csv (person_b@example.com) Migrating x6b1s-tpzed-w4nhkx2rmrhlr54 to x3982-tpzed-1vl3k7knf7qihbe After migration, users should select their home cluster when logging into Arvados Workbench. If a user attempts to log into a migrated user account, they will be redirected to log in with their home cluster. ================================================ FILE: contrib/arvados-bootstrap/pyproject.toml ================================================ # Copyright (C) The Arvados Authors. All rights reserved. 
#
# SPDX-License-Identifier: Apache-2.0

[build-system]
requires = ["setuptools ~= 80.9"]
build-backend = "setuptools.build_meta"

[project]
name = "arvados-bootstrap"
version = "3.2.1"
dependencies = [
    "arvados-python-client == 3.2.1",
]
description = "Tools to bootstrap an Arvados cluster"
authors = [
    {name = "Arvados", email = "info@arvados.org"},
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Science/Research",
    "Operating System :: POSIX",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
]
license = "Apache-2.0"
license-files = [
    "LICENSE-2.0.txt",
]
readme = "README.md"
requires-python = "~= 3.10"

[project.scripts]
arv-export = "arv_bootstrap.export_import:export_main"
arv-federation-migrate = "arv_bootstrap.federation_migrate:main"
arv-import = "arv_bootstrap.export_import:import_main"
arv-seed = "arv_bootstrap.seed:main"

[project.urls]
Homepage = "https://arvados.org"
Documentation = "https://doc.arvados.org"
Repository = "https://github.com/arvados/arvados"
Issues = "https://github.com/arvados/arvados/issues"
Changelog = "https://arvados.org/releases/"

[tool.setuptools.packages.find]
where = ["src"]

================================================
FILE: contrib/arvados-bootstrap/src/arv_bootstrap/__init__.py
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

================================================
FILE: contrib/arvados-bootstrap/src/arv_bootstrap/export_import.py
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

import argparse
import importlib.metadata
import logging
import os
import re
import sys

import arvados.commands.arv_copy as arv_copy
import arvados.commands._util as arv_cmd
import arvados.util as arv_util

from . import stubapi

logger = logging.getLogger('arvados.arv-export-import')

class ArgumentParser(argparse.ArgumentParser):
    @classmethod
    def _base_options(cls, cmdname=sys.argv[0]):
        opts = argparse.ArgumentParser(add_help=False)
        opts.add_argument(
            '--version', action='version',
            version=f'{cmdname} {importlib.metadata.version("arvados-bootstrap")}',
            help='Print version and exit.',
        )
        opts.add_argument(
            '--verbose', '-v', dest='verbose', action='store_true',
            help='Verbose output.',
        )
        return opts

    @classmethod
    def _common_options(cls, verb):
        opts = cls._base_options(f'arv-{verb}')
        opts.add_argument(
            '--force', '-f', action='store_true',
            help=f"""{verb.capitalize()} even if the object has already been {verb}ed.
""")
        opts.add_argument(
            '--recursive', action='store_true',
            help=f"""Recursively {verb} any dependencies for this object and subprojects. (default)
""")
        opts.add_argument(
            '--no-recursive', dest='recursive', action='store_false',
            help=f"""Do not {verb} any dependencies or subprojects.
""")
        opts.add_argument(
            '--block-copy', dest='keep_block_copy', action='store_true',
            help=f"""Copy Keep blocks when {verb}ing collections. (default)
""")
        opts.add_argument(
            '--no-block-copy', dest='keep_block_copy', action='store_false',
            help=f"""Do not copy Keep blocks when {verb}ing collections. Must have administrator privileges to import collections.
""")
        opts.add_argument(
            'object_uuid',
            help=f"""The UUID of the collection or project to {verb}.
""")
        return opts

    @classmethod
    def _import_options(cls):
        opts = cls._common_options('import')
        opts.add_argument(
            '--project-uuid',
            help="""The UUID of the project at the destination to which the collection or project should be imported.
""")
        opts.add_argument(
            '--storage-classes', type=arv_cmd.UniqueSplit(),
            help="""Comma-separated list of storage classes to be used when saving data to the destination Arvados instance.
""")
        opts.add_argument(
            '--replication',
            type=arv_cmd.RangedValue(int, range(1, sys.maxsize)),
            metavar='N',
            help="""Number of replicas per storage class for the copied collections at the destination. If not provided (or if provided with invalid value), use the destination's default replication-level setting (if found), or the fallback value 2.
""")
        return opts

    def _set_common_defaults(self):
        self.set_defaults(
            # Common defaults should use the "safer" value.
            export_all_fields=False,
            force=False,
            keep_block_copy=True,
            prefer_cached_downloads=False,
            project_uuid=None,
            progress=None,
            recursive=True,
            varying_url_params="",
        )

    @classmethod
    def export_parser(cls):
        parser = cls(
            description=f"Export Arvados objects to a local filesystem",
            parents=[cls._common_options('export'), arv_cmd.retry_opt],
        )
        parser._set_common_defaults()
        parser.set_defaults(
            export_all_fields=True,
            progress=True,
            replication=1,
            storage_classes=[],
        )
        return parser

    @classmethod
    def import_parser(cls):
        parser = cls(
            description=f"Import Arvados objects from a local filesystem",
            parents=[cls._import_options(), arv_cmd.retry_opt],
        )
        parser._set_common_defaults()
        return parser

def setup_logging(name, args):
    global logger
    arvlogger = logging.getLogger('arvados')
    logger = arvlogger.getChild(name)
    if args.verbose:
        arvlogger.setLevel(logging.DEBUG)
    else:
        arvlogger.setLevel(logging.INFO)
        arvlogger.getChild('keep').setLevel(logging.WARNING)

def transfer(src_arv, dst_arv, args, verb):
    if re.match(arv_util.collection_uuid_pattern, args.object_uuid):
        result = arv_copy.copy_collection(args.object_uuid, src_arv, dst_arv, args)
    elif re.match(arv_util.group_uuid_pattern, args.object_uuid):
        result = arv_copy.copy_project(args.object_uuid, src_arv, dst_arv, args.project_uuid, args)
    else:
        logger.error("Unsupported object type for %s: %s", verb, args.object_uuid)
        return os.EX_DATAERR
    if error := result.get('partial_error'):
        logger.error(
            "Error copying %s: %s",
            args.object_uuid,
            result if logger.isEnabledFor(logging.DEBUG) else error,
        )
        return os.EX_IOERR
    return os.EX_OK

def export_main(arglist=None):
    args = ArgumentParser.export_parser().parse_args(arglist)
    setup_logging('arv-export', args)
    src_arv = arv_copy.api_for_instance(args.object_uuid[:5], args.retries)
    dst_arv = stubapi.StubArvadosAPI.for_cwd()
    return transfer(src_arv, dst_arv, args, 'export')

def import_main(arglist=None):
    args = ArgumentParser.import_parser().parse_args(arglist)
    setup_logging('arv-import', args)
    src_arv = stubapi.StubArvadosAPI.for_cwd()
    try:
        dst_id = args.project_uuid[:5]
    except TypeError:
        dst_id = ''
    dst_arv = arv_copy.api_for_instance(dst_id, args.retries)
    if args.project_uuid is None:
        args.project_uuid = dst_arv.users().current().execute()['uuid']
    if args.replication is None:
        try:
            args.replication = int(dst_arv.config()["Collections"]["DefaultReplication"])
        except (KeyError, TypeError, ValueError):
            args.replication = 2
    return transfer(src_arv, dst_arv, args, 'import')

================================================
FILE: contrib/arvados-bootstrap/src/arv_bootstrap/federation_migrate.py
================================================
#!/usr/bin/env
python3 # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 # Migration tool for merging user accounts belonging to the same user # but on separate clusters to use a single user account managed by a # specific cluster. import argparse import csv import hashlib import hmac import importlib.metadata import os import re import sys import urllib.parse import arvados import arvados.commands._util as arv_cmd import arvados.util import arvados.errors EMAIL=0 USERNAME=1 UUID=2 HOMECLUSTER=3 def connect_clusters(args): clusters = {} errors = [] loginCluster = None if args.tokens: print("Reading %s" % args.tokens) with open(args.tokens, "rt") as f: for r in csv.reader(f): if len(r) != 2: continue host = r[0] token = r[1] print("Contacting %s" % (host)) arv = arvados.api(host=host, token=token, cache=False, num_retries=args.retries) clusters[arv._rootDesc["uuidPrefix"]] = arv else: arv = arvados.api(cache=False, num_retries=args.retries) rh = arv._rootDesc["remoteHosts"] tok = arv.api_client_authorizations().current().execute() token = "v2/%s/%s" % (tok["uuid"], tok["api_token"]) for k,v in rh.items(): arv = arvados.api(host=v, token=token, cache=False, insecure=os.environ.get("ARVADOS_API_HOST_INSECURE")) clusters[k] = arv for _, arv in clusters.items(): config = arv.configs().get().execute() if config["Login"]["LoginCluster"] != "" and loginCluster is None: loginCluster = config["Login"]["LoginCluster"] print("Checking that the federation is well connected") for arv in clusters.values(): config = arv.configs().get().execute() if loginCluster and config["Login"]["LoginCluster"] != loginCluster and config["ClusterID"] != loginCluster: errors.append("Inconsistent login cluster configuration, expected '%s' on %s but was '%s'" % (loginCluster, config["ClusterID"], config["Login"]["LoginCluster"])) continue if arv._rootDesc["revision"] < "20200331": errors.append("Arvados API server revision on cluster '%s' is too old, must be updated to at least Arvados 2.0.2 before running migration." % config["ClusterID"]) continue try: cur = arv.users().current().execute() except arvados.errors.ApiError as e: errors.append("checking token for %s %s" % (arv._rootDesc["rootUrl"], e)) continue if not cur["is_admin"]: errors.append("User %s is not admin on %s" % (cur["uuid"], arv._rootDesc["uuidPrefix"])) continue for r in clusters: if r != arv._rootDesc["uuidPrefix"] and r not in arv._rootDesc["remoteHosts"]: errors.append("%s is missing from remoteHosts of %s" % (r, arv._rootDesc["uuidPrefix"])) for r in arv._rootDesc["remoteHosts"]: if r != "*" and r not in clusters: print("WARNING: %s is federated with %s but %s is missing from the tokens file or the token is invalid" % (arv._rootDesc["uuidPrefix"], r, r)) return clusters, errors, loginCluster def fetch_users(clusters, loginCluster): rows = [] by_email = {} by_username = {} users = [ user for prefix, arv in clusters.items() for user in arvados.util.keyset_list_all(arv.users().list, bypass_federation=True) if user['uuid'].startswith(prefix) ] # Users list is sorted by email # Go through users and collect users with same email # when we see a different email (or get to the end) # call add_accum_rows() to generate the report rows with # the "home cluster" set, and also fill in the by_email table. 
users.sort(key=lambda u: (u["email"], u["username"] or "", u["uuid"])) accum = [] lastemail = None def add_accum_rows(): homeuuid = None for a in accum: uuids = set(a["uuid"] for a in accum) homeuuid = ((len(uuids) == 1) and uuids.pop()) or "" for a in accum: r = (a["email"], a["username"], a["uuid"], loginCluster or homeuuid[0:5]) by_email.setdefault(a["email"], {}) by_email[a["email"]][a["uuid"]] = r homeuuid_and_username = "%s::%s" % (r[HOMECLUSTER], a["username"]) if homeuuid_and_username not in by_username: by_username[homeuuid_and_username] = a["email"] elif by_username[homeuuid_and_username] != a["email"]: print("ERROR: the username '%s' is listed for both '%s' and '%s' on cluster '%s'" % (r[USERNAME], r[EMAIL], by_username[homeuuid_and_username], r[HOMECLUSTER])) exit(1) rows.append(r) for u in users: if u["uuid"].endswith("-anonymouspublic") or u["uuid"].endswith("-000000000000000"): continue if lastemail == None: lastemail = u["email"] if u["email"] == lastemail: accum.append(u) else: add_accum_rows() lastemail = u["email"] accum = [u] add_accum_rows() return rows, by_email, by_username def read_migrations(args, by_email, by_username): rows = [] with open(args.migrate or args.dry_run, "rt") as f: for r in csv.reader(f): if r[EMAIL] == "email": continue by_email.setdefault(r[EMAIL], {}) by_email[r[EMAIL]][r[UUID]] = r homeuuid_and_username = "%s::%s" % (r[HOMECLUSTER], r[USERNAME]) if homeuuid_and_username not in by_username: by_username[homeuuid_and_username] = r[EMAIL] elif by_username[homeuuid_and_username] != r[EMAIL]: print("ERROR: the username '%s' is listed for both '%s' and '%s' on cluster '%s'" % (r[USERNAME], r[EMAIL], by_username[homeuuid_and_username], r[HOMECLUSTER])) exit(1) rows.append(r) return rows def update_username(args, email, user_uuid, username, migratecluster, migratearv): print("(%s) Updating username of %s to '%s' on %s" % (email, user_uuid, username, migratecluster)) if args.dry_run: return try: conflicts = migratearv.users().list(filters=[["username", "=", username]], bypass_federation=True).execute() if conflicts["items"]: # There's already a user with the username, move the old user out of the way migratearv.users().update(uuid=conflicts["items"][0]["uuid"], bypass_federation=True, body={"user": {"username": username+"migrate"}}).execute() migratearv.users().update(uuid=user_uuid, bypass_federation=True, body={"user": {"username": username}}).execute() except arvados.errors.ApiError as e: print("(%s) Error updating username of %s to '%s' on %s: %s" % (email, user_uuid, username, migratecluster, e)) def choose_new_user(args, by_email, email, userhome, username, old_user_uuid, clusters): candidates = [] conflict = False for b in by_email[email].values(): if b[2].startswith(userhome): candidates.append(b) if b[1] != username and b[3] == userhome: print("(%s) Cannot migrate %s, conflicting usernames %s and %s" % (email, old_user_uuid, b[1], username)) conflict = True break if conflict: return None if len(candidates) == 0: if len(userhome) == 5 and userhome not in clusters: print("(%s) Cannot migrate %s, unknown home cluster %s (typo?)" % (email, old_user_uuid, userhome)) return None print("(%s) No user listed with same email to migrate %s to %s, will create new user with username '%s'" % (email, old_user_uuid, userhome, username)) if not args.dry_run: oldhomecluster = old_user_uuid[0:5] oldhomearv = clusters[oldhomecluster] newhomecluster = userhome[0:5] homearv = clusters[userhome] user = None try: olduser = 
oldhomearv.users().get(uuid=old_user_uuid).execute() conflicts = homearv.users().list(filters=[["username", "=", username]], bypass_federation=True).execute() if conflicts["items"]: homearv.users().update( uuid=conflicts["items"][0]["uuid"], bypass_federation=True, body={"user": {"username": username+"migrate"}}).execute() user = homearv.users().create( body={"user": { "email": email, "first_name": olduser["first_name"], "last_name": olduser["last_name"], "username": username, "is_active": olduser["is_active"]}}).execute() except arvados.errors.ApiError as e: print("(%s) Could not create user: %s" % (email, str(e))) return None tup = (email, username, user["uuid"], userhome) else: # dry run tup = (email, username, "%s-tpzed-xfakexfakexfake" % (userhome[0:5]), userhome) by_email[email][tup[2]] = tup candidates.append(tup) if len(candidates) > 1: print("(%s) Multiple users listed to migrate %s to %s, use full uuid" % (email, old_user_uuid, userhome)) return None return candidates[0][2] def activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_user_uuid): # create a token for the new user and salt it for the # migration cluster, then use it to access the migration # cluster as the new user once before merging to ensure # the new user is known on that cluster. migratecluster = migratearv._rootDesc["uuidPrefix"] try: if not args.dry_run: newtok = homearv.api_client_authorizations().create(body={ "api_client_authorization": {'owner_uuid': new_user_uuid}}).execute() else: newtok = {"uuid": "dry-run", "api_token": "12345"} except arvados.errors.ApiError as e: print("(%s) Could not create API token for %s: %s" % (email, new_user_uuid, e)) return None try: findolduser = migratearv.users().list(filters=[["uuid", "=", old_user_uuid]], bypass_federation=True).execute() if len(findolduser["items"]) == 0: return False if len(findolduser["items"]) == 1: olduser = findolduser["items"][0] else: print("(%s) Unexpected result" % (email)) return None except arvados.errors.ApiError as e: print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e)) return None salted = 'v2/' + newtok["uuid"] + '/' + hmac.new(newtok["api_token"].encode(), msg=migratecluster.encode(), digestmod=hashlib.sha1).hexdigest() try: ru = urllib.parse.urlparse(migratearv._rootDesc["rootUrl"]) if not args.dry_run: newuser = arvados.api(host=ru.netloc, token=salted, insecure=os.environ.get("ARVADOS_API_HOST_INSECURE")).users().current().execute() else: newuser = {"is_active": True, "username": email.split('@')[0], "is_admin": False} except arvados.errors.ApiError as e: print("(%s) Error getting user info for %s from %s: %s" % (email, new_user_uuid, migratecluster, e)) return None if not newuser["is_active"] and olduser["is_active"]: print("(%s) Activating user %s on %s" % (email, new_user_uuid, migratecluster)) try: if not args.dry_run: migratearv.users().update(uuid=new_user_uuid, bypass_federation=True, body={"is_active": True}).execute() except arvados.errors.ApiError as e: print("(%s) Could not activate user %s on %s: %s" % (email, new_user_uuid, migratecluster, e)) return None if olduser["is_admin"] and not newuser["is_admin"]: print("(%s) Not migrating %s because user is admin but target user %s is not admin on %s. Please ensure the user admin status is the same on both clusters. Note that a federated admin account has admin privileges on the entire federation." 
% (email, old_user_uuid, new_user_uuid, migratecluster)) return None return newuser def migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid): if args.dry_run: return try: new_owner_uuid = new_user_uuid if args.data_into_subproject: grp = migratearv.groups().create(body={ "owner_uuid": new_user_uuid, "name": "Migrated from %s (%s)" % (email, old_user_uuid), "group_class": "project" }, ensure_unique_name=True).execute() new_owner_uuid = grp["uuid"] migratearv.users().merge(old_user_uuid=old_user_uuid, new_user_uuid=new_user_uuid, new_owner_uuid=new_owner_uuid, redirect_to_new_user=True).execute() except arvados.errors.ApiError as e: name_collision = re.search(r'Key \(owner_uuid, name\)=\((.*?), (.*?)\) already exists\.\n.*UPDATE "(.*?)"', e._get_reason()) if name_collision: target_owner, rsc_name, rsc_type = name_collision.groups() print("(%s) Cannot migrate to %s because both origin and target users have a %s named '%s'. Please rename the conflicting items or use --data-into-subproject to migrate all users' data into a special subproject." % (email, target_owner, rsc_type[:-1], rsc_name)) else: print("(%s) Skipping user migration because of error: %s" % (email, e)) def main(): parser = argparse.ArgumentParser( description='Migrate users to federated identity, see https://doc.arvados.org/admin/merge-remote-account.html', parents=[arv_cmd.retry_opt], ) parser.add_argument( '--version', action='version', version=f"{sys.argv[0]} {importlib.metadata.version('arvados-bootstrap')}", help='Print version and exit.') parser.add_argument('--tokens', type=str, metavar='FILE', required=False, help="Read tokens from FILE. Not needed when using LoginCluster.") parser.add_argument('--data-into-subproject', action="store_true", help="Migrate user's data into a separate subproject. 
This can be used to avoid name collisions from within an account.") group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--report', type=str, metavar='FILE', help="Generate report .csv file listing users by email address and their associated Arvados accounts.") group.add_argument('--migrate', type=str, metavar='FILE', help="Consume report .csv and migrate users to designated Arvados accounts.") group.add_argument('--dry-run', type=str, metavar='FILE', help="Consume report .csv and report how user would be migrated to designated Arvados accounts.") group.add_argument('--check', action="store_true", help="Check that tokens are usable and the federation is well connected.") args = parser.parse_args() clusters, errors, loginCluster = connect_clusters(args) if errors: for e in errors: print("ERROR: "+str(e)) exit(1) if args.check: print("Tokens file passed checks") exit(0) rows, by_email, by_username = fetch_users(clusters, loginCluster) if args.report: out = csv.writer(open(args.report, "wt")) out.writerow(("email", "username", "user uuid", "home cluster")) for r in rows: out.writerow(r) print("Wrote %s" % args.report) return if args.migrate or args.dry_run: if args.dry_run: print("Performing dry run") rows = read_migrations(args, by_email, by_username) for r in rows: email = r[EMAIL] username = r[USERNAME] old_user_uuid = r[UUID] userhome = r[HOMECLUSTER] if userhome == "": print("(%s) Skipping %s, no home cluster specified" % (email, old_user_uuid)) if old_user_uuid.startswith(userhome): migratecluster = old_user_uuid[0:5] migratearv = clusters[migratecluster] if migratearv.users().get(uuid=old_user_uuid).execute()["username"] != username: update_username(args, email, old_user_uuid, username, migratecluster, migratearv) continue new_user_uuid = choose_new_user(args, by_email, email, userhome, username, old_user_uuid, clusters) if new_user_uuid is None: continue remote_users = {} got_error = False for migratecluster in clusters: # cluster where the migration is happening migratearv = clusters[migratecluster] # the user's new home cluster newhomecluster = userhome[0:5] homearv = clusters[newhomecluster] newuser = activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_user_uuid) if newuser is None: got_error = True remote_users[migratecluster] = newuser if not got_error: for migratecluster in clusters: migratearv = clusters[migratecluster] newuser = remote_users[migratecluster] if newuser is False: continue print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster)) migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid) if newuser['username'] != username: update_username(args, email, new_user_uuid, username, migratecluster, migratearv) if __name__ == "__main__": main() ================================================ FILE: contrib/arvados-bootstrap/src/arv_bootstrap/seed.py ================================================ # Copyright (C) The Arvados Authors. All rights reserved. 
#
# SPDX-License-Identifier: Apache-2.0

import argparse
import contextlib
import dataclasses
import functools
import json
import logging
import logging.handlers
import pathlib
import os
import re
import sys
import traceback
import urllib.parse

from collections import abc

import arvados
import arvados.commands._util as cmd_util
import yaml

logger = logging.getLogger('arvados.commands.seed')
_root_logger = logging.getLogger()

def is_mapping(arg):
    return isinstance(arg, abc.Mapping)

@dataclasses.dataclass
class ExceptHook:
    logger: logging.Logger
    exit_code: int = os.EX_SOFTWARE

    def __call__(self, exc_type, exc_value, exc_tb):
        self.logger.critical(
            "internal %s: %s",
            exc_type.__name__, exc_value,
            exc_info=self.logger.isEnabledFor(logging.DEBUG),
        )
        raise SystemExit(self.exit_code)

    @contextlib.contextmanager
    def using_exit_code(self, exit_code):
        orig_code = self.exit_code
        self.exit_code = exit_code
        # We intentionally *don't* want to use `finally` here, because we don't
        # want to restore the original code for an unhandled exception.
        yield self
        self.exit_code = orig_code

class Path(pathlib.Path):
    _flavour = pathlib._posix_flavour

    def __format__(self, format_spec=''):
        if format_spec.endswith('`'):
            return f"`{super().__format__(format_spec[:-1])}`"
        else:
            return super().__format__(format_spec)

@dataclasses.dataclass
class ArvadosResources:
    _resources: abc.Mapping

    @classmethod
    def from_client(cls, arv_client):
        return cls(arv_client._resourceDesc['resources'])

    @staticmethod
    def _sep_caps(match):
        return '_'.join(match.group(0).lower())

    def singular_name(self, name):
        if name == 'sys':
            return name
        elif name.endswith('ies'):
            return f'{name[:-3]}y'
        else:
            return name.removesuffix('s')

    def canonical_name(self, name):
        s = re.sub(r'\W', '_', name)
        s = re.sub(r'[a-z][A-Z]', self._sep_caps, s)
        s = s.lower()
        if s in self._resources:
            return s
        elif s.endswith('y'):
            s = f'{s[:-1]}ies'
        else:
            s += 's'
        if s in self._resources:
            return s
        raise ValueError(f"no resource found for {name!r}")

    def parameters(self, resource_name, method_name):
        return self._resources[resource_name]['methods'][method_name]['parameters']

@dataclasses.dataclass
class DirectoryLoader:
    arv_client: arvados.api.ThreadSafeAPIClient
    base: abc.Mapping | None
    params: abc.Mapping | None
    resources: ArvadosResources

    OBJECT_BASE_PATH = Path('arvados_seed_object.json')
    PARAMETERS_PATH = Path('arvados_seed_parameters.json')

    @classmethod
    def from_args(cls, args):
        arv_client = arvados.api.api(**args.api_kwargs)
        return cls(
            arv_client,
            args.object_base,
            args.parameters,
            ArvadosResources.from_client(arv_client),
        )

    def _load_defaults(self, instance_defaults, path):
        if instance_defaults is not None:
            return instance_defaults
        try:
            with path.open('rb') as json_file:
                defaults = json.load(json_file)
        except FileNotFoundError:
            defaults = {}
        if not is_mapping(defaults):
            raise ValueError(f"{path:`} does not contain a JSON object")
        return defaults

    def _create_from(self, json_path, base, base_params):
        prefix, _, rname = json_path.stem.rpartition('.')
        if not prefix:
            raise ValueError(f"{json_path:`} does not have an object type in the name")
        rname = self.resources.canonical_name(rname)
        kwargs = dict(base_params)
        if 'ensure_unique_name' in self.resources.parameters(rname, 'create'):
            kwargs.setdefault('ensure_unique_name', True)
        with json_path.open('rb') as json_file:
            json_body = json.load(json_file)
        kwargs['body'] = {self.resources.singular_name(rname): base | json_body}
        resource = getattr(self.arv_client, rname)
        return resource().create(**kwargs).execute()
    def build_from(self, dir_path):
        base = self._load_defaults(self.base, dir_path / self.OBJECT_BASE_PATH)
        base_params = self._load_defaults(self.params, dir_path / self.PARAMETERS_PATH)
        created = {}
        failed = {}
        for path in sorted(dir_path.glob('*.json')):
            path_key = str(path.absolute())
            try:
                result = self._create_from(path, base, base_params)
            except Exception as err:
                logger.warning(
                    "failed to load %s: %s",
                    path, err,
                    exc_info=logger.isEnabledFor(logging.DEBUG),
                )
                failed[path_key] = str(err)
            else:
                created[path_key] = result
        return (created, failed)

class ConfigLoader:
    DEFAULT_CONFIG_PATH = Path('/etc/arvados/config.yml')
    DISCOVERY_SERVICE_PATH = 'discovery/v1/apis/{api}/{apiVersion}/rest'

    @classmethod
    def _load_yaml(cls, path):
        try:
            with open(path, 'rb') as yaml_file:
                result = yaml.safe_load(yaml_file)
        except OSError as err:
            raise ValueError(f"error reading {path:`}: {err}") from None
        if not is_mapping(result):
            raise ValueError(f"{path:`} is not a YAML object")
        return result

    @classmethod
    def _cluster_config_path(cls):
        return Path(os.environ.get('ARVADOS_CONFIG', cls.DEFAULT_CONFIG_PATH))

    @classmethod
    def _from_one_cluster(cls, config):
        try:
            controller_url = config['Services']['Controller']['ExternalURL']
            token = config['SystemRootToken']
        except (KeyError, TypeError) as err:
            raise ValueError(f"error loading cluster configuration: {err}") from None
        try:
            insecure = config['TLS']['Insecure']
        except (KeyError, TypeError):
            insecure = False
        return {
            'version': 'v1',
            'discoveryServiceUrl': urllib.parse.urljoin(controller_url, cls.DISCOVERY_SERVICE_PATH),
            'token': token,
            'insecure': insecure,
        }

    @classmethod
    def from_cluster(cls, arg):
        path = cls._cluster_config_path()
        whole_config = cls._load_yaml(path)
        try:
            configs = whole_config['Clusters'].items()
        except (AttributeError, KeyError, TypeError) as err:
            raise ValueError(f"error loading clusters configuration: {err}") from None
        kwargs = None
        kwargs_id = None
        for cluster_id, config in configs:
            try:
                new_kwargs = cls._from_one_cluster(config)
            except ValueError:
                continue
            if kwargs is None:
                kwargs = new_kwargs
                kwargs_id = cluster_id
            else:
                raise ValueError(
                    f"{path:`} has configuration for both {kwargs_id} and {cluster_id} - "
                    "specify a cluster ID",
                )
        if kwargs is None:
            raise ValueError(f"no usable cluster configuration found in {path:`}")
        else:
            return kwargs

    @classmethod
    def from_cluster_id(cls, arg):
        path = cls._cluster_config_path()
        whole_config = cls._load_yaml(path)
        try:
            config = whole_config['Clusters'][arg]
        except (AttributeError, KeyError, TypeError) as err:
            raise ValueError(f"error loading {arg} configuration from {path:`}: {err}") from None
        return cls._from_one_cluster(config)

    @classmethod
    def from_env(cls, arg):
        arvados.config.initialize('')
        return cls.from_user(arg)

    @classmethod
    def from_user(cls, arg):
        return arvados.api.api_kwargs_from_config('v1')

    @classmethod
    def parse_arg(cls, arg):
        try:
            constructor = getattr(cls, f'from_{arg}')
        except AttributeError:
            # Cluster IDs are five alphanumeric characters.
            if re.fullmatch(r'^[a-z0-9]{5}$', arg):
                constructor = cls.from_cluster_id
            else:
                raise ValueError(f"invalid configuration source {arg!r}") from None
        return constructor(arg)

    @classmethod
    def default_config(cls):
        if os.geteuid() == 0:
            return cls.from_cluster(None)
        else:
            return cls.from_user(None)

def parse_loglevel(arg):
    try:
        return logging.getLevelNamesMapping()[arg.upper()]
    except KeyError:
        raise ValueError(f"invalid log level {arg!r}") from None

def validate_mapping(arg):
    if is_mapping(arg):
        return arg
    else:
        raise ValueError("value is not a JSON object")
def parse_arguments(arglist=None):
    parser = argparse.ArgumentParser(
        prog="arv-seed",
        description="Create multiple Arvados objects from a directory of JSON files",
    )
    parser.add_argument(
        '--client-from', metavar='SOURCE',
        type=ConfigLoader.parse_arg,
        dest='api_kwargs',
        help="""Where to find the Arvados API server and token. Specify one of a cluster ID, `cluster`, `env`, or `user`. The first two options load the cluster configuration file from `$ARVADOS_CONFIG` or `/etc/arvados/config.yml`.
""")
    parser.add_argument(
        '--loglevel', metavar='LEVEL',
        type=parse_loglevel,
        default=logging.INFO,
        help="""The name of a log level like `debug`, `info`, `warning`, or `error`
""")
    parser.add_argument(
        '--object-base', '--base', metavar='BASE_JSON',
        type=cmd_util.JSONArgument(validate_mapping, "JSON object"),
        help="""JSON object or path to set common attributes for all created objects. If not set, will try to read `arvados_seed_object.json` in each directory.
""")
    parser.add_argument(
        '--parameters', '--params', metavar='PARAMS_JSON',
        type=cmd_util.JSONArgument(validate_mapping, "JSON object"),
        help="""JSON object or path to set parameters when creating objects. If not set, will try to read `arvados_seed_parameters.json` in each directory.
""")
    parser.add_argument(
        'dir_paths', metavar='DIRECTORY',
        type=Path,
        nargs=argparse.ONE_OR_MORE,
        help="""Directory to read object JSON files from. Object files must be named `NAME.TYPE.json`, where `TYPE` is an Arvados API resource type.
""")
    args = parser.parse_args(arglist)
    if args.api_kwargs is None:
        args.api_kwargs = ConfigLoader.default_config()
    return args

def add_log_handlers(logger, stderr=sys.stderr):
    syslog = logging.handlers.SysLogHandler('/dev/log')
    syslog.setFormatter(logging.Formatter('[%(name)s] %(message)s'))
    logger.addHandler(syslog)
    if os.environ.get('TERM'):
        stream = logging.StreamHandler(stderr)
        stream.setFormatter(logging.Formatter(
            '[%(asctime)s] arv-seed: %(levelname)s: %(message)s',
            '%Y-%m-%d %H:%M:%S',
        ))
        logger.addHandler(stream)

def main(
        arglist=None,
        *,
        stdout=sys.stdout,
        stderr=sys.stderr,
        is_main=Path(sys.argv[0]).stem == 'arv-seed',
):
    if is_main:
        add_log_handlers(_root_logger)
        sys.excepthook = ExceptHook(logger)
        arvados.logger.removeHandler(arvados.logging.log_handler)
    args = parse_arguments(arglist)
    if is_main:
        _root_logger.setLevel(args.loglevel)
        setup_ctx = sys.excepthook.using_exit_code(os.EX_CONFIG)
    else:
        logger.setLevel(args.loglevel)
        setup_ctx = contextlib.nullcontext()
    with setup_ctx:
        loader = DirectoryLoader.from_args(args)
    created = {}
    failed = {}
    for dir_path in args.dir_paths:
        try:
            dir_created, dir_failed = loader.build_from(dir_path)
        except Exception as err:
            logger.warning(
                "failed to load directory %s: %s",
                dir_path, err,
                exc_info=logger.isEnabledFor(logging.DEBUG),
            )
            failed[str(dir_path)] = str(err)
        else:
            created.update(dir_created)
            failed.update(dir_failed)
    json.dump({'created': created, 'failed': failed}, stdout)
    print(file=stdout)
    if created and failed:
        return 12
    elif failed:
        return 11
    elif created:
        return os.EX_OK
    else:
        return os.EX_NOINPUT

================================================
FILE: contrib/arvados-bootstrap/src/arv_bootstrap/stubapi.py
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

import functools
import hashlib
import json
import logging
import operator
import os

import arvados.util

_FILTER_OPS = {
    '=': operator.eq,
    '>=': operator.ge,
    '>': operator.gt,
    '<=': operator.le,
    '<': operator.lt,
    '!=': operator.ne,
    '<>': operator.ne,
}

class DeferExecution:
    def __init__(self, fn):
        self._fn = fn

    def execute(self, *, num_retries=None):
        return self._fn()

def defer_execution(f):
    @functools.wraps(f)
    def wrapper(*args, **kwds):
        return DeferExecution(functools.partial(f, *args, **kwds))
    return wrapper

class StubKeepClient:
    def __init__(self, basedir):
        self._basedir = basedir

    def get(self, locator):
        blockdir = os.path.join(self._basedir, locator[0:3])
        filepath = os.path.join(blockdir, arvados.KeepLocator(locator).md5sum)
        with open(filepath, "rb") as fr:
            return fr.read()

    def put(self, data, copies=2, num_retries=None, request_id=None, classes=None):
        md5 = hashlib.md5(data).hexdigest()
        locator = '%s+%d' % (md5, len(data))
        blockdir = os.path.join(self._basedir, locator[0:3])
        os.makedirs(blockdir, exist_ok=True)
        filepath = os.path.join(blockdir, md5)
        # Write to a temporary file first so a partial write never leaves
        # a bad block behind.
        with open(filepath + '.tmp', 'wb') as f:
            f.write(data)
        os.rename(filepath + '.tmp', filepath)
        return locator

def match_filter(fl, obj):
    key, op_key, val = fl
    try:
        op_func = _FILTER_OPS[op_key]
    except KeyError:
        raise NotImplementedError(f"unsupported filter operator {op_key}") from None
    else:
        return op_func(obj[key], val)

class StubArvadosResources:
    def __init__(self, basedir, resource_type):
        self._basedir = basedir
        self._resource_type = resource_type
        self._logger = logging.getLogger(f'arvados.stubapi.{resource_type}')

    @defer_execution
    def get(self, *, uuid=""):
        with open(os.path.join(self._basedir, uuid), "rt") as fr:
            return json.load(fr)

    @defer_execution
    def create(self, *, body=None, ensure_unique_name=None):
        if self._resource_type in body:
            body = body[self._resource_type]
        with open(os.path.join(self._basedir, body["uuid"]), "wt") as fw:
            json.dump(body, fw, indent=2)
        return body

    @defer_execution
    def update(self, *, uuid="", body=None):
        if self._resource_type in body:
            body = body[self._resource_type]
        with open(os.path.join(self._basedir, uuid), "rt") as fr:
            obj = json.load(fr)
        for k, v in body.items():
            obj[k] = v
        with open(os.path.join(self._basedir, uuid), "wt") as fw:
            json.dump(obj, fw, indent=2)
        return obj

    @defer_execution
    def list(self, *, filters=None, limit=None, count=None, order=None):
        items = []
        for dirent in os.scandir(self._basedir):
            if not arvados.util.uuid_pattern.match(dirent.name) or not dirent.is_file():
                continue
            with open(os.path.join(self._basedir, dirent.name), "rt") as fr:
                obj = json.load(fr)
            if all(match_filter(f, obj) for f in (filters or [])):
                items.append(obj)
        if order:
            if len(order) == 1:
                k1, r1 = order[0].split(' ')
                keycomp = lambda x: x[k1]
            elif len(order) == 2:
                k1, r1 = order[0].split(' ')
                k2, r2 = order[1].split(' ')
                if r1 != r2:
                    raise NotImplementedError("Can't have secondary sort column in opposite direction")
                keycomp = lambda x: (x[k1], x[k2])
            items.sort(key=keycomp, reverse=(r1 == 'desc'))
        if limit is not None:
            items = items[0:limit]
        return {
            "items": items,
            "items_available": len(items),
        }

class StubArvadosAPI:
    def __init__(self, basedir):
        self._basedir = basedir
        os.makedirs(os.path.join(self._basedir, "keep"), exist_ok=True)
        os.makedirs(os.path.join(self._basedir, "arvados/v1/collections"), exist_ok=True)
        os.makedirs(os.path.join(self._basedir, "arvados/v1/links"), exist_ok=True)
        os.makedirs(os.path.join(self._basedir, "arvados/v1/groups"), exist_ok=True)
        os.makedirs(os.path.join(self._basedir, "arvados/v1/workflows"), exist_ok=True)
        self.keep = StubKeepClient(os.path.join(self._basedir, "keep"))

    @classmethod
    def for_cwd(cls):
        return cls(os.getcwd())

    def collections(self):
        return StubArvadosResources(os.path.join(self._basedir, "arvados/v1/collections"), "collection")

    def links(self):
        return StubArvadosResources(os.path.join(self._basedir, "arvados/v1/links"), "link")

    def groups(self):
        return StubArvadosResources(os.path.join(self._basedir, "arvados/v1/groups"), "group")

    def workflows(self):
        return StubArvadosResources(os.path.join(self._basedir, "arvados/v1/workflows"), "workflow")

================================================
FILE: contrib/arvbash/arvbash.sh
================================================
#!/bin/bash
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

# bash functions for managing Arvados tokens and other conveniences.

read -rd "\000" helpmessage <<EOF
arvswitch <name>
  Set ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment based on
  $HOME/.config/arvados/<name>.conf
  With no arguments, print current API host and available Arvados configurations.

arvsave <name>
  Save current values of ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment to
  $HOME/.config/arvados/<name>.conf

arvrm <name>
  Delete $HOME/.config/arvados/<name>.conf

arvopen <uuid>
  Open an Arvados uuid in web browser (http://arvadosapi.com)

arvissue <issue number>
  Open an Arvados ticket in web browser (http://dev.arvados.org)
EOF

if [[ "$1" = "--install" ]] ; then
    this=$(readlink -f $0)
    if ! grep ". $this" ~/.bashrc >/dev/null ; then
        echo ". $this" >> ~/.bashrc
        echo "Installed into ~/.bashrc"
    else
        echo "Already installed in ~/.bashrc"
    fi
elif ! [[ $0 =~ bash$ ]] ; then
    echo "$helpmessage"
fi

HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'

arvswitch() {
    if [[ -n "$1" ]] ; then
        if [[ -f $HOME/.config/arvados/$1.conf ]] ; then
            unset ARVADOS_API_HOST_INSECURE
            for a in $(cat $HOME/.config/arvados/$1.conf) ; do export $a ; done
            echo "Switched to $1"
        else
            echo "$1 unknown"
        fi
    else
        echo "Switch Arvados environment conf"
        echo "Current host: ${ARVADOS_API_HOST}"
        echo "Usage: arvswitch <name>"
        echo "Available confs:" $((cd $HOME/.config/arvados && ls --indicator-style=none *.conf) | rev | cut -c6- | rev)
    fi
}

arvsave() {
    if [[ -n "$1" ]] ; then
        touch $HOME/.config/arvados/$1.conf
        chmod 0600 $HOME/.config/arvados/$1.conf
        env | grep ARVADOS_ > $HOME/.config/arvados/$1.conf
    else
        echo "Save current Arvados environment variables to conf file"
        echo "Usage: arvsave <name>"
    fi
}

arvrm() {
    if [[ -n "$1" ]] ; then
        if [[ -f $HOME/.config/arvados/$1.conf ]] ; then
            rm $HOME/.config/arvados/$1.conf
        else
            echo "$1 unknown"
        fi
    else
        echo "Delete Arvados environment conf"
        echo "Usage: arvrm <name>"
    fi
}

arvopen() {
    if [[ -n "$1" ]] ; then
        xdg-open https://arvadosapi.com/$1
    else
        echo "Open Arvados uuid in browser"
        echo "Usage: arvopen <uuid>"
    fi
}

arvissue() {
    if [[ -n "$1" ]] ; then
        xdg-open https://dev.arvados.org/issues/$1
    else
        echo "Open Arvados issue in browser"
        echo "Usage: arvissue <issue number>"
    fi
}

================================================
FILE: contrib/java-sdk-v2/.gitignore
================================================
/.gradle/
/bin/
/build/
.project
.classpath
/.settings/
.DS_Store
/.idea/
/out/

================================================
FILE: contrib/java-sdk-v2/.licenseignore
================================================
.licenseignore
agpl-3.0.txt
apache-2.0.txt
COPYING

================================================
FILE: contrib/java-sdk-v2/COPYING
================================================
Unless indicated otherwise in the header of the file, the files in this
repository are dual-licensed AGPL-3.0 and Apache-2.0.

Individual files contain an SPDX tag that indicates the license for the file.
Dual-licensed files use the following tag:

    SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0

This enables machine processing of license information based on the SPDX
License Identifiers that are available here: http://spdx.org/licenses/

The full license text for each license is available in this directory:

  AGPL-3.0:   agpl-3.0.txt
  Apache-2.0: apache-2.0.txt


================================================
FILE: contrib/java-sdk-v2/README.md
================================================
```
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
```

# Arvados Java SDK

##### About

The Arvados Java client provides access to Arvados servers through two APIs:

* the lower-level [Keep Server API](https://doc.arvados.org/api/index.html)
* the higher-level [Keep-Web API](https://godoc.org/github.com/arvados/arvados/services/keep-web) (when needed)

##### Required Java version

This SDK requires Java 8+.

##### Logging

SLF4J is used for logging. A concrete logging framework and its configuration must be provided by the client.

##### Configuration

[TypeSafe Configuration](https://github.com/lightbend/config) is used for configuring this library. Please have a look at java/resources/reference.conf for the default values provided with this library.

* **keepweb-host** - change to the host of your Keep-Web installation
* **keepweb-port** - change to the port of your Keep-Web installation
* **host** - change to the host of your Arvados installation
* **port** - change to the port of your Arvados installation
* **token** - authenticates a registered user; provide a [token obtained from Arvados Workbench](https://doc.arvados.org/user/reference/api-tokens.html)
* **protocol** - don't change unless really needed
* **host-insecure** - insecure communication with Arvados (ignores SSL certificate verification); don't change to *true* unless really needed
* **split-size** - size of chunk files in megabytes
* **temp-dir** - storage location for temporary chunk files
* **copies** - number of copies of each chunk file to store per Keep server
* **retries** - number of times to retry sending a chunk file after a failure (*NOTE*: this parameter is not used at the moment but was left for future improvements)

To override the default settings, create an application.conf file in your application; see src/test/resources/application.conf for an example. Alternatively, the ExternalConfigProvider class can be used to pass configuration via code. ExternalConfigProvider comes with a builder, and all of the above values must be provided in order for it to work properly.

ArvadosFacade has two constructors: one without arguments that uses the values from reference.conf, and a second one that takes an ExternalConfigProvider as an argument.

##### API clients

All API clients inherit from BaseStandardApiClient. This class contains the implementation of all common methods as described in http://doc.arvados.org/api/methods.html. Parameters provided to common or specific methods are String UUIDs or fields wrapped in Java objects.

For example:

```java
String uuid = "ardev-4zz18-rxcql7qwyakg1r1";
Collection actual = client.get(uuid);
```

```java
ListArgument listArgument = ListArgument.builder()
        .filters(Arrays.asList(
                Filter.of("owner_uuid", Operator.LIKE, "ardev%"),
                Filter.of("name", Operator.LIKE, "Super%"),
                Filter.of("portable_data_hash", Operator.IN,
                        Lists.newArrayList("54f6d9f59065d3c009d4306660989379+65"))))
        .build();
CollectionList actual = client.list(listArgument);
```
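
A hedged sketch of a create/update round trip with the same client (the `Collection` setter shown is an assumption about the model class, not verified):

```java
// Assumes a configured CollectionsApiClient, as above.
Collection toCreate = new Collection();
toCreate.setName("my-new-collection");        // assumed setter on the model
Collection created = client.create(toCreate); // POST to /arvados/v1/collections
created.setName("my-renamed-collection");     // assumed setter on the model
Collection updated = client.update(created);  // PUT, addressed by the object's UUID
```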
Non-standard API clients must inherit from BaseApiClient. For example, KeepServerApiClient communicates directly with Keep servers, using exclusively non-common methods.

##### Business logic

More advanced handling of API data can be implemented as *Facade* classes. In the current version, the functionality provided by the SDK is handled by *ArvadosFacade*. It includes:

* **downloading a single file from a collection** - using Keep-Web
* **downloading a whole collection** - using Keep-Web or the Keep Server API
* **listing file info from a given collection** - information is returned as a list of *FileTokens* providing file details
* **uploading a single file** - to either a new or an existing collection
* **uploading a list of files** - to either a new or an existing collection
* **creating an empty collection**
* **getting current user info**
* **listing the current user's collections**
* **creating a new project**
* **deleting a given collection**

##### Note regarding Keep-Web

The current version requires both Keep-Web and the standard Keep Server API to be configured in order to use the Keep-Web functionality.

##### Integration tests

In order to run the integration tests, all fields within the following configuration file must be provided:

```
src/test/resources/integration-test-application.conf
```

The parameter **integration-tests.project-uuid** should contain the UUID of a project available to the user whose token was provided within the configuration file.

Integration tests require a connection to a real Arvados server.

##### Note regarding file naming

When uploading via this SDK, all files within a single collection must have different names. This also applies to uploading files to an already existing collection. Renaming files with duplicate names is not implemented in the current version.

##### Building with Gradle

The Arvados Java SDK is built with `gradle`. Common development build tasks are:

* `clean`
* `test`
* `jar` (build the jar files, including documentation)
* `install`


================================================
FILE: contrib/java-sdk-v2/agpl-3.0.txt
================================================
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. 
An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>.


================================================
FILE: contrib/java-sdk-v2/apache-2.0.txt
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================
FILE: contrib/java-sdk-v2/build.gradle
================================================
apply plugin: 'java-library'
apply plugin: 'eclipse'
apply plugin: 'idea'
apply plugin: 'maven'
apply plugin: 'signing'

repositories {
    mavenCentral()
}

dependencies {
    api 'com.squareup.okhttp3:okhttp:3.9.1'
    api 'com.fasterxml.jackson.core:jackson-databind:2.9.2'
    api 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.2'
    api 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.2'
    api 'commons-codec:commons-codec:1.11'
    api 'commons-io:commons-io:2.6'
    api 'com.google.guava:guava:23.4-jre'
    api 'org.slf4j:slf4j-api:1.7.25'
    api 'com.typesafe:config:1.3.2'

    testImplementation 'junit:junit:4.12'
    testImplementation 'org.mockito:mockito-core:5.17.0'
    testImplementation 'org.assertj:assertj-core:3.8.0'
    testImplementation 'com.squareup.okhttp3:mockwebserver:3.9.1'
}

test {
    useJUnit {
        excludeCategories 'org.arvados.client.junit.categories.IntegrationTests'
    }
    testLogging {
        events "passed", "skipped", "failed"
        afterSuite { desc, result ->
            if (!desc.parent) { // will match the outermost suite
                println "\n---- Test results ----"
                println "${result.resultType} (${result.testCount} tests, ${result.successfulTestCount} successes, ${result.failedTestCount} failures, ${result.skippedTestCount} skipped)"
                println ""
            }
        }
    }
}

task integrationTest(type: Test) {
    useJUnit {
        includeCategories 'org.arvados.client.junit.categories.IntegrationTests'
    }
}

task javadocJar(type: Jar) {
    classifier = 'javadoc'
    from javadoc
}

task sourcesJar(type: Jar) {
    classifier = 'sources'
    from sourceSets.main.allSource
}

artifacts {
    archives javadocJar, sourcesJar
}

signing {
    sign configurations.archives
}

uploadArchives {
    repositories {
        mavenDeployer {
            beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }

            repository(url: "https://ossrh-staging-api.central.sonatype.com/service/local/staging/deploy/maven2") {
                authentication(userName: ossrhUsername, password: ossrhPassword)
            }

            snapshotRepository(url: "https://ossrh-staging-api.central.sonatype.com/content/repositories/snapshots") {
                authentication(userName: ossrhUsername, password: ossrhPassword)
            }

            pom.project {
                name 'Arvados Java SDK'
                packaging 'jar'
                groupId 'org.arvados'
                description 'Arvados Java SDK'
                url 'https://github.com/arvados/arvados'

                scm {
                    url 'scm:git@https://github.com/arvados/arvados.git'
                    connection 'scm:git@https://github.com/arvados/arvados.git'
                    developerConnection 'scm:git@https://github.com/arvados/arvados.git'
                }

                licenses {
                    license {
                        name 'The Apache License, Version 2.0'
                        url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
                    }
                }

                developers {
                    developer {
                        id 'veritasgenetics'
                        name 'Veritas Genetics'
                        email 'ops@veritasgenetics.com'
                    }
                }
            }
        }
    }
}


================================================
FILE: contrib/java-sdk-v2/gradle.properties
================================================
/*
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
*/
ossrhUsername = ''
ossrhPassword = ''


================================================
FILE: contrib/java-sdk-v2/settings.gradle
================================================
rootProject.name = 'arvados-java-sdk'
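
The `test` and `integrationTest` tasks above select tests by JUnit 4 category. A minimal sketch of how a test opts into the integration run (the test class here is hypothetical; the `IntegrationTests` marker interface is the one build.gradle references):

```java
package org.arvados.client.test;

import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.arvados.client.junit.categories.IntegrationTests;

// Hypothetical test class: the @Category marker is what build.gradle's
// excludeCategories/includeCategories match against, so `gradle test`
// skips this class and `gradle integrationTest` runs it.
@Category(IntegrationTests.class)
public class ServerRoundTripTest {

    @Test
    public void roundTripAgainstRealServer() {
        // ...would exercise a live Arvados server here...
    }
}
```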
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.arvados.client.exception.ArvadosApiException;
import org.arvados.client.api.client.factory.OkHttpClientFactory;
import org.arvados.client.api.model.ApiError;
import org.arvados.client.config.ConfigProvider;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.ResponseBody;
import org.slf4j.Logger;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import java.util.concurrent.TimeUnit;

abstract class BaseApiClient {

    static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();

    final OkHttpClient client;
    final ConfigProvider config;

    private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseApiClient.class);

    BaseApiClient(ConfigProvider config) {
        this.config = config;
        this.client = OkHttpClientFactory.INSTANCE.create(config.isApiHostInsecure())
                .newBuilder()
                .connectTimeout(config.getConnectTimeout(), TimeUnit.MILLISECONDS)
                .readTimeout(config.getReadTimeout(), TimeUnit.MILLISECONDS)
                .writeTimeout(config.getWriteTimeout(), TimeUnit.MILLISECONDS)
                .build();
    }

    Request.Builder getRequestBuilder() {
        return new Request.Builder()
                .addHeader("authorization", String.format("Bearer %s", config.getApiToken()))
                .addHeader("cache-control", "no-cache");
    }

    String newCall(Request request) {
        return (String) getResponseBody(request, body -> body.string().trim());
    }

    byte[] newFileCall(Request request) {
        return (byte[]) getResponseBody(request, ResponseBody::bytes);
    }

    private Object getResponseBody(Request request, Command command) {
        try {
            log.debug(URLDecoder.decode(request.toString(), StandardCharsets.UTF_8.name()));
        } catch (UnsupportedEncodingException e) {
            throw new ArvadosApiException(e);
        }

        try (Response response = client.newCall(request).execute()) {
            ResponseBody responseBody = response.body();
            if (!response.isSuccessful()) {
                String errorBody = Objects.requireNonNull(responseBody).string();
                if (errorBody == null || errorBody.length() == 0) {
                    throw new ArvadosApiException(String.format("Error code %s with message: %s", response.code(), response.message()));
                }
                ApiError apiError = MAPPER.readValue(errorBody, ApiError.class);
                throw new ArvadosApiException(String.format("Error code %s with messages: %s", response.code(), apiError.getErrors()));
            }
            return command.readResponseBody(responseBody);
        } catch (IOException e) {
            throw new ArvadosApiException(e);
        }
    }

    private interface Command {
        Object readResponseBody(ResponseBody body) throws IOException;
    }
}
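
BaseApiClient supplies the authenticated request builder plus the `newCall`/`newFileCall` execution helpers; as the README notes, non-standard clients (such as KeepServerApiClient) build directly on these. A rough sketch under that assumption, with an invented endpoint:

```java
package org.arvados.client.api.client;

import okhttp3.HttpUrl;
import okhttp3.Request;
import org.arvados.client.config.ConfigProvider;

// Sketch only: the /status endpoint is invented for illustration; the real
// non-standard client in this SDK is KeepServerApiClient.
class StatusApiClient extends BaseApiClient {

    StatusApiClient(ConfigProvider config) {
        super(config);
    }

    String getServerStatus() {
        HttpUrl url = new HttpUrl.Builder()
                .scheme(config.getApiProtocol())
                .host(config.getApiHost())
                .port(config.getApiPort())
                .addPathSegment("status")
                .build();
        // getRequestBuilder() already carries the authorization header;
        // newCall() executes the request and returns the trimmed body.
        Request request = getRequestBuilder().get().url(url).build();
        return newCall(request);
    }
}
```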
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectWriter;
import okhttp3.MediaType;
import okhttp3.HttpUrl;
import okhttp3.HttpUrl.Builder;
import okhttp3.Request;
import okhttp3.RequestBody;
import org.arvados.client.exception.ArvadosApiException;
import org.arvados.client.api.model.Item;
import org.arvados.client.api.model.ItemList;
import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;

import java.io.IOException;
import java.util.Map;

public abstract class BaseStandardApiClient<T extends Item, L extends ItemList> extends BaseApiClient {

    protected static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
    private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseStandardApiClient.class);

    BaseStandardApiClient(ConfigProvider config) {
        super(config);
    }

    public L list(ListArgument listArguments) {
        log.debug("Get list of {}", getType().getSimpleName());
        Builder urlBuilder = getUrlBuilder();
        addQueryParameters(urlBuilder, listArguments);
        HttpUrl url = urlBuilder.build();
        Request request = getRequestBuilder().url(url).build();
        return callForList(request);
    }

    public L list() {
        return list(ListArgument.builder().build());
    }

    public T get(String uuid) {
        log.debug("Get {} by UUID {}", getType().getSimpleName(), uuid);
        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();
        Request request = getRequestBuilder().get().url(url).build();
        return callForType(request);
    }

    public T create(T type) {
        log.debug("Create {}", getType().getSimpleName());
        String json = mapToJson(type);
        RequestBody body = RequestBody.create(JSON, json);
        Request request = getRequestBuilder().post(body).build();
        return callForType(request);
    }

    public T delete(String uuid) {
        log.debug("Delete {} by UUID {}", getType().getSimpleName(), uuid);
        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();
        Request request = getRequestBuilder().delete().url(url).build();
        return callForType(request);
    }

    public T update(T type) {
        String uuid = type.getUuid();
        log.debug("Update {} by UUID {}", getType().getSimpleName(), uuid);
        String json = mapToJson(type);
        RequestBody body = RequestBody.create(JSON, json);
        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();
        Request request = getRequestBuilder().put(body).url(url).build();
        return callForType(request);
    }

    @Override
    Request.Builder getRequestBuilder() {
        return super.getRequestBuilder().url(getUrlBuilder().build());
    }

    HttpUrl.Builder getUrlBuilder() {
        return new HttpUrl.Builder()
                .scheme(config.getApiProtocol())
                .host(config.getApiHost())
                .port(config.getApiPort())
                .addPathSegment("arvados")
                .addPathSegment("v1")
                .addPathSegment(getResource());
    }

    <TL> TL call(Request request, Class<TL> cls) {
        String bodyAsString = newCall(request);
        try {
            return mapToObject(bodyAsString, cls);
        } catch (IOException e) {
            throw new ArvadosApiException("A problem occurred while parsing JSON data", e);
        }
    }

    private <TL> TL mapToObject(String content, Class<TL> cls) throws IOException {
        return MAPPER.readValue(content, cls);
    }

    protected <TL> String mapToJson(TL type) {
        ObjectWriter writer = MAPPER.writer().withDefaultPrettyPrinter();
        try {
            return writer.writeValueAsString(type);
        } catch (JsonProcessingException e) {
            log.error(e.getMessage());
            return null;
        }
    }

    T callForType(Request request) {
        return call(request, getType());
    }

    L callForList(Request request) {
        return call(request, getListType());
    }

    abstract String getResource();

    abstract Class<T> getType();

    abstract Class<L> getListType();

    Request getNoArgumentMethodRequest(String method) {
        HttpUrl url = getUrlBuilder().addPathSegment(method).build();
        return getRequestBuilder().get().url(url).build();
    }

    RequestBody getJsonRequestBody(Object object) {
        return RequestBody.create(JSON, mapToJson(object));
    }

    void addQueryParameters(Builder urlBuilder, Object object) {
        Map<String, Object> queryMap = MAPPER.convertValue(object, new TypeReference<Map<String, Object>>() {});
        queryMap.keySet().forEach(key -> {
            Object type = queryMap.get(key);
            if (!(type instanceof String)) {
                type = mapToJson(type);
            }
            urlBuilder.addQueryParameter(key, (String) type);
        });
    }
}
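
A concrete standard client only has to bind the two type parameters and name its REST resource; everything else is inherited. As a minimal sketch of that contract (mirroring CollectionsApiClient below; the `User`/`UserList` model classes are assumed to exist in org.arvados.client.api.model):

```java
package org.arvados.client.api.client;

import org.arvados.client.api.model.User;
import org.arvados.client.api.model.UserList;
import org.arvados.client.config.ConfigProvider;

// Sketch: get/list/create/update/delete all come from BaseStandardApiClient.
public class UsersApiClient extends BaseStandardApiClient<User, UserList> {

    public UsersApiClient(ConfigProvider config) {
        super(config);
    }

    @Override
    String getResource() {
        return "users"; // request paths become /arvados/v1/users/...
    }

    @Override
    Class<User> getType() {
        return User.class;
    }

    @Override
    Class<UserList> getListType() {
        return UserList.class;
    }
}
```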
request) { return call(request, getListType()); } abstract String getResource(); abstract Class getType(); abstract Class getListType(); Request getNoArgumentMethodRequest(String method) { HttpUrl url = getUrlBuilder().addPathSegment(method).build(); return getRequestBuilder().get().url(url).build(); } RequestBody getJsonRequestBody(Object object) { return RequestBody.create(JSON, mapToJson(object)); } void addQueryParameters(Builder urlBuilder, Object object) { Map queryMap = MAPPER.convertValue(object, new TypeReference>() {}); queryMap.keySet().forEach(key -> { Object type = queryMap.get(key); if (!(type instanceof String)) { type = mapToJson(type); } urlBuilder.addQueryParameter(key, (String) type); }); } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.api.client; import org.arvados.client.api.model.Collection; import org.arvados.client.api.model.CollectionList; import org.arvados.client.api.model.CollectionReplaceFiles; import org.arvados.client.config.ConfigProvider; import org.slf4j.Logger; import okhttp3.HttpUrl; import okhttp3.Request; import okhttp3.RequestBody; public class CollectionsApiClient extends BaseStandardApiClient { private static final String RESOURCE = "collections"; private final Logger log = org.slf4j.LoggerFactory.getLogger(CollectionsApiClient.class); public CollectionsApiClient(ConfigProvider config) { super(config); } @Override public Collection create(Collection type) { Collection newCollection = super.create(type); log.debug(String.format("New collection '%s' with UUID %s has been created", newCollection.getName(), newCollection.getUuid())); return newCollection; } public Collection update(String collectionUUID, CollectionReplaceFiles replaceFilesRequest) { String json = mapToJson(replaceFilesRequest); RequestBody body = RequestBody.create(JSON, json); HttpUrl url = getUrlBuilder().addPathSegment(collectionUUID).build(); Request request = getRequestBuilder().put(body).url(url).build(); return callForType(request); } @Override String getResource() { return RESOURCE; } @Override Class getType() { return Collection.class; } @Override Class getListType() { return CollectionList.class; } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/ConfigApiClient.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. 
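Not part of the repository: a minimal usage sketch of the typed CRUD surface above. `config` is a stand-in for whatever ConfigProvider implementation you wire up, and the class name is hypothetical.

import org.arvados.client.api.client.CollectionsApiClient;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionList;
import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.config.ConfigProvider;

public class CollectionsExample {
    public static void main(String[] args) {
        ConfigProvider config = null; // stand-in: supply your own ConfigProvider implementation
        CollectionsApiClient collections = new CollectionsApiClient(config);
        // GET /arvados/v1/collections, first 20 items
        CollectionList page = collections.list(ListArgument.builder().limit(20).build());
        for (Collection c : page.getItems()) {
            System.out.println(c.getUuid() + "  " + c.getName());
        }
    }
}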
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/ConfigApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import com.fasterxml.jackson.databind.ObjectMapper;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import org.arvados.client.api.client.factory.OkHttpClientFactory;
import org.arvados.client.api.model.ArvadosConfig;
import org.arvados.client.exception.ArvadosApiException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class ConfigApiClient {

    private static final Logger log = LoggerFactory.getLogger(ConfigApiClient.class);
    private static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();
    private static final String CONFIG_ENDPOINT = "/arvados/v1/config";

    private final OkHttpClient client;
    private final String baseUrl;

    public ConfigApiClient(String protocol, String host, int port, boolean insecure) {
        this.baseUrl = String.format("%s://%s:%d", protocol, host, port);
        this.client = OkHttpClientFactory.INSTANCE.create(insecure)
                .newBuilder()
                .connectTimeout(10, TimeUnit.SECONDS)
                .readTimeout(10, TimeUnit.SECONDS)
                .build();
    }

    public ArvadosConfig fetchConfig() throws ArvadosApiException {
        String url = baseUrl + CONFIG_ENDPOINT;
        Request request = new Request.Builder()
                .url(url)
                .get()
                .build();
        try (Response response = client.newCall(request).execute()) {
            if (!response.isSuccessful()) {
                String errorMessage = String.format("Failed to fetch config from %s. Status: %d", url, response.code());
                log.error(errorMessage);
                throw new ArvadosApiException(errorMessage);
            }
            String responseBody = response.body() != null ? response.body().string() : "";
            return MAPPER.readValue(responseBody, ArvadosConfig.class);
        } catch (IOException e) {
            String errorMessage = String.format("Error fetching config from %s: %s", url, e.getMessage());
            log.error(errorMessage, e);
            throw new ArvadosApiException(errorMessage, e);
        }
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okio.BufferedSink;
import okio.Okio;
import okio.Source;

import java.io.File;

/**
 * Based on:
 * {@link} https://gist.github.com/eduardb/dd2dc530afd37108e1ac
 */
public class CountingFileRequestBody extends CountingRequestBody<File> {

    CountingFileRequestBody(final File file, final ProgressListener listener) {
        super(file, listener);
    }

    @Override
    public long contentLength() {
        return requestBodyData.length();
    }

    @Override
    public void writeTo(BufferedSink sink) {
        try (Source source = Okio.source(requestBodyData)) {
            long total = 0;
            long read;
            while ((read = source.read(sink.buffer(), SEGMENT_SIZE)) != -1) {
                total += read;
                sink.flush();
                listener.updateProgress(total);
            }
        } catch (RuntimeException rethrown) {
            throw rethrown;
        } catch (Exception ignored) {
            // ignore
        }
    }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CountingRequestBody.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.MediaType;
import okhttp3.RequestBody;
import org.slf4j.Logger;

abstract class CountingRequestBody<T> extends RequestBody {

    protected static final int SEGMENT_SIZE = 2048; // okio.Segment.SIZE
    protected static final MediaType CONTENT_BINARY = MediaType.parse(com.google.common.net.MediaType.OCTET_STREAM.toString());

    protected final ProgressListener listener;
    protected final T requestBodyData;

    CountingRequestBody(T file, final ProgressListener listener) {
        this.requestBodyData = file;
        this.listener = listener;
    }

    @Override
    public MediaType contentType() {
        return CONTENT_BINARY;
    }

    static class TransferData {

        private final Logger log = org.slf4j.LoggerFactory.getLogger(TransferData.class);
        private int progressValue;
        private long totalSize;

        TransferData(long totalSize) {
            this.progressValue = 0;
            this.totalSize = totalSize;
        }

        void updateTransferProgress(long transferred) {
            float progress = (transferred / (float) totalSize) * 100;
            if (progressValue != (int) progress) {
                progressValue = (int) progress;
                log.debug("{} / {} / {}%", transferred, totalSize, progressValue);
            }
        }
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CountingStreamRequestBody.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okio.BufferedSink;
import okio.Okio;
import okio.Source;

import java.io.IOException;
import java.io.InputStream;

public class CountingStreamRequestBody extends CountingRequestBody<InputStream> {

    CountingStreamRequestBody(final InputStream inputStream, final ProgressListener listener) {
        super(inputStream, listener);
    }

    @Override
    public long contentLength() throws IOException {
        return requestBodyData.available();
    }

    @Override
    public void writeTo(BufferedSink sink) {
        try (Source source = Okio.source(requestBodyData)) {
            long total = 0;
            long read;
            while ((read = source.read(sink.buffer(), SEGMENT_SIZE)) != -1) {
                total += read;
                sink.flush();
                listener.updateProgress(total);
            }
        } catch (RuntimeException rethrown) {
            throw rethrown;
        } catch (Exception ignored) {
            // ignore
        }
    }
}
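Not part of the repository: a small sketch of how the counting bodies report progress. TransferData is package-private, so the sketch has to sit in the same package; the class name and file path are hypothetical.

package org.arvados.client.api.client;

import java.io.File;

public class ProgressExample {
    public static void main(String[] args) {
        File file = new File("/tmp/example.dat"); // assumed input file
        // TransferData logs only when the whole-percent value changes.
        CountingRequestBody.TransferData transferData = new CountingRequestBody.TransferData(file.length());
        // ProgressListener is a functional interface taking the cumulative byte count.
        ProgressListener listener = transferData::updateTransferProgress;
        listener.updateProgress(file.length() / 2); // would log "... / 50%"
    }
}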
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.HttpUrl;
import okhttp3.HttpUrl.Builder;
import okhttp3.Request;
import okhttp3.RequestBody;
import org.arvados.client.api.model.Group;
import org.arvados.client.api.model.GroupList;
import org.arvados.client.api.model.argument.ContentsGroup;
import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.api.model.argument.UntrashGroup;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;

public class GroupsApiClient extends BaseStandardApiClient<Group, GroupList> {

    private static final String RESOURCE = "groups";
    private final Logger log = org.slf4j.LoggerFactory.getLogger(GroupsApiClient.class);

    public GroupsApiClient(ConfigProvider config) {
        super(config);
    }

    public GroupList contents(ContentsGroup contentsGroup) {
        log.debug("Get {} contents", getType().getSimpleName());
        Builder urlBuilder = getUrlBuilder().addPathSegment("contents");
        addQueryParameters(urlBuilder, contentsGroup);
        HttpUrl url = urlBuilder.build();
        Request request = getRequestBuilder().url(url).build();
        return callForList(request);
    }

    public GroupList contents(ListArgument listArguments) {
        log.debug("Get {} contents", getType().getSimpleName());
        HttpUrl.Builder urlBuilder = getUrlBuilder().addPathSegment("contents");
        addQueryParameters(urlBuilder, listArguments);
        HttpUrl url = urlBuilder.build();
        Request request = getRequestBuilder().url(url).build();
        return callForList(request);
    }

    public Group untrash(UntrashGroup untrashGroup) {
        log.debug("Untrash {} by UUID {}", getType().getSimpleName(), untrashGroup.getUuid());
        HttpUrl url = getUrlBuilder().addPathSegment(untrashGroup.getUuid()).addPathSegment("untrash").build();
        RequestBody requestBody = getJsonRequestBody(untrashGroup);
        Request request = getRequestBuilder().post(requestBody).url(url).build();
        return callForType(request);
    }

    @Override
    String getResource() { return RESOURCE; }

    @Override
    Class<Group> getType() { return Group.class; }

    @Override
    Class<GroupList> getListType() { return GroupList.class; }
}
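Not part of the repository: a sketch of the contents endpoint combined with filters. The project UUID and class name are hypothetical; `config` again stands in for a real ConfigProvider.

import org.arvados.client.api.client.GroupsApiClient;
import org.arvados.client.api.model.GroupList;
import org.arvados.client.api.model.argument.Filter;
import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.config.ConfigProvider;

import java.util.Arrays;

public class GroupContentsExample {
    public static void main(String[] args) {
        ConfigProvider config = null; // stand-in ConfigProvider
        String projectUuid = "xxxxx-j7d0g-xxxxxxxxxxxxxxx"; // hypothetical project UUID
        GroupsApiClient groups = new GroupsApiClient(config);
        // GET /arvados/v1/groups/contents, restricted to subprojects of projectUuid
        GroupList subprojects = groups.contents(ListArgument.builder()
                .filters(Arrays.asList(
                        Filter.of("owner_uuid", Filter.Operator.EQUALS, projectUuid),
                        Filter.of("group_class", Filter.Operator.EQUALS, "project")))
                .build());
        System.out.println(subprojects.getItemsAvailable() + " subprojects");
    }
}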
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.HttpUrl;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import okhttp3.ResponseBody;
import org.arvados.client.config.ConfigProvider;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;

public class KeepWebApiClient extends BaseApiClient {

    public KeepWebApiClient(ConfigProvider config) {
        super(config);
    }

    public byte[] download(String collectionUuid, String filePathName) {
        Request request = getRequestBuilder()
                .url(getUrlBuilder(collectionUuid, filePathName).build())
                .get()
                .build();
        return newFileCall(request);
    }

    public InputStream get(String collectionUuid, String filePathName, long start, Long end) throws IOException {
        Request.Builder builder = this.getRequestBuilder();
        String rangeValue = "bytes=" + start + "-";
        if (end != null) {
            rangeValue += end;
        }
        builder.addHeader("Range", rangeValue);
        Request request = builder.url(this.getUrlBuilder(collectionUuid, filePathName).build()).get().build();
        Response response = client.newCall(request).execute();
        if (!response.isSuccessful()) {
            response.close();
            throw new IOException("Failed to download file: " + response);
        }
        ResponseBody body = response.body();
        if (body == null) {
            response.close();
            throw new IOException("Response body is null for request: " + request);
        }
        return body.byteStream();
    }

    public String delete(String collectionUuid, String filePathName) {
        Request request = getRequestBuilder()
                .url(getUrlBuilder(collectionUuid, filePathName).build())
                .delete()
                .build();
        return newCall(request);
    }

    public String upload(String collectionUuid, File file, ProgressListener progressListener) {
        RequestBody requestBody = new CountingFileRequestBody(file, progressListener);
        Request request = getRequestBuilder()
                .url(getUrlBuilder(collectionUuid, file.getName()).build())
                .put(requestBody)
                .build();
        return newCall(request);
    }

    public String upload(String collectionUuid, InputStream inputStream, String fileName, ProgressListener progressListener) {
        RequestBody requestBody = new CountingStreamRequestBody(inputStream, progressListener);
        Request request = getRequestBuilder()
                .url(getUrlBuilder(collectionUuid, fileName).build())
                .put(requestBody)
                .build();
        return newCall(request);
    }

    private HttpUrl.Builder getUrlBuilder(String collectionUuid, String filePathName) {
        return new HttpUrl.Builder()
                .scheme(config.getApiProtocol())
                .host(config.getKeepWebHost())
                .port(config.getKeepWebPort())
                .addPathSegment("c=" + collectionUuid)
                .addPathSegment(filePathName);
    }
}
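Not part of the repository: a sketch of the ranged download above. The UUID and path are placeholders; note the caller owns, and must close, the returned stream.

import org.arvados.client.api.client.KeepWebApiClient;
import org.arvados.client.config.ConfigProvider;

import java.io.IOException;
import java.io.InputStream;

public class RangedDownloadExample {
    public static void main(String[] args) throws IOException {
        ConfigProvider config = null; // stand-in ConfigProvider
        KeepWebApiClient keepWeb = new KeepWebApiClient(config);
        // Sends "Range: bytes=0-1023" against keep-web.
        try (InputStream in = keepWeb.get("xxxxx-4zz18-xxxxxxxxxxxxxxx", "subdir/data.bin", 0, 1023L)) {
            byte[] head = in.readNBytes(1024); // Java 9+
            System.out.println("read " + head.length + " bytes");
        }
    }
}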
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/LinksApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import org.arvados.client.api.model.Link;
import org.arvados.client.api.model.LinkList;
import org.arvados.client.config.ConfigProvider;

public class LinksApiClient extends BaseStandardApiClient<Link, LinkList> {

    private static final String RESOURCE = "links";

    public LinksApiClient(ConfigProvider config) {
        super(config);
    }

    @Override
    String getResource() { return RESOURCE; }

    @Override
    Class<Link> getType() { return Link.class; }

    @Override
    Class<LinkList> getListType() { return LinkList.class; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/ProgressListener.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

@FunctionalInterface
public interface ProgressListener {

    void updateProgress(long uploadedBytes);
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/UsersApiClient.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.Request;
import org.arvados.client.api.model.User;
import org.arvados.client.api.model.UserList;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;

public class UsersApiClient extends BaseStandardApiClient<User, UserList> {

    private static final String RESOURCE = "users";
    private final Logger log = org.slf4j.LoggerFactory.getLogger(UsersApiClient.class);

    public UsersApiClient(ConfigProvider config) {
        super(config);
    }

    public User current() {
        log.debug("Get current {}", getType().getSimpleName());
        Request request = getNoArgumentMethodRequest("current");
        return callForType(request);
    }

    public User system() {
        log.debug("Get system {}", getType().getSimpleName());
        Request request = getNoArgumentMethodRequest("system");
        return callForType(request);
    }

    @Override
    String getResource() { return RESOURCE; }

    @Override
    Class<User> getType() { return User.class; }

    @Override
    Class<UserList> getListType() { return UserList.class; }
}
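Not part of the repository: a one-call sketch of identifying the user behind the configured API token.

import org.arvados.client.api.client.UsersApiClient;
import org.arvados.client.api.model.User;
import org.arvados.client.config.ConfigProvider;

public class WhoAmIExample {
    public static void main(String[] args) {
        ConfigProvider config = null; // stand-in ConfigProvider
        UsersApiClient users = new UsersApiClient(config);
        User me = users.current(); // GET /arvados/v1/users/current
        System.out.println(me.getUuid() + " " + me.getFullName());
    }
}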
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client.factory;

import com.google.common.base.Suppliers;
import okhttp3.OkHttpClient;
import org.arvados.client.exception.ArvadosClientException;
import org.slf4j.Logger;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.util.function.Supplier;

/**
 * {@link OkHttpClient} instance factory that builds and configures client instances sharing
 * the common resource pool: this is the recommended approach to optimize resource usage.
 */
public final class OkHttpClientFactory {

    public static final OkHttpClientFactory INSTANCE = new OkHttpClientFactory();
    private final Logger log = org.slf4j.LoggerFactory.getLogger(OkHttpClientFactory.class);

    private final OkHttpClient clientSecure = new OkHttpClient();
    private final Supplier<OkHttpClient> clientUnsecure =
            Suppliers.memoize(this::getDefaultClientAcceptingAllCertificates);

    private OkHttpClientFactory() { /* singleton */ }

    public OkHttpClient create(boolean apiHostInsecure) {
        return apiHostInsecure ? getDefaultUnsecureClient() : getDefaultClient();
    }

    /**
     * @return default secure {@link OkHttpClient} with shared resource pool.
     */
    public OkHttpClient getDefaultClient() {
        return clientSecure;
    }

    /**
     * @return default {@link OkHttpClient} with shared resource pool
     * that will accept all SSL certificates by default.
     */
    public OkHttpClient getDefaultUnsecureClient() {
        return clientUnsecure.get();
    }

    /**
     * @return default {@link OkHttpClient.Builder} with shared resource pool.
     */
    public OkHttpClient.Builder getDefaultClientBuilder() {
        return clientSecure.newBuilder();
    }

    /**
     * @return default {@link OkHttpClient.Builder} with shared resource pool
     * that is preconfigured to accept all SSL certificates.
     */
    public OkHttpClient.Builder getDefaultUnsecureClientBuilder() {
        return clientUnsecure.get().newBuilder();
    }

    private OkHttpClient getDefaultClientAcceptingAllCertificates() {
        log.warn("Creating unsafe OkHttpClient. All SSL certificates will be accepted.");
        try {
            // Create a trust manager that does not validate certificate chains
            final TrustManager[] trustAllCerts = {createX509TrustManager()};

            // Install the all-trusting trust manager
            SSLContext sslContext = SSLContext.getInstance("SSL");
            sslContext.init(null, trustAllCerts, new SecureRandom());

            // Create an ssl socket factory with our all-trusting manager
            final SSLSocketFactory sslSocketFactory = sslContext.getSocketFactory();

            // Create the OkHttpClient.Builder with shared resource pool
            final OkHttpClient.Builder builder = clientSecure.newBuilder();
            builder.sslSocketFactory(sslSocketFactory, (X509TrustManager) trustAllCerts[0]);
            builder.hostnameVerifier((hostname, session) -> true);
            return builder.build();
        } catch (NoSuchAlgorithmException | KeyManagementException e) {
            throw new ArvadosClientException("Error establishing SSL context", e);
        }
    }

    private static X509TrustManager createX509TrustManager() {
        return new X509TrustManager() {

            @Override
            public void checkClientTrusted(X509Certificate[] chain, String authType) { }

            @Override
            public void checkServerTrusted(X509Certificate[] chain, String authType) { }

            @Override
            public X509Certificate[] getAcceptedIssuers() {
                return new X509Certificate[]{};
            }
        };
    }
}
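Not part of the repository: the two entry points side by side. Because the unsecure client is derived via newBuilder(), both share one connection pool; the trust-all variant belongs only against dev clusters with self-signed certificates.

import okhttp3.OkHttpClient;
import org.arvados.client.api.client.factory.OkHttpClientFactory;

public class ClientFactoryExample {
    public static void main(String[] args) {
        OkHttpClient secure = OkHttpClientFactory.INSTANCE.getDefaultClient();
        // Accepts any TLS certificate and any hostname.
        OkHttpClient trustAll = OkHttpClientFactory.INSTANCE.getDefaultUnsecureClient();
        System.out.println(secure != trustAll); // distinct clients, shared pool
    }
}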
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/ApiError.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "errors", "error_token" })
public class ApiError {

    @JsonProperty("errors") private List<String> errors;
    @JsonProperty("error_token") private String errorToken;

    public List<String> getErrors() { return this.errors; }
    public String getErrorToken() { return this.errorToken; }
    public void setErrors(List<String> errors) { this.errors = errors; }
    public void setErrorToken(String errorToken) { this.errorToken = errorToken; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/ArvadosConfig.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;

@JsonIgnoreProperties(ignoreUnknown = true)
public class ArvadosConfig {

    @JsonProperty("Services") private Services services;

    public Services getServices() { return services; }
    public void setServices(Services services) { this.services = services; }

    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class Services {

        @JsonProperty("WebDAVDownload") private WebDAVDownload webDAVDownload;

        public WebDAVDownload getWebDAVDownload() { return webDAVDownload; }
        public void setWebDAVDownload(WebDAVDownload webDAVDownload) { this.webDAVDownload = webDAVDownload; }
    }

    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class WebDAVDownload {

        @JsonProperty("ExternalURL") private String externalURL;

        public String getExternalURL() { return externalURL; }
        public void setExternalURL(String externalURL) { this.externalURL = externalURL; }
    }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Collection.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.time.LocalDateTime;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "portable_data_hash", "replication_desired", "replication_confirmed_at", "replication_confirmed",
        "manifest_text", "name", "description", "properties", "delete_at", "trash_at", "is_trashed" })
public class Collection extends Item {

    @JsonProperty("portable_data_hash") private String portableDataHash;
    @JsonProperty("replication_desired") private Integer replicationDesired;
    @JsonProperty("replication_confirmed_at") private LocalDateTime replicationConfirmedAt;
    @JsonProperty("replication_confirmed") private Integer replicationConfirmed;
    @JsonProperty("manifest_text") private String manifestText;
    @JsonProperty("name") private String name;
    @JsonProperty("description") private String description;
    @JsonProperty("properties") private Object properties;
    @JsonProperty("delete_at") private LocalDateTime deleteAt;
    @JsonProperty("trash_at") private LocalDateTime trashAt;
    @JsonProperty("is_trashed") private Boolean trashed;

    public String getPortableDataHash() { return this.portableDataHash; }
    public Integer getReplicationDesired() { return this.replicationDesired; }
    public LocalDateTime getReplicationConfirmedAt() { return this.replicationConfirmedAt; }
    public Integer getReplicationConfirmed() { return this.replicationConfirmed; }
    public String getManifestText() { return this.manifestText; }
    public String getName() { return this.name; }
    public String getDescription() { return this.description; }
    public Object getProperties() { return this.properties; }
    public LocalDateTime getDeleteAt() { return this.deleteAt; }
    public LocalDateTime getTrashAt() { return this.trashAt; }
    public Boolean getTrashed() { return this.trashed; }

    public void setPortableDataHash(String portableDataHash) { this.portableDataHash = portableDataHash; }
    public void setReplicationDesired(Integer replicationDesired) { this.replicationDesired = replicationDesired; }
    public void setReplicationConfirmedAt(LocalDateTime replicationConfirmedAt) { this.replicationConfirmedAt = replicationConfirmedAt; }
    public void setReplicationConfirmed(Integer replicationConfirmed) { this.replicationConfirmed = replicationConfirmed; }
    public void setManifestText(String manifestText) { this.manifestText = manifestText; }
    public void setName(String name) { this.name = name; }
    public void setDescription(String description) { this.description = description; }
    public void setProperties(Object properties) { this.properties = properties; }
    public void setDeleteAt(LocalDateTime deleteAt) { this.deleteAt = deleteAt; }
    public void setTrashAt(LocalDateTime trashAt) { this.trashAt = trashAt; }
    public void setTrashed(Boolean trashed) { this.trashed = trashed; }

    public String toString() {
        return "Collection(portableDataHash=" + this.getPortableDataHash()
                + ", replicationDesired=" + this.getReplicationDesired()
                + ", replicationConfirmedAt=" + this.getReplicationConfirmedAt()
                + ", replicationConfirmed=" + this.getReplicationConfirmed()
                + ", manifestText=" + this.getManifestText()
                + ", name=" + this.getName()
                + ", description=" + this.getDescription()
                + ", properties=" + this.getProperties()
                + ", deleteAt=" + this.getDeleteAt()
                + ", trashAt=" + this.getTrashAt()
                + ", trashed=" + this.getTrashed() + ")";
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/CollectionList.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "items" })
public class CollectionList extends ItemList {

    @JsonProperty("items") private List<Collection> items;

    public List<Collection> getItems() { return this.items; }
    public void setItems(List<Collection> items) { this.items = items; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.HashMap;
import java.util.Map;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
public class CollectionReplaceFiles {

    @JsonProperty("collection") private CollectionOptions collectionOptions;
    @JsonProperty("replace_files") private Map<String, String> replaceFiles;

    public CollectionReplaceFiles() {
        this.collectionOptions = new CollectionOptions();
        this.replaceFiles = new HashMap<>();
    }

    public void addFileReplacement(String targetPath, String sourcePath) {
        this.replaceFiles.put(targetPath, sourcePath);
    }

    @JsonInclude(JsonInclude.Include.NON_NULL)
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class CollectionOptions {

        @JsonProperty("preserve_version") private boolean preserveVersion;

        public CollectionOptions() {
            this.preserveVersion = true;
        }

        public boolean isPreserveVersion() { return preserveVersion; }
        public void setPreserveVersion(boolean preserveVersion) { this.preserveVersion = preserveVersion; }
    }

    public CollectionOptions getCollectionOptions() { return collectionOptions; }
    public void setCollectionOptions(CollectionOptions collectionOptions) { this.collectionOptions = collectionOptions; }
    public Map<String, String> getReplaceFiles() { return replaceFiles; }
    public void setReplaceFiles(Map<String, String> replaceFiles) { this.replaceFiles = replaceFiles; }
}
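Not part of the repository: a sketch of driving replace_files through CollectionsApiClient.update. The PDH and UUIDs are placeholders; the "portable-data-hash/path" source form is an assumption about how the server expects sources to be named.

import org.arvados.client.api.client.CollectionsApiClient;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionReplaceFiles;
import org.arvados.client.config.ConfigProvider;

public class ReplaceFilesExample {
    public static void main(String[] args) {
        ConfigProvider config = null; // stand-in ConfigProvider
        CollectionsApiClient collections = new CollectionsApiClient(config);
        CollectionReplaceFiles replace = new CollectionReplaceFiles();
        // Target path in the updated collection -> source reference (hypothetical PDH).
        replace.addFileReplacement("/copied.txt", "d41d8cd98f00b204e9800998ecf8427e+0/src.txt");
        Collection updated = collections.update("xxxxx-4zz18-xxxxxxxxxxxxxxx", replace);
        System.out.println(updated.getPortableDataHash());
    }
}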
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Group.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.time.LocalDateTime;
import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "command", "container_count", "container_count_max", "container_image", "container_uuid", "cwd",
        "environment", "expires_at", "filters", "log_uuid", "mounts", "output_name", "output_path", "output_uuid",
        "output_ttl", "priority", "properties", "requesting_container_uuid", "runtime_constraints",
        "scheduling_parameters", "state", "use_existing" })
public class Group extends Item {

    @JsonProperty("name") private String name;
    @JsonProperty("group_class") private String groupClass;
    @JsonProperty("description") private String description;
    @JsonProperty(value = "writable_by", access = JsonProperty.Access.WRITE_ONLY) private List<String> writableBy;
    @JsonProperty("delete_at") private LocalDateTime deleteAt;
    @JsonProperty("trash_at") private LocalDateTime trashAt;
    @JsonProperty("is_trashed") private Boolean isTrashed;
    @JsonProperty("command") private List<String> command;
    @JsonProperty("container_count") private Integer containerCount;
    @JsonProperty("container_count_max") private Integer containerCountMax;
    @JsonProperty("container_image") private String containerImage;
    @JsonProperty("container_uuid") private String containerUuid;
    @JsonProperty("cwd") private String cwd;
    @JsonProperty("environment") private Object environment;
    @JsonProperty("expires_at") private LocalDateTime expiresAt;
    @JsonProperty("filters") private List<String> filters;
    @JsonProperty("log_uuid") private String logUuid;
    @JsonProperty("mounts") private Object mounts;
    @JsonProperty("output_name") private String outputName;
    @JsonProperty("output_path") private String outputPath;
    @JsonProperty("output_uuid") private String outputUuid;
    @JsonProperty("output_ttl") private Integer outputTtl;
    @JsonProperty("priority") private Integer priority;
    @JsonProperty("properties") private Object properties;
    @JsonProperty("requesting_container_uuid") private String requestingContainerUuid;
    @JsonProperty("runtime_constraints") private RuntimeConstraints runtimeConstraints;
    @JsonProperty("scheduling_parameters") private Object schedulingParameters;
    @JsonProperty("state") private String state;
    @JsonProperty("use_existing") private Boolean useExisting;

    public String getName() { return this.name; }
    public String getGroupClass() { return this.groupClass; }
    public String getDescription() { return this.description; }
    public List<String> getWritableBy() { return this.writableBy; }
    public LocalDateTime getDeleteAt() { return this.deleteAt; }
    public LocalDateTime getTrashAt() { return this.trashAt; }
    public Boolean getIsTrashed() { return this.isTrashed; }
    public List<String> getCommand() { return this.command; }
    public Integer getContainerCount() { return this.containerCount; }
    public Integer getContainerCountMax() { return this.containerCountMax; }
    public String getContainerImage() { return this.containerImage; }
    public String getContainerUuid() { return this.containerUuid; }
    public String getCwd() { return this.cwd; }
    public Object getEnvironment() { return this.environment; }
    public LocalDateTime getExpiresAt() { return this.expiresAt; }
    public List<String> getFilters() { return this.filters; }
    public String getLogUuid() { return this.logUuid; }
    public Object getMounts() { return this.mounts; }
    public String getOutputName() { return this.outputName; }
    public String getOutputPath() { return this.outputPath; }
    public String getOutputUuid() { return this.outputUuid; }
    public Integer getOutputTtl() { return this.outputTtl; }
    public Integer getPriority() { return this.priority; }
    public Object getProperties() { return this.properties; }
    public String getRequestingContainerUuid() { return this.requestingContainerUuid; }
    public RuntimeConstraints getRuntimeConstraints() { return this.runtimeConstraints; }
    public Object getSchedulingParameters() { return this.schedulingParameters; }
    public String getState() { return this.state; }
    public Boolean getUseExisting() { return this.useExisting; }

    public void setName(String name) { this.name = name; }
    public void setGroupClass(String groupClass) { this.groupClass = groupClass; }
    public void setDescription(String description) { this.description = description; }
    public void setWritableBy(List<String> writableBy) { this.writableBy = writableBy; }
    public void setDeleteAt(LocalDateTime deleteAt) { this.deleteAt = deleteAt; }
    public void setTrashAt(LocalDateTime trashAt) { this.trashAt = trashAt; }
    public void setIsTrashed(Boolean isTrashed) { this.isTrashed = isTrashed; }
    public void setCommand(List<String> command) { this.command = command; }
    public void setContainerCount(Integer containerCount) { this.containerCount = containerCount; }
    public void setContainerCountMax(Integer containerCountMax) { this.containerCountMax = containerCountMax; }
    public void setContainerImage(String containerImage) { this.containerImage = containerImage; }
    public void setContainerUuid(String containerUuid) { this.containerUuid = containerUuid; }
    public void setCwd(String cwd) { this.cwd = cwd; }
    public void setEnvironment(Object environment) { this.environment = environment; }
    public void setExpiresAt(LocalDateTime expiresAt) { this.expiresAt = expiresAt; }
    public void setFilters(List<String> filters) { this.filters = filters; }
    public void setLogUuid(String logUuid) { this.logUuid = logUuid; }
    public void setMounts(Object mounts) { this.mounts = mounts; }
    public void setOutputName(String outputName) { this.outputName = outputName; }
    public void setOutputPath(String outputPath) { this.outputPath = outputPath; }
    public void setOutputUuid(String outputUuid) { this.outputUuid = outputUuid; }
    public void setOutputTtl(Integer outputTtl) { this.outputTtl = outputTtl; }
    public void setPriority(Integer priority) { this.priority = priority; }
    public void setProperties(Object properties) { this.properties = properties; }
    public void setRequestingContainerUuid(String requestingContainerUuid) { this.requestingContainerUuid = requestingContainerUuid; }
    public void setRuntimeConstraints(RuntimeConstraints runtimeConstraints) { this.runtimeConstraints = runtimeConstraints; }
    public void setSchedulingParameters(Object schedulingParameters) { this.schedulingParameters = schedulingParameters; }
    public void setState(String state) { this.state = state; }
    public void setUseExisting(Boolean useExisting) { this.useExisting = useExisting; }

    public String toString() {
        return "Group(name=" + this.getName() + ", groupClass=" + this.getGroupClass()
                + ", description=" + this.getDescription() + ", writableBy=" + this.getWritableBy()
                + ", deleteAt=" + this.getDeleteAt() + ", trashAt=" + this.getTrashAt()
                + ", isTrashed=" + this.getIsTrashed() + ", command=" + this.getCommand()
                + ", containerCount=" + this.getContainerCount() + ", containerCountMax=" + this.getContainerCountMax()
                + ", containerImage=" + this.getContainerImage() + ", containerUuid=" + this.getContainerUuid()
                + ", cwd=" + this.getCwd() + ", environment=" + this.getEnvironment()
                + ", expiresAt=" + this.getExpiresAt() + ", filters=" + this.getFilters()
                + ", logUuid=" + this.getLogUuid() + ", mounts=" + this.getMounts()
                + ", outputName=" + this.getOutputName() + ", outputPath=" + this.getOutputPath()
                + ", outputUuid=" + this.getOutputUuid() + ", outputTtl=" + this.getOutputTtl()
                + ", priority=" + this.getPriority() + ", properties=" + this.getProperties()
                + ", requestingContainerUuid=" + this.getRequestingContainerUuid()
                + ", runtimeConstraints=" + this.getRuntimeConstraints()
                + ", schedulingParameters=" + this.getSchedulingParameters()
                + ", state=" + this.getState() + ", useExisting=" + this.getUseExisting() + ")";
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/GroupList.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "items" })
public class GroupList extends ItemList {

    @JsonProperty("items") private List<Group> items;

    public List<Group> getItems() { return this.items; }
    public void setItems(List<Group> items) { this.items = items; }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Item.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.time.LocalDateTime;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "kind", "etag", "uuid", "owner_uuid", "created_at", "modified_by_client_uuid",
        "modified_by_user_uuid", "modified_at", "updated_at" })
public abstract class Item {

    @JsonProperty("kind") private String kind;
    @JsonProperty("etag") private String etag;
    @JsonProperty("uuid") private String uuid;
    @JsonProperty("owner_uuid") private String ownerUuid;
    @JsonProperty("created_at") private LocalDateTime createdAt;
    @JsonProperty("modified_by_client_uuid") private String modifiedByClientUuid;
    @JsonProperty("modified_by_user_uuid") private String modifiedByUserUuid;
    @JsonProperty("modified_at") private LocalDateTime modifiedAt;
    @JsonProperty("updated_at") private LocalDateTime updatedAt;

    public String getKind() { return this.kind; }
    public String getEtag() { return this.etag; }
    public String getUuid() { return this.uuid; }
    public String getOwnerUuid() { return this.ownerUuid; }
    public LocalDateTime getCreatedAt() { return this.createdAt; }
    public String getModifiedByClientUuid() { return this.modifiedByClientUuid; }
    public String getModifiedByUserUuid() { return this.modifiedByUserUuid; }
    public LocalDateTime getModifiedAt() { return this.modifiedAt; }
    public LocalDateTime getUpdatedAt() { return this.updatedAt; }

    public void setKind(String kind) { this.kind = kind; }
    public void setEtag(String etag) { this.etag = etag; }
    public void setUuid(String uuid) { this.uuid = uuid; }
    public void setOwnerUuid(String ownerUuid) { this.ownerUuid = ownerUuid; }
    public void setCreatedAt(LocalDateTime createdAt) { this.createdAt = createdAt; }
    public void setModifiedByClientUuid(String modifiedByClientUuid) { this.modifiedByClientUuid = modifiedByClientUuid; }
    public void setModifiedByUserUuid(String modifiedByUserUuid) { this.modifiedByUserUuid = modifiedByUserUuid; }
    public void setModifiedAt(LocalDateTime modifiedAt) { this.modifiedAt = modifiedAt; }
    public void setUpdatedAt(LocalDateTime updatedAt) { this.updatedAt = updatedAt; }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/ItemList.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "kind", "etag", "offset", "limit", "items_available" })
public class ItemList {

    @JsonProperty("kind") private String kind;
    @JsonProperty("etag") private String etag;
    @JsonProperty("offset") private Object offset;
    @JsonProperty("limit") private Object limit;
    @JsonProperty("items_available") private Integer itemsAvailable;

    public String getKind() { return this.kind; }
    public String getEtag() { return this.etag; }
    public Object getOffset() { return this.offset; }
    public Object getLimit() { return this.limit; }
    public Integer getItemsAvailable() { return this.itemsAvailable; }

    public void setKind(String kind) { this.kind = kind; }
    public void setEtag(String etag) { this.etag = etag; }
    public void setOffset(Object offset) { this.offset = offset; }
    public void setLimit(Object limit) { this.limit = limit; }
    public void setItemsAvailable(Integer itemsAvailable) { this.itemsAvailable = itemsAvailable; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/KeepService.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.*;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "service_host", "service_port", "service_ssl_flag", "service_type", "read_only" })
public class KeepService extends Item {

    @JsonProperty("service_host") private String serviceHost;
    @JsonProperty("service_port") private Integer servicePort;
    @JsonProperty("service_ssl_flag") private Boolean serviceSslFlag;
    @JsonProperty("service_type") private String serviceType;
    @JsonProperty("read_only") private Boolean readOnly;
    @JsonIgnore private String serviceRoot;

    public String getServiceHost() { return this.serviceHost; }
    public Integer getServicePort() { return this.servicePort; }
    public Boolean getServiceSslFlag() { return this.serviceSslFlag; }
    public String getServiceType() { return this.serviceType; }
    public Boolean getReadOnly() { return this.readOnly; }
    public String getServiceRoot() { return this.serviceRoot; }

    public void setServiceHost(String serviceHost) { this.serviceHost = serviceHost; }
    public void setServicePort(Integer servicePort) { this.servicePort = servicePort; }
    public void setServiceSslFlag(Boolean serviceSslFlag) { this.serviceSslFlag = serviceSslFlag; }
    public void setServiceType(String serviceType) { this.serviceType = serviceType; }
    public void setReadOnly(Boolean readOnly) { this.readOnly = readOnly; }
    public void setServiceRoot(String serviceRoot) { this.serviceRoot = serviceRoot; }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/KeepServiceList.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "items" })
public class KeepServiceList extends ItemList {

    @JsonProperty("items") private List<KeepService> items;

    public List<KeepService> getItems() { return this.items; }
    public void setItems(List<KeepService> items) { this.items = items; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Link.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "name", "head_kind", "head_uuid", "link_class" })
public class Link extends Item {

    @JsonProperty("name") private String name;
    @JsonProperty(value = "head_kind", access = JsonProperty.Access.WRITE_ONLY) private String headKind;
    @JsonProperty("head_uuid") private String headUuid;
    @JsonProperty("tail_uuid") private String tailUuid;
    @JsonProperty(value = "tail_kind", access = JsonProperty.Access.WRITE_ONLY) private String tailKind;
    @JsonProperty("link_class") private String linkClass;

    public String getName() { return name; }
    public String getHeadKind() { return headKind; }
    public String getHeadUuid() { return headUuid; }
    public String getTailUuid() { return tailUuid; }
    public String getTailKind() { return tailKind; }
    public String getLinkClass() { return linkClass; }

    public void setName(String name) { this.name = name; }
    public void setHeadKind(String headKind) { this.headKind = headKind; }
    public void setHeadUuid(String headUuid) { this.headUuid = headUuid; }
    public void setTailUuid(String tailUuid) { this.tailUuid = tailUuid; }
    public void setTailKind(String tailKind) { this.tailKind = tailKind; }
    public void setLinkClass(String linkClass) { this.linkClass = linkClass; }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/LinkList.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "items" })
public class LinkList extends ItemList {

    @JsonProperty("items") private List<Link> items;

    public List<Link> getItems() { return this.items; }
    public void setItems(List<Link> items) { this.items = items; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/RuntimeConstraints.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "API", "vcpus", "ram", "keep_cache_ram" })
public class RuntimeConstraints {

    @JsonProperty("API") private Boolean api;
    @JsonProperty("vcpus") private Integer vcpus;
    @JsonProperty("ram") private Long ram;
    @JsonProperty("keep_cache_ram") private Long keepCacheRam;

    public Boolean getApi() { return this.api; }
    public Integer getVcpus() { return this.vcpus; }
    public Long getRam() { return this.ram; }
    public Long getKeepCacheRam() { return this.keepCacheRam; }

    public void setApi(Boolean api) { this.api = api; }
    public void setVcpus(Integer vcpus) { this.vcpus = vcpus; }
    public void setRam(Long ram) { this.ram = ram; }
    public void setKeepCacheRam(Long keepCacheRam) { this.keepCacheRam = keepCacheRam; }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/User.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "email", "username", "full_name", "first_name", "last_name", "identity_url", "is_active",
        "is_admin", "is_invited", "prefs", "writable_by" })
public class User extends Item {

    @JsonProperty("email") private String email;
    @JsonProperty("username") private String username;
    @JsonProperty("full_name") private String fullName;
    @JsonProperty("first_name") private String firstName;
    @JsonProperty("last_name") private String lastName;
    @JsonProperty("identity_url") private String identityUrl;
    @JsonProperty("is_active") private Boolean isActive;
    @JsonProperty("is_admin") private Boolean isAdmin;
    @JsonProperty("is_invited") private Boolean isInvited;
    @JsonProperty("prefs") private Object prefs;
    @JsonProperty("writable_by") private List<String> writableBy;

    public String getEmail() { return this.email; }
    public String getUsername() { return this.username; }
    public String getFullName() { return this.fullName; }
    public String getFirstName() { return this.firstName; }
    public String getLastName() { return this.lastName; }
    public String getIdentityUrl() { return this.identityUrl; }
    public Boolean getIsActive() { return this.isActive; }
    public Boolean getIsAdmin() { return this.isAdmin; }
    public Boolean getIsInvited() { return this.isInvited; }
    public Object getPrefs() { return this.prefs; }
    public List<String> getWritableBy() { return this.writableBy; }

    public void setEmail(String email) { this.email = email; }
    public void setUsername(String username) { this.username = username; }
    public void setFullName(String fullName) { this.fullName = fullName; }
    public void setFirstName(String firstName) { this.firstName = firstName; }
    public void setLastName(String lastName) { this.lastName = lastName; }
    public void setIdentityUrl(String identityUrl) { this.identityUrl = identityUrl; }
    public void setIsActive(Boolean isActive) { this.isActive = isActive; }
    public void setIsAdmin(Boolean isAdmin) { this.isAdmin = isAdmin; }
    public void setIsInvited(Boolean isInvited) { this.isInvited = isInvited; }
    public void setPrefs(Object prefs) { this.prefs = prefs; }
    public void setWritableBy(List<String> writableBy) { this.writableBy = writableBy; }

    public String toString() {
        return "User(email=" + this.getEmail() + ", username=" + this.getUsername()
                + ", fullName=" + this.getFullName() + ", firstName=" + this.getFirstName()
                + ", lastName=" + this.getLastName() + ", identityUrl=" + this.getIdentityUrl()
                + ", isActive=" + this.getIsActive() + ", isAdmin=" + this.getIsAdmin()
                + ", isInvited=" + this.getIsInvited() + ", prefs=" + this.getPrefs()
                + ", writableBy=" + this.getWritableBy() + ")";
    }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/UserList.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonPropertyOrder({ "items" })
public class UserList extends ItemList {

    @JsonProperty("items") private List<User> items;

    public List<User> getItems() { return this.items; }
    public void setItems(List<User> items) { this.items = items; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/Argument.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model.argument;

import com.fasterxml.jackson.annotation.JsonIgnore;

public abstract class Argument {

    @JsonIgnore
    private String uuid;

    public String getUuid() { return this.uuid; }
    public void setUuid(String uuid) { this.uuid = uuid; }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model.argument;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

import java.util.List;

@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({ "limit", "order", "filters", "recursive" })
public class ContentsGroup extends Argument {

    @JsonProperty("limit") private Integer limit;
    @JsonProperty("order") private String order;
    @JsonProperty("filters") private List<Filter> filters;
    @JsonProperty("recursive") private Boolean recursive;

    public Integer getLimit() { return this.limit; }
    public String getOrder() { return this.order; }
    public List<Filter> getFilters() { return this.filters; }
    public Boolean getRecursive() { return this.recursive; }

    public void setLimit(Integer limit) { this.limit = limit; }
    public void setOrder(String order) { this.order = order; }
    public void setFilters(List<Filter> filters) { this.filters = filters; }
    public void setRecursive(Boolean recursive) { this.recursive = recursive; }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/Filter.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.model.argument;

import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;

@JsonFormat(shape = JsonFormat.Shape.ARRAY)
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({ "attribute", "operator", "operand" })
public class Filter {

    @JsonProperty("attribute") private String attribute;
    @JsonProperty("operator") private Operator operator;
    @JsonProperty("operand") private Object operand;

    private Filter(String attribute, Operator operator, Object operand) {
        this.attribute = attribute;
        this.operator = operator;
        this.operand = operand;
    }

    public static Filter of(String attribute, Operator operator, Object operand) {
        return new Filter(attribute, operator, operand);
    }

    public String getAttribute() { return this.attribute; }
    public Operator getOperator() { return this.operator; }
    public Object getOperand() { return this.operand; }

    public boolean equals(Object o) {
        if (o == this) return true;
        if (!(o instanceof Filter)) return false;
        final Filter other = (Filter) o;
        final Object this$attribute = this.getAttribute();
        final Object other$attribute = other.getAttribute();
        if (this$attribute == null ? other$attribute != null : !this$attribute.equals(other$attribute)) return false;
        final Object this$operator = this.getOperator();
        final Object other$operator = other.getOperator();
        if (this$operator == null ? other$operator != null : !this$operator.equals(other$operator)) return false;
        final Object this$operand = this.getOperand();
        final Object other$operand = other.getOperand();
        if (this$operand == null ? other$operand != null : !this$operand.equals(other$operand)) return false;
        return true;
    }

    public int hashCode() {
        final int PRIME = 59;
        int result = 1;
        final Object $attribute = this.getAttribute();
        result = result * PRIME + ($attribute == null ? 43 : $attribute.hashCode());
        final Object $operator = this.getOperator();
        result = result * PRIME + ($operator == null ? 43 : $operator.hashCode());
        final Object $operand = this.getOperand();
        result = result * PRIME + ($operand == null ? 43 : $operand.hashCode());
        return result;
    }

    public String toString() {
        return "Filter(attribute=" + this.getAttribute() + ", operator=" + this.getOperator()
                + ", operand=" + this.getOperand() + ")";
    }

    public enum Operator {
        @JsonProperty("<") LESS,
        @JsonProperty("<=") LESS_EQUALS,
        @JsonProperty(">=") MORE_EQUALS,
        @JsonProperty(">") MORE,
        @JsonProperty("like") LIKE,
        @JsonProperty("ilike") ILIKE,
        @JsonProperty("=") EQUALS,
        @JsonProperty("!=") NOT_EQUALS,
        @JsonProperty("in") IN,
        @JsonProperty("not in") NOT_IN,
        @JsonProperty("is_a") IS_A,
        @JsonProperty("exists") EXISTS
    }
}
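Not part of the repository: a sketch of the array-shaped filter serialization in use. The collection client is assumed configured as in the earlier sketches.

import org.arvados.client.api.model.CollectionList;
import org.arvados.client.api.client.CollectionsApiClient;
import org.arvados.client.api.model.argument.Filter;
import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.config.ConfigProvider;

import java.util.Collections;

public class FilterExample {
    public static void main(String[] args) {
        ConfigProvider config = null; // stand-in ConfigProvider
        CollectionsApiClient collections = new CollectionsApiClient(config);
        // Serializes as the JSON array ["name", "like", "sample%"] thanks to
        // @JsonFormat(shape = ARRAY) and the @JsonProperty operator aliases.
        Filter byName = Filter.of("name", Filter.Operator.LIKE, "sample%");
        CollectionList matches = collections.list(
                ListArgument.builder().filters(Collections.singletonList(byName)).build());
        System.out.println(matches.getItemsAvailable() + " matching collections");
    }
}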
* * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.api.model.argument; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonPropertyOrder; import java.util.List; @JsonInclude(JsonInclude.Include.NON_NULL) @JsonPropertyOrder({ "limit", "offset", "filters", "order", "select", "distinct", "count", "exclude_home_project", "include_old_versions", "include_trash" }) public class ListArgument extends Argument { @JsonProperty("limit") private Integer limit; @JsonProperty("offset") private Integer offset; @JsonProperty("filters") private List<Filter> filters; @JsonProperty("order") private List<String> order; @JsonProperty("select") private List<String> select; @JsonProperty("distinct") private Boolean distinct; @JsonProperty("count") private Count count; @JsonProperty("exclude_home_project") private Boolean excludeHomeProject; @JsonProperty("include_old_versions") private Boolean includeOldVersions; @JsonProperty("include_trash") private Boolean includeTrash; ListArgument( Integer limit, Integer offset, List<Filter> filters, List<String> order, List<String> select, Boolean distinct, Count count, Boolean excludeHomeProject, Boolean includeOldVersions, Boolean includeTrash ) { this.limit = limit; this.offset = offset; this.filters = filters; this.order = order; this.select = select; this.distinct = distinct; this.count = count; this.excludeHomeProject = excludeHomeProject; this.includeOldVersions = includeOldVersions; this.includeTrash = includeTrash; } public static ListArgumentBuilder builder() { return new ListArgumentBuilder(); } public enum Count { @JsonProperty("exact") EXACT, @JsonProperty("none") NONE } public static class ListArgumentBuilder { private Integer limit; private Integer offset; private List<Filter> filters; private List<String> order; private List<String> select; private Boolean distinct; private Count count; private Boolean excludeHomeProject; private Boolean includeOldVersions; private Boolean includeTrash; ListArgumentBuilder() { } public ListArgumentBuilder limit(Integer limit) { this.limit = limit; return this; } public ListArgumentBuilder offset(Integer offset) { this.offset = offset; return this; } public ListArgumentBuilder filters(List<Filter> filters) { this.filters = filters; return this; } public ListArgumentBuilder order(List<String> order) { this.order = order; return this; } public ListArgumentBuilder select(List<String> select) { this.select = select; return this; } public ListArgumentBuilder distinct(Boolean distinct) { this.distinct = distinct; return this; } public ListArgumentBuilder count(Count count) { this.count = count; return this; } public ListArgument.ListArgumentBuilder excludeHomeProject(Boolean excludeHomeProject) { this.excludeHomeProject = excludeHomeProject; return this; } public ListArgument.ListArgumentBuilder includeOldVersions(Boolean includeOldVersions) { this.includeOldVersions = includeOldVersions; return this; } public ListArgument.ListArgumentBuilder includeTrash(Boolean includeTrash) { this.includeTrash = includeTrash; return this; } public ListArgument build() { return new ListArgument(limit, offset, filters, order, select, distinct, count, excludeHomeProject, includeOldVersions, includeTrash); } public String toString() { return "ListArgument.ListArgumentBuilder(limit=" + this.limit + ", offset=" + this.offset + ", filters=" + this.filters + ", order=" + this.order + ", select=" + this.select + ", distinct=" + this.distinct + ", count=" + this.count + ", excludeHomeProject=" + this.excludeHomeProject
+ ", includeOldVersions=" + this.includeOldVersions + ", includeTrash=" + this.includeTrash + ")"; } } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.api.model.argument; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonPropertyOrder; @JsonInclude(JsonInclude.Include.NON_NULL) @JsonPropertyOrder({ "ensure_unique_name" }) public class UntrashGroup extends Argument { @JsonProperty("ensure_unique_name") private Boolean ensureUniqueName; public Boolean getEnsureUniqueName() { return this.ensureUniqueName; } public void setEnsureUniqueName(Boolean ensureUniqueName) { this.ensureUniqueName = ensureUniqueName; } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/common/Characters.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.common; public final class Characters { private Characters() {} public static final String SPACE = "\\040"; public static final String NEW_LINE = "\n"; public static final String SLASH = "/"; public static final String DOT = "."; public static final String COLON = ":"; public static final String PERCENT = "%"; public static final String QUOTE = "\""; } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/common/Headers.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.common; public final class Headers { private Headers() {} public static final String X_KEEP_DESIRED_REPLICAS = "X-Keep-Desired-Replicas"; } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/common/Patterns.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.common; public final class Patterns { public static final String HINT_PATTERN = "^[A-Z][A-Za-z0-9@_-]+$"; public static final String FILE_TOKEN_PATTERN = "(\\d+:\\d+:\\S+)"; public static final String LOCATOR_PATTERN = "([0-9a-f]{32})\\+([0-9]+)(\\+[A-Z][-A-Za-z0-9@_]*)*"; public static final String GROUP_UUID_PATTERN = "[a-z0-9]{5}-j7d0g-[a-z0-9]{15}"; public static final String USER_UUID_PATTERN = "[a-z0-9]{5}-tpzed-[a-z0-9]{15}"; private Patterns() {} } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/config/ConfigProvider.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. 
* * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.config; import java.io.File; public interface ConfigProvider { //API boolean isApiHostInsecure(); String getKeepWebHost(); int getKeepWebPort(); String getApiHost(); int getApiPort(); String getApiToken(); String getApiProtocol(); int getConnectTimeout(); int getReadTimeout(); int getWriteTimeout(); //FILE UPLOAD int getFileSplitSize(); File getFileSplitDirectory(); int getNumberOfCopies(); int getNumberOfRetries(); } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.config; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; public class ExternalConfigProvider implements ConfigProvider { private static final Logger log = LoggerFactory.getLogger(ExternalConfigProvider.class); private static final int DEFAULT_CONNECTION_TIMEOUT = 60000; private static final int DEFAULT_READ_TIMEOUT = 60000; private static final int DEFAULT_WRITE_TIMEOUT = 60000; private final boolean apiHostInsecure; private final String keepWebHost; private final int keepWebPort; private final String apiHost; private final int apiPort; private final String apiToken; private final String apiProtocol; private final int fileSplitSize; private final File fileSplitDirectory; private final int numberOfCopies; private final int numberOfRetries; private final int connectTimeout; private final int readTimeout; private final int writeTimeout; ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort, String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory, int numberOfCopies, int numberOfRetries) { this.apiHostInsecure = apiHostInsecure; this.keepWebHost = keepWebHost; this.keepWebPort = keepWebPort; this.apiHost = apiHost; this.apiPort = apiPort; this.apiToken = apiToken; this.apiProtocol = apiProtocol; this.fileSplitSize = fileSplitSize; this.fileSplitDirectory = fileSplitDirectory; this.numberOfCopies = numberOfCopies; this.numberOfRetries = numberOfRetries; this.connectTimeout = DEFAULT_CONNECTION_TIMEOUT; this.readTimeout = DEFAULT_READ_TIMEOUT; this.writeTimeout = DEFAULT_WRITE_TIMEOUT; } ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort, String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory, int numberOfCopies, int numberOfRetries, int connectTimeout, int readTimeout, int writeTimeout) { this.apiHostInsecure = apiHostInsecure; this.keepWebHost = keepWebHost; this.keepWebPort = keepWebPort; this.apiHost = apiHost; this.apiPort = apiPort; this.apiToken = apiToken; this.apiProtocol = apiProtocol; this.fileSplitSize = fileSplitSize; this.fileSplitDirectory = fileSplitDirectory; this.numberOfCopies = numberOfCopies; this.numberOfRetries = numberOfRetries; this.connectTimeout = connectTimeout; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; } public static ExternalConfigProviderBuilder builder() { return new ExternalConfigProviderBuilder(); } @Override public String toString() { return "ExternalConfigProvider{" + "apiHostInsecure=" + apiHostInsecure + ", keepWebHost='" + keepWebHost + '\'' + ", keepWebPort=" + keepWebPort + ", apiHost='" + apiHost + '\'' + ", 
apiPort=" + apiPort + ", apiToken='" + apiToken + '\'' + ", apiProtocol='" + apiProtocol + '\'' + ", fileSplitSize=" + fileSplitSize + ", fileSplitDirectory=" + fileSplitDirectory + ", numberOfCopies=" + numberOfCopies + ", numberOfRetries=" + numberOfRetries + '}'; } public boolean isApiHostInsecure() { return this.apiHostInsecure; } public String getKeepWebHost() { return this.keepWebHost; } public int getKeepWebPort() { return this.keepWebPort; } public String getApiHost() { return this.apiHost; } public int getApiPort() { return this.apiPort; } public String getApiToken() { return this.apiToken; } public String getApiProtocol() { return this.apiProtocol; } public int getFileSplitSize() { return this.fileSplitSize; } public File getFileSplitDirectory() { return this.fileSplitDirectory; } public int getNumberOfCopies() { return this.numberOfCopies; } public int getNumberOfRetries() { return this.numberOfRetries; } public int getConnectTimeout() { return this.connectTimeout; } public int getReadTimeout() { return this.readTimeout; } public int getWriteTimeout() { return this.writeTimeout; } public static class ExternalConfigProviderBuilder { private boolean apiHostInsecure; private String keepWebHost; private int keepWebPort; private String apiHost; private int apiPort; private String apiToken; private String apiProtocol; private int fileSplitSize; private File fileSplitDirectory; private int numberOfCopies; private int numberOfRetries; private int connectTimeout = DEFAULT_CONNECTION_TIMEOUT; private int readTimeout = DEFAULT_READ_TIMEOUT; private int writeTimeout = DEFAULT_WRITE_TIMEOUT; private boolean autoFetchWebDAV = true; ExternalConfigProviderBuilder() { } public ExternalConfigProvider.ExternalConfigProviderBuilder apiHostInsecure(boolean apiHostInsecure) { this.apiHostInsecure = apiHostInsecure; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder keepWebHost(String keepWebHost) { this.keepWebHost = keepWebHost; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder keepWebPort(int keepWebPort) { this.keepWebPort = keepWebPort; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder apiHost(String apiHost) { this.apiHost = apiHost; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder apiPort(int apiPort) { this.apiPort = apiPort; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder apiToken(String apiToken) { this.apiToken = apiToken; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder apiProtocol(String apiProtocol) { this.apiProtocol = apiProtocol; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder fileSplitSize(int fileSplitSize) { this.fileSplitSize = fileSplitSize; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder fileSplitDirectory(File fileSplitDirectory) { this.fileSplitDirectory = fileSplitDirectory; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder numberOfCopies(int numberOfCopies) { this.numberOfCopies = numberOfCopies; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder numberOfRetries(int numberOfRetries) { this.numberOfRetries = numberOfRetries; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder connectTimeout(int connectTimeout) { this.connectTimeout = connectTimeout; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder readTimeout(int readTimeout) { this.readTimeout = readTimeout; return this; } 
public ExternalConfigProvider.ExternalConfigProviderBuilder writeTimeout(int writeTimeout) { this.writeTimeout = writeTimeout; return this; } public ExternalConfigProvider.ExternalConfigProviderBuilder autoFetchWebDAV(boolean autoFetchWebDAV) { this.autoFetchWebDAV = autoFetchWebDAV; return this; } public ExternalConfigProvider build() { if (shouldAutoFetchWebDAV()) { autoFetchWebDAVConfiguration(); } validateWebDAVConfiguration(); return new ExternalConfigProvider( apiHostInsecure, keepWebHost, keepWebPort, apiHost, apiPort, apiToken, apiProtocol, fileSplitSize, fileSplitDirectory, numberOfCopies, numberOfRetries, connectTimeout, readTimeout, writeTimeout ); } private boolean shouldAutoFetchWebDAV() { return autoFetchWebDAV && (keepWebHost == null || keepWebHost.isEmpty()); } private void autoFetchWebDAVConfiguration() { WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher( apiProtocol, apiHost, apiPort, apiHostInsecure ); WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch(); if (config != null) { keepWebHost = config.getHost(); keepWebPort = config.getPort(); } } private void validateWebDAVConfiguration() { if (keepWebHost == null || keepWebHost.isEmpty()) { log.warn("WebDAV host is not configured. File operations may not work properly. Consider providing keepWebHost/keepWebPort or ensuring the Arvados API config endpoint is accessible."); } } } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.config; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import java.io.File; public class FileConfigProvider implements ConfigProvider { private static final String DEFAULT_PATH = "arvados"; private final Config config; public FileConfigProvider() { config = ConfigFactory.load().getConfig(DEFAULT_PATH); } public FileConfigProvider(final String configFile) { config = (configFile != null) ? 
ConfigFactory.load(configFile).getConfig(DEFAULT_PATH) : ConfigFactory.load().getConfig(DEFAULT_PATH); } public Config getConfig() { return config; } private File getFile(String path) { return new File(config.getString(path)); } private int getInt(String path) { return config.getInt(path); } private boolean getBoolean(String path) { return config.getBoolean(path); } private String getString(String path) { return config.getString(path); } @Override public boolean isApiHostInsecure() { return this.getBoolean("api.host-insecure"); } @Override public String getKeepWebHost() { return this.getString("api.keepweb-host"); } @Override public int getKeepWebPort() { return this.getInt("api.keepweb-port"); } @Override public String getApiHost() { return this.getString("api.host"); } @Override public int getApiPort() { return this.getInt("api.port"); } @Override public String getApiToken() { return this.getString("api.token"); } @Override public String getApiProtocol() { return this.getString("api.protocol"); } @Override public int getFileSplitSize() { return this.getInt("split-size"); } @Override public File getFileSplitDirectory() { return this.getFile("temp-dir"); } @Override public int getNumberOfCopies() { return this.getInt("copies"); } @Override public int getNumberOfRetries() { return this.getInt("retries"); } public String getIntegrationTestProjectUuid() { return this.getString("integration-tests.project-uuid"); } @Override public int getConnectTimeout() { return this.getInt("connectTimeout"); } @Override public int getReadTimeout() { return this.getInt("readTimeout"); } @Override public int getWriteTimeout() { return this.getInt("writeTimeout"); } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/config/WebDAVConfigFetcher.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.config; import org.arvados.client.api.client.ConfigApiClient; import org.arvados.client.api.model.ArvadosConfig; import org.arvados.client.exception.ArvadosApiException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.net.MalformedURLException; import java.net.URL; public class WebDAVConfigFetcher { private static final Logger log = LoggerFactory.getLogger(WebDAVConfigFetcher.class); private static final int DEFAULT_HTTPS_PORT = 443; private static final int DEFAULT_HTTP_PORT = 80; private final String apiProtocol; private final String apiHost; private final int apiPort; private final boolean apiHostInsecure; public WebDAVConfigFetcher(String apiProtocol, String apiHost, int apiPort, boolean apiHostInsecure) { this.apiProtocol = apiProtocol != null ? apiProtocol : "https"; this.apiHost = apiHost; this.apiPort = apiPort > 0 ? apiPort : (this.apiProtocol.equals("https") ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT); this.apiHostInsecure = apiHostInsecure; } public WebDAVConfig fetch() { if (!isConfigured()) { log.debug("API host not configured, skipping WebDAV auto-fetch"); return null; } try { log.info("Attempting to auto-fetch WebDAV configuration from Arvados API"); ArvadosConfig config = fetchArvadosConfig(); String webDavUrl = extractWebDAVUrl(config); if (webDavUrl == null) { log.debug("No WebDAV URL found in Arvados config"); return null; } return parseWebDAVUrl(webDavUrl); } catch (ArvadosApiException e) { log.warn("Failed to auto-fetch WebDAV configuration: {}. 
" + "You may need to configure keepWebHost and keepWebPort manually.", e.getMessage()); } catch (Exception e) { log.warn("Unexpected error while auto-fetching WebDAV configuration: {}. " + "You may need to configure keepWebHost and keepWebPort manually.", e.getMessage()); } return null; } private boolean isConfigured() { return apiHost != null && !apiHost.isEmpty(); } private ArvadosConfig fetchArvadosConfig() throws ArvadosApiException { ConfigApiClient configClient = new ConfigApiClient( apiProtocol, apiHost, apiPort, apiHostInsecure ); return configClient.fetchConfig(); } private String extractWebDAVUrl(ArvadosConfig config) { if (config == null || config.getServices() == null) { return null; } ArvadosConfig.WebDAVDownload webDav = config.getServices().getWebDAVDownload(); if (webDav == null) { return null; } return webDav.getExternalURL(); } private WebDAVConfig parseWebDAVUrl(String webDavUrl) { if (webDavUrl == null || webDavUrl.isEmpty()) { return null; } try { URL url = new URL(webDavUrl); String host = url.getHost(); int port = url.getPort(); // Use default port based on protocol if not specified if (port == -1) { port = "https".equals(url.getProtocol()) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } log.info("Successfully auto-configured WebDAV: host={}, port={}", host, port); return new WebDAVConfig(host, port); } catch (MalformedURLException e) { log.warn("Failed to parse WebDAV URL '{}': {}", webDavUrl, e.getMessage()); return null; } } public static class WebDAVConfig { private final String host; private final int port; public WebDAVConfig(String host, int port) { this.host = host; this.port = port; } public String getHost() { return host; } public int getPort() { return port; } } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/exception/ArvadosApiException.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.exception; public class ArvadosApiException extends ArvadosClientException { private static final long serialVersionUID = 1L; public ArvadosApiException(String message) { super(message); } public ArvadosApiException(String message, Throwable cause) { super(message, cause); } public ArvadosApiException(Throwable cause) { super(cause); } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/exception/ArvadosClientException.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.exception; /** * Parent exception for all exceptions in library. * More specific exceptions like ArvadosApiException extend this class. */ public class ArvadosClientException extends RuntimeException { public ArvadosClientException(String message) { super(message); } public ArvadosClientException(String message, Throwable cause) { super(message, cause); } public ArvadosClientException(Throwable cause) { super(cause); } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. 
* * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.facade; import com.google.common.collect.Lists; import org.arvados.client.api.client.CollectionsApiClient; import org.arvados.client.api.client.GroupsApiClient; import org.arvados.client.api.client.KeepWebApiClient; import org.arvados.client.api.client.UsersApiClient; import org.arvados.client.api.model.*; import org.arvados.client.api.model.argument.Filter; import org.arvados.client.api.model.argument.ListArgument; import org.arvados.client.config.FileConfigProvider; import org.arvados.client.config.ConfigProvider; import org.arvados.client.logic.collection.FileToken; import org.arvados.client.logic.collection.ManifestDecoder; import org.arvados.client.logic.keep.FileDownloader; import org.arvados.client.logic.keep.FileUploader; import org.slf4j.Logger; import java.io.File; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; public class ArvadosFacade { private final ConfigProvider config; private final Logger log = org.slf4j.LoggerFactory.getLogger(ArvadosFacade.class); private CollectionsApiClient collectionsApiClient; private GroupsApiClient groupsApiClient; private UsersApiClient usersApiClient; private FileDownloader fileDownloader; private FileUploader fileUploader; private static final String PROJECT = "project"; private static final String SUBPROJECT = "sub-project"; public ArvadosFacade(ConfigProvider config) { this.config = config; setFacadeFields(); } public ArvadosFacade() { this.config = new FileConfigProvider(); setFacadeFields(); } private void setFacadeFields() { collectionsApiClient = new CollectionsApiClient(config); groupsApiClient = new GroupsApiClient(config); usersApiClient = new UsersApiClient(config); ManifestDecoder manifestDecoder = new ManifestDecoder(); KeepWebApiClient keepWebApiClient = new KeepWebApiClient(config); fileDownloader = new FileDownloader(manifestDecoder, collectionsApiClient, keepWebApiClient); fileUploader = new FileUploader(keepWebApiClient, collectionsApiClient, config); } /** * Downloads a single file from a collection using Arvados Keep-Web. * The file is saved on a drive in the specified location and returned. * * @param filePathName path to the file within the collection. If the requested file is stored * directly in the collection (not within a subdirectory) this * is just the file name (e.g. 'file.txt'). * Otherwise the full file path must be passed (e.g. 'folder/file.txt') * @param collectionUuid uuid of the collection containing the requested file * @param pathToDownloadFolder path to the location in which the file should be saved. * The location must be a directory that does not already * contain a file of that name. * @return the downloaded file */ public File downloadFile(String filePathName, String collectionUuid, String pathToDownloadFolder) { return fileDownloader.downloadSingleFileUsingKeepWeb(filePathName, collectionUuid, pathToDownloadFolder); } /** * Downloads all files from a collection. * A directory named by the collection uuid is created in the specified location, * the files are saved in this directory, and a list of the downloaded * files is returned. * * @param collectionUuid uuid of the collection from which files are downloaded * @param pathToDownloadFolder path to the location in which files should be saved. * A new folder named by the collection uuid, containing the * downloaded files, is created in this location. * The location must be a directory that does not already * contain a folder of that name. * @param usingKeepWeb if true, files are downloaded using Keep-Web; * if false, files are downloaded using the Keep server API. * @return list containing the downloaded files */ public List<File> downloadCollectionFiles(String collectionUuid, String pathToDownloadFolder, boolean usingKeepWeb) { if (usingKeepWeb) return fileDownloader.downloadFilesFromCollectionUsingKeepWeb(collectionUuid, pathToDownloadFolder); return fileDownloader.downloadFilesFromCollection(collectionUuid, pathToDownloadFolder); } /** * Lists all FileTokens (objects containing information about files) for * the specified collection. * The information in each FileToken includes the file path, name, size and position * in the data stream. * * @param collectionUuid uuid of the collection for which FileTokens are listed * @return list containing FileTokens for each file in the specified collection */ public List<FileToken> listFileInfoFromCollection(String collectionUuid) { return fileDownloader.listFileInfoFromCollection(collectionUuid); }
/** * Creates and uploads a new collection containing the passed files. * The created collection has a default name and is uploaded to the user's 'Home' project. * * @see ArvadosFacade#upload(List, String, String) * @param files list of files to be uploaded within the new collection * @return collection object mapped from the JSON that is returned from the server after a successful upload */ public Collection upload(List<File> files) { return upload(files, null, null); } /** * Creates and uploads a new collection containing a single file. * The created collection has a default name and is uploaded to the user's 'Home' project. * * @see ArvadosFacade#upload(List, String, String) * @param file file to be uploaded * @return collection object mapped from the JSON that is returned from the server after a successful upload */ public Collection upload(File file) { return upload(Collections.singletonList(file), null, null); } /** * Uploads a new collection with the specified name, containing the selected files, * to an existing project. * * @param sourceFiles list of files to be uploaded within the new collection * @param collectionName name for the newly created collection. * A collection with that name must not already exist * in the specified project. If null is passed * then the collection name is set to a default, containing * the phrase 'New Collection' and a timestamp. * @param projectUuid uuid of the project in which the created collection is to be included. * If null is passed then the collection is uploaded to the user's 'Home' project. * @return collection object mapped from the JSON that is returned from the server after a successful upload */ public Collection upload(List<File> sourceFiles, String collectionName, String projectUuid) { return fileUploader.upload(sourceFiles, collectionName, projectUuid); } /** * Uploads a file to a specified collection. * * @see ArvadosFacade#uploadToExistingCollection(List, String) * @param file file to be uploaded to the existing collection. Its name must be unique * in comparison with the files already existing within the collection. * @param collectionUUID UUID of the collection to which the file should be uploaded * @return collection object mapped from the JSON that is returned from the server after a successful upload */ public Collection uploadToExistingCollection(File file, String collectionUUID) { return fileUploader.uploadToExistingCollection(Collections.singletonList(file), collectionUUID); } /** * Uploads multiple files to an existing collection. * * @param files list of files to be uploaded to the existing collection.
* File names must be unique - both within the passed list and * in comparison with the files already existing within the collection. * @param collectionUUID UUID of the collection to which the files should be uploaded * @return collection object mapped from the JSON that is returned from the server after a successful upload */ public Collection uploadToExistingCollection(List<File> files, String collectionUUID) { return fileUploader.uploadToExistingCollection(files, collectionUUID); } /** * Creates and uploads a new empty collection to the specified project. * * @param collectionName name for the newly created collection. * A collection with that name must not already exist * in the specified project. * @param projectUuid uuid of the project that will contain the uploaded empty collection. * To select the home project pass the current user's uuid from getCurrentUser() * @return collection object mapped from the JSON that is returned from the server after a successful upload * @see ArvadosFacade#getCurrentUser() */ public Collection createEmptyCollection(String collectionName, String projectUuid) { Collection collection = new Collection(); collection.setOwnerUuid(projectUuid); collection.setName(collectionName); return collectionsApiClient.create(collection); } /** * Copies files into an existing collection using the replace_files mechanism. * * @param collectionUUID UUID of the collection into which the files are to be copied * @param files map of files to be copied to the existing collection. * Each entry maps a target filename to a source file reference * (a portable data hash plus filename) * @return collection object mapped from the JSON that is returned from the server after a successful copy */ public Collection updateWithReplaceFiles(String collectionUUID, Map<String, String> files) { CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles(); replaceFilesRequest.getReplaceFiles().putAll(files); return collectionsApiClient.update(collectionUUID, replaceFilesRequest); } /** * Returns current user information based on the Api Token provided via configuration. * * @return user object mapped from the JSON that is returned from the server based on the provided Api Token. * It contains information about the user to whom this token is assigned. */ public User getCurrentUser() { return usersApiClient.current(); } /** * Gets the uuid of the current user based on the Api Token provided in configuration and uses it to list all * projects that this user owns in Arvados. * * @return GroupList containing all groups that the current user owns. * @see ArvadosFacade#getCurrentUser() */ public GroupList showGroupsOwnedByCurrentUser() { ListArgument listArgument = ListArgument.builder() .filters(Arrays.asList( Filter.of("owner_uuid", Filter.Operator.LIKE, getCurrentUser().getUuid()), Filter.of("group_class", Filter.Operator.IN, Lists.newArrayList(PROJECT, SUBPROJECT) ))) .build(); GroupList groupList = groupsApiClient.list(listArgument); log.debug("Groups owned by user:"); groupList.getItems().forEach(m -> log.debug(m.getUuid() + " -- " + m.getName())); return groupList; } /** * Lists all projects that the current user has read access to in Arvados. * * @return GroupList containing all groups that the current user has read access to. */ public GroupList showGroupsAccessibleByCurrentUser() { ListArgument listArgument = ListArgument.builder() .filters(Collections.singletonList( Filter.of("group_class", Filter.Operator.IN, Lists.newArrayList(PROJECT, SUBPROJECT) ))) .build(); GroupList groupList = groupsApiClient.list(listArgument); log.debug("Groups accessible by user:"); groupList.getItems().forEach(m -> log.debug(m.getUuid() + " -- " + m.getName())); return groupList; }
/** * Filters all collections from the selected project and returns a list of those that contain the passed String in their name. * The "LIKE" operator is used, so to obtain a certain collection it is sufficient to pass just part of its name. * The returned collections in the CollectionList are ordered by date of creation (starting from the oldest). * * @param collectionName collections containing this param in their name will be returned. * Passing a wildcard is possible - for example passing "a%" searches for * all collections starting with "a". * @param projectUuid uuid of the project to be searched for collections with the given name. To search the home * project provide the user uuid (from getCurrentUser()) * @return CollectionList containing all collections matching the specified name criteria * @see ArvadosFacade#getCurrentUser() */ public CollectionList getCollectionsFromProjectByName(String collectionName, String projectUuid) { ListArgument listArgument = ListArgument.builder() .filters(Arrays.asList( Filter.of("owner_uuid", Filter.Operator.LIKE, projectUuid), Filter.of("name", Filter.Operator.LIKE, collectionName) )) .order(Collections.singletonList("created_at")) .build(); return collectionsApiClient.list(listArgument); } /** * Gets project details by uuid. * * @param projectUuid uuid of the project * @return Group object containing information about the project */ public Group getProjectByUuid(String projectUuid) { Group project = groupsApiClient.get(projectUuid); log.debug("Retrieved " + project.getName() + " with UUID: " + project.getUuid()); return project; } /** * Creates a new project that will be a subproject of "home" for the current user. * * @param projectName name for the newly created project * @return Group object containing information about the created project * (mapped from the JSON returned from the server after creating the project) */ public Group createNewProject(String projectName) { Group project = new Group(); project.setName(projectName); project.setGroupClass(PROJECT); Group createdProject = groupsApiClient.create(project); log.debug("Project " + createdProject.getName() + " created with UUID: " + createdProject.getUuid()); return createdProject; } /** * Creates a new project that will be a subproject of "home" for the specified owner. * * @param ownerUuid uuid of the owner for the subproject * @param projectName name for the newly created subproject * @return Group object containing information about the created project * (mapped from the JSON returned from the server after creating the project) */ public Group createNewSubProject(String ownerUuid, String projectName) { Group project = new Group(); project.setName(projectName); project.setGroupClass(PROJECT); project.setOwnerUuid(ownerUuid); Group createdProject = groupsApiClient.create(project); this.log.debug("Project " + createdProject.getName() + " created with UUID: " + createdProject.getUuid()); return createdProject; } /** * Deletes the collection with the specified uuid. * * @param collectionUuid uuid of the collection to be deleted. The user whose token is provided in configuration * must be authorized to delete that collection. * @return the deleted collection object (mapped from the JSON returned from the server after deleting the collection) */ public Collection deleteCollection(String collectionUuid) { Collection deletedCollection = collectionsApiClient.delete(collectionUuid); log.debug("Collection: " + collectionUuid + " deleted."); return deletedCollection; } }
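A minimal end-to-end sketch of the facade (editor's addition); file paths and names are placeholders, and configuration is read from application.conf as with the no-argument constructor.

// Editor's sketch: create a project, upload a file into it, download it back.
import java.io.File;
import java.util.Arrays;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.Group;
import org.arvados.client.facade.ArvadosFacade;

class FacadeSketch {
    public static void main(String[] args) {
        ArvadosFacade facade = new ArvadosFacade(); // reads the arvados{...} config block
        Group project = facade.createNewProject("java-sdk-demo");
        Collection uploaded = facade.upload(
                Arrays.asList(new File("data/sample.fastq")), // placeholder file
                "demo collection",
                project.getUuid());
        // The target directory must not already contain a file of this name.
        File downloaded = facade.downloadFile("sample.fastq", uploaded.getUuid(), "/tmp/downloads");
        System.out.println("Downloaded to " + downloaded.getAbsolutePath());
    }
}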
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/CollectionFactory.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.collection; import org.arvados.client.api.client.GroupsApiClient; import org.arvados.client.api.client.UsersApiClient; import org.arvados.client.exception.ArvadosApiException; import org.arvados.client.api.model.Collection; import org.arvados.client.common.Patterns; import org.arvados.client.config.FileConfigProvider; import org.arvados.client.config.ConfigProvider; import org.arvados.client.exception.ArvadosClientException; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; import java.util.Optional; public class CollectionFactory { private ConfigProvider config; private UsersApiClient usersApiClient; private GroupsApiClient groupsApiClient; private final String name; private final String projectUuid; private CollectionFactory(ConfigProvider config, String name, String projectUuid) { this.name = name; this.projectUuid = projectUuid; this.config = config; setApiClients(); } public static CollectionFactoryBuilder builder() { return new CollectionFactoryBuilder(); } private void setApiClients() { if (this.config == null) this.config = new FileConfigProvider(); this.usersApiClient = new UsersApiClient(config); this.groupsApiClient = new GroupsApiClient(config); } public Collection create() { Collection newCollection = new Collection(); newCollection.setName(getNameOrDefault(name)); newCollection.setOwnerUuid(getDesiredProjectUuid(projectUuid)); return newCollection; } private String getNameOrDefault(String name) { return Optional.ofNullable(name).orElseGet(() -> { LocalDateTime dateTime = LocalDateTime.now(); DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); return String.format("New Collection (%s)", dateTime.format(formatter)); }); } public String getDesiredProjectUuid(String projectUuid) { try { if (projectUuid == null || projectUuid.length() == 0) { return usersApiClient.current().getUuid(); } else if (projectUuid.matches(Patterns.USER_UUID_PATTERN)) { return usersApiClient.get(projectUuid).getUuid(); } else if (projectUuid.matches(Patterns.GROUP_UUID_PATTERN)) { return groupsApiClient.get(projectUuid).getUuid(); } } catch (ArvadosApiException e) { throw new ArvadosClientException(String.format("An error occurred while getting project by UUID %s", projectUuid), e); } throw new ArvadosClientException(String.format("No project with UUID %s found", projectUuid)); } public static class CollectionFactoryBuilder { private ConfigProvider config; private String name; private String projectUuid; CollectionFactoryBuilder() { } public CollectionFactoryBuilder config(ConfigProvider config) { this.config = config; return this; } public CollectionFactoryBuilder name(String name) { this.name = name; return this; } public CollectionFactoryBuilder projectUuid(String projectUuid) { this.projectUuid = projectUuid; return this; } public CollectionFactory build() { return new CollectionFactory(config, name, projectUuid); } } }
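A short sketch of the factory's defaulting behavior (editor's addition); both arguments are deliberately null to show the fallbacks, and create() will contact the API server to resolve the owner uuid.

// Editor's sketch: CollectionFactory fills in a timestamped default name and
// resolves the owner uuid (the current user's home project when none is given).
import org.arvados.client.api.model.Collection;
import org.arvados.client.logic.collection.CollectionFactory;

class CollectionFactorySketch {
    static Collection newDefaultCollection() {
        return CollectionFactory.builder()
                .name(null)        // null -> "New Collection (yyyy-MM-dd HH:mm:ss.SSS)"
                .projectUuid(null) // null -> uuid of the current user (home project)
                .build()           // config defaults to FileConfigProvider
                .create();
    }
}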
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/FileToken.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.collection; import com.google.common.base.Strings; import org.arvados.client.common.Characters; public class FileToken { private long filePosition; private long fileSize; private String fileName; private String path; public FileToken(String fileTokenInfo) { splitFileTokenInfo(fileTokenInfo); } public FileToken(String fileTokenInfo, String path) { splitFileTokenInfo(fileTokenInfo); this.path = path; } private void splitFileTokenInfo(String fileTokenInfo) { String[] tokenPieces = fileTokenInfo.split(":"); this.filePosition = Long.parseLong(tokenPieces[0]); this.fileSize = Long.parseLong(tokenPieces[1]); this.fileName = tokenPieces[2].replace(Characters.SPACE, " "); } @Override public String toString() { return filePosition + ":" + fileSize + ":" + fileName; } public String getFullPath() { return Strings.isNullOrEmpty(path) ? fileName : path + fileName; } public long getFilePosition() { return this.filePosition; } public long getFileSize() { return this.fileSize; } public String getFileName() { return this.fileName; } public String getPath() { return this.path; } }
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.collection; import org.arvados.client.common.Characters; import org.arvados.client.exception.ArvadosClientException; import org.arvados.client.logic.keep.KeepLocator; import java.util.ArrayList; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.Objects; import static java.util.stream.Collectors.toList; import static org.arvados.client.common.Patterns.FILE_TOKEN_PATTERN; import static org.arvados.client.common.Patterns.LOCATOR_PATTERN; public class ManifestDecoder { public List<ManifestStream> decode(String manifestText) { if (manifestText == null || manifestText.isEmpty()) { throw new ArvadosClientException("Manifest text cannot be empty."); } List<String> manifestStreams = new ArrayList<>(Arrays.asList(manifestText.split("\\n"))); if (!manifestStreams.get(0).startsWith(". ")) { throw new ArvadosClientException("Invalid first path component (expecting \".\")"); } return manifestStreams.stream() .map(this::decodeSingleManifestStream) .collect(toList()); } private ManifestStream decodeSingleManifestStream(String manifestStream) { Objects.requireNonNull(manifestStream, "Manifest stream cannot be empty."); LinkedList<String> manifestPieces = new LinkedList<>(Arrays.asList(manifestStream.split("\\s+"))); String streamName = manifestPieces.poll(); String path = ".".equals(streamName) ? "" : streamName.substring(2).concat(Characters.SLASH); List<KeepLocator> keepLocators = manifestPieces.stream() .filter(p -> p.matches(LOCATOR_PATTERN)) .map(this::getKeepLocator) .collect(toList()); List<FileToken> fileTokens = manifestPieces.stream() .skip(keepLocators.size()) .filter(p -> p.matches(FILE_TOKEN_PATTERN)) .map(p -> new FileToken(p, path)) .collect(toList()); return new ManifestStream(streamName, keepLocators, fileTokens); } private KeepLocator getKeepLocator(String locatorString) { try { return new KeepLocator(locatorString); } catch (Exception e) { throw new RuntimeException(e); } } }
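A self-contained decoding sketch (editor's addition); the locator is a syntactically valid placeholder, not a real Keep block.

// Editor's sketch: decoding a one-stream manifest into FileTokens.
import java.util.List;
import org.arvados.client.logic.collection.FileToken;
import org.arvados.client.logic.collection.ManifestDecoder;
import org.arvados.client.logic.collection.ManifestStream;

class ManifestDecodeSketch {
    public static void main(String[] args) {
        // One stream ".", one 33-byte block, two files covering bytes 0-11 and 12-32.
        String manifestText = ". 930625b054ce894ac40596c3f5a0d947+33 0:12:file1.txt 12:21:file2.txt\n";
        List<ManifestStream> streams = new ManifestDecoder().decode(manifestText);
        for (FileToken token : streams.get(0).getFileTokens()) {
            System.out.println(token.getFullPath() + " (" + token.getFileSize() + " bytes)");
        }
    }
}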
"" : streamName.substring(2).concat(Characters.SLASH); List keepLocators = manifestPieces .stream() .filter(p -> p.matches(LOCATOR_PATTERN)) .map(this::getKeepLocator) .collect(toList()); List fileTokens = manifestPieces.stream() .skip(keepLocators.size()) .filter(p -> p.matches(FILE_TOKEN_PATTERN)) .map(p -> new FileToken(p, path)) .collect(toList()); return new ManifestStream(streamName, keepLocators, fileTokens); } private KeepLocator getKeepLocator(String locatorString ) { try { return new KeepLocator(locatorString); } catch (Exception e) { throw new RuntimeException(e); } } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestFactory.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.collection; import com.google.common.collect.ImmutableList; import org.arvados.client.common.Characters; import java.io.File; import java.util.Collection; import java.util.List; import java.util.stream.Collectors; public class ManifestFactory { private Collection files; private List locators; ManifestFactory(Collection files, List locators) { this.files = files; this.locators = locators; } public static ManifestFactoryBuilder builder() { return new ManifestFactoryBuilder(); } public String create() { ImmutableList.Builder builder = new ImmutableList.Builder() .add(Characters.DOT) .addAll(locators); long filePosition = 0; for (File file : files) { builder.add(String.format("%d:%d:%s", filePosition, file.length(), file.getName().replace(" ", Characters.SPACE))); filePosition += file.length(); } String manifest = builder.build().stream().collect(Collectors.joining(" ")).concat(Characters.NEW_LINE); return manifest; } public static class ManifestFactoryBuilder { private Collection files; private List locators; ManifestFactoryBuilder() { } public ManifestFactory.ManifestFactoryBuilder files(Collection files) { this.files = files; return this; } public ManifestFactory.ManifestFactoryBuilder locators(List locators) { this.locators = locators; return this; } public ManifestFactory build() { return new ManifestFactory(files, locators); } } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. 
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.collection; import org.arvados.client.logic.keep.KeepLocator; import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; public class ManifestStream { private String streamName; private List<KeepLocator> keepLocators; private List<FileToken> fileTokens; public ManifestStream(String streamName, List<KeepLocator> keepLocators, List<FileToken> fileTokens) { this.streamName = streamName; this.keepLocators = keepLocators; this.fileTokens = fileTokens; } @Override public String toString() { return streamName + " " + Stream.concat(keepLocators.stream().map(KeepLocator::toString), fileTokens.stream().map(FileToken::toString)) .collect(Collectors.joining(" ")); } public String getStreamName() { return this.streamName; } public List<KeepLocator> getKeepLocators() { return this.keepLocators; } public List<FileToken> getFileTokens() { return this.fileTokens; } }
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.keep; import com.google.common.collect.Lists; import org.arvados.client.api.client.CollectionsApiClient; import org.arvados.client.api.client.KeepWebApiClient; import org.arvados.client.api.model.Collection; import org.arvados.client.common.Characters; import org.arvados.client.exception.ArvadosClientException; import org.arvados.client.logic.collection.FileToken; import org.arvados.client.logic.collection.ManifestDecoder; import org.arvados.client.logic.collection.ManifestStream; import org.arvados.client.logic.keep.exception.DownloadFolderAlreadyExistsException; import org.arvados.client.logic.keep.exception.FileAlreadyExistsException; import org.slf4j.Logger; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; import java.nio.file.Files; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import java.util.stream.Stream; public class FileDownloader { private final ManifestDecoder manifestDecoder; private final CollectionsApiClient collectionsApiClient; private final KeepWebApiClient keepWebApiClient; private final Logger log = org.slf4j.LoggerFactory.getLogger(FileDownloader.class); public FileDownloader(ManifestDecoder manifestDecoder, CollectionsApiClient collectionsApiClient, KeepWebApiClient keepWebApiClient) { this.manifestDecoder = manifestDecoder; this.collectionsApiClient = collectionsApiClient; this.keepWebApiClient = keepWebApiClient; } public List<FileToken> listFileInfoFromCollection(String collectionUuid) { Collection requestedCollection = collectionsApiClient.get(collectionUuid); String manifestText = requestedCollection.getManifestText(); // decode manifest text and get list of all FileTokens for this collection return manifestDecoder.decode(manifestText) .stream() .flatMap(p -> p.getFileTokens().stream()) .collect(Collectors.toList()); } public File downloadSingleFileUsingKeepWeb(String filePathName, String collectionUuid, String pathToDownloadFolder) { FileToken fileToken = getFileTokenFromCollection(filePathName, collectionUuid); if (fileToken == null) { throw new ArvadosClientException(String.format("%s not found in Collection with UUID %s", filePathName, collectionUuid)); } File downloadedFile = checkIfFileExistsInTargetLocation(fileToken, pathToDownloadFolder); try (FileOutputStream fos = new FileOutputStream(downloadedFile)) { fos.write(keepWebApiClient.download(collectionUuid, filePathName)); } catch (IOException e) { throw new ArvadosClientException(String.format("Unable to write file %s", fileToken.getFileName()), e); } return downloadedFile; } public File downloadFileWithResume(String collectionUuid, String fileName, String pathToDownloadFolder, long start, Long end) throws IOException { if (end != null && end < start) { throw new IllegalArgumentException("End index must be greater than or equal to the start index"); } File destinationFile = new File(pathToDownloadFolder, fileName); if (!destinationFile.exists()) { boolean isCreated = destinationFile.createNewFile(); if (!isCreated) { throw new IOException("Failed to create new file: " + destinationFile.getAbsolutePath()); } } try (RandomAccessFile outputFile = new RandomAccessFile(destinationFile, "rw"); InputStream inputStream = keepWebApiClient.get(collectionUuid, fileName, start, end)) { outputFile.seek(start); long remaining = (end == null) ? Long.MAX_VALUE : end - start + 1; byte[] buffer = new byte[4096]; int bytesRead; // check the remaining byte budget before each read so the loop stops exactly at 'end' while (remaining > 0 && (bytesRead = inputStream.read(buffer)) != -1) { int bytesToWrite = (int) Math.min(bytesRead, remaining); outputFile.write(buffer, 0, bytesToWrite); remaining -= bytesToWrite; } } return destinationFile; }
public List<File> downloadFilesFromCollectionUsingKeepWeb(String collectionUuid, String pathToDownloadFolder) { String collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder).getAbsolutePath(); List<FileToken> fileTokens = listFileInfoFromCollection(collectionUuid); List<CompletableFuture<File>> futures = Lists.newArrayList(); for (FileToken fileToken : fileTokens) { futures.add(CompletableFuture.supplyAsync(() -> this.downloadOneFileFromCollectionUsingKeepWeb(fileToken, collectionUuid, collectionTargetDir))); } @SuppressWarnings("unchecked") CompletableFuture<File>[] array = futures.toArray(new CompletableFuture[0]); return Stream.of(array) .map(CompletableFuture::join).collect(Collectors.toList()); } private FileToken getFileTokenFromCollection(String filePathName, String collectionUuid) { return listFileInfoFromCollection(collectionUuid) .stream() .filter(p -> (p.getFullPath()).equals(filePathName)) .findFirst() .orElse(null); } private File checkIfFileExistsInTargetLocation(FileToken fileToken, String pathToDownloadFolder) { String fileName = fileToken.getFileName(); File downloadFile = new File(pathToDownloadFolder + Characters.SLASH + fileName); if (downloadFile.exists()) { throw new FileAlreadyExistsException(String.format("File %s exists in location %s", fileName, pathToDownloadFolder)); } else { return downloadFile; } } private File downloadOneFileFromCollectionUsingKeepWeb(FileToken fileToken, String collectionUuid, String pathToDownloadFolder) { String filePathName = fileToken.getPath() + fileToken.getFileName(); File downloadedFile = new File(pathToDownloadFolder + Characters.SLASH + filePathName); downloadedFile.getParentFile().mkdirs(); try (FileOutputStream fos = new FileOutputStream(downloadedFile)) { fos.write(keepWebApiClient.download(collectionUuid, filePathName)); } catch (IOException e) { throw new RuntimeException(e); } return downloadedFile; } public List<File> downloadFilesFromCollection(String collectionUuid, String pathToDownloadFolder) { // download requested collection and extract manifest text Collection requestedCollection = collectionsApiClient.get(collectionUuid); String manifestText = requestedCollection.getManifestText(); // if directory with this collectionUUID does not exist - create one // if exists - abort (throw exception) File collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder); // decode manifest text and create list of ManifestStream objects containing KeepLocators and FileTokens List<ManifestStream> manifestStreams = manifestDecoder.decode(manifestText); // list of all downloaded files that will be returned by this method List<File> downloadedFilesFromCollection = new ArrayList<>(); // download files for each manifest stream for (ManifestStream manifestStream : manifestStreams) downloadedFilesFromCollection.addAll(downloadFilesFromSingleManifestStream(collectionUuid, manifestStream, collectionTargetDir)); log.debug(String.format("Total of: %d files downloaded", downloadedFilesFromCollection.size())); return downloadedFilesFromCollection; } private File setTargetDirectory(String collectionUUID, String pathToDownloadFolder) { // local directory to save downloaded files File collectionTargetDir = new File(pathToDownloadFolder + Characters.SLASH + collectionUUID); if (collectionTargetDir.exists()) { throw new DownloadFolderAlreadyExistsException(String.format("Directory for collection UUID %s already exists", collectionUUID)); } else { collectionTargetDir.mkdirs(); } return collectionTargetDir; } private List<File> downloadFilesFromSingleManifestStream(String collectionUuid, ManifestStream manifestStream, File collectionTargetDir) { List<File> downloadedFiles = new ArrayList<>(); for (FileToken fileToken : manifestStream.getFileTokens()) { File downloadedFile = new File(collectionTargetDir.getAbsolutePath() + Characters.SLASH + fileToken.getFullPath()); // create file downloadedFile.getParentFile().mkdirs(); try { byte[] download = keepWebApiClient.download(collectionUuid, fileToken.getFileName()); Files.write(downloadedFile.toPath(), download); } catch (IOException | ArvadosClientException e) { throw new ArvadosClientException(String.format("Unable to write file %s", fileToken.getFileName()), e); } downloadedFiles.add(downloadedFile); log.debug(String.format("File %d / %d downloaded from manifest stream", manifestStream.getFileTokens().indexOf(fileToken) + 1, manifestStream.getFileTokens().size())); } return downloadedFiles; } }
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/FileUploader.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.keep; import org.arvados.client.api.client.CollectionsApiClient; import org.arvados.client.api.client.KeepWebApiClient; import org.arvados.client.api.model.Collection; import org.arvados.client.config.ConfigProvider; import org.arvados.client.logic.collection.CollectionFactory; import org.slf4j.Logger; import java.io.File; import java.util.List; public class FileUploader { private final KeepWebApiClient keepWebApiClient; private final CollectionsApiClient collectionsApiClient; private final ConfigProvider config; private final Logger log = org.slf4j.LoggerFactory.getLogger(FileUploader.class); public FileUploader(KeepWebApiClient keepWebApiClient, CollectionsApiClient collectionsApiClient, ConfigProvider config) { this.keepWebApiClient = keepWebApiClient; this.collectionsApiClient = collectionsApiClient; this.config = config; } public Collection upload(List<File> sourceFiles, String collectionName, String projectUuid) { Collection newCollection = CollectionFactory.builder() .config(config) .name(collectionName) .projectUuid(projectUuid) .build() .create(); newCollection = collectionsApiClient.create(newCollection); String newCollectionId = newCollection.getUuid(); sourceFiles.forEach(file -> uploadFile(newCollectionId, file)); return collectionsApiClient.get(newCollection.getUuid()); } private void uploadFile(String collectionUuid, File file) { keepWebApiClient.upload(collectionUuid, file, (progress) -> log.info("Uploaded {} bytes for file: {}", progress, file.getName())); } public Collection uploadToExistingCollection(List<File> files, String collectionUuid) { files.forEach(file -> uploadFile(collectionUuid, file)); return collectionsApiClient.get(collectionUuid); } }
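A resume-download sketch (editor's addition); the client wiring mirrors ArvadosFacade.setFacadeFields(), and the uuid and paths are placeholders.

// Editor's sketch: continue a partial download via a Keep-Web byte-range request.
import java.io.File;
import java.io.IOException;
import org.arvados.client.api.client.CollectionsApiClient;
import org.arvados.client.api.client.KeepWebApiClient;
import org.arvados.client.config.ConfigProvider;
import org.arvados.client.config.FileConfigProvider;
import org.arvados.client.logic.collection.ManifestDecoder;
import org.arvados.client.logic.keep.FileDownloader;

class ResumeDownloadSketch {
    public static void main(String[] args) throws IOException {
        ConfigProvider config = new FileConfigProvider();
        FileDownloader downloader = new FileDownloader(
                new ManifestDecoder(),
                new CollectionsApiClient(config),
                new KeepWebApiClient(config));
        // Resume file.bin from byte 1 MiB to the end of the file (end == null).
        File file = downloader.downloadFileWithResume(
                "zzzzz-4zz18-xxxxxxxxxxxxxxx", "file.bin", "/tmp/downloads", 1048576L, null);
        System.out.println("Resumed into " + file.getAbsolutePath());
    }
}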
================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/KeepLocator.java ================================================
/* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.keep; import org.arvados.client.exception.ArvadosClientException; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; import static org.arvados.client.common.Patterns.HINT_PATTERN; public class KeepLocator { private final List<String> hints = new ArrayList<>(); private String permSig; private LocalDateTime permExpiry; private final String md5sum; private final Integer size; public KeepLocator(String locatorString) { LinkedList<String> pieces = new LinkedList<>(Arrays.asList(locatorString.split("\\+"))); md5sum = pieces.poll(); size = Integer.valueOf(Objects.requireNonNull(pieces.poll())); for (String hint : pieces) { if (!hint.matches(HINT_PATTERN)) { throw new ArvadosClientException(String.format("invalid hint format: %s", hint)); } else if (hint.startsWith("A")) { parsePermissionHint(hint); } else { hints.add(hint); } } } public List<String> getHints() { return hints; } public String getMd5sum() { return md5sum; } @Override public String toString() { return Stream.concat(Stream.of(md5sum, size.toString(), permissionHint()), hints.stream()) .filter(Objects::nonNull) .collect(Collectors.joining("+")); } public String stripped() { return size != null ?
String.format("%s+%d", md5sum, size) : md5sum; } public String permissionHint() { if (permSig == null || permExpiry == null) { return null; } long timestamp = permExpiry.toEpochSecond(ZoneOffset.UTC); String signTimestamp = Long.toHexString(timestamp); return String.format("A%s@%s", permSig, signTimestamp); } private void parsePermissionHint(String hint) { String[] hintSplit = hint.substring(1).split("@", 2); permSig = hintSplit[0]; int permExpiryDecimal = Integer.parseInt(hintSplit[1], 16); permExpiry = LocalDateTime.ofInstant(Instant.ofEpochSecond(permExpiryDecimal), ZoneOffset.UTC); } } ================================================ FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.keep.exception; import org.arvados.client.exception.ArvadosClientException; /** * Exception indicating that directory with given name was already created in specified location. * *

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.logic.keep.exception;

import org.arvados.client.exception.ArvadosClientException;

/**
 * Exception indicating that a directory with the given name already exists in the
 * specified location.
 *
 * <p>
 * This exception is thrown during an attempt to download all files from a collection
 * to a location that already contains a folder named after that collection's UUID.
 * </p>
 */
public class DownloadFolderAlreadyExistsException extends ArvadosClientException {

    public DownloadFolderAlreadyExistsException(String message) {
        super(message);
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.logic.keep.exception;

import org.arvados.client.exception.ArvadosClientException;

/**
 * Signals that an attempt to download a file with the given name has failed for the
 * specified download location.
 *
 * <p>
 * This exception is thrown during an attempt to download a single file to a location
 * that already contains a file with that name.
 * </p>
 */
public class FileAlreadyExistsException extends ArvadosClientException {

    public FileAlreadyExistsException(String message) {
        super(message);
    }
}
================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/utils/FileMerge.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.utils;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Collection;

public class FileMerge {

    public static void merge(Collection<File> files, File targetFile) throws IOException {
        try (FileOutputStream fos = new FileOutputStream(targetFile);
             BufferedOutputStream mergingStream = new BufferedOutputStream(fos)) {
            for (File file : files) {
                Files.copy(file.toPath(), mergingStream);
            }
        }
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/java/org/arvados/client/utils/FileSplit.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.utils;

import org.apache.commons.io.FileUtils;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

/**
 * Based on:
 * <a href="https://stackoverflow.com/questions/10864317/how-to-break-a-file-into-pieces-using-java">
 * How to break a file into pieces using Java</a>
 */
public class FileSplit {

    public static List<File> split(File f, File dir, int splitSize) throws IOException {
        int partCounter = 1;

        long sizeOfFiles = splitSize * FileUtils.ONE_MB;
        byte[] buffer = new byte[(int) sizeOfFiles];

        List<File> files = new ArrayList<>();
        String fileName = f.getName();

        try (FileInputStream fis = new FileInputStream(f);
             BufferedInputStream bis = new BufferedInputStream(fis)) {
            int bytesAmount = 0;
            while ((bytesAmount = bis.read(buffer)) > 0) {
                String filePartName = String.format("%s.%03d", fileName, partCounter++);
                File newFile = new File(dir, filePartName);
                try (FileOutputStream out = new FileOutputStream(newFile)) {
                    out.write(buffer, 0, bytesAmount);
                }
                files.add(newFile);
            }
        }
        return files;
    }
}

================================================
FILE: contrib/java-sdk-v2/src/main/resources/reference.conf
================================================
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
#

# Arvados client default configuration
#
# Remarks:
# * When providing values, remove the empty quotation marks ("") from the line
# * See the Arvados documentation for information on how to obtain a token:
#   https://doc.arvados.org/user/reference/api-tokens.html
#
arvados {
    api {
        keepweb-host = localhost
        keepweb-port = 8000
        host = localhost
        port = 8000
        token = ""
        protocol = https
        host-insecure = false
    }
    split-size = 64
    temp-dir = /tmp/file-split
    copies = 2
    retries = 0
    connectTimeout = 60000
    readTimeout = 60000
    writeTimeout = 60000
}
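Before the test sources, a hedged, self-contained round-trip sketch for the FileSplit/FileMerge utilities above; the paths are placeholders, and the split size is in megabytes per the splitSize * FileUtils.ONE_MB computation in FileSplit.

import org.arvados.client.utils.FileMerge;
import org.arvados.client.utils.FileSplit;

import java.io.File;
import java.io.IOException;
import java.util.List;

public class SplitMergeExample {
    public static void main(String[] args) throws IOException {
        File source = new File("/tmp/big-file.bin");  // hypothetical input file
        File partsDir = new File("/tmp/file-split");  // matches the temp-dir default above
        partsDir.mkdirs();

        // Split into 64 MiB parts, named big-file.bin.001, big-file.bin.002, ...
        List<File> parts = FileSplit.split(source, partsDir, 64);

        // Reassemble the parts (returned in order) and compare sizes.
        File merged = new File(partsDir, source.getName() + ".merged");
        FileMerge.merge(parts, merged);

        System.out.printf("original=%d bytes, merged=%d bytes%n", source.length(), merged.length());
    }
}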
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.HttpUrl;
import org.arvados.client.api.model.Item;
import org.arvados.client.api.model.ItemList;
import org.arvados.client.test.utils.ArvadosClientUnitTest;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;

import static org.assertj.core.api.Assertions.assertThat;

@RunWith(MockitoJUnitRunner.class)
public class BaseStandardApiClientTest extends ArvadosClientUnitTest {

    @Spy
    private BaseStandardApiClient<Item, ItemList> client = new BaseStandardApiClient<Item, ItemList>(CONFIG) {
        @Override
        String getResource() {
            return "resource";
        }

        @Override
        Class<Item> getType() {
            return null;
        }

        @Override
        Class<ItemList> getListType() {
            return null;
        }
    };

    @Test
    public void urlBuilderBuildsExpectedUrlFormat() {
        // when
        HttpUrl.Builder actual = client.getUrlBuilder();

        // then
        assertThat(actual.build().toString()).isEqualTo("http://localhost:9000/arvados/v1/resource");
    }
}

================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import okhttp3.mockwebserver.RecordedRequest;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionList;
import org.arvados.client.api.model.CollectionReplaceFiles;
import org.arvados.client.test.utils.RequestMethod;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Before;
import org.junit.Test;

import static org.arvados.client.test.utils.ApiClientTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;

public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {

    private static final String RESOURCE = "collections";
    private static final String TEST_COLLECTION_NAME = "Super Collection";
    private static final String TEST_COLLECTION_UUID = "test-collection-uuid";

    private ObjectMapper objectMapper;
    private CollectionsApiClient client;

    @Before
    public void setUp() {
        objectMapper = new ObjectMapper();
        objectMapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
        client = new CollectionsApiClient(CONFIG);
    }

    @Test
    public void listCollections() throws Exception {
        // given
        server.enqueue(getResponse("collections-list"));

        // when
        CollectionList actual = client.list();

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getItemsAvailable()).isEqualTo(41);
    }

    @Test
    public void getCollection() throws Exception {
        // given
        server.enqueue(getResponse("collections-get"));
        String uuid = "112ci-4zz18-p51w7z3fpopo6sm";

        // when
        Collection actual = client.get(uuid);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + "/" + uuid);
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getUuid()).isEqualTo(uuid);
        assertThat(actual.getPortableDataHash()).isEqualTo("6c4106229b08fe25f48b3a7a8289dd46+143");
    }

    @Test
    public void createCollection() throws Exception {
        // given
        server.enqueue(getResponse("collections-create-simple"));
        String name = TEST_COLLECTION_NAME;

        Collection collection = new Collection();
        collection.setName(name);

        // when
        Collection actual = client.create(collection);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.POST);
        assertThat(actual.getName()).isEqualTo(name);
        assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
        assertThat(actual.getManifestText()).isEmpty();
    }

    @Test
    public void createCollectionWithManifest() throws Exception {
        // given
        server.enqueue(getResponse("collections-create-manifest"));
        String name = TEST_COLLECTION_NAME;
        String manifestText = ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n";

        Collection collection = new Collection();
        collection.setName(name);
        collection.setManifestText(manifestText);

        // when
        Collection actual = client.create(collection);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.POST);
        assertThat(actual.getName()).isEqualTo(name);
        assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
        assertThat(actual.getManifestText()).isEqualTo(manifestText);
    }

    @Test
    public void testUpdateWithReplaceFiles() throws IOException, InterruptedException {
        // given
        server.enqueue(getResponse("collections-create-manifest"));

        Map<String, String> files = new HashMap<>();
        files.put("targetPath1", "sourcePath1");
        files.put("targetPath2", "sourcePath2");

        CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
        replaceFilesRequest.setReplaceFiles(files);

        // when
        Collection actual = client.update(TEST_COLLECTION_UUID, replaceFilesRequest);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, "collections/test-collection-uuid");
        assertRequestMethod(request, RequestMethod.PUT);
        assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");

        String actualRequestBody = request.getBody().readUtf8();
        Map<String, Object> actualRequestMap = objectMapper.readValue(actualRequestBody, Map.class);

        Map<String, Object> expectedRequestMap = new HashMap<>();
        Map<String, Object> collectionOptionsMap = new HashMap<>();
        collectionOptionsMap.put("preserve_version", true);

        Map<String, String> replaceFilesMap = new HashMap<>();
        replaceFilesMap.put("targetPath1", "sourcePath1");
        replaceFilesMap.put("targetPath2", "sourcePath2");

        expectedRequestMap.put("collection", collectionOptionsMap);
        expectedRequestMap.put("replace_files", replaceFilesMap);

        String expectedJson = objectMapper.writeValueAsString(expectedRequestMap);
        String actualJson = objectMapper.writeValueAsString(actualRequestMap);
        assertEquals(expectedJson, actualJson);
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import com.google.common.collect.Lists;
import okhttp3.mockwebserver.RecordedRequest;
import org.arvados.client.api.model.Group;
import org.arvados.client.api.model.GroupList;
import org.arvados.client.api.model.argument.Filter;
import org.arvados.client.api.model.argument.ListArgument;
import org.arvados.client.test.utils.RequestMethod;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Test;

import java.util.Arrays;

import static org.arvados.client.test.utils.ApiClientTestUtils.*;
import static org.junit.Assert.assertEquals;

public class GroupsApiClientTest extends ArvadosClientMockedWebServerTest {

    private static final String RESOURCE = "groups";

    private GroupsApiClient client = new GroupsApiClient(CONFIG);

    @Test
    public void listGroups() throws Exception {
        // given
        server.enqueue(getResponse("groups-list"));

        // when
        GroupList actual = client.list();

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.GET);
        assertEquals(20, actual.getItems().size());
    }

    @Test
    public void listProjectsByOwner() throws Exception {
        // given
        server.enqueue(getResponse("groups-list"));
        String ownerUuid = "ardev-tpzed-n3kzq4fvoks3uw4";
        String filterSubPath = "?filters=[%20[%20%22owner_uuid%22,%20%22like%22,%20%22ardev-tpzed-n3kzq4fvoks3uw4%22%20],%20" +
                "[%20%22group_class%22,%20%22in%22,%20[%20%22project%22,%20%22sub-project%22%20]%20]%20]";

        // when
        ListArgument listArgument = ListArgument.builder()
                .filters(Arrays.asList(
                        Filter.of("owner_uuid", Filter.Operator.LIKE, ownerUuid),
                        Filter.of("group_class", Filter.Operator.IN, Lists.newArrayList("project", "sub-project"))))
                .build();
        GroupList actual = client.list(listArgument);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + filterSubPath);
        assertRequestMethod(request, RequestMethod.GET);
        assertEquals(20, actual.getItems().size());
    }

    @Test
    public void getGroup() throws Exception {
        // given
        server.enqueue(getResponse("groups-get"));
        String uuid = "ardev-j7d0g-bmg3pfqtx3ivczp";

        // when
        Group actual = client.get(uuid);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + "/" + uuid);
        assertRequestMethod(request, RequestMethod.GET);
        assertEquals(uuid, actual.getUuid());
        assertEquals("3hw0vk4mbl0ofvia5k6x4dwrx", actual.getEtag());
        assertEquals("ardev-tpzed-n3kzq4fvoks3uw4", actual.getOwnerUuid());
        assertEquals("TestGroup1", actual.getName());
        assertEquals("project", actual.getGroupClass());
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;

import okhttp3.mockwebserver.MockResponse;
import okio.Buffer;

import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertNotNull;

public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {

    private final KeepWebApiClient client = new KeepWebApiClient(CONFIG);

    @Test
    public void uploadFile() throws Exception {
        // given
        String collectionUuid = "112ci-4zz18-p51w7z3fpopo6sm";
        File file = Files.createTempFile("keep-upload-test", "txt").toFile();
        Files.write(file.toPath(), "test data".getBytes());

        server.enqueue(getResponse("keep-client-upload-response"));

        // when
        String uploadResponse = client.upload(collectionUuid, file,
                uploadedBytes -> System.out.printf("Uploaded bytes: %s/%s%n", uploadedBytes, file.length()));

        // then
        assertThat(uploadResponse).isEqualTo("Created");
    }

    @Test
    public void downloadPartialIsPerformedSuccessfully() throws Exception {
        // given
        String collectionUuid = "some-collection-uuid";
        String filePathName = "sample-file-path";
        long start = 1024;
        Long end = null;

        byte[] expectedData = "test data".getBytes();

        try (Buffer buffer = new Buffer().write(expectedData)) {
            server.enqueue(new MockResponse().setBody(buffer));

            // when
            InputStream inputStream = client.get(collectionUuid, filePathName, start, end);
            byte[] actualData = inputStreamToByteArray(inputStream);

            // then
            assertNotNull(actualData);
            assertArrayEquals(expectedData, actualData);
        }
    }

    private byte[] inputStreamToByteArray(InputStream inputStream) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        int nRead;
        byte[] data = new byte[1024];
        while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
            buffer.write(data, 0, nRead);
        }
        buffer.flush();
        return buffer.toByteArray();
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/LinkApiClientTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.mockwebserver.RecordedRequest;
import org.arvados.client.api.model.Link;
import org.arvados.client.api.model.LinkList;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.arvados.client.test.utils.RequestMethod;
import org.junit.Test;

import static org.arvados.client.test.utils.ApiClientTestUtils.assertAuthorizationHeader;
import static org.arvados.client.test.utils.ApiClientTestUtils.assertRequestMethod;
import static org.arvados.client.test.utils.ApiClientTestUtils.assertRequestPath;
import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;

public class LinkApiClientTest extends ArvadosClientMockedWebServerTest {

    private static final String RESOURCE = "links";

    private final LinksApiClient client = new LinksApiClient(CONFIG);

    @Test
    public void listLinks() throws Exception {
        // given
        server.enqueue(getResponse("links-list"));

        // when
        LinkList actual = client.list();

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getItemsAvailable()).isEqualTo(2);
    }

    @Test
    public void getLink() throws Exception {
        // given
        server.enqueue(getResponse("links-get"));
        String uuid = "arkau-o0j2j-huxuaxbi46s1yml";

        // when
        Link actual = client.get(uuid);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + "/" + uuid);
        assertRequestMethod(request, RequestMethod.GET);
        assertEquals(actual.getUuid(), uuid);
        assertEquals(actual.getName(), "can_read");
        assertEquals(actual.getHeadKind(), "arvados#group");
        assertEquals(actual.getHeadUuid(), "arkau-j7d0g-fcedae2076pw56h");
        assertEquals(actual.getTailUuid(), "ardev-tpzed-n3kzq4fvoks3uw4");
        assertEquals(actual.getTailKind(), "arvados#user");
        assertEquals(actual.getLinkClass(), "permission");
    }

    @Test
    public void createLink() throws Exception {
        // given
        server.enqueue(getResponse("links-create"));
        String name = "Star Link";

        Link link = new Link();
        link.setName(name);

        // when
        Link actual = client.create(link);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.POST);
        assertThat(actual.getName()).isEqualTo(name);
        assertEquals(actual.getUuid(), "arkau-o0j2j-huxuaxbi46s1yml");
        assertEquals(actual.getHeadKind(), "arvados#group");
        assertEquals(actual.getHeadUuid(), "arkau-j7d0g-fcedae2076pw56h");
        assertEquals(actual.getTailUuid(), "ardev-tpzed-n3kzq4fvoks3uw4");
        assertEquals(actual.getTailKind(), "arvados#user");
        assertEquals(actual.getLinkClass(), "star");
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/UsersApiClientTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client;

import okhttp3.mockwebserver.RecordedRequest;
import org.arvados.client.api.model.User;
import org.arvados.client.api.model.UserList;
import org.arvados.client.test.utils.RequestMethod;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Test;

import static org.arvados.client.common.Characters.SLASH;
import static org.arvados.client.test.utils.ApiClientTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;

public class UsersApiClientTest extends ArvadosClientMockedWebServerTest {

    private static final String RESOURCE = "users";
    private static final String USER_UUID = "ardev-tpzed-q6dvn7sby55up1b";

    private UsersApiClient client = new UsersApiClient(CONFIG);

    @Test
    public void listUsers() throws Exception {
        // given
        server.enqueue(getResponse("users-list"));

        // when
        UserList actual = client.list();

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getItemsAvailable()).isEqualTo(13);
    }

    @Test
    public void getUser() throws Exception {
        // given
        server.enqueue(getResponse("users-get"));

        // when
        User actual = client.get(USER_UUID);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + SLASH + USER_UUID);
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getUuid()).isEqualTo(USER_UUID);
    }

    @Test
    public void getCurrentUser() throws Exception {
        // given
        server.enqueue(getResponse("users-get"));

        // when
        User actual = client.current();

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + SLASH + "current");
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getUuid()).isEqualTo(USER_UUID);
    }

    @Test
    public void getSystemUser() throws Exception {
        // given
        server.enqueue(getResponse("users-system"));

        // when
        User actual = client.system();

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE + SLASH + "system");
        assertRequestMethod(request, RequestMethod.GET);
        assertThat(actual.getUuid()).isEqualTo("ardev-tpzed-000000000000000");
    }

    @Test
    public void createUser() throws Exception {
        // given
        server.enqueue(getResponse("users-create"));
        String firstName = "John";
        String lastName = "Wayne";
        String fullName = String.format("%s %s", firstName, lastName);
        String username = String.format("%s%s", firstName, lastName).toLowerCase();

        User user = new User();
        user.setFirstName(firstName);
        user.setLastName(lastName);
        user.setFullName(fullName);
        user.setUsername(username);

        // when
        User actual = client.create(user);

        // then
        RecordedRequest request = server.takeRequest();
        assertAuthorizationHeader(request);
        assertRequestPath(request, RESOURCE);
        assertRequestMethod(request, RequestMethod.POST);
        assertThat(actual.getFirstName()).isEqualTo(firstName);
        assertThat(actual.getLastName()).isEqualTo(lastName);
        assertThat(actual.getFullName()).isEqualTo(fullName);
        assertThat(actual.getUsername()).isEqualTo(username);
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.api.client.factory;

import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.mockwebserver.MockResponse;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManagerFactory;
import java.io.FileInputStream;
import java.security.KeyStore;

@RunWith(MockitoJUnitRunner.class)
public class OkHttpClientFactoryTest extends ArvadosClientMockedWebServerTest {

    @Test(expected = javax.net.ssl.SSLHandshakeException.class)
    public void secureOkHttpClientIsCreated() throws Exception {
        // given
        OkHttpClientFactory factory = OkHttpClientFactory.INSTANCE;
        // * configure HTTPS server
        SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();
        server.useHttps(sf, false);
        server.enqueue(new MockResponse().setBody("OK"));
        // * prepare client HTTP request
        Request request = new Request.Builder()
                .url("https://localhost:9000/")
                .build();

        // when - then (the self-signed SSL certificate fails verification)
        OkHttpClient actual = factory.create(false);
        actual.newCall(request).execute();
    }

    @Test
    public void insecureOkHttpClientIsCreated() throws Exception {
        // given
        OkHttpClientFactory factory = OkHttpClientFactory.INSTANCE;
        // * configure HTTPS server
        SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();
        server.useHttps(sf, false);
        server.enqueue(new MockResponse().setBody("OK"));
        // * prepare client HTTP request
        Request request = new Request.Builder()
                .url("https://localhost:9000/")
                .build();

        // when (the SSL certificate is not verified)
        OkHttpClient actual = factory.create(true);
        Response response = actual.newCall(request).execute();

        // then
        Assert.assertEquals(response.body().string(), "OK");
    }

    /*
     * This ugly boilerplate is needed to enable a self-signed certificate.
     *
     * It requires the selfsigned.keystore.jks file, which was generated with:
     * keytool -genkey -v -keystore mystore.keystore.jks -alias alias_name -keyalg RSA -keysize 2048 -validity 10000
     */
    public SSLSocketFactory getSSLSocketFactoryWithSelfSignedCertificate() throws Exception {
        FileInputStream stream = new FileInputStream("src/test/resources/selfsigned.keystore.jks");
        char[] serverKeyStorePassword = "123456".toCharArray();
        KeyStore serverKeyStore = KeyStore.getInstance(KeyStore.getDefaultType());
        serverKeyStore.load(stream, serverKeyStorePassword);

        String kmfAlgorithm = KeyManagerFactory.getDefaultAlgorithm();
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(kmfAlgorithm);
        kmf.init(serverKeyStore, serverKeyStorePassword);

        TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(kmfAlgorithm);
        trustManagerFactory.init(serverKeyStore);

        SSLContext sslContext = SSLContext.getInstance("SSL");
        sslContext.init(kmf.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);
        return sslContext.getSocketFactory();
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/config/ExternalConfigProviderTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.config;

import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

import static org.assertj.core.api.Assertions.assertThat;

public class ExternalConfigProviderTest {

    private MockWebServer mockServer;

    @Before
    public void setUp() throws IOException {
        mockServer = new MockWebServer();
        mockServer.start();
    }

    @After
    public void tearDown() throws IOException {
        mockServer.shutdown();
    }

    @Test
    public void testAutoFetchWebDAVConfiguration() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"https://download.example.com:9000/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .build();

        // Then
        assertThat(provider.getKeepWebHost()).isEqualTo("download.example.com");
        assertThat(provider.getKeepWebPort()).isEqualTo(9000);
    }

    @Test
    public void testAutoFetchWebDAVConfigurationWithDefaultHttpsPort() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"https://download.example.com/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .build();

        // Then
        assertThat(provider.getKeepWebHost()).isEqualTo("download.example.com");
        assertThat(provider.getKeepWebPort()).isEqualTo(443);
    }

    @Test
    public void testAutoFetchWebDAVConfigurationWithDefaultHttpPort() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"http://download.example.com/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .build();

        // Then
        assertThat(provider.getKeepWebHost()).isEqualTo("download.example.com");
        assertThat(provider.getKeepWebPort()).isEqualTo(80);
    }

    @Test
    public void testManualConfigurationTakesPrecedence() {
        // Given - server returns config but we provide manual values
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"https://auto.example.com/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When - manual configuration is provided
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .keepWebHost("manual.example.com")
                .keepWebPort(8080)
                .build();

        // Then - manual values should be used
        assertThat(provider.getKeepWebHost()).isEqualTo("manual.example.com");
        assertThat(provider.getKeepWebPort()).isEqualTo(8080);
    }

    @Test
    public void testAutoFetchDisabled() {
        // When - auto-fetch is explicitly disabled
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost("api.example.com")
                .apiPort(443)
                .apiProtocol("https")
                .apiToken("test-token")
                .autoFetchWebDAV(false)
                .build();

        // Then - keepWeb values should be null/0
        assertThat(provider.getKeepWebHost()).isNull();
        assertThat(provider.getKeepWebPort()).isEqualTo(0);
    }

    @Test
    public void testHandlesApiError() {
        // Given - server returns error
        mockServer.enqueue(new MockResponse()
                .setResponseCode(500)
                .setBody("Internal Server Error"));

        // When
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .build();

        // Then - should handle gracefully, keepWeb values should be null/0
        assertThat(provider.getKeepWebHost()).isNull();
        assertThat(provider.getKeepWebPort()).isEqualTo(0);
    }

    @Test
    public void testHandlesMalformedResponse() {
        // Given - server returns malformed JSON
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody("{ invalid json ]")
                .addHeader("Content-Type", "application/json"));

        // When
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .build();

        // Then - should handle gracefully
        assertThat(provider.getKeepWebHost()).isNull();
        assertThat(provider.getKeepWebPort()).isEqualTo(0);
    }

    @Test
    public void testHandlesMissingWebDAVInResponse() {
        // Given - server returns config without WebDAV section
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        ExternalConfigProvider provider = ExternalConfigProvider.builder()
                .apiHost(mockServer.getHostName())
                .apiPort(mockServer.getPort())
                .apiProtocol("http")
                .apiToken("test-token")
                .build();

        // Then - should handle gracefully
        assertThat(provider.getKeepWebHost()).isNull();
        assertThat(provider.getKeepWebPort()).isEqualTo(0);
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/config/WebDAVConfigFetcherTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.config;

import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

import static org.assertj.core.api.Assertions.assertThat;

public class WebDAVConfigFetcherTest {

    private MockWebServer mockServer;

    @Before
    public void setUp() throws IOException {
        mockServer = new MockWebServer();
        mockServer.start();
    }

    @After
    public void tearDown() throws IOException {
        mockServer.shutdown();
    }

    @Test
    public void testFetchWithValidConfig() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"https://download.example.com:9000/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNotNull();
        assertThat(config.getHost()).isEqualTo("download.example.com");
        assertThat(config.getPort()).isEqualTo(9000);
    }

    @Test
    public void testFetchWithDefaultHttpsPort() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"https://download.example.com/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNotNull();
        assertThat(config.getHost()).isEqualTo("download.example.com");
        assertThat(config.getPort()).isEqualTo(443);
    }

    @Test
    public void testFetchWithDefaultHttpPort() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"http://download.example.com/\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNotNull();
        assertThat(config.getHost()).isEqualTo("download.example.com");
        assertThat(config.getPort()).isEqualTo(80);
    }

    @Test
    public void testFetchWithApiError() {
        // Given
        mockServer.enqueue(new MockResponse()
                .setResponseCode(500)
                .setBody("Internal Server Error"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNull();
    }

    @Test
    public void testFetchWithMalformedJson() {
        // Given
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody("{ invalid json ]")
                .addHeader("Content-Type", "application/json"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNull();
    }

    @Test
    public void testFetchWithMissingWebDAVSection() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNull();
    }

    @Test
    public void testFetchWithNullApiHost() {
        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "https", null, 443, false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNull();
    }

    @Test
    public void testFetchWithEmptyApiHost() {
        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "https", "", 443, false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNull();
    }

    @Test
    public void testFetchWithInvalidWebDAVUrl() {
        // Given
        String configResponse = "{\n" +
                "  \"Services\": {\n" +
                "    \"WebDAVDownload\": {\n" +
                "      \"ExternalURL\": \"not-a-valid-url\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        mockServer.enqueue(new MockResponse()
                .setResponseCode(200)
                .setBody(configResponse)
                .addHeader("Content-Type", "application/json"));

        // When
        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(
                "http", mockServer.getHostName(), mockServer.getPort(), false);
        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();

        // Then
        assertThat(config).isNull();
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.facade;

import org.apache.commons.io.FileUtils;
import org.arvados.client.api.model.Collection;
import org.arvados.client.common.Characters;
import org.arvados.client.config.ExternalConfigProvider;
import org.arvados.client.junit.categories.IntegrationTests;
import org.arvados.client.logic.collection.FileToken;
import org.arvados.client.test.utils.ArvadosClientIntegrationTest;
import org.arvados.client.test.utils.FileTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.io.File;
import java.util.Collections;
import java.util.List;
import java.util.UUID;

import static org.arvados.client.test.utils.FileTestUtils.FILE_DOWNLOAD_TEST_DIR;
import static org.arvados.client.test.utils.FileTestUtils.FILE_SPLIT_TEST_DIR;
import static org.arvados.client.test.utils.FileTestUtils.TEST_FILE;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

@Category(IntegrationTests.class)
public class ArvadosFacadeIntegrationTest extends ArvadosClientIntegrationTest {

    private static final String COLLECTION_NAME = "Test collection " + UUID.randomUUID().toString();
    private String collectionUuid;

    @Before
    public void setUp() throws Exception {
        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);
    }

    @Test
    public void uploadOfFileIsPerformedSuccessfully() throws Exception {
        // given
        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB / 200);

        // when
        Collection actual = FACADE.upload(Collections.singletonList(file), COLLECTION_NAME, PROJECT_UUID);
        collectionUuid = actual.getUuid();

        // then
        assertThat(actual.getName()).contains("Test collection");
        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());
    }

    @Test
    public void uploadOfFilesIsPerformedSuccessfully() throws Exception {
        // given
        List<File> files = FileTestUtils.generatePredefinedFiles();
        files.addAll(FileTestUtils.generatePredefinedFiles());

        // when
        Collection actual = FACADE.upload(files, COLLECTION_NAME, PROJECT_UUID);
        collectionUuid = actual.getUuid();

        // then
        assertThat(actual.getName()).contains("Test collection");
        files.forEach(f -> assertThat(actual.getManifestText())
                .contains(f.length() + Characters.COLON + f.getName().replace(" ", Characters.SPACE)));
    }

    @Test
    public void uploadToExistingCollectionIsPerformedSuccessfully() throws Exception {
        // given
        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB / 500);
        Collection existing = createTestCollection();

        // when
        Collection actual = FACADE.uploadToExistingCollection(Collections.singletonList(file), collectionUuid);

        // then
        assertEquals(collectionUuid, actual.getUuid());
        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());
    }

    @Test
    public void uploadWithExternalConfigProviderWorksProperly() throws Exception {
        // given
        ArvadosFacade facade = new ArvadosFacade(buildExternalConfig());
        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB / 200);

        // when
        Collection actual = facade.upload(Collections.singletonList(file), COLLECTION_NAME, PROJECT_UUID);
        collectionUuid = actual.getUuid();

        // then
        assertThat(actual.getName()).contains("Test collection");
        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());
    }

    @Test
    public void creationOfEmptyCollectionPerformedSuccessfully() {
        // given
        String collectionName = "Empty collection " + UUID.randomUUID().toString();

        // when
        Collection actual = FACADE.createEmptyCollection(collectionName, PROJECT_UUID);
        collectionUuid = actual.getUuid();

        // then
        assertEquals(collectionName, actual.getName());
        assertEquals(PROJECT_UUID, actual.getOwnerUuid());
    }

    @Test
    public void fileTokensAreListedFromCollection() throws Exception {
        // given
        List<File> files = uploadTestFiles();

        // when
        List<FileToken> actual = FACADE.listFileInfoFromCollection(collectionUuid);

        // then
        assertEquals(files.size(), actual.size());
        for (int i = 0; i < files.size(); i++) {
            assertEquals(files.get(i).length(), actual.get(i).getFileSize());
        }
    }

    @Test
    public void downloadOfFilesPerformedSuccessfully() throws Exception {
        // given
        List<File> files = uploadTestFiles();
        File destination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);

        // when
        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);

        // then
        assertEquals(files.size(), actual.size());
        assertTrue(destination.exists());
        assertThat(actual).allMatch(File::exists);
        for (int i = 0; i < files.size(); i++) {
            assertEquals(files.get(i).length(), actual.get(i).length());
        }
    }

    @Test
    public void downloadOfFilesPerformedSuccessfullyUsingKeepWeb() throws Exception {
        // given
        List<File> files = uploadTestFiles();
        File destination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);

        // when
        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, true);

        // then
        assertEquals(files.size(), actual.size());
        assertTrue(destination.exists());
        assertThat(actual).allMatch(File::exists);
        for (int i = 0; i < files.size(); i++) {
            assertEquals(files.get(i).length(), actual.get(i).length());
        }
    }

    @Test
    public void singleFileIsDownloadedSuccessfullyUsingKeepWeb() throws Exception {
        // given
        File file = uploadSingleTestFile(false);

        // when
        File actual = FACADE.downloadFile(file.getName(), collectionUuid, FILE_DOWNLOAD_TEST_DIR);

        // then
        assertThat(actual).exists();
        assertThat(actual.length()).isEqualTo(file.length());
    }

    @Test
    public void downloadOfOneFileSplitToMultipleLocatorsPerformedSuccessfully() throws Exception {
        // given
        File file = uploadSingleTestFile(true);

        // when
        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);

        // then
        Assert.assertEquals(1, actual.size());
        assertThat(actual.get(0).length()).isEqualTo(file.length());
    }

    @Test
    public void downloadWithExternalConfigProviderWorksProperly() throws Exception {
        // given
        ArvadosFacade facade = new ArvadosFacade(buildExternalConfig());
        List<File> files = uploadTestFiles();

        // when
        List<File> actual = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);

        // then
        assertEquals(files.size(), actual.size());
        assertThat(actual).allMatch(File::exists);
        for (int i = 0; i < files.size(); i++) {
            assertEquals(files.get(i).length(), actual.get(i).length());
        }
    }

    private ExternalConfigProvider buildExternalConfig() {
        return ExternalConfigProvider.builder()
                .apiHostInsecure(CONFIG.isApiHostInsecure())
                .keepWebHost(CONFIG.getKeepWebHost())
                .keepWebPort(CONFIG.getKeepWebPort())
                .apiHost(CONFIG.getApiHost())
                .apiPort(CONFIG.getApiPort())
                .apiToken(CONFIG.getApiToken())
                .apiProtocol(CONFIG.getApiProtocol())
                .fileSplitSize(CONFIG.getFileSplitSize())
                .fileSplitDirectory(CONFIG.getFileSplitDirectory())
                .numberOfCopies(CONFIG.getNumberOfCopies())
                .numberOfRetries(CONFIG.getNumberOfRetries())
                .connectTimeout(CONFIG.getConnectTimeout())
                .readTimeout(CONFIG.getReadTimeout())
                .writeTimeout(CONFIG.getWriteTimeout())
                .build();
    }

    private Collection createTestCollection() {
        Collection collection = FACADE.createEmptyCollection(COLLECTION_NAME, PROJECT_UUID);
        collectionUuid = collection.getUuid();
        return collection;
    }

    private List<File> uploadTestFiles() throws Exception {
        createTestCollection();
        List<File> files = FileTestUtils.generatePredefinedFiles();
        FACADE.uploadToExistingCollection(files, collectionUuid);
        return files;
    }

    private File uploadSingleTestFile(boolean bigFile) throws Exception {
        createTestCollection();
        Long fileSize = bigFile ? FileUtils.ONE_MB * 70 : FileTestUtils.ONE_EIGTH_GB / 100;
        File file = FileTestUtils.generateFile(TEST_FILE, fileSize);
        FACADE.uploadToExistingCollection(Collections.singletonList(file), collectionUuid);
        return file;
    }

    @After
    public void tearDown() throws Exception {
        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);

        if (collectionUuid != null) {
            FACADE.deleteCollection(collectionUuid);
        }
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.facade;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import okhttp3.mockwebserver.MockResponse;
import okio.Buffer;
import org.apache.commons.io.FileUtils;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.KeepService;
import org.arvados.client.api.model.KeepServiceList;
import org.arvados.client.common.Characters;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.arvados.client.test.utils.FileTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.Ignore;

import java.io.File;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
import static org.arvados.client.test.utils.FileTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class ArvadosFacadeTest extends ArvadosClientMockedWebServerTest {

    ArvadosFacade facade = new ArvadosFacade(CONFIG);

    @Before
    public void setUp() throws Exception {
        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);
    }

    @Test
    @Ignore("Failing test #15041")
    public void uploadIsPerformedSuccessfullyUsingDiskOnlyKeepServices() throws Exception {
        // given
        String keepServicesAccessible = setMockedServerPortToKeepServices("keep-services-accessible-disk-only");
        server.enqueue(new MockResponse().setBody(keepServicesAccessible));

        String blockLocator = "7df44272090cee6c0732382bba415ee9";
        String signedBlockLocator = blockLocator + "+70+A189a93acda6e1fba18a9dffd42b6591cbd36d55d@5a1c17b6";
        for (int i = 0; i < 8; i++) {
            server.enqueue(new MockResponse().setBody(signedBlockLocator));
        }

        server.enqueue(getResponse("users-get"));
        server.enqueue(getResponse("collections-create-manifest"));

        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB);

        // when
        Collection actual = facade.upload(Arrays.asList(new File(TEST_FILE)), "Super Collection", null);

        // then
        assertThat(actual.getName()).contains("Super Collection");
    }

    @Test
    public void uploadIsPerformedSuccessfully() throws Exception {
        // given
        // First response: get current user (called by CollectionFactory when projectUuid is null)
        server.enqueue(getResponse("users-get"));
        // Second response: create collection
        server.enqueue(getResponse("collections-create-manifest"));
        // Third response: upload file to KeepWeb (it returns empty response)
        server.enqueue(new MockResponse().setBody(""));
        // Fourth response: get the updated collection
        server.enqueue(getResponse("collections-create-manifest"));

        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB);

        // when
        Collection actual = facade.upload(Arrays.asList(new File(TEST_FILE)), "Super Collection", null);

        // then
        assertThat(actual.getName()).contains("Super Collection");
    }

    @Test
    public void downloadOfWholeCollectionIsPerformedSuccessfully() throws Exception {
        // given
        String collectionUuid = "ardev-4zz18-jk5vo4uo9u5vj52";
        server.enqueue(getResponse("collections-download-file"));

        // Mock KeepWeb API responses for each file
        List<File> files = generatePredefinedFiles();
        for (File f : files) {
            server.enqueue(new MockResponse().setBody(new Buffer().write(Files.readAllBytes(f.toPath()))));
        }

        // when
        List<File> downloadedFiles = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);

        // then
        File collectionDestination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);
        assertEquals(3, downloadedFiles.size());
        assertTrue(collectionDestination.exists());
        assertThat(downloadedFiles).allMatch(File::exists);
        assertEquals(files.stream().map(File::getName).collect(Collectors.toList()),
                downloadedFiles.stream().map(File::getName).collect(Collectors.toList()));
        assertEquals(files.stream().map(File::length).collect(Collectors.toList()),
                downloadedFiles.stream().map(File::length).collect(Collectors.toList()));
    }

    @Test
    public void downloadOfWholeCollectionUsingKeepWebPerformedSuccessfully() throws Exception {
        // given
        String collectionUuid = "ardev-4zz18-jk5vo4uo9u5vj52";
        server.enqueue(getResponse("collections-download-file"));

        List<File> files = generatePredefinedFiles();
        for (File f : files) {
            server.enqueue(new MockResponse().setBody(new Buffer().write(FileUtils.readFileToByteArray(f))));
        }

        // when
        List<File> downloadedFiles = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, true);

        // then
        assertEquals(3, downloadedFiles.size());
        assertThat(downloadedFiles).allMatch(File::exists);
        assertEquals(files.stream().map(File::getName).collect(Collectors.toList()),
                downloadedFiles.stream().map(File::getName).collect(Collectors.toList()));
        assertTrue(downloadedFiles.stream().map(File::length).collect(Collectors.toList())
                .containsAll(files.stream().map(File::length).collect(Collectors.toList())));
    }

    @Test
    public void downloadOfSingleFilePerformedSuccessfully() throws Exception {
        // given
        String collectionUuid = "ardev-4zz18-jk5vo4uo9u5vj52";
        server.enqueue(getResponse("collections-download-file"));

        File file = generatePredefinedFiles().get(0);
        byte[] fileData = FileUtils.readFileToByteArray(file);
        server.enqueue(new MockResponse().setBody(new Buffer().write(fileData)));

        // when
        File downloadedFile = facade.downloadFile(file.getName(), collectionUuid, FILE_DOWNLOAD_TEST_DIR);

        // then
        assertTrue(downloadedFile.exists());
        assertEquals(file.getName(), downloadedFile.getName());
        assertEquals(file.length(), downloadedFile.length());
    }

    private String setMockedServerPortToKeepServices(String jsonPath) throws Exception {
        ObjectMapper mapper = new ObjectMapper().findAndRegisterModules();
        String filePath = String.format("src/test/resources/org/arvados/client/api/client/%s.json", jsonPath);
        File jsonFile = new File(filePath);
        String json = FileUtils.readFileToString(jsonFile, Charset.defaultCharset());
        KeepServiceList keepServiceList = mapper.readValue(json, KeepServiceList.class);
        List<KeepService> items = keepServiceList.getItems();
        for (KeepService keepService : items) {
            keepService.setServicePort(server.getPort());
        }
        ObjectWriter writer = mapper.writer().withDefaultPrettyPrinter();
        return writer.writeValueAsString(keepServiceList);
    }

    // Method to copy multiple byte[] arrays into one byte[] array
    private byte[] addAll(byte[] array1, byte[] array2) {
        byte[] joinedArray = new byte[array1.length + array2.length];
        System.arraycopy(array1, 0, joinedArray, 0, array1.length);
        System.arraycopy(array2, 0, joinedArray, array1.length, array2.length);
        return joinedArray;
    }

    @After
    public void tearDown() throws Exception {
        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);
    }
}
================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/junit/categories/IntegrationTests.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.junit.categories;

public interface IntegrationTests {}

================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/FileTokenTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.logic.collection;

import org.arvados.client.common.Characters;
import org.junit.Assert;
import org.junit.Test;

public class FileTokenTest {

    public static final String FILE_TOKEN_INFO = "0:1024:test-file1";
    public static final int FILE_POSITION = 0;
    public static final long FILE_LENGTH = 1024L;
    public static final String FILE_NAME = "test-file1";
    public static final String FILE_PATH = "c" + Characters.SLASH;

    private static FileToken fileToken = new FileToken(FILE_TOKEN_INFO);
    private static FileToken fileTokenWithPath = new FileToken(FILE_TOKEN_INFO, FILE_PATH);

    @Test
    public void tokenInfoIsDividedCorrectly() {
        Assert.assertEquals(FILE_NAME, fileToken.getFileName());
        Assert.assertEquals(FILE_POSITION, fileToken.getFilePosition());
        Assert.assertEquals(FILE_LENGTH, fileToken.getFileSize());
    }

    @Test
    public void toStringReturnsOriginalFileTokenInfo() {
        Assert.assertEquals(FILE_TOKEN_INFO, fileToken.toString());
    }

    @Test
    public void fullPathIsReturnedProperly() {
        Assert.assertEquals(FILE_NAME, fileToken.getFullPath());
        Assert.assertEquals(FILE_PATH + FILE_NAME, fileTokenWithPath.getFullPath());
    }
}

================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.logic.collection;

import org.arvados.client.exception.ArvadosClientException;
import org.junit.Assert;
import org.junit.Test;

import java.util.List;

import static junit.framework.TestCase.fail;

public class ManifestDecoderTest {

    private ManifestDecoder manifestDecoder = new ManifestDecoder();

    private static final String ONE_LINE_MANIFEST_TEXT = ". " +
            "eff999f3b5158331eb44a9a93e3b36e1+67108864+Aad3839bea88bce22cbfe71cf4943de7dab3ea52a@5826180f " +
            "db141bfd11f7da60dce9e5ee85a988b8+34038725+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f " +
            "0:101147589:rna.SRR948778.bam" +
            "\\n";

    private static final String MULTIPLE_LINES_MANIFEST_TEXT = ". " +
            "930625b054ce894ac40596c3f5a0d947+33 " +
            "0:0:a 0:0:b 0:33:output.txt\n" +
            "./c d41d8cd98f00b204e9800998ecf8427e+0 0:0:d";

    private static final String MANIFEST_TEXT_WITH_INVALID_FIRST_PATH_COMPONENT = "a" + ONE_LINE_MANIFEST_TEXT;

    @Test
    public void allLocatorsAndFileTokensAreExtractedFromSimpleManifest() {
        List<ManifestStream> actual = manifestDecoder.decode(ONE_LINE_MANIFEST_TEXT);

        // one manifest stream
        Assert.assertEquals(1, actual.size());

        ManifestStream manifest = actual.get(0);
        // two locators
        Assert.assertEquals(2, manifest.getKeepLocators().size());
        // one file token
        Assert.assertEquals(1, manifest.getFileTokens().size());
    }

    @Test
    public void allLocatorsAndFileTokensAreExtractedFromComplexManifest() {
        List<ManifestStream> actual = manifestDecoder.decode(MULTIPLE_LINES_MANIFEST_TEXT);

        // two manifest streams
        Assert.assertEquals(2, actual.size());

        // first stream - 1 locator and 3 file tokens
        ManifestStream firstManifestStream = actual.get(0);
        Assert.assertEquals(1, firstManifestStream.getKeepLocators().size());
        Assert.assertEquals(3, firstManifestStream.getFileTokens().size());

        // second stream - 1 locator and 1 file token
        ManifestStream secondManifestStream = actual.get(1);
        Assert.assertEquals(1, secondManifestStream.getKeepLocators().size());
        Assert.assertEquals(1, secondManifestStream.getFileTokens().size());
    }

    @Test
    public void manifestTextWithInvalidStreamNameThrowsException() {
        try {
            manifestDecoder.decode(MANIFEST_TEXT_WITH_INVALID_FIRST_PATH_COMPONENT);
            fail();
        } catch (ArvadosClientException e) {
            Assert.assertEquals("Invalid first path component (expecting \".\")", e.getMessage());
        }
    }

    @Test
    public void emptyManifestTextThrowsException() {
        String emptyManifestText = null;
        try {
            manifestDecoder.decode(emptyManifestText);
            fail();
        } catch (ArvadosClientException e) {
            Assert.assertEquals("Manifest text cannot be empty.", e.getMessage());
        }

        emptyManifestText = "";
        try {
            manifestDecoder.decode(emptyManifestText);
            fail();
        } catch (ArvadosClientException e) {
            Assert.assertEquals("Manifest text cannot be empty.", e.getMessage());
        }
    }
}

================================================
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java
================================================
/*
 * Copyright (C) The Arvados Authors. All rights reserved.
 *
 * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
 *
 */

package org.arvados.client.logic.collection;

import org.arvados.client.test.utils.FileTestUtils;
import org.assertj.core.util.Lists;
import org.junit.Test;
import org.junit.Ignore;

import java.io.File;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;

public class ManifestFactoryTest {

    @Test
    @Ignore("Failing test #15041")
    public void manifestIsCreatedAsExpected() throws Exception {
        // given
        List<File> files = FileTestUtils.generatePredefinedFiles();
        List<String> locators = Lists.newArrayList("a", "b", "c");
        ManifestFactory factory = ManifestFactory.builder()
                .files(files)
                .locators(locators)
                .build();

        // when
        String actual = factory.create();

        // then
        assertThat(actual).isEqualTo(". a b c 0:1024:test-file1 1024:20480:test-file2 21504:1048576:test-file\\0403\n");
    }
}
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.collection; import org.junit.Assert; import org.junit.Test; import java.util.List; public class ManifestStreamTest { private ManifestDecoder manifestDecoder = new ManifestDecoder(); @Test public void toStringReturnsProperlyConnectedManifestStream() throws Exception{ String encodedManifest = ". eff999f3b5158331eb44a9a93e3b36e1+67108864 db141bfd11f7da60dce9e5ee85a988b8+34038725 0:101147589:rna.SRR948778.bam\\n\""; List<ManifestStream> manifestStreams = manifestDecoder.decode(encodedManifest); Assert.assertEquals(encodedManifest, manifestStreams.get(0).toString()); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.keep; import com.fasterxml.jackson.databind.ObjectMapper; import org.arvados.client.api.client.CollectionsApiClient; import org.arvados.client.api.client.KeepWebApiClient; import org.arvados.client.api.model.Collection; import org.arvados.client.common.Characters; import org.arvados.client.logic.collection.FileToken; import org.arvados.client.logic.collection.ManifestDecoder; import org.arvados.client.logic.collection.ManifestStream; import org.arvados.client.test.utils.FileTestUtils; import org.apache.commons.io.FileUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import static org.arvados.client.test.utils.FileTestUtils.*; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class FileDownloaderTest { static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules(); private Collection collectionToDownload; private ManifestStream manifestStream; @Mock private CollectionsApiClient collectionsApiClient; @Mock private KeepWebApiClient keepWebApiClient; @Mock private ManifestDecoder manifestDecoder; @InjectMocks private FileDownloader fileDownloader; @Before public void setUp() throws Exception { FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR); FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR); collectionToDownload = prepareCollection(); manifestStream = prepareManifestStream(); } @Test public void downloadingAllFilesFromCollectionWorksProperly() throws Exception { // given List<File> files = generatePredefinedFiles(); //having when(collectionsApiClient.get(collectionToDownload.getUuid())).thenReturn(collectionToDownload); when(manifestDecoder.decode(collectionToDownload.getManifestText())).thenReturn(Arrays.asList(manifestStream)); // Mock download responses for all three files based on the file tokens when(keepWebApiClient.download(collectionToDownload.getUuid(),
"test-file1")).thenReturn(FileUtils.readFileToByteArray(files.get(0))); when(keepWebApiClient.download(collectionToDownload.getUuid(), "test-file2")).thenReturn(FileUtils.readFileToByteArray(files.get(1))); when(keepWebApiClient.download(collectionToDownload.getUuid(), "test-file 3")).thenReturn(FileUtils.readFileToByteArray(files.get(2))); //when List downloadedFiles = fileDownloader.downloadFilesFromCollection(collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR); //then assertEquals(3, downloadedFiles.size()); // 3 files downloaded File collectionDir = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionToDownload.getUuid()); assertTrue(collectionDir.exists()); // collection directory created // 3 files correctly saved assertThat(downloadedFiles).allMatch(File::exists); // Verify file contents match File downloaded1 = new File(collectionDir + Characters.SLASH + "test-file1"); File downloaded2 = new File(collectionDir + Characters.SLASH + "test-file2"); File downloaded3 = new File(collectionDir + Characters.SLASH + "test-file 3"); assertArrayEquals(FileUtils.readFileToByteArray(downloaded1), FileUtils.readFileToByteArray(files.get(0))); assertArrayEquals(FileUtils.readFileToByteArray(downloaded2), FileUtils.readFileToByteArray(files.get(1))); assertArrayEquals(FileUtils.readFileToByteArray(downloaded3), FileUtils.readFileToByteArray(files.get(2))); } @Test public void downloadingSingleFileFromKeepWebWorksCorrectly() throws Exception{ //given File file = generatePredefinedFiles().get(0); //having when(collectionsApiClient.get(collectionToDownload.getUuid())).thenReturn(collectionToDownload); when(manifestDecoder.decode(collectionToDownload.getManifestText())).thenReturn(Arrays.asList(manifestStream)); when(keepWebApiClient.download(collectionToDownload.getUuid(), file.getName())).thenReturn(FileUtils.readFileToByteArray(file)); //when File downloadedFile = fileDownloader.downloadSingleFileUsingKeepWeb(file.getName(), collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR); //then assertTrue(downloadedFile.exists()); assertEquals(file.getName(), downloadedFile.getName()); assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file)); } @Test public void testDownloadFileWithResume() throws Exception { //given String collectionUuid = "some-collection-uuid"; String expectedDataString = "testData"; String fileName = "sample-file-name"; long start = 0; Long end = null; InputStream inputStream = new ByteArrayInputStream(expectedDataString.getBytes()); when(keepWebApiClient.get(collectionUuid, fileName, start, end)).thenReturn(inputStream); //when File downloadedFile = fileDownloader.downloadFileWithResume(collectionUuid, fileName, FILE_DOWNLOAD_TEST_DIR, start, end); //then assertNotNull(downloadedFile); assertTrue(downloadedFile.exists()); String actualDataString = Files.readString(downloadedFile.toPath()); assertEquals("The content of the file does not match the expected data.", expectedDataString, actualDataString); } @After public void tearDown() throws Exception { FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR); FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR); } private Collection prepareCollection() throws IOException { // collection that will be returned by mocked collectionsApiClient String filePath = "src/test/resources/org/arvados/client/api/client/collections-download-file.json"; File jsonFile = new File(filePath); return MAPPER.readValue(jsonFile, Collection.class); } private ManifestStream prepareManifestStream() throws 
private ManifestStream prepareManifestStream() throws Exception { // manifestStream that will be returned by mocked manifestDecoder List<FileToken> fileTokens = new ArrayList<>(); fileTokens.add(new FileToken("0:1024:test-file1")); fileTokens.add(new FileToken("1024:20480:test-file2")); fileTokens.add(new FileToken("21504:1048576:test-file\\0403")); KeepLocator keepLocator = new KeepLocator("163679d58edaadc28db769011728a72c+1070080+A3acf8c1fe582c265d2077702e4a7d74fcc03aba8@5aa4fdeb"); return new ManifestStream(".", Arrays.asList(keepLocator), fileTokens); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.logic.keep; import org.junit.Test; import static org.assertj.core.api.Assertions.assertThat; public class KeepLocatorTest { private KeepLocator locator; @Test public void md5sumIsExtracted() throws Exception { // given locator = new KeepLocator("7df44272090cee6c0732382bba415ee9+70"); // when String actual = locator.getMd5sum(); // then assertThat(actual).isEqualTo("7df44272090cee6c0732382bba415ee9"); } @Test public void locatorIsStrippedWithMd5sumAndSize() throws Exception { // given locator = new KeepLocator("7df44272090cee6c0732382bba415ee9+70"); // when String actual = locator.stripped(); // then assertThat(actual).isEqualTo("7df44272090cee6c0732382bba415ee9+70"); } @Test public void locatorToStringProperlyShowing() throws Exception { // given locator = new KeepLocator("7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f"); // when String actual = locator.toString(); // then assertThat(actual).isEqualTo("7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f"); } } ================================================
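KeepLocatorTest above fixes the locator grammar: a 32-digit MD5 content hash, a '+'-separated block size, and optional further '+'-separated hints such as the A<signature>@<expiry> permission hint. A free-standing sketch of pulling those fields apart; this is a simplified reading of the format, not the SDK's own parser:

public class KeepLocatorSketch {
    public static void main(String[] args) {
        String locator = "7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f";
        String[] fields = locator.split("\\+");
        String md5 = fields[0];                // 32-hex-digit content hash
        long size = Long.parseLong(fields[1]); // block size in bytes
        System.out.println(md5 + "+" + size);  // the stripped() form: hash plus size only
        for (int i = 2; i < fields.length; i++) {
            if (fields[i].startsWith("A")) {
                // "A<signature>@<expiry>": a time-limited permission to read the block
                System.out.println("permission hint: " + fields[i]);
            }
        }
    }
}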
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.test.utils; import org.arvados.client.config.FileConfigProvider; import okhttp3.mockwebserver.MockResponse; import okhttp3.mockwebserver.RecordedRequest; import org.apache.commons.io.FileUtils; import java.io.File; import java.io.IOException; import java.nio.charset.Charset; import static org.assertj.core.api.Assertions.assertThat; public final class ApiClientTestUtils { static final String BASE_URL = "/arvados/v1/"; private ApiClientTestUtils() {} public static MockResponse getResponse(String filename) throws IOException { String filePath = String.format("src/test/resources/org/arvados/client/api/client/%s.json", filename); File jsonFile = new File(filePath); String json = FileUtils.readFileToString(jsonFile, Charset.defaultCharset()); return new MockResponse().setBody(json); } public static void assertAuthorizationHeader(RecordedRequest request) { assertThat(request.getHeader("authorization")).isEqualTo("Bearer " + new FileConfigProvider().getApiToken()); } public static void assertRequestPath(RecordedRequest request, String subPath) { assertThat(request.getPath()).isEqualTo(BASE_URL + subPath); } public static void assertRequestMethod(RecordedRequest request, RequestMethod requestMethod) { assertThat(request.getMethod()).isEqualTo(requestMethod.name()); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.test.utils; import org.arvados.client.config.FileConfigProvider; import org.arvados.client.facade.ArvadosFacade; import org.junit.BeforeClass; import static org.junit.Assert.assertTrue; public class ArvadosClientIntegrationTest { protected static final FileConfigProvider CONFIG = new FileConfigProvider("integration-tests-application.conf"); protected static final ArvadosFacade FACADE = new ArvadosFacade(CONFIG); protected static final String PROJECT_UUID = CONFIG.getIntegrationTestProjectUuid(); @BeforeClass public static void validateConfiguration(){ String msg = " info must be provided in configuration"; CONFIG.getConfig().entrySet() .forEach(e -> assertTrue("Parameter " + e.getKey() + msg, !e.getValue().render().equals("\"\""))); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.test.utils; import okhttp3.mockwebserver.MockWebServer; import org.junit.After; import org.junit.Before; public class ArvadosClientMockedWebServerTest extends ArvadosClientUnitTest { private static final int PORT = CONFIG.getApiPort(); protected MockWebServer server = new MockWebServer(); @Before public void setUpServer() throws Exception { server.start(PORT); } @After public void tearDownServer() throws Exception { server.shutdown(); } } ================================================
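Both test base classes guard against a half-filled configuration with the same idiom: walk every leaf of the parsed config and reject any value that still renders as an empty string. Assuming the Typesafe Config (HOCON) library that a FileConfigProvider of *.conf files suggests, the check reduces to:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class ConfigValidationSketch {
    public static void main(String[] args) {
        Config config = ConfigFactory.parseString("arvados { api { host = \"\", port = 443 } }");
        // entrySet() yields every leaf with its dotted path; render() of an empty
        // string value is the two-character literal "" that the base classes reject.
        config.entrySet().forEach(entry -> {
            boolean missing = entry.getValue().render().equals("\"\"");
            System.out.println(entry.getKey() + (missing ? " is missing" : " = " + entry.getValue().render()));
        });
    }
}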
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.test.utils; import org.arvados.client.config.FileConfigProvider; import org.junit.BeforeClass; import static org.junit.Assert.assertTrue; public class ArvadosClientUnitTest { protected static final FileConfigProvider CONFIG = new FileConfigProvider("application.conf"); @BeforeClass public static void validateConfiguration(){ String msg = " info must be provided in configuration"; CONFIG.getConfig().entrySet().forEach(e -> assertTrue("Parameter " + e.getKey() + msg, !e.getValue().render().equals("\"\""))); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/FileTestUtils.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.test.utils; import org.apache.commons.io.FileUtils; import org.assertj.core.util.Lists; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.util.List; public class FileTestUtils { public static final String FILE_SPLIT_TEST_DIR = "/tmp/file-split"; public static final String FILE_DOWNLOAD_TEST_DIR = "/tmp/arvados-downloaded"; public static final String TEST_FILE = FILE_SPLIT_TEST_DIR + "/test-file"; public static long ONE_FOURTH_GB = FileUtils.ONE_GB / 4; public static long ONE_EIGTH_GB = FileUtils.ONE_GB / 8; public static long HALF_GB = FileUtils.ONE_GB / 2; public static int FILE_SPLIT_SIZE = 64; public static void createDirectory(String path) throws Exception { new File(path).mkdirs(); } public static void cleanDirectory(String directory) throws Exception { FileUtils.cleanDirectory(new File(directory)); } public static File generateFile(String path, long length) throws IOException { RandomAccessFile testFile = new RandomAccessFile(path, "rwd"); testFile.setLength(length); testFile.close(); return new File(path); } public static List<File> generatePredefinedFiles() throws IOException { return Lists.newArrayList( generateFile(TEST_FILE + 1, FileUtils.ONE_KB), generateFile(TEST_FILE + 2, FileUtils.ONE_KB * 20), generateFile(TEST_FILE + " " + 3, FileUtils.ONE_MB) ); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/RequestMethod.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.test.utils; public enum RequestMethod { GET, POST, PUT, DELETE } ================================================
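One detail of FileTestUtils worth calling out: generateFile builds its fixtures with RandomAccessFile.setLength, which grows a file to the requested size without writing any bytes (a sparse file on most filesystems), so the 128 MB inputs used by the split and merge tests below are effectively free to create. The trick in isolation:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class SparseFixtureSketch {
    public static void main(String[] args) throws IOException {
        File fixture = File.createTempFile("fixture", ".bin");
        fixture.deleteOnExit();
        try (RandomAccessFile raf = new RandomAccessFile(fixture, "rw")) {
            raf.setLength(134217728L); // 1/8 GB logical size, no data written
        }
        System.out.println(fixture.length()); // 134217728
    }
}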
FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/utils/FileMergeTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.utils; import org.arvados.client.test.utils.FileTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.io.File; import java.util.List; import static org.arvados.client.test.utils.FileTestUtils.*; import static org.assertj.core.api.Assertions.assertThat; public class FileMergeTest { @Before public void setUp() throws Exception { FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR); } @Test public void fileChunksAreMergedIntoOneFile() throws Exception { // given FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB); List<File> files = FileSplit.split(new File(TEST_FILE), new File(FILE_SPLIT_TEST_DIR), FILE_SPLIT_SIZE); File targetFile = new File(TEST_FILE); // when FileMerge.merge(files, targetFile); // then assertThat(targetFile.length()).isEqualTo(FileTestUtils.ONE_EIGTH_GB); } @After public void tearDown() throws Exception { FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR); } } ================================================ FILE: contrib/java-sdk-v2/src/test/java/org/arvados/client/utils/FileSplitTest.java ================================================ /* * Copyright (C) The Arvados Authors. All rights reserved. * * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0 * */ package org.arvados.client.utils; import org.arvados.client.test.utils.FileTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.io.File; import java.util.List; import static org.arvados.client.test.utils.FileTestUtils.*; import static org.assertj.core.api.Assertions.assertThat; public class FileSplitTest { @Before public void setUp() throws Exception { FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR); } @Test public void fileIsDividedIntoSmallerChunks() throws Exception { // given int expectedSize = 2; int expectedFileSizeInBytes = 67108864; FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB); // when List<File> actual = FileSplit.split(new File(TEST_FILE), new File(FILE_SPLIT_TEST_DIR), FILE_SPLIT_SIZE); // then assertThat(actual).hasSize(expectedSize); assertThat(actual).allMatch(a -> a.length() == expectedFileSizeInBytes); } @After public void tearDown() throws Exception { FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR); } } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/application.conf ================================================ # configuration for unit tests arvados { api { port = 9000 keepweb-port = 9000 token = 1m69yw9m2wanubzyfkb1e9icplqhtr2r969bu9rnzqbqhb7cnb protocol = "http" } } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/integration-tests-application.conf ================================================ # Configuration for integration tests # # Remarks: # * For an example, see integration-tests-application.conf.example # * When filling in values, replace the empty quotation marks ("") on each line # * See the Arvados documentation for how to obtain a token: # https://doc.arvados.org/user/reference/api-tokens.html # arvados { api { keepweb-host = 
collections.ardev.mycompany.com keepweb-port = 443 host = api.ardev.mycompany.com port = 443 token = mytoken protocol = https host-insecure = false } integration-tests { project-uuid = ardev-j7d0g-aa123f81q6y7skk } } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker ================================================ mock-maker-inline ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json ================================================ { "kind": "arvados#collection", "etag": "bqoujj7oybdx0jybwvtsebj7y", "uuid": "112ci-4zz18-12tncxzptzbec1p", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-21T13:38:56.521853000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-21T13:38:56.521853000Z", "name": "Super Collection", "description": null, "properties": {}, "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "manifest_text": ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json ================================================ { "kind": "arvados#collection", "etag": "bqoujj7oybdx0jybwvtsebj7y", "uuid": "112ci-4zz18-12tncxzptzbec1p", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-21T13:38:56.521853000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-21T13:38:56.521853000Z", "name": "Super Collection", "description": null, "properties": {}, "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "manifest_text": "", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json ================================================ { "kind": "arvados#collection", "etag": "2vm76dxmzr23u9774iguuxsrg", "uuid": "ardev-4zz18-jk5vo4uo9u5vj52", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-02-19T11:00:00.852389000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-02-19T11:00:00.852389000Z", "name": "New Collection (2018-02-19 12:00:00.273)", "description": null, "properties": {}, "portable_data_hash": "49581091dfad651945c12b08d4735d88+112", "manifest_text": ". 
163679d58edaadc28db769011728a72c+1070080+A3acf8c1fe582c265d2077702e4a7d74fcc03aba8@5aa4fdeb 0:1024:test-file1 1024:20480:test-file2 21504:1048576:test-file\\0403\n", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-get.json ================================================ { "kind": "arvados#collection", "etag": "52tk5yg024cwhkkcidu3zcmj2", "uuid": "112ci-4zz18-p51w7z3fpopo6sm", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-15T10:36:03.554356000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-15T10:36:03.554356000Z", "name": "Collection With Manifest #2", "description": null, "properties": {}, "portable_data_hash": "6c4106229b08fe25f48b3a7a8289dd46+143", "manifest_text": ". 66c9daa69630e092e9ce554b7aae8a20+524288+A4a15ffea58f259e09f68d3f7eea29942750a79d0@5a269ff6 435f38dd384b06c248feabee0cabca52+524288+A8a99e8148bd368c49901526098901bb7d7890c3b@5a269ff6 dc5b6c104aab35fff6d70a4dadc28d37+391727+Ab0662d549c422c983fccaad02b4ade7b48a8255b@5a269ff6 0:1440303:lombok.jar\n", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-list.json ================================================ { "kind": "arvados#collectionList", "etag": "", "offset": 0, "limit": 100, "items": [ { "kind": "arvados#collection", "etag": "8xyiwnih5b5vzmj5sa33348a7", "uuid": "112ci-4zz18-x6xfmvz0chnkzgv", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-15T13:06:36.934337000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-15T13:06:36.934337000Z", "name": "Collection With Manifest #3", "description": null, "properties": {}, "portable_data_hash": "6c4106229b08fe25f48b3a7a8289dd46+143", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "8cmhep8aixe4p42pxjoct5502", "uuid": "112ci-4zz18-p51w7z3fpopo6sm", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-15T10:36:03.554356000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-15T10:36:03.554356000Z", "name": "Collection With Manifest #2", "description": null, "properties": {}, "portable_data_hash": "6c4106229b08fe25f48b3a7a8289dd46+143", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "de2ol2dyvsba3mn46al760cyg", "uuid": "112ci-4zz18-xb6gf2yraln7cwa", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-15T09:32:44.146172000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-15T09:32:44.146172000Z", "name": "New collection", "description": null, "properties": {}, "portable_data_hash": 
"d41d8cd98f00b204e9800998ecf8427e+0", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "dby68gd0vatvi090cu0axvtq3", "uuid": "112ci-4zz18-r5jfktpn3a9o0ap", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-14T13:00:35.431046000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-14T13:00:35.431046000Z", "name": "Collection With Manifest #1", "description": null, "properties": {}, "portable_data_hash": "3c59518bf8e1100d420488d822682b4a+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "2b34uzau862w862a2rv36agv6", "uuid": "112ci-4zz18-nqxk8xjn6mtskzt", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-14T12:59:34.767068000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-14T12:59:34.767068000Z", "name": "Empty Collection #2", "description": null, "properties": {}, "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "60aywazztwfspnasltufcjxpa", "uuid": "112ci-4zz18-rs9bcf5qnyfjrkm", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-14T12:52:33.124452000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-14T12:52:33.124452000Z", "name": "Empty Collection #1", "description": null, "properties": {}, "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "1jward6snif3tsjzftxh8hvwh", "uuid": "112ci-4zz18-af656lee4kv7q2m", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-14T12:09:05.319319000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-14T12:09:05.319319000Z", "name": "create example", "description": null, "properties": {}, "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "zs2n4zliu6nb5yk3rw6h5ugw", "uuid": "112ci-4zz18-y2zqix7k9an7nro", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-13T16:59:02.299257000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-13T16:59:02.299257000Z", "name": "Saved at 2017-11-13 16:59:01 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "eijhemzgy44ofmu0dtrowl604", "uuid": 
"112ci-4zz18-wq77jfi62u5i4rv", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-13T16:58:10.637548000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-13T16:58:10.637548000Z", "name": "Saved at 2017-11-13 16:58:07 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "1oq7ye0gfbf3ih6y864w3n683", "uuid": "112ci-4zz18-unaeckkjgeg7ui0", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-10T09:43:07.583862000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-10T09:43:07.583862000Z", "name": "Saved at 2017-11-10 09:43:03 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "4qmqlro878yx8q7ikhilo8qwn", "uuid": "112ci-4zz18-5y6atonkxq55lms", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T12:46:15.245770000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T12:46:15.245770000Z", "name": "Saved at 2017-11-09 12:46:13 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "91v698hngoz241c38bbmh0ogc", "uuid": "112ci-4zz18-b3fjqd01pxjvseo", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:54:07.259998000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:54:07.259998000Z", "name": "Saved at 2017-11-09 11:54:04 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "215t842ckrrgjpxrxr4j0gsui", "uuid": "112ci-4zz18-cwfxl8h41q18n65", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:49:38.276888000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:49:38.276888000Z", "name": "Saved at 2017-11-09 11:49:35 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "90z6i3oqv197osng3wvjjir3t", "uuid": "112ci-4zz18-uv4xu08739tn1vy", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:43:05.917513000Z", "modified_by_client_uuid": 
"112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:43:05.917513000Z", "name": "Saved at 2017-11-09 11:43:05 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "5lcf6wvc3wypwobswdz22wen", "uuid": "112ci-4zz18-pzisn8c5mefzczv", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:40:38.804718000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:40:38.804718000Z", "name": "Saved at 2017-11-09 11:40:36 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "98s08xew49avui1gy3mzit8je", "uuid": "112ci-4zz18-mj24uwtnqqrno27", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:40:25.189869000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:40:25.189869000Z", "name": "Saved at 2017-11-09 11:40:24 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "a09wnvl4i51xqx7u9yf4qbi94", "uuid": "112ci-4zz18-oco162516upgqng", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:39:04.148785000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:39:04.148785000Z", "name": "Saved at 2017-11-09 11:39:03 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "4ee2xudbc5rkr597drgu9tg10", "uuid": "112ci-4zz18-tlze7dgczsdwkep", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:37:59.478975000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:37:59.478975000Z", "name": "Saved at 2017-11-09 11:37:58 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "5aa3evnbceo3brnps2e1sq8ts", "uuid": "112ci-4zz18-nq0kxi9d7w64la1", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:32:23.329259000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:32:23.329259000Z", "name": "Saved at 2017-11-09 11:32:22 
UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "97vicgogv8bovmk4s2jymsdq", "uuid": "112ci-4zz18-fks9mewtw155pvx", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:30:17.589462000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:30:17.589462000Z", "name": "Saved at 2017-11-09 11:30:17 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "btktwjclv063s1rd6duvk51v3", "uuid": "112ci-4zz18-kp356e0q2wdl2df", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:29:26.820481000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:29:26.820481000Z", "name": "Saved at 2017-11-09 11:29:25 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "bob83na42pufqli1a5buxryvm", "uuid": "112ci-4zz18-0ey8ob38xf7surq", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:08:53.781498000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:08:53.781498000Z", "name": "Saved at 2017-11-09 11:08:52 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "7pl1x327eeutqtsjppdj284g8", "uuid": "112ci-4zz18-wu2n0fv3cewna1n", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T11:08:33.423284000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T11:08:33.423284000Z", "name": "Saved at 2017-11-09 11:08:33 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "2wg1wn2o18ubrgbhbqwwsslhf", "uuid": "112ci-4zz18-hyybo6yuvkx4hrm", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:44:53.096798000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:44:53.096798000Z", "name": "Saved at 2017-11-09 10:44:51 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": 
null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "8jk0at4e69cwjyjamvm4wz2oj", "uuid": "112ci-4zz18-h3gjq7gzd4syanw", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:41:31.278281000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:41:31.278281000Z", "name": "Saved at 2017-11-09 10:41:30 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "be57zhzufz2hp1tbdwidoro5j", "uuid": "112ci-4zz18-jinwyyaeigjs1yg", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:41:07.083017000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:41:07.083017000Z", "name": "Saved at 2017-11-09 10:41:06 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "29lj2roie4cygo5ffgrduflly", "uuid": "112ci-4zz18-etf8aghyxlfxvo1", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:40:31.710865000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:40:31.710865000Z", "name": "Saved at 2017-11-09 10:40:31 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "6div78e1nhusii4x1xkp3rg2v", "uuid": "112ci-4zz18-jtbn4edpkkhbm9b", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:39:36.999602000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:39:36.999602000Z", "name": "Saved at 2017-11-09 10:39:36 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "12wlbsxlmy3sze4v2m0ua7ake", "uuid": "112ci-4zz18-whdleimp34hiqp6", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:19:52.879907000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:19:52.879907000Z", "name": "Saved at 2017-11-09 10:19:52 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "9bv1bw9afb3w84gu55uzcgd6h", 
"uuid": "112ci-4zz18-kj8dz72zpo5kbtm", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T10:16:31.558621000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T10:16:31.558621000Z", "name": "Saved at 2017-11-09 10:16:30 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "683d77tvlhe97etk9bk2bx8ds", "uuid": "112ci-4zz18-tr306nau9hrr437", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:59:44.978811000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:59:44.978811000Z", "name": "Saved at 2017-11-09 09:59:44 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "1m34v9jbna2v7gv7auio54i8w", "uuid": "112ci-4zz18-oxuk69569mxztp0", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:59:30.774888000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:59:30.774888000Z", "name": "Saved at 2017-11-09 09:59:30 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "7l2a9fhqmxg7ghn7osx0s19v4", "uuid": "112ci-4zz18-wf8sl6xbyfwjyer", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:58:21.496088000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:58:21.496088000Z", "name": "Saved at 2017-11-09 09:58:20 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "33dw426fhs2vlb50b6301ukn0", "uuid": "112ci-4zz18-drpia2es1hp9ydi", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:56:08.506505000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:56:08.506505000Z", "name": "Saved at 2017-11-09 09:56:08 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "2437tnhn2gmti52lpm8nfq9ct", "uuid": "112ci-4zz18-5b4px2i2dwyidfi", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:54:06.651026000Z", "modified_by_client_uuid": 
"112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:54:06.651026000Z", "name": "Saved at 2017-11-09 09:54:06 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "7e0k48zu93o57zudxjp1yrgjq", "uuid": "112ci-4zz18-94oslnwnxe1f9wp", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:40:04.240297000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:40:04.240297000Z", "name": "Saved at 2017-11-09 09:39:58 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "cuirr803f54e89reakuq50oaq", "uuid": "112ci-4zz18-2fk0d5d4jjc1fmq", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:36:14.952671000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:36:14.952671000Z", "name": "Saved at 2017-11-09 09:36:08 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "3bi5xd8ezxrazk5266cwzn4s4", "uuid": "112ci-4zz18-xp9pu81xyc5h422", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:35:29.552746000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:35:29.552746000Z", "name": "Saved at 2017-11-09 09:35:29 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "59uaoxy6uh82i6lrvr3ht8gz1", "uuid": "112ci-4zz18-znb4lo0if2as58c", "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz", "created_at": "2017-11-09T09:31:08.109971000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-09T09:31:08.109971000Z", "name": "Saved at 2017-11-09 09:31:06 UTC by VirtualBox", "description": null, "properties": {}, "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "dksrh8jznxoaidl29i1vv5904", "uuid": "112ci-4zz18-6pvl5ea5u932qzi", "owner_uuid": "112ci-j7d0g-tw71k7mxii6fqgx", "created_at": "2017-11-08T12:48:32.238698000Z", "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy", "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz", "modified_at": "2017-11-08T12:50:23.946608000Z", "name": "New collection", 
"description": null, "properties": {}, "portable_data_hash": "18c037c51c3f74be53ea2b115afd0c5f+69", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#collection", "etag": "1w1rhhd6oql4ceb7h9t16sf0q", "uuid": "112ci-4zz18-wq5pyrxfv1t9isu", "owner_uuid": "112ci-j7d0g-anonymouspublic", "created_at": "2017-11-03T10:03:20.364737000Z", "modified_by_client_uuid": null, "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:03:20.364737000Z", "name": null, "description": null, "properties": {}, "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "replication_desired": null, "replication_confirmed": null, "replication_confirmed_at": null, "delete_at": null, "trash_at": null, "is_trashed": false } ], "items_available": 41 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/groups-get.json ================================================ { "kind": "arvados#group", "etag": "3hw0vk4mbl0ofvia5k6x4dwrx", "uuid": "ardev-j7d0g-bmg3pfqtx3ivczp", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-03-29T11:09:05.984597000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-03-29T11:09:05.984597000Z", "name": "TestGroup1", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/groups-list.json ================================================ { "kind": "arvados#groupList", "etag": "", "offset": 0, "limit": 100, "items": [ { "kind": "arvados#group", "etag": "68vubv3iw7663763bozxebmyf", "uuid": "ardev-j7d0g-ylx7wnu1moge2di", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-18T09:09:21.126649000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-18T09:09:21.126649000Z", "name": "TestProject1", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "68q7r8r37u9hckr2zsynvton3", "uuid": "ardev-j7d0g-mnzhga726itrbrq", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T12:11:24.389594000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T12:11:24.389594000Z", "name": "TestProject2", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "ef4vzx5gyudkrg9zml0zdv6qu", "uuid": "ardev-j7d0g-0w9m1sz46ljtdnm", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T12:08:39.066802000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T12:08:39.066802000Z", "name": "TestProject3", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", 
"ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "6h6h4ta6yyf9058delxk8fnqs", "uuid": "ardev-j7d0g-r20iem5ou6h5wao", "owner_uuid": "ardev-j7d0g-j7drd8yikkp6evd", "created_at": "2018-04-17T12:03:39.647244000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T12:03:39.647244000Z", "name": "TestProject4", "group_class": "project", "description": null, "writable_by": [ "ardev-j7d0g-j7drd8yikkp6evd", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "6se2y8f9o7uu06pbopgq56xds", "uuid": "ardev-j7d0g-j7drd8yikkp6evd", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T11:58:31.339515000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T11:58:31.339515000Z", "name": "TestProject5", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "2si26vaig3vig9266pqkqh2gy", "uuid": "ardev-j7d0g-kh1g7i5va870xt0", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T10:56:54.391676000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T10:56:54.391676000Z", "name": "TestProject6", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "edgnz6q0vt2u3o13ujtfohb75", "uuid": "ardev-j7d0g-sclkdyuwm4h2m78", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T10:27:15.914517000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T10:27:15.914517000Z", "name": "TestProject7", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "39ig9ttgec6lbe096uetn2cb9", "uuid": "ardev-j7d0g-593khc577zuyyhe", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T10:27:03.858203000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T10:27:03.858203000Z", "name": "TestProject8", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "1dpr8v6tx6pta0fozq93eyeou", "uuid": "ardev-j7d0g-iotds0tm559dbz7", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T10:26:25.180623000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T10:26:25.180623000Z", "name": "TestProject9", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": 
"dizbavs2opfe1wpx6thocfki0", "uuid": "ardev-j7d0g-gbqay74778tonb8", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T10:26:06.435961000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T10:26:06.435961000Z", "name": "TestProject10", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "6xue8m3lx9qpptfvdf13val5t", "uuid": "ardev-j7d0g-fmq1t0jlznehbdm", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-17T10:25:55.546399000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-17T10:25:55.546399000Z", "name": "TestProject11", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "2gqix9e4m023usi9exhrsjx6z", "uuid": "ardev-j7d0g-vxju56ch64u51gq", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-16T14:09:49.700566000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-16T14:09:49.700566000Z", "name": "TestProject12", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "73n8x82814o6ihld0kltf468d", "uuid": "ardev-j7d0g-g8m4w0d22gv6fbj", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-11T15:02:35.016850000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-11T15:02:35.016850000Z", "name": "TestProject13", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "91f7uwq7pj3d3ez1u4smjg3ch", "uuid": "ardev-j7d0g-lstqed4y78khaqm", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-06T15:29:27.754408000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-06T15:29:27.754408000Z", "name": "TestProject14", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "7dbxhvbcfaogwnvo8k4mtqthk", "uuid": "ardev-j7d0g-0jbezvnq8i07l7p", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-04-05T09:32:46.946417000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-04-05T09:32:46.946417000Z", "name": "TestProject15", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "dhfu203rckzdzvx832wm7jv59", "uuid": "ardev-j7d0g-72dxer22g6iltqz", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", 
"created_at": "2018-03-29T11:27:02.482218000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-03-29T13:17:00.045606000Z", "name": "TestProject16", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "7l9oxbdf4e1m9ddnujokf7czz", "uuid": "ardev-j7d0g-nebzwquxtq1v3o5", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-03-29T11:11:26.235411000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-03-29T11:11:26.235411000Z", "name": "TestProject17", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "83862x2o4453mja2rvypjl5gv", "uuid": "ardev-j7d0g-5589c8dmxevecqh", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-03-29T11:10:58.496482000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-03-29T11:10:58.496482000Z", "name": "TestProject18", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "3hw0vk4mbl0ofvia5k6x4dwrx", "uuid": "ardev-j7d0g-bmg3pfqtx3ivczp", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-03-29T11:09:05.984597000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-03-29T11:09:05.984597000Z", "name": "TestProject19", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false }, { "kind": "arvados#group", "etag": "6p9xbxpttj782mpqs537gfvc6", "uuid": "ardev-j7d0g-mfitz2oa4rpycou", "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "created_at": "2018-03-29T11:00:19.809612000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2018-03-29T11:00:19.809612000Z", "name": "TestProject20", "group_class": "project", "description": null, "writable_by": [ "ardev-tpzed-n3kzq4fvoks3uw4", "ardev-tpzed-n3kzq4fvoks3uw4" ], "delete_at": null, "trash_at": null, "is_trashed": false } ], "items_available": 20 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt ================================================ Sample text file to test keep client. 
================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-client-upload-response.json ================================================ Created ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json ================================================ { "kind": "arvados#keepServiceList", "etag": "", "offset": null, "limit": null, "items": [ { "kind": "arvados#keepService", "etag": "bjzh7og2d9z949lbd38vnnslt", "uuid": "112ci-bi6l4-hv02fg8sbti8ykk", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.314229000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.314229000Z", "service_host": "localhost", "service_port": 9000, "service_ssl_flag": false, "service_type": "disk", "read_only": false }, { "kind": "arvados#keepService", "etag": "7m64l69kko4bytpsykf8cay7t", "uuid": "112ci-bi6l4-f0r03wrqymotwql", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.351577000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.351577000Z", "service_host": "localhost", "service_port": 9001, "service_ssl_flag": false, "service_type": "disk", "read_only": false } ], "items_available": 2 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json ================================================ { "kind": "arvados#keepServiceList", "etag": "", "offset": null, "limit": null, "items": [ { "kind": "arvados#keepService", "etag": "bjzh7og2d9z949lbd38vnnslt", "uuid": "112ci-bi6l4-hv02fg8sbti8ykk", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.314229000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.314229000Z", "service_host": "localhost", "service_port": 9000, "service_ssl_flag": false, "service_type": "disk", "read_only": false }, { "kind": "arvados#keepService", "etag": "7m64l69kko4bytpsykf8cay7t", "uuid": "112ci-bi6l4-f0r03wrqymotwql", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.351577000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.351577000Z", "service_host": "localhost", "service_port": 9000, "service_ssl_flag": false, "service_type": "gpfs", "read_only": false } ], "items_available": 2 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json ================================================ { "kind": "arvados#keepService", "etag": "bjzh7og2d9z949lbd38vnnslt", "uuid": "112ci-bi6l4-hv02fg8sbti8ykk", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.314229000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.314229000Z", "service_host": "10.0.2.15", "service_port": 9000, "service_ssl_flag": false, "service_type": "disk", "read_only": false } ================================================ 
FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json ================================================ { "kind": "arvados#keepServiceList", "etag": "", "offset": 0, "limit": 100, "items": [ { "kind": "arvados#keepService", "etag": "7m64l69kko4bytpsykf8cay7t", "uuid": "112ci-bi6l4-f0r03wrqymotwql", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.351577000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.351577000Z", "service_host": "10.0.2.15", "service_port": 9000, "service_ssl_flag": false, "service_type": "disk", "read_only": false }, { "kind": "arvados#keepService", "etag": "bjzh7og2d9z949lbd38vnnslt", "uuid": "112ci-bi6l4-hv02fg8sbti8ykk", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:48.314229000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:48.314229000Z", "service_host": "10.0.2.15", "service_port": 9001, "service_ssl_flag": false, "service_type": "disk", "read_only": false }, { "kind": "arvados#keepService", "etag": "4be61qkpt6nzdfff4vj9nkpmj", "uuid": "112ci-bi6l4-ko27cfbsf2ssx2m", "owner_uuid": "112ci-tpzed-000000000000000", "created_at": "2017-11-03T10:04:36.355045000Z", "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt", "modified_by_user_uuid": "112ci-tpzed-000000000000000", "modified_at": "2017-11-03T10:04:36.355045000Z", "service_host": "10.0.2.15", "service_port": 9002, "service_ssl_flag": false, "service_type": "proxy", "read_only": false } ], "items_available": 3 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json ================================================ { "kind": "arvados#keepServiceList", "etag": "", "offset": null, "limit": null, "items": [], "items_available": 0 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/links-create.json ================================================ { "kind": "arvados#link", "etag": "zw1rlnbig0kpm9btw8us3pn9", "uuid": "arkau-o0j2j-huxuaxbi46s1yml", "owner_uuid": "arkau-tpzed-000000000000000", "created_at": "2021-11-30T08:45:04.373354745Z", "modified_by_client_uuid": null, "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2021-11-30T08:45:04.374489000Z", "tail_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "link_class": "star", "name": "Star Link", "head_uuid": "arkau-j7d0g-fcedae2076pw56h", "head_kind": "arvados#group", "tail_kind": "arvados#user", "properties": {} } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/links-get.json ================================================ { "kind": "arvados#link", "etag": "zw1rlnbig0kpm9btw8us3pn9", "uuid": "arkau-o0j2j-huxuaxbi46s1yml", "owner_uuid": "arkau-tpzed-000000000000000", "created_at": "2021-11-30T08:45:04.373354745Z", "modified_by_client_uuid": null, "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "modified_at": "2021-11-30T08:45:04.374489000Z", "tail_uuid": "ardev-tpzed-n3kzq4fvoks3uw4", "link_class": "permission", "name": "can_read", "head_uuid": "arkau-j7d0g-fcedae2076pw56h", "head_kind": "arvados#group", "tail_kind": "arvados#user", "properties": {} } 
================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/links-list.json ================================================ { "kind": "arvados#linkList", "etag": "", "offset": 0, "limit": 100, "items": [ { "kind": "arvados#link", "etag": "dkhtr9tvp9zfy0d90xjn7w1t7", "uuid": "arkau-o0j2j-x2b4rdadxs2fizn", "owner_uuid": "arkau-j7d0g-publicfavorites", "created_at": "2021-10-27T12:00:06.607794000Z", "modified_by_client_uuid": null, "modified_by_user_uuid": "arlog-tpzed-fyiau9qwo7ytntu", "modified_at": "2021-10-27T12:00:06.609840000Z", "tail_uuid": "arkau-j7d0g-publicfavorites", "link_class": "star", "name": "pRED Data Commons Service - Open access", "head_uuid": "arkau-j7d0g-sfhw8b1uson0hwh", "head_kind": "arvados#group", "tail_kind": "arvados#group", "properties": {} }, { "kind": "arvados#link", "etag": "9nt0c2xn5oz1jzjzawlycmehz", "uuid": "arkau-o0j2j-r5am4lz9gnu488k", "owner_uuid": "arkau-j7d0g-publicfavorites", "created_at": "2021-06-23T14:58:06.189520000Z", "modified_by_client_uuid": null, "modified_by_user_uuid": "arlog-tpzed-xzjyeljl6co7vlz", "modified_at": "2021-06-23T14:58:06.196208000Z", "tail_uuid": "arkau-j7d0g-publicfavorites", "link_class": "star", "name": "Open Targets Genetics", "head_uuid": "arkau-j7d0g-pj5wysmpy5wn8yo", "head_kind": "arvados#group", "tail_kind": "arvados#group", "properties": {} } ], "items_available": 2 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-create.json ================================================ { "kind": "arvados#user", "etag": "b21emst9eu9u1wdpqcz6la583", "uuid": "ardev-tpzed-q6dvn7sby55up1b", "owner_uuid": "ardev-tpzed-000000000000000", "created_at": "2017-10-30T19:42:43.324740000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-o3km4ug9jhs189j", "modified_at": "2017-10-31T09:01:03.985749000Z", "email": "example@email.com", "username": "johnwayne", "full_name": "John Wayne", "first_name": "John", "last_name": "Wayne", "identity_url": "ardev-tpzed-r09t5ztf5qd3rlj", "is_active": true, "is_admin": null, "is_invited": true, "prefs": {}, "writable_by": [ "ardev-tpzed-000000000000000", "ardev-tpzed-q6dvn7sby55up1b", "ardev-j7d0g-000000000000000" ] } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-get.json ================================================ { "kind": "arvados#user", "etag": "b21emst9eu9u1wdpqcz6la583", "uuid": "ardev-tpzed-q6dvn7sby55up1b", "owner_uuid": "ardev-tpzed-000000000000000", "created_at": "2017-10-30T19:42:43.324740000Z", "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay", "modified_by_user_uuid": "ardev-tpzed-o3km4ug9jhs189j", "modified_at": "2017-10-31T09:01:03.985749000Z", "email": "example@email.com", "username": "johnwayne", "full_name": "John Wayne", "first_name": "John", "last_name": "Wayne", "identity_url": "ardev-tpzed-r09t5ztf5qd3rlj", "is_active": true, "is_admin": null, "is_invited": true, "prefs": {}, "writable_by": [ "ardev-tpzed-000000000000000", "ardev-tpzed-q6dvn7sby55up1b", "ardev-j7d0g-000000000000000" ] } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-list.json ================================================ { "kind": "arvados#userList", "etag": "", "offset": 0, "limit": 100, "items": [ { "kind": "arvados#user", "uuid": 
"ardev-tpzed-12389ux30402est", "email": "test.user@email.com", "first_name": "Test", "last_name": "User", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123vn7sby55up1b", "email": "test.user1@email.com", "first_name": "Test1", "last_name": "User1", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123g70lq1m3c6fz", "email": "test.user2@email.com", "first_name": "Test2", "last_name": "User2", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-1233zsoudkgq92e", "email": "test.user3@email.com", "first_name": "Test3", "last_name": "User3", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-1234xjvs0clppd3", "email": "test.user4@email.com", "first_name": "Test4", "last_name": "User4", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123bpggscmn6z8m", "email": "test.user5@email.com", "first_name": "Test5", "last_name": "User5", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-1231uysivaz6ipi", "email": "test.user6@email.com", "first_name": "Test6", "last_name": "User6", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123b0a1wu0q6cm4", "email": "test.user7@email.com", "first_name": "Test7", "last_name": "User7", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123bz6n6si24t6v", "email": "test.user8@email.com", "first_name": "Test8", "last_name": "User8", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123lxhzifligheu", "email": "test.user9@email.com", "first_name": "Test9", "last_name": "User9", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123gaz31qbopewh", "email": "test.user10@email.com", "first_name": "Test10", "last_name": "User10", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-123dmcf65z973uo", "email": "test.user11@email.com", "first_name": "Test11", "last_name": "User11", "is_active": true }, { "kind": "arvados#user", "uuid": "ardev-tpzed-1239y3lj7ybpyg8", "email": "test.user12@email.com", "first_name": "Test12", "last_name": "User12", "is_active": true } ], "items_available": 13 } ================================================ FILE: contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-system.json ================================================ { "kind": "arvados#user", "etag": "2ehmra38iwfuexvz1cjno5xua", "uuid": "ardev-tpzed-000000000000000", "owner_uuid": "ardev-tpzed-000000000000000", "created_at": "2016-10-19T07:48:04.838534000Z", "modified_by_client_uuid": null, "modified_by_user_uuid": "ardev-tpzed-000000000000000", "modified_at": "2016-10-19T07:48:04.833164000Z", "email": "root", "username": null, "full_name": "root", "first_name": "root", "last_name": "", "identity_url": null, "is_active": true, "is_admin": true, "is_invited": true, "prefs": {}, "writable_by": [ "ardev-tpzed-000000000000000" ] } ================================================ FILE: doc/Gemfile ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: CC-BY-SA-3.0 source 'https://rubygems.org' gem 'zenweb' gem 'liquid', '~>4.0.0' gem 'RedCloth' gem 'colorize' ================================================ FILE: doc/README.textile ================================================ ###. Copyright (C) The Arvados Authors. All rights reserved. .... .... SPDX-License-Identifier: CC-BY-SA-3.0 h1. Arvados documentation This is the source code for "doc.arvados.org":http://doc.arvados.org. 
Here's how to build the HTML pages locally so you can preview your updates before you commit and push. Additional information is available on the "'Documentation' page on the Arvados wiki":https://dev.arvados.org/projects/arvados/wiki/Documentation. h2. Install dependencies To build the core Arvados documentation:
arvados/doc$ sudo apt-get install build-essential libcurl4-openssl-dev libgnutls28-dev libssl-dev
arvados/doc$ bundle install
SDK reference documentation has additional, optional build requirements. h3. Java SDK documentation
$ sudo apt install gradle
h3. Python SDK documentation
arvados/doc$ sudo apt install python3-venv
arvados/doc$ python3 -m venv .venv
arvados/doc$ .venv/bin/pip install pdoc setuptools
Then you must activate the virtualenv (e.g., run @. .venv/bin/activate@) before you run the @bundle exec rake@ commands below. h3. R SDK documentation
$ sudo apt install r-cran-devtools r-cran-roxygen2 r-cran-knitr r-cran-markdown r-cran-xml
h2. Generate HTML pages
arvados/doc$ bundle exec rake
Alternatively, to make the documentation browsable on the local filesystem:
arvados/doc$ bundle exec rake generate baseurl=$PWD/.site
h3. Selecting SDK documentation to build By default, the build process will try to detect what SDK documentation it can build, build all of that, and skip the rest. You can specify exactly what you want to build using the @sdks@ environment variable. This is a comma- or space-separated list of SDKs you want to build documentation for. Valid values are @java@, @python@, @r@, @all@, or @none@. @all@ is a shortcut for listing all the valid SDKs. @none@ means do not build documentation for any SDK. For example, to build documentation for the Java and Python SDKs, but skip R:
arvados/doc$ bundle exec rake generate baseurl=$PWD/.site sdks=java,python
Specifying @sdks@ skips the build detection logic. If the Rakefile cannot build the requested SDK documentation, the build will fail. For backwards compatibility, if you do not specify @sdks@, but the @NO_SDK@ environment variable is set, or the @no-sdk@ file exists, the build will run as if you set @sdks=none@. h2. Run linkchecker If you have "Linkchecker":http://wummel.github.io/linkchecker/ installed on your system, you can run it against the documentation:
arvados/doc$ bundle exec rake linkchecker baseurl=file://$PWD/.site
Please note that this will regenerate your $PWD/.site directory. h2. Preview HTML pages
arvados/doc$ bundle exec rake run
[2014-03-10 09:03:41] INFO  WEBrick 1.3.1
[2014-03-10 09:03:41] INFO  ruby 2.1.1 (2014-02-24) [x86_64-linux]
[2014-03-10 09:03:41] INFO  WEBrick::HTTPServer#start: pid=8926 port=8000
Preview the rendered pages at "http://localhost:8000":http://localhost:8000. h2. Publish HTML pages inside Workbench (or some other web site) You can set @baseurl@ (the URL prefix for all internal links), @arvados_cluster_uuid@, @arvados_api_host@ and @arvados_workbench_host@ without changing @_config.yml@:
arvados/doc$ bundle exec rake generate baseurl=/doc arvados_api_host=xyzzy.arvadosapi.com
h2. Delete generated files
arvados/doc$ bundle exec rake realclean
================================================ FILE: doc/Rakefile ================================================ #!/usr/bin/env rake # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: CC-BY-SA-3.0 # As a convenience to the documentation writer, you can touch a file # called 'no-sdk' in the 'doc' directory and it will suppress # generating the documentation for the SDKs, which (the R docs # especially) take a fair bit of time and slow down the edit-preview # cycle. # # To generate and view the documentation locally, run this command # # rake && sensible-browser .site/index.html # # Or alternatively: # # baseurl=http://localhost:8000 rake && rake run # # and then visit http://localhost:8000 in a browser. require "uri" require "rubygems" require "colorize" def can_run?(*command, **options) options = { :in => :close, :out => [File::NULL, "w"], }.merge(options) system(*command, **options) end class JavaSDK def self.build_path "contrib/java-sdk-v2" end def self.can_build? can_run?("gradle", "--version") end def self.doc_path "sdk/java-v2" end end class PythonSDK def self.build_path "sdk/python/arvados" end def self.can_build? can_run?("./pysdk_pdoc.py", "--version") end def self.doc_path "sdk/python/arvados" end end class RSDK def self.build_path "contrib/R-sdk" end def self.can_build? can_run?("make", "can_run", chdir: File.join("..", self.build_path)) end def self.doc_path "sdk/R" end end $build_sdks = begin no_sdk_env = ENV.fetch("NO_SDK", "") sdks_env = ENV.fetch("sdks", "") all_sdks = Hash[[JavaSDK, PythonSDK, RSDK].map { |c| [c.name, c] }] if no_sdk_env != "" and sdks_env != "" fail "both NO_SDK and sdks defined in environment" elsif sdks_env != "" # Nothing to do elsif no_sdk_env != "" or File.exist?("no-sdk") sdks_env = "none" end if sdks_env == "" all_sdks.each_pair.filter_map do |name, sdk| if sdk.can_build? sdk else puts "Warning: cannot build #{name.gsub(/SDK$/, ' SDK')} documentation, skipping".colorize(:light_red) end end else wanted_sdks = [] sdks_env.split(/\s*[,\s]\s*/).each do |key| key = "#{key.capitalize}SDK" if key == "AllSDK" wanted_sdks = all_sdks.values elsif key == "NoneSDK" wanted_sdks.clear elsif sdk = all_sdks[key] wanted_sdks << sdk else fail "cannot build documentation for unknown #{key}" end end wanted_sdks end end module Zenweb class Site @binary_files = %w[png jpg gif eot svg ttf woff2? ico pdf m4a t?gz xlsx] end end task :generate => [ :realclean, 'sdk/python/arvados.html', 'sdk/R/arvados/index.html', 'sdk/java-v2/javadoc/index.html' ] do vars = ['baseurl', 'arvados_cluster_uuid', 'arvados_api_host', 'arvados_workbench_host'] if ! ENV.key?('baseurl') || ENV['baseurl'] == "" if !ENV.key?('WORKSPACE') || ENV['WORKSPACE'] == "" puts "The `baseurl` variable was not specified and the `WORKSPACE` environment variable is not set. 
Defaulting `baseurl` to file://#{pwd}/.site" ENV['baseurl'] = "file://#{pwd}/.site/" else puts "The `baseurl` variable was not specified, defaulting to a value derived from the `WORKSPACE` environment variable" ENV['baseurl'] = "file://#{ENV['WORKSPACE']}/doc/.site/" end end vars.each do |v| if ENV[v] website.config.h[v] = ENV[v] end end end file ["install/new_cluster_checklist_Azure.xlsx", "install/new_cluster_checklist_AWS.xlsx"] do |t| cp(t, t) end file "sdk/python/arvados.html" do |t| next unless $build_sdks.include?(PythonSDK) raise unless system("pip", "install", "../sdk/python", out: :err) raise unless system("python3", "pysdk_pdoc.py", out: :err) end file "sdk/R/arvados/index.html" do |t| next unless $build_sdks.include?(RSDK) Dir.mkdir("sdk/R") Dir.mkdir("sdk/R/arvados") cp('css/R.css', 'sdk/R/arvados') raise unless system("make", "man", chdir: "../contrib/R-sdk", out: :err) docnames = Dir.glob("../contrib/R-sdk/man/*.Rd").map { |rd| File.basename(rd, ".Rd") }.sort docnames.each do |basename| raise unless system( "R", "CMD", "Rdconv", "--type=html", "man/#{basename}.Rd", chdir: "../contrib/R-sdk", out: ["sdk/R/arvados/#{basename}.html", "w"], ) end File.open("sdk/R/index.html.md", "w") do |fn| fn.write(<<-EOF --- layout: default navsection: sdk navmenu: R title: "R SDK Overview" ... EOF ) File.open("../contrib/R-sdk/README.md", "r") do |rd| fn.write(rd.read) end end File.open("sdk/R/arvados/index.html.textile.liquid", "w") do |fn| fn.write(<<-EOF --- layout: default navsection: sdk navmenu: R title: "R Reference" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} EOF ) docnames.each do |basename| fn.printf("* \"%s\":%s.html\n", basename, basename) end end end file "sdk/java-v2/javadoc/index.html" do |t| next unless $build_sdks.include?(JavaSDK) tgt = Dir.pwd docfiles = [] Dir.chdir("../contrib/java-sdk-v2") do STDERR.puts `gradle javadoc 2>&1` raise if $? != 0 puts `sed -i "s/@import.*dejavu.css.*//g" build/docs/javadoc/stylesheet.css` raise if $? != 0 end cp_r("../contrib/java-sdk-v2/build/docs/javadoc", "sdk/java-v2") raise if $? != 0 end task :linkchecker => [ :generate ] do # we need --check-extern to check relative links, weird but true opts = [ "--check-extern", "--ignore-url=!^file://", ] ([JavaSDK, PythonSDK, RSDK] - $build_sdks).map(&:doc_path).each do |sdk_path| sdk_url = URI.join(ENV["baseurl"], sdk_path) url_re = Regexp.escape(sdk_url.to_s) opts << "--ignore-url=^#{url_re}[./]" end result = system( "linkchecker", *opts, "index.html", chdir: ".site", ) if result.nil? fail "could not run linkchecker command (is it installed?)" elsif !result fail "linkchecker exited #{$?.exitstatus}" end end task :import_vscode_training do Dir.chdir("user") do rm_rf "arvados-vscode-cwl-training" `git clone https://github.com/arvados/arvados-vscode-cwl-training` githash = `git --git-dir arvados-vscode-cwl-training/.git log -n1 --format=%H HEAD` File.open("cwl/arvados-vscode-training.html.md.liquid", "w") do |fn| File.open("arvados-vscode-cwl-training/README.md", "r") do |rd| fn.write(<<-EOF --- layout: default navsection: userguide title: "Developing CWL Workflows with VSCode" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. 
SPDX-License-Identifier: CC-BY-SA-3.0 Imported from https://github.com/arvados/arvados-vscode-cwl-training git hash: #{githash} {% endcomment %} EOF ) fn.write(rd.read()) end end rm_rf "arvados-vscode-cwl-training" end end task :clean do rm_rf "sdk/python/arvados" rm_f "sdk/python/arvados.html" rm_f "sdk/python/index.html" rm_rf "sdk/R" rm_rf "sdk/java-v2/javadoc" end require "zenweb/tasks" load "zenweb-textile.rb" load "zenweb-liquid.rb" load "zenweb-fix-body.rb" task :extra_wirings do $website.pages["sdk/python/python.html.textile.liquid"].depends_on("sdk/python/arvados.html") end ================================================ FILE: doc/_config.yml ================================================ # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: CC-BY-SA-3.0 # baseurl is the location of the generated site from the browser's # perspective (e.g., http://doc.arvados.org or # file:///tmp/arvados/doc/.site). You can also set these on the # command line: $ rake generate baseurl=/example # arvados_api_host=example.com baseurl: current_version: all_versions: latest_version: arvados_api_host: localhost arvados_cluster_uuid: local arvados_workbench_host: http://localhost google_analytics: "G-EFLSBXJ5SQ" matomo_analytics_url: "https://piwik.arvados.org" matomo_analytics_siteid: "3" exclude: ["Rakefile", "tmp", "vendor"] navbar: userguide: - Welcome: - user/index.html.textile.liquid - user/getting_started/community.html.textile.liquid - Walkthrough: - user/getting_started/workbench.html.textile.liquid - user/tutorials/wgs-tutorial.html.textile.liquid - Working at the Command Line: - user/getting_started/setup-cli.html.textile.liquid - user/reference/api-tokens.html.textile.liquid - user/getting_started/check-environment.html.textile.liquid - Working with data sets: - user/tutorials/tutorial-projects.html.textile.liquid - user/tutorials/tutorial-keep.html.textile.liquid - user/tutorials/tutorial-keep-get.html.textile.liquid - user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid - user/topics/arv-copy.html.textile.liquid - user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid - user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid - user/tutorials/tutorial-keep-mount-windows.html.textile.liquid - user/topics/collection-versioning.html.textile.liquid - user/topics/storage-classes.html.textile.liquid - Data Analysis with Workflows: - user/tutorials/tutorial-workflow-workbench.html.textile.liquid - user/cwl/cwl-runner.html.textile.liquid - user/cwl/crunchstat-summary.html.textile.liquid - user/debugging/container-shell-access.html.textile.liquid - user/topics/external-inputs.html.textile.liquid - user/topics/service-containers.html.textile.liquid - user/cwl/costanalyzer.html.textile.liquid - user/cwl/federated-workflows.html.textile.liquid - user/cwl/cwl-run-options.html.textile.liquid - Common Workflow Language: - user/cwl/rnaseq-cwl-training.html.textile.liquid - user/cwl/arvados-vscode-training.html.md.liquid - user/topics/arv-docker.html.textile.liquid - user/cwl/cwl-style.html.textile.liquid - user/tutorials/writing-cwl-workflow.html.textile.liquid - user/cwl/cwl-extensions.html.textile.liquid - user/cwl/cwl-versions.html.textile.liquid - Access an Arvados virtual machine: - user/getting_started/vm-login-with-webshell.html.textile.liquid - user/getting_started/ssh-access-unix.html.textile.liquid - user/getting_started/ssh-access-windows.html.textile.liquid - Reference: - user/topics/workbench-migration.html.textile.liquid -
user/topics/link-accounts.html.textile.liquid - user/reference/cookbook.html.textile.liquid - Arvados License: - user/copying/copying.html.textile.liquid - user/copying/agpl-3.0.html - user/copying/LICENSE-2.0.html - user/copying/by-sa-3.0.html sdk: - Overview: - sdk/index.html.textile.liquid - Python: - sdk/python/sdk-python.html.textile.liquid - sdk/python/api-client.html.textile.liquid - sdk/python/cookbook.html.textile.liquid - sdk/python/python.html.textile.liquid - sdk/python/arvados-cwl-runner.html.textile.liquid - sdk/python/events.html.textile.liquid - Command line tools (CLI SDK): - sdk/cli/install.html.textile.liquid - sdk/cli/index.html.textile.liquid - sdk/cli/reference.html.textile.liquid - sdk/cli/subcommands.html.textile.liquid - FUSE Driver: - sdk/fuse/install.html.textile.liquid - sdk/fuse/options.html.textile.liquid - Go: - sdk/go/index.html.textile.liquid - sdk/go/example.html.textile.liquid - Java: - sdk/java-v2/index.html.textile.liquid - sdk/java-v2/example.html.textile.liquid - sdk/java-v2/javadoc.html.textile.liquid - R: - sdk/R/index.html.md - sdk/R/arvados/index.html.textile.liquid - Ruby: - sdk/ruby/index.html.textile.liquid - sdk/ruby/example.html.textile.liquid api: - Concepts: - api/index.html.textile.liquid - api/tokens.html.textile.liquid - api/requests.html.textile.liquid - api/methods.html.textile.liquid - api/resources.html.textile.liquid - Permission and authentication: - api/methods/users.html.textile.liquid - api/methods/groups.html.textile.liquid - api/methods/api_client_authorizations.html.textile.liquid - api/methods/links.html.textile.liquid - api/methods/computed_permissions.html.textile.liquid - api/methods/authorized_keys.html.textile.liquid - api/methods/credentials.html.textile.liquid - api/methods/user_agreements.html.textile.liquid - api/methods/virtual_machines.html.textile.liquid - Data management: - api/keep-webdav.html.textile.liquid - api/keep-s3.html.textile.liquid - api/keep-web-urls.html.textile.liquid - api/projects.html.textile.liquid - api/properties.html.textile.liquid - api/methods/collections.html.textile.liquid - api/methods/logs.html.textile.liquid - api/methods/keep_services.html.textile.liquid - Container engine: - api/methods/container_requests.html.textile.liquid - api/methods/containers.html.textile.liquid - api/methods/workflows.html.textile.liquid - api/dispatch.html.textile.liquid architecture: - Topics: - architecture/index.html.textile.liquid - Storage in Keep: - architecture/storage.html.textile.liquid - architecture/keep-components-overview.html.textile.liquid - architecture/keep-clients.html.textile.liquid - architecture/keep-data-lifecycle.html.textile.liquid - architecture/manifest-format.html.textile.liquid - Computation with Crunch: - api/execution.html.textile.liquid - architecture/dispatchcloud.html.textile.liquid - architecture/hpc.html.textile.liquid - architecture/singularity.html.textile.liquid - Other: - api/permission-model.html.textile.liquid - architecture/federation.html.textile.liquid admin: - Topics: - admin/index.html.textile.liquid - Users and Groups: - admin/user-management.html.textile.liquid - admin/user-management-cli.html.textile.liquid - admin/group-management.html.textile.liquid - admin/reassign-ownership.html.textile.liquid - admin/link-accounts.html.textile.liquid - admin/federation.html.textile.liquid - admin/migrating-providers.html.textile.liquid - user/topics/arvados-sync-external-sources.html.textile.liquid - admin/scoped-tokens.html.textile.liquid - 
admin/token-expiration-policy.html.textile.liquid - Monitoring: - admin/logging.html.textile.liquid - admin/metrics.html.textile.liquid - admin/health-checks.html.textile.liquid - admin/inspect.html.textile.liquid - admin/diagnostics.html.textile.liquid - admin/management-token.html.textile.liquid - admin/user-activity.html.textile.liquid - admin/memory-cpu-profiling.html.textile.liquid - Data Management: - admin/collection-versioning.html.textile.liquid - admin/collection-managed-properties.html.textile.liquid - admin/restricting-upload-download.html.textile.liquid - admin/keep-balance.html.textile.liquid - admin/controlling-container-reuse.html.textile.liquid - admin/logs-table-management.html.textile.liquid - admin/metadata-vocabulary.html.textile.liquid - admin/storage-classes.html.textile.liquid - admin/keep-recovering-data.html.textile.liquid - admin/keep-measuring-deduplication.html.textile.liquid - admin/keep-faster-gc-s3.html.textile.liquid - Cloud: - admin/spot-instances.html.textile.liquid - admin/cloudtest.html.textile.liquid - admin/dispatch.html.textile.liquid installguide: - Overview: - install/index.html.textile.liquid - Arvados Installer: - install/install-single-host.html.textile.liquid - install/install-multi-host.html.textile.liquid - Manual installation: - install/install-manual-prerequisites.html.textile.liquid - install/packages.html.textile.liquid - Configuration: - install/config.html.textile.liquid - admin/config-urls.html.textile.liquid - admin/config.html.textile.liquid - Maintenance and upgrading: - admin/upgrading.html.textile.liquid - admin/maintenance-and-upgrading.html.textile.liquid - Core: - install/install-api-server.html.textile.liquid - install/diagnostics.html.textile.liquid - Keep: - install/install-keepstore.html.textile.liquid - install/configure-fs-storage.html.textile.liquid - install/configure-s3-object-storage.html.textile.liquid - install/configure-azure-blob-storage.html.textile.liquid - install/install-keepproxy.html.textile.liquid - install/install-keep-web.html.textile.liquid - install/install-keep-balance.html.textile.liquid - User interface: - install/setup-login.html.textile.liquid - install/install-ws.html.textile.liquid - install/install-workbench2-app.html.textile.liquid - install/workbench.html.textile.liquid - Additional services: - install/install-shell-server.html.textile.liquid - install/install-webshell.html.textile.liquid - Containers API (cloud): - install/crunch2-cloud/install-compute-node.html.textile.liquid - install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid - Compute nodes (Slurm or LSF): - install/crunch2/install-compute-node-docker.html.textile.liquid - install/crunch2/install-compute-node-singularity.html.textile.liquid - Containers API (Slurm): - install/crunch2-slurm/install-dispatch.html.textile.liquid - install/crunch2-slurm/configure-slurm.html.textile.liquid - install/crunch2-slurm/install-test.html.textile.liquid - Containers API (LSF): - install/crunch2-lsf/install-dispatch.html.textile.liquid - Additional configuration: - install/container-shell-access.html.textile.liquid - External dependencies: - install/install-postgresql.html.textile.liquid - install/ruby.html.textile.liquid - install/nginx.html.textile.liquid - install/install-docker.html.textile.liquid ================================================ FILE: doc/_includes/_admin_list_collections_without_property_py.liquid ================================================ #!/usr/bin/env python3 {% comment %} Copyright (C) The Arvados Authors. 
All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}
import arvados
import arvados.util as util

filters = [['properties.responsible_person_uuid', 'exists', False]]
# keyset_list_all returns an iterator; materialize it so len() works below.
cols = list(util.keyset_list_all(arvados.api().collections().list, filters=filters, select=['uuid', 'name'], order='uuid'))
print('Found {} collections:'.format(len(cols)))
for c in cols:
    print('{}, "{}"'.format(c['uuid'], c['name']))
================================================ FILE: doc/_includes/_admin_set_property_to_collections_under_project_py.liquid ================================================
#!/usr/bin/env python3
{% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}
import arvados
import arvados.util as util

def get_subproject_uuids(api, root_uuid):
    uuids = []
    groups = util.keyset_list_all(api.groups().list, filters=[['owner_uuid', '=', '{}'.format(root_uuid)]], select=['uuid'], order='uuid')
    for g in groups:
        uuids += ([g['uuid']] + get_subproject_uuids(api, g['uuid']))
    return uuids

def get_cols(api, filters):
    # Materialize the iterator so callers can take len() and iterate over it.
    cols = list(util.keyset_list_all(api.collections().list, filters=filters, select=['uuid', 'properties'], order='uuid'))
    return cols

# Search for collections on the project hierarchy rooted at root_uuid
root_uuid = 'zzzzz-j7d0g-ppppppppppppppp'
# Set the property to the UUID below
responsible_uuid = 'zzzzz-tpzed-xxxxxxxxxxxxxxx'
api = arvados.api()
for p_uuid in [root_uuid] + get_subproject_uuids(api, root_uuid):
    f = [['properties.responsible_person_uuid', 'exists', False], ['owner_uuid', '=', p_uuid]]
    cols = get_cols(api, f)
    print('Found {} collections owned by {}'.format(len(cols), p_uuid))
    for c in cols:
        print(' - Updating collection {}'.format(c['uuid']))
        props = c['properties']
        props['responsible_person_uuid'] = responsible_uuid
        api.collections().update(uuid=c['uuid'], body={'properties': props}).execute()
================================================ FILE: doc/_includes/_admin_update_collection_property_py.liquid ================================================
#!/usr/bin/env python3
{% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}
import arvados
import arvados.util as util

old_uuid = 'zzzzz-tpzed-xxxxxxxxxxxxxxx'
new_uuid = 'zzzzz-tpzed-yyyyyyyyyyyyyyy'
api = arvados.api()
filters = [['properties.responsible_person_uuid', '=', '{}'.format(old_uuid)]]
# keyset_list_all returns an iterator; materialize it so len() works below.
cols = list(util.keyset_list_all(api.collections().list, filters=filters, select=['uuid', 'properties'], order='uuid'))
print('Found {} collections'.format(len(cols)))
for c in cols:
    print('Updating collection {}'.format(c['uuid']))
    props = c['properties']
    props['responsible_person_uuid'] = new_uuid
    api.collections().update(uuid=c['uuid'], body={'properties': props}).execute()
================================================ FILE: doc/_includes/_assign_volume_uuid.liquid ================================================
{% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}
Note that each volume has a UUID, like @zzzzz-nyw5e-0123456789abcde@. You assign these manually: replace @zzzzz@ with your Cluster ID, and replace @0123456789abcde@ with an arbitrary unique string of 15 alphanumerics. Once assigned, UUIDs should not be changed. Essential configuration values are highlighted in red. Remaining parameters are provided for documentation, with their default values.
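If you script your setup, minting such a suffix is easy. The sketch below is an illustration only: it assumes lowercase letters and digits are acceptable alphanumerics, reuses the @nyw5e@ volume infix from the example above, and uses @zzzzz@ as a placeholder cluster ID.
#!/usr/bin/env python3
# Illustrative helper: mint a volume UUID of the form <cluster>-nyw5e-<15 alphanumerics>.
import random
import string

cluster_id = 'zzzzz'  # placeholder: replace with your real cluster ID
suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=15))
print('{}-nyw5e-{}'.format(cluster_id, suffix))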
================================================ FILE: doc/_includes/_branchname.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% if site.current_version and site.current_version != 'main' %} {% assign branchname = site.current_version | slice: 1, 3 | append: '-release' %} {% else %} {% assign branchname = 'main' %} {% endif %} ================================================ FILE: doc/_includes/_container_glob_patterns.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} h2. Glob patterns Each pattern in the @output_glob@ array can include the following special terms: table(table table-bordered table-condensed). |@*@|matches any sequence of non-@/@ characters| |@?@|matches any single non-@/@ character| |@[abcde]@ or @[a-e]@|matches any non-@/@ character in @abcde@| |@[^abcde]@ or @[^a-e]@ or @[!abcde]@ or @[!a-e]@|matches any non-@/@ character other than @abcde@| |@/**/@|matches zero or more levels of subdirectories| |@**/@|at the beginning of a pattern, matches zero or more directories| |@/**@|at the end of a pattern, matches any file in any subdirectory| Example patterns: table(table table-bordered table-condensed). |@*.txt@|matches files with extension @.txt@ at the top level| |@foo/**@|matches the entire tree rooted at @foo@ in the top level| |@**/fo[og]@|matches all files named @foo@ or @fog@ anywhere in the tree| |@foo/**/*.txt@|matches all files with extension @.txt@ anywhere in the tree rooted at @foo@ in the top level| ================================================ FILE: doc/_includes/_container_published_ports.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} h2. Published ports Containers can expose web services. These can be interactive web applications such as Jupyter notebooks or AI chats, or HTTP-based APIs. Arvados acts as a reverse proxy, forwarding HTTP requests to the container and relaying responses back to the client. The external URL will be one of the following, depending on how Arvados is configured (see ContainerWebServices in the "default config file":{{site.baseurl}}/admin/config.html): * @https://<uuid>-<port>.containers.zzzzz.example.com/@ where @<uuid>@ is the container UUID and @<port>@ is the port where the container process is listening * @https://containers.zzzzz.example.com:<external_port>/@ where @<external_port>@ is a dynamically assigned port To accept requests, the container should listen on 0.0.0.0 (listening on localhost or 127.0.0.1 will _not_ work) and handle plain text HTTP/1.1 requests. The @published_ports@ attribute of the container request record advertises which ports on the container should be available to external clients. The value of @published_ports@ is a hash. Each key in the hash is a port number that the container is listening on. Each entry in the hash has three keys described here: table(table table-bordered table-condensed). |_. Key|_. Type|_. Description| |access|string|One of "public" or "private". If "private", the client connecting to the container must provide an Arvados API token for the user who submitted the container request(s) corresponding to the container. The token is provided as a query parameter @?arvados_api_token=...@.
Arvados will consume the query parameter and respond with a redirect and a cookie used to authenticate subsequent requests. If "public", no authentication is required.| |label|string|A string that will be displayed to the user on Workbench describing the service. Cannot be empty.| |initial_path|string|The relative path that should be included when constructing the URL that will be presented in Workbench and in the @initial_url@ field described below. May include any or none of path, fragment and query parameter parts of the URL, or be blank. Leading slash is optional.| The @published_ports@ attribute of the container record is a copy of the corresponding container request attribute, with the following entries added for each exposed port when the container enters @Running@ state: table(table table-bordered table-condensed). |_. Key|_. Type|_. Description|_. Examples| |base_url|string|The external URL where the service is reachable.|@https://zzzzz-dz642-abcdefghijklmno-80.containers.zzzzz.example.com/@ @https://containers.zzzzz.example.com:2000/@| |initial_url|string|The external URL with @initial_path@ applied.|@https://zzzzz-dz642-abcdefghijklmno-80.containers.zzzzz.example.com/index.html?start=true@ @https://containers.zzzzz.example.com:2000/index.html?start=true@| |external_port|integer|The dynamically assigned external port if applicable, otherwise null.|@2000@ @null@| Example @published_ports@ attribute for a container request:
{
  "published_ports": {
    "80": {
      "access": "private",
      "label": "Jupyter notebook instance",
      "initial_path": "?path=example.ipynb"
    }
  }
}
Example @published_ports@ attribute for a running container:
{
  "published_ports": {
    "80": {
      "access": "private",
      "label": "Jupyter notebook instance",
      "initial_path": "?path=example.ipynb",
      "external_port": 2025,
      "base_url": "https://containers.zzzzz.example.com:2025/",
      "initial_url": "https://containers.zzzzz.example.com:2025/?path=example.ipynb"
    }
  }
}
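To make this concrete, a container request carrying a @published_ports@ attribute could be submitted with the Python SDK along the lines of the sketch below. This is an illustration, not a ready-to-run recipe: the image, command, and output path are placeholder assumptions, and the container must actually run an HTTP server listening on 0.0.0.0 on the published port, as described above.
#!/usr/bin/env python3
# Sketch: submit a container request that publishes port 80.
# container_image, command, and output_path are placeholder values.
import arvados

api = arvados.api()
req = api.container_requests().create(body={
    'name': 'published-ports demo',
    'container_image': 'example/jupyter:latest',  # placeholder image
    'command': ['jupyter', 'notebook', '--ip=0.0.0.0', '--port=80'],  # listen on 0.0.0.0
    'output_path': '/var/spool/cwl',
    'state': 'Committed',
    'priority': 1,
    'runtime_constraints': {'vcpus': 1, 'ram': 1073741824},  # ram/vcpus are required in Committed state
    'published_ports': {
        '80': {
            'access': 'private',
            'label': 'Jupyter notebook instance',
            'initial_path': '?path=example.ipynb',
        },
    },
}).execute()
print(req['uuid'])
Once the container reaches @Running@ state, the corresponding container record's @published_ports@ entries gain the @base_url@, @initial_url@, and @external_port@ values shown in the second example.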
h3. Accessing unpublished ports If the @Services.ContainerWebServices.ExternalURL@ config entry is a wildcard, it is possible to connect to _any_ port in a running container, whether or not it is listed in @published_ports@, by providing the container request or container UUID and the listening port number as @<uuid>-<port>@ in place of the @*@ wildcard in the URL, _i.e._, @https://<uuid>-<port>.containers.zzzzz.example.com/@. Unpublished ports are not displayed in Workbench and have a default access level of "private". ================================================ FILE: doc/_includes/_container_runtime_constraints.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} h2. Runtime constraints Runtime constraints restrict the container's access to compute resources and the outside world (in addition to its explicitly stated inputs and output). table(table table-bordered table-condensed). |_. Key|_. Type|_. Description|_. Notes| |ram|integer|Number of RAM bytes to be used to run this process.|Optional. However, a ContainerRequest that is in "Committed" state must provide this.| |vcpus|integer|Number of cores to be used to run this process.|Optional. However, a ContainerRequest that is in "Committed" state must provide this.| |keep_cache_disk|integer|When the container process accesses data from Keep via the filesystem, that data will be cached on disk, up to this amount in bytes.|Optional. If your cluster is configured to use a disk cache by default, the default size will match your @ram@ constraint, bounded between 2GiB and 32GiB.| |keep_cache_ram|integer|When the container process accesses data from Keep via the filesystem, that data will be cached in memory, up to this amount in bytes.|Optional. If your cluster is configured to use a RAM cache by default, the administrator sets a default cache size.| |API|boolean|When set, ARVADOS_API_HOST and ARVADOS_API_TOKEN will be set, and the container will have networking enabled to access the Arvados API server.|Optional.| |gpu|object|Request GPU support; see below.|Optional.| |cuda|object|Old way to request CUDA GPU support, included for backwards compatibility only. Use the 'gpu' field instead.|Deprecated.| h3. GPU support table(table table-bordered table-condensed). |_. Key|_. Type|_. Description|_. Notes| |stack|string|One of 'cuda' or 'rocm' to request Nvidia or AMD GPU support.|| |device_count|int|Number of GPUs to request.|Count greater than 0 enables GPU support.| |driver_version|string|Minimum driver version, in "X.Y" format.|Required when device_count > 0| |hardware_target|array of strings|For CUDA: a single item with minimum CUDA hardware capability, in "X.Y" format, or multiple items listing CUDA specific hardware capability versions, one of which must be an exact match on the compute node the container is scheduled on. For ROCm: a list of one or more hardware targets (e.g. gfx1100) corresponding to the GPU architectures supported by the container. To be scheduled, at least one item in this list must match the @HardwareTarget@ of one of the cluster's @InstanceTypes@.|Required when device_count > 0| |vram|int|Amount of VRAM to request, in bytes.|| h3. CUDA support (deprecated) Note: This API is deprecated. Use the 'gpu' API instead. table(table table-bordered table-condensed). |_. Key|_. Type|_. Description|_.
Notes| |device_count|int|Number of GPUs to request.|Count greater than 0 enables CUDA GPU support.| |driver_version|string|Minimum CUDA driver version, in "X.Y" format.|Required when device_count > 0| |hardware_capability|string|Minimum CUDA hardware capability, in "X.Y" format.|Required when device_count > 0| ================================================ FILE: doc/_includes/_container_scheduling_parameters.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} h2. Scheduling parameters Parameters to be passed to the container scheduler (e.g., Slurm) when running a container. table(table table-bordered table-condensed). |_. Key|_. Type|_. Description|_. Notes| |partitions|array of strings|The names of one or more compute partitions that may run this container. If not provided, the system will choose where to run the container.|Optional.| |preemptible|boolean|If true, the dispatcher should use a preemptible cloud node instance (e.g., an AWS Spot Instance) to run this container. Whether a preemptible instance is actually used "depends on cluster configuration.":{{site.baseurl}}/admin/spot-instances.html|Optional. Default is false.| |max_run_time|integer|Maximum running time (in seconds) that this container will be allowed to run before being cancelled.|Optional. Default is 0 (no limit).| ================================================ FILE: doc/_includes/_contrib_component.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% include 'notebox_begin' %} {{ component_name|default: 'This component' }} is an Arvados client contribution. It is supported by the Arvados development team and we are happy to receive contributions for it, but it receives less testing than core components and bug reports may get lower priority. {% include 'notebox_end' %} ================================================ FILE: doc/_includes/_download_installer.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% include 'branchname' %} This is a package-based installation method; however, the installation script is currently distributed in source form via @git@. We recommend checking out the git tree on your local workstation, not directly on the target(s) where you want to install and run Arvados.
git clone https://github.com/arvados/arvados.git
cd arvados
git checkout {{ branchname }}
cd tools/salt-install
The @installer.sh@ and @provision.sh@ scripts will help you deploy Arvados by preparing your environment to run the installer, then running it. The actual installer is located in the "arvados-formula git repository":https://github.com/arvados/arvados-formula/tree/refs/heads/{{ branchname }} and will be cloned when the @provision.sh@ script runs. The installer is built using "Saltstack":https://saltproject.io/, and @provision.sh@ performs the installation in masterless mode.

h2(#copy_config). Initialize the installer

Replace "xarv1" with the cluster id you selected earlier. This creates a git repository in @~/setup-arvados-xarv1@. The @installer.sh@ script will record all the configuration changes you make, and will use @git push@ to synchronize configuration edits if you have multiple nodes.

Important! Once you have initialized the installer directory, all further commands must be run with @~/setup-arvados-${CLUSTER}@ as the current working directory.

h3. Using Terraform (AWS specific)

If you are going to use Terraform to set up the infrastructure on AWS, you first need to install the "Terraform CLI":https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli and the "AWS CLI":https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html tool. Then you can initialize the installer.
CLUSTER=xarv1
./installer.sh initialize ~/setup-arvados-${CLUSTER} {{local_params_src}} {{config_examples_src}} {{terraform_src}}
cd ~/setup-arvados-${CLUSTER}
h3. Without Terraform
CLUSTER=xarv1
./installer.sh initialize ~/setup-arvados-${CLUSTER} {{local_params_src}} {{config_examples_src}}
cd ~/setup-arvados-${CLUSTER}
================================================
FILE: doc/_includes/_example_sdk_go.liquid
================================================
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: CC-BY-SA-3.0

package main

// *******************
// Import the modules.
//
// Our examples don't use keepclient, but they do use fmt and log to
// display output.

import (
    "fmt"
    "log"

    "git.arvados.org/arvados.git/sdk/go/arvadosclient"
)

func main() {
    // ********************************
    // Set up an API client user agent.
    //
    arv, err := arvadosclient.MakeArvadosClient()
    if err != nil {
        log.Fatalf("Error setting up arvados client: %s", err.Error())
    }

    // *****************************************
    // Print the full name of the current user.
    //
    type user struct {
        // Remember to start each field name with a capital letter,
        // otherwise it won't get populated by the arvados client because
        // the field will be invisible to it.
        Uuid     string `json:"uuid"`
        FullName string `json:"full_name"`
    }
    var u user
    err = arv.Call("GET", "users", "", "current", nil, &u)
    if err != nil {
        log.Fatalf("error querying current user: %s", err.Error())
    }
    log.Printf("Logged in as %s (uuid %s)", u.FullName, u.Uuid)

    // ***********************************************************
    // Print all fields from the first five collections returned.
    //
    // Note that some fields are not returned by default and have to be
    // requested. See below for an example.
    var results map[string]interface{}
    params := arvadosclient.Dict{"limit": 5}
    err = arv.List("collections", params, &results)
    if err != nil {
        log.Fatalf("error querying collections: %s", err.Error())
    }
    printArvadosResults(results)

    // ***********************************************************
    // Print some fields from the first two collections returned.
    //
    // We also print manifest_text, which has to be explicitly requested.
    //
    collection_fields_wanted := []string{"manifest_text", "owner_uuid", "uuid"}
    params = arvadosclient.Dict{"limit": 2, "select": collection_fields_wanted}
    err = arv.List("collections", params, &results)
    if err != nil {
        log.Fatalf("error querying collections: %s", err.Error())
    }
    printArvadosResults(results)
}

// A helper function that prints out a result map returned by
// arvadosclient.
func printArvadosResults(results map[string]interface{}) {
    for key, value := range results {
        // "items", if it exists, holds a list of maps,
        // so we print it prettily below.
        if key != "items" {
            fmt.Println(key, ":", value)
        }
    }
    if value, ok := results["items"]; ok {
        items := value.([]interface{})
        for index, item := range items {
            fmt.Println("=========== ", index, " ===========")
            item_map := item.(map[string]interface{})
            if len(item_map) == 0 {
                fmt.Println("item", index, ": empty map")
            } else {
                for k, v := range item_map {
                    fmt.Println(index, k, ":", v)
                }
            }
        }
    }
}

================================================
FILE: doc/_includes/_google_analytics.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

================================================
FILE: doc/_includes/_hpc_max_gateway_tunnels.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h3(#MaxGatewayTunnels). API.MaxGatewayTunnels

Each Arvados container that runs on your HPC cluster will bring up a long-lived connection to the Arvados controller and keep it open for the entire duration of the container. This connection is used to access real-time container logs from Workbench, and to enable the "container shell":{{site.baseurl}}/install/container-shell-access.html feature.

Set the @MaxGatewayTunnels@ config entry high enough to accommodate the maximum number of containers you expect to run concurrently on your HPC cluster, plus incoming container shell sessions.
    API:
      MaxGatewayTunnels: 2000
Also, configure Nginx (and any other HTTP proxies or load balancers running between the HPC and Arvados controller) to allow the expected number of connections, i.e., @MaxConcurrentRequests + MaxQueuedRequests + MaxGatewayTunnels@.

================================================
FILE: doc/_includes/_html_tags.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

The following HTML tags are permitted: *a*, *b*, *blockquote*, *br*, *code*, *dd*, *del*, *dl*, *dt*, *em*, *h1*, *h2*, *h3*, *h4*, *h5*, *h6*, *hr*, *i*, *img*, *kbd*, *li*, *ol*, *p*, *pre*, *s*, *section*, *span*, *strong*, *sub*, *sup*, and *ul*.

The following HTML attributes are permitted: *src*, *width*, *height*, *href*, *alt*, *title*, and *style*.

All styling must be specified inline with the *style* attribute. Disallowed tags and attributes will not render.

================================================
FILE: doc/_includes/_install_ansible.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{{ header_level|default: 'h3' }}(#install-ansible-pipx). Option 1. Install Ansible with pipx

The pipx tool is packaged in many of our supported distributions. You can install it on Debian/Ubuntu by running:
# apt install pipx
Or install it on Red Hat/AlmaLinux/Rocky Linux by running:
# dnf install pipx
{% include 'notebox_begin' %}
If the pipx package is not found, it is not available for your distribution. Instead, "install Ansible with virtualenv and pip":#install-ansible-venv.
{% include 'notebox_end' %}

After pipx is installed, install Ansible by running:
$ arvados/tools/ansible/install-ansible.sh
  installed package ansible-core 2.15.13, installed using Python 3.11.2
  These apps are now globally available
    - ansible
    - ansible-config
    - ansible-connection
    - ansible-console
    - ansible-doc
    - ansible-galaxy
    - ansible-inventory
    - ansible-playbook
    - ansible-pull
    - ansible-test
    - ansible-vault
done! ✨ 🌟 ✨

[…]

Ansible successfully installed!
If this script reports the final success message, skip the next section.

{{ header_level|default: 'h3' }}(#install-ansible-venv). Option 2. Install Ansible in a virtualenv

This method works on all of our supported distributions, but requires more manual path configuration. Install Python and virtualenv on Debian/Ubuntu by running:
# apt install python3-venv
Or install it on Red Hat/AlmaLinux/Rocky Linux by running:
# dnf install python3
Next, set up a virtualenv. If you want to install this somewhere other than @~/arvados-ansible@, you may change that path each time it appears.
$ arvados/tools/ansible/install-ansible.sh ~/arvados-ansible
Collecting ansible-core~=2.15.13
[…]

Ansible successfully installed!
Finally, add all the Ansible tools to your executable path. If you keep personal executables somewhere other than @~/.local/bin@, you may change that path.
$ ln -st ~/.local/bin ~/arvados-ansible/bin/ansible*
Alternatively, you may reconfigure your shell to add @$HOME/arvados-ansible/bin@ to the end of your @$PATH@ variable.

================================================
FILE: doc/_includes/_install_ca_cert.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h3. Web Browser

Installing the root certificate into your web browser will prevent security errors when accessing Arvados services with your web browser.

h4. Chrome

# Go to "Settings → Privacy and Security → Security → Manage Certificates" or enter @chrome://settings/certificates@ in the URL bar.
# *Click on the "Authorities" tab* (it is not selected by default).
# Click on the "Import" button.
# Choose @{{ca_cert_name}}@.
# Tick the checkbox next to "Trust this certificate for identifying websites".
# Hit OK.
# The certificate should appear in the list of Authorities under "Arvados".

h4. Firefox

# Go to "Preferences → Privacy & Security" or enter @about:preferences#privacy@ in the URL bar.
# Scroll down to the *Certificates* section.
# Click on the button "View Certificates...".
# Make sure the "Authorities" tab is selected.
# Press the "Import..." button.
# Choose @{{ca_cert_name}}@.
# Tick the checkbox next to "Trust this CA to identify websites".
# Hit OK.
# The certificate should appear in the list of Authorities under "Arvados".

h4. Other browsers (Safari, etc.)

The process will be similar to that of Chrome and Firefox, but the exact user interface will be different. If you can't figure it out, try searching for "how do I install a custom certificate authority in (my browser)".

h3. Installation into the Linux OS certificate storage

To access your Arvados instance using command line clients (such as @arv-get@ and @arv-put@) without security errors, install the certificate into the OS certificate storage.

h4. Debian/Ubuntu

*Important:* the certificate file added to @ca-certificates@ must have the extension @.crt@ or it won't be recognized.
cp {{ca_cert_name}} /usr/local/share/ca-certificates/arvados-snakeoil-ca.crt
/usr/sbin/update-ca-certificates
h4. Red Hat, AlmaLinux, and Rocky Linux
cp {{ca_cert_name}} /etc/pki/ca-trust/source/anchors/
/usr/bin/update-ca-trust
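Once the certificate is installed, command line clients should connect without TLS errors. As a quick sanity check (substituting your own controller hostname for the @ClusterID.example.com@ placeholder), an unauthenticated request to the config export endpoint should succeed without certificate warnings:

$ curl https://ClusterID.example.com/arvados/v1/config >/dev/null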
================================================
FILE: doc/_includes/_install_compute_docker.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2(#cgroups). Configure Linux cgroups accounting

Linux can report what compute resources are used by processes in a specific cgroup or Docker container. Crunch can use these reports to share that information with users running compute work. This can help workflow authors debug and optimize their workflows.

To enable cgroups accounting, you must boot Linux with the command line parameters @cgroup_enable=memory swapaccount=1@.

Arvados is not currently compatible with the newer cgroups accounting, also known as cgroups v2, and none of the supported GNU/Linux distributions use cgroups v2 by default. If the distribution on your compute nodes ships with cgroups v2 enabled, make sure to disable it by booting Linux with the command line parameter @systemd.unified_cgroup_hierarchy=0@.

After making changes, reboot the system to make these changes effective.

h3. Red Hat, AlmaLinux, and Rocky Linux
~$ sudo grubby --update-kernel=ALL --args='cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0'
h3. Debian and Ubuntu

Open the file @/etc/default/grub@ in an editor. Find where the string @GRUB_CMDLINE_LINUX@ is set. Add @cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0@ to that string. Save the file and exit the editor. Then run:
~$ sudo update-grub
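After rebooting, you can confirm that the new parameters took effect by inspecting the kernel command line (the exact output will vary by system, but it should include the parameters added above):

~$ cat /proc/cmdline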
h2(#install_docker). Install Docker

Compute nodes must have Docker installed to run containers. This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported). Follow the "Docker Engine installation documentation":https://docs.docker.com/install/ for your distribution.

Make sure Docker is enabled to start on boot:
# systemctl enable --now docker
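If you want to confirm that Docker is working before proceeding, running a throwaway test container is a quick check (this pulls a small test image from Docker Hub, so it also exercises the node's network access):

# docker run --rm hello-world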
h2(#configure_docker_daemon). Configure the Docker daemon

Depending on your anticipated workload or cluster configuration, you may need to tweak Docker options. For information about how to set configuration options for the Docker daemon, see https://docs.docker.com/config/daemon/systemd/

h3. Changing ulimits

Docker containers inherit ulimits from the Docker daemon. However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job. You may want to increase default limits for compute containers by passing @--default-ulimit@ options to the Docker daemon. For example, to allow containers to open 10,000 files, set @--default-ulimit nofile=10000:10000@.

h2. Troubleshooting

h3. Workflows fail with @ValidationException: Not found: '/var/lib/cwl/workflow.json#main'@

A possible configuration error is having Docker installed as a @snap@ package rather than a @deb@ package. This is a problem because @snap@ packages are partially containerized and may have a different view of the filesystem than @crunch-run@. This produces confusing problems, for example, directory bind mounts sent to Docker that appear empty (instead of containing the intended files), resulting in unexpected "file not found" errors.

To check for this situation, run @snap list@ and look for @docker@. If found, run @snap remove docker@ and follow the instructions above to "install Docker Engine":#install_docker.
# Allow non-root users to specify the 'allow_other' or 'allow_root'
# mount options.
user_allow_other
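With @user_allow_other@ enabled, non-root processes may pass the @allow_other@ mount option to FUSE. For example, @arv-mount@ accepts an @--allow-other@ flag that makes a Keep mount readable by other users, which is the behavior Crunch relies on. A quick way to check the setting, assuming @~/keep@ is an existing empty directory:

$ arv-mount --allow-other ~/keep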
================================================
FILE: doc/_includes/_install_cuda.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2(#cuda). Install NVIDIA CUDA Toolkit (optional)

If you want to use NVIDIA GPUs, "install the CUDA toolkit":https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html and the "NVIDIA Container Toolkit":https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html.

================================================
FILE: doc/_includes/_install_debian_key.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
# install -d /etc/apt/keyrings
# curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
================================================
FILE: doc/_includes/_install_docker_cleaner.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2(#docker-cleaner). Update docker-cleaner.json

The @arvados-docker-cleaner@ program removes least recently used Docker images as needed to keep disk usage below a configured limit. Create the file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents:
{
    "Quota": "10G",
    "RemoveStoppedContainers": "always"
}
*Choosing a quota:* Most deployments will want a quota that's at least 10G. From there, a larger quota can help reduce compute overhead by avoiding repeated reloads of the same Docker image, but will leave less space for other files on the same storage (usually Docker volumes). Make sure the quota is less than the total space available for Docker images.

{% include 'notebox_begin' %}
This also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or configure it with @"RemoveStoppedContainers":"never"@.
{% include 'notebox_end' %}

================================================
FILE: doc/_includes/_install_packages.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0

packages_to_install should be a list; fall back on arvados_component if not defined
{% endcomment %}
{% if packages_to_install == nil %}
{% assign packages_to_install = arvados_component | split: " " %}
{% endif %}

h2(#install-packages). Install {{packages_to_install | join: " and " }}

h3. Red Hat, AlmaLinux, and Rocky Linux
# dnf install {{packages_to_install | join: " "}}
h3. Debian and Ubuntu
# apt install {{packages_to_install | join: " "}}
================================================ FILE: doc/_includes/_install_postgres_database.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}
  1. Start a shell for the postgres user:
    # su postgres
  2. Generate a new database password:
    postgres$ tr -dc 0-9a-zA-Z </dev/urandom | head -c25; echo
    yourgeneratedpassword
    
    Record this. You'll need it when you set up the Rails server later.
  3. Create a database user with the password you generated:
    postgres$ createuser --encrypted --no-createrole --no-superuser --pwprompt {{service_role}}
      Enter password for new role: yourgeneratedpassword
      Enter it again: yourgeneratedpassword
  4. Create a database owned by the new user:
    postgres$ createdb {{service_database}} -T template0 -E UTF8 -O {{service_role}}
  {% if use_contrib %}
  5. Enable the pg_trgm extension:
    postgres$ psql {{service_database}} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
  {% endif %}
  6. Exit the postgres user shell:
    postgres$ exit
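To confirm that the new role and database work together, you can try connecting as the new user with the password you generated. Connection details (host, authentication method) may vary by site, so treat this as a sketch:

$ psql postgresql://{{service_role}}:yourgeneratedpassword@localhost/{{service_database}} -c 'SELECT current_database();'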
================================================ FILE: doc/_includes/_install_rails_command.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% comment %} This template recognizes four variables: * railshost: The hostname included in the prompt, to let the user know where to run the command. If this is the empty string, no hostname will be displayed. Default "apiserver". * railsdir: The directory included in the prompt, to let the user know where to run the command. Default "/var/www/arvados-api/current". * railscmd: The full command to run. Default "bin/rails console". * railsout: The expected output of the command, if any. {% endcomment %} Change *@webserver-user@* to the user that runs your web server process. If you install Phusion Passenger as we recommend, this is *@www-data@* on Debian-based systems, and *@nginx@* on Red Hat-based systems. {% unless railshost %} {% assign railshost = "apiserver" %} {% endunless %} {% unless (railshost == "") or (railshost contains ":") %} {% capture railshost %}{{railshost}}:{% endcapture %} {% endunless %} {% unless railsdir %} {% assign railsdir = "/var/www/arvados-api/current" %} {% endunless %} {% unless railscmd %} {% assign railscmd = "bin/rails console" %} {% endunless %}
{{railshost}}~$ cd {{railsdir}}
{{railshost}}{{railsdir}}$ sudo -u webserver-user RAILS_ENV=production {{railscmd}}
{% if railsout %}{{railsout}}
{% endif %}
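For example, with the default values above on a Debian-based system (where the web server user is *@www-data@*), the rendered instructions look like this:

apiserver:~$ cd /var/www/arvados-api/current
apiserver:/var/www/arvados-api/current$ sudo -u www-data RAILS_ENV=production bin/rails console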
================================================
FILE: doc/_includes/_install_ruby_and_bundler.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Ruby 2.7 or newer is required.

h2. Red Hat, AlmaLinux, and Rocky Linux

Version 8 of these distributions provides Ruby 2.7. You can install it by running:
# dnf module enable ruby:2.7
# dnf install --enablerepo=devel ruby ruby-devel
h2. Debian and Ubuntu

All supported versions of Debian and Ubuntu include a version of Ruby you can use with Arvados. Install it by running:
# apt --no-install-recommends install ruby ruby-dev
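After installation, you can check that the Ruby on your @$PATH@ meets the 2.7 version requirement:

$ ruby -e 'puts RUBY_VERSION'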
================================================
FILE: doc/_includes/_matomo_analytics.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

================================================
FILE: doc/_includes/_metadata_vocabulary_example.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}{
    "strict_tags": false,
    "tags": {
        "IDTAGANIMALS": {
            "strict": false,
            "labels": [
                {"label": "Animal"},
                {"label": "Creature"},
                {"label": "Species"}
            ],
            "values": {
                "IDVALANIMALS1": {
                    "labels": [{"label": "Human"}, {"label": "Homo sapiens"}]
                },
                "IDVALANIMALS2": {
                    "labels": [{"label": "Dog"}, {"label": "Canis lupus familiaris"}]
                },
                "IDVALANIMALS3": {
                    "labels": [{"label": "Elephant"}, {"label": "Loxodonta"}]
                },
                "IDVALANIMALS4": {
                    "labels": [{"label": "Eagle"}, {"label": "Haliaeetus leucocephalus"}]
                }
            }
        },
        "IDTAGCOMMENT": {
            "labels": [{"label": "Comment"}, {"label": "Suggestion"}]
        },
        "IDTAGIMPORTANCES": {
            "strict": true,
            "labels": [
                {"label": "Importance"},
                {"label": "Priority"}
            ],
            "values": {
                "IDVALIMPORTANCES1": {
                    "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}]
                },
                "IDVALIMPORTANCES2": {
                    "labels": [{"label": "Normal"}, {"label": "Moderate"}]
                },
                "IDVALIMPORTANCES3": {
                    "labels": [{"label": "Low"}]
                }
            }
        }
    }
}

================================================
FILE: doc/_includes/_mount_types.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2. Mount types

The "mounts" hash is the primary mechanism for adding data to the container at runtime (beyond what is already in the container image). Each value of the "mounts" hash is itself a hash, whose "kind" key determines the handler used to attach data to the container.

table(table table-bordered table-condensed).
|_. Mount type|_. Kind|_. Description|_. Examples|
|Arvados data collection|@collection@|@"portable_data_hash"@ _or_ @"uuid"@ _may_ be provided. If not provided, a new collection will be created. This is useful when @"writable":true@ and the container's @output_path@ is (or is a subdirectory of) this mount target. @"writable"@ may be provided with a @true@ or @false@ to indicate the path must (or must not) be writable. If not specified, the system can choose. @"path"@ may be provided, and defaults to @"/"@. At container startup, the target path will have the same directory structure as the given path within the collection. Even if the files/directories are writable in the container, modifications will _not_ be saved back to the original collections when the container ends.|
{
 "kind":"collection",
 "uuid":"...",
 "path":"/foo.txt"
}
{
 "kind":"collection",
 "uuid":"..."
}
| |Temporary directory|@tmp@|@"capacity"@: capacity (in bytes) of the storage device. @"device_type"@ (optional, default "network"): one of @{"ram", "ssd", "disk", "network"}@ indicating the acceptable level of performance. (*note: not yet implemented as of v1.5*) At container startup, the target path will be empty. When the container finishes, the content will be discarded. This will be backed by a storage mechanism no slower than the specified type.|
{
 "kind":"tmp",
 "capacity":100000000000
}
{
 "kind":"tmp",
 "capacity":1000000000,
 "device_type":"ram"
}
| |Keep|@keep@|Expose all readable collections via arv-mount. Requires suitable runtime constraints.|
{
 "kind":"keep"
}
| |Mounted file or directory|@file@|@"path"@: absolute path (inside the container) of a file or directory that is (or is inside) another mount target. Can be used for "stdin" and "stdout" targets.|
{
 "kind":"file",
 "path":"/mounted_tmp/a.out"
}
| |JSON document|@json@|A JSON-encoded string, array, or object.|
{
 "kind":"json",
 "content":{"foo":"bar"}
}
| |Plain text|@text@|A plain text string.|
{
 "kind":"text",
 "content":"foo bar\n"
}
|

h2(#pre-populate-output). Pre-populate output using Mount points

When a container's output_path is a tmp mount backed by local disk, this output directory can be pre-populated with content from existing collections. This content can be specified by mounting collections at mount points that are subdirectories of output_path. Certain restrictions apply:

1. Only mount points of kind @collection@ are supported.

2. Mount points underneath output_path which have @"writable":true@ are copied into output_path during container initialization and may be updated, renamed, or deleted by the running container. The original collection is not modified. On container completion, files remaining in the output are saved to the output collection. The mount at output_path must be big enough to accommodate copies of the inner writable mounts.

3. If any such mount points are configured as @"exclude_from_output":true@, they will be excluded from the output. If any process in the container tries to modify, remove, or rename these mount points or anything underneath them, the operation will fail and the container output and the underlying collections used to pre-populate are unaffected.

h3. Example mount point configurations

All the examples below are based on this collection:

portable_data_hash cdfbe2e823222d26483d52e5089d553c+175

manifest_text: ./alice 03032680d3fa0561ef4f85071140861e+13+A04e9d06459cda00aa997565bd78001061cf5bffb@58ab593d 0:13:hello.txt\n./bob d820b9df970e1b498e7723c50b107e1b+11+A42d162a60210479d1cfaf9fbb98d494ac6322ae6@58ab593d 0:11:hello.txt\n./carol cf72b172ff969250ae14a893a6745440+13+A476a2fd39e14e9c03af3076bd17e3612c075ff66@58ab593d 0:13:hello.txt\n
table(table table-bordered table-condensed). |{width:40%}. *Mount point*|{width:30%}. *Description*|{width:30%}. *Resulting collection manifest text*| |
"mounts": {
  "/tmp/foo": {
    "kind": "collection",
    "portable_data_hash": "cdfbe2...+175"
  },
},
"output_path": "/tmp"
|No path specified and hence the entire collection will be mounted.|./*foo/*alice 030326... 0:13:hello.txt\n ./*foo/*bob d820b9... 0:11:hello.txt\n ./*foo/*carol cf72b1... 0:13:hello.txt\n *Note:* Here the "." in streams is replaced with *foo*.| |
"mounts": {
  "/tmp/foo/bar": {
    "kind": "collection",
    "portable_data_hash": "cdfbe2...+175"
    "path": "alice"
  },
},
"output_path": "/tmp"
|Specified path refers to the subdirectory *alice* in the collection.|./*foo/bar* 030326... 0:13:hello.txt\n *Note:* only the manifest text segment for the subdirectory *alice* is included after replacing the subdirectory *alice* with *foo/bar*.| |
"mounts": {
  "/tmp/foo/bar": {
    "kind": "collection",
    "portable_data_hash": "cdfbe2...+175"
    "path": "alice/hello.txt"
  },
},
"output_path": "/tmp"
|Specified path refers to the file *hello.txt* in the *alice* subdirectory|./*foo* 030326... 0:13:*bar*\n *Note:* Here the subdirectory *alice* is replaced with *foo* and the filename *hello.txt* from this subdirectory is replaced with *bar*.|

h2(#symlinks-in-output). Symlinks in output

When a container's output_path is a tmp mount backed by local disk, this output directory can contain symlinks to other files in the output directory, or to collection mount points. If the symlink leads to a collection mount, the collection will be copied into the output collection efficiently. Symlinks leading to files or directories are expanded and created as regular files in the output collection. Further, whether symlinks are relative or absolute, every symlink target (even targets that are symlinks themselves) must point to a path in either the output directory or a collection mount.

================================================
FILE: doc/_includes/_multi_host_install_custom_certificates.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

You will need certificates for each DNS name and DNS wildcard previously listed in the "DNS hostnames for each service":#DNS . To simplify certificate management, we recommend creating a single certificate for all of the hostnames, or creating a wildcard certificate that covers all possible hostnames (with the following patterns in subjectAltName):
xarv1.example.com
*.xarv1.example.com
*.collections.xarv1.example.com
*.containers.xarv1.example.com
(Replace @xarv1.example.com@ with your own @${DOMAIN}@.)

Copy your certificates to the directory specified with the variable @CUSTOM_CERTS_DIR@ in the remote directory where you copied the @provision.sh@ script. The provision script will find the certificates there.

The script expects cert/key files with these basenames (matching the role, except for keepweb, which is split into download / collections):

# @balancer@ -- Optional on multi-node installations
# @collections@ -- Part of keepweb, must be a wildcard for @*.collections.${DOMAIN}@
# @controller@ -- Must be valid for @${DOMAIN}@ and @*.containers.${DOMAIN}@
# @download@ -- Part of keepweb
# @grafana@ -- Service available by default on multi-node installations
# @keepproxy@ -- Corresponds to default domain @keep.${DOMAIN}@
# @prometheus@ -- Service available by default on multi-node installations
# @webshell@
# @websocket@ -- Corresponds to default domain @ws.${DOMAIN}@
# @workbench@
# @workbench2@

For example, for the @keepproxy@ service the script will expect to find this certificate:
${CUSTOM_CERTS_DIR}/keepproxy.crt
${CUSTOM_CERTS_DIR}/keepproxy.key
Make sure that all the FQDNs that you will use for the public-facing applications (API/controller, Workbench, Keepproxy/Keepweb) are reachable. Note: because the installer currently looks for a different certificate file for each service, if you use a single certificate, we recommend creating a symlink for each certificate and key file to the primary certificate and key, e.g.
ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/controller.crt
ln -s xarv1.key ${CUSTOM_CERTS_DIR}/controller.key
ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/keepproxy.crt
ln -s xarv1.key ${CUSTOM_CERTS_DIR}/keepproxy.key
...
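A small shell loop can create all of the symlinks at once. This sketch assumes your single certificate and key are named @xarv1.crt@ and @xarv1.key@ as in the example above, and uses the service names from the list earlier in this section:

for service in balancer collections controller download grafana keepproxy prometheus webshell websocket workbench workbench2; do
  ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/${service}.crt
  ln -s xarv1.key ${CUSTOM_CERTS_DIR}/${service}.key
done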
================================================ FILE: doc/_includes/_navbar_left.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% assign highlighturl = "" %} {% for section in site.navbar[page.navsection] %} {% for entry in section %} {% comment %} Want to highlight the current page on the left nav. But some pages have been renamed with a symlink from the old page to the new one. Then the URL won't match. So if the URL doesn't match, as a fallback look for a page with a matching title. {% endcomment %} {% for item in entry[1] %} {% if site.pages[item].url == page.url %} {% assign highlighturl = site.pages[item].url %} {% endif %} {% endfor %} {% if highlighturl == "" %} {% for item in entry[1] %} {% if site.pages[item].title == page.title %} {% assign highlighturl = site.pages[item].url %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endfor %}
================================================ FILE: doc/_includes/_navbar_top.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} ================================================ FILE: doc/_includes/_notebox_begin.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}

Note:

================================================ FILE: doc/_includes/_notebox_begin_warning.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}

Note:

================================================ FILE: doc/_includes/_notebox_end.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %}
================================================ FILE: doc/_includes/_restart_api.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} h2(#restart-api). Restart the API server and controller *Make sure the cluster config file is up to date on the API server host* then restart the API server and controller processes to ensure the configuration changes are visible to the whole cluster.
# systemctl restart nginx arvados-controller
# arvados-server check
================================================ FILE: doc/_includes/_setup_debian_repo.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 packages_to_install may be a space-separated string {% endcomment %} Set up the Arvados package repository {%- if packages_to_install == nil %} {%- elsif packages_to_install contains " " %} and install the packages {%- else %} and install @{{ packages_to_install }}@ {%- endif %} by running these commands:
# install -d /etc/apt/keyrings
# curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg
# declare $(grep "^VERSION_CODENAME=" /etc/os-release || echo VERSION_CODENAME=MISSING)
# tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF
Types: deb
URIs: https://apt.arvados.org/$VERSION_CODENAME
Suites: $VERSION_CODENAME
Components: main
Signed-by: /etc/apt/keyrings/arvados.asc
EOF
# apt update
{%- if packages_to_install != nil %}
# apt install {{ packages_to_install }}
{% endif -%}
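If you want to confirm that the repository was set up correctly, @apt policy@ should now list @apt.arvados.org@ among your package sources:

# apt policy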
================================================ FILE: doc/_includes/_setup_redhat_repo.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 modules_to_enable and packages_to_install may be space-separated strings {% endcomment %} {%- if modules_to_enable != nil %} {% include 'notebox_begin_warning' %} Arvados tools require newer language runtimes than the default versions included with these distributions. These instructions will **upgrade language runtimes for the entire system**. Check that won't interfere with any existing software before you proceed. {% include 'notebox_end' %} {% endif -%} Set up the Arvados package repository {%- if packages_to_install == nil %} {%- elsif packages_to_install contains " " %} and install the packages {%- else %} and install @{{ packages_to_install }}@ {%- endif %} by running these commands:
# tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'
[arvados]
name=Arvados
baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/
gpgcheck=1
gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados
EOF
{%- if modules_to_enable != nil %}
# dnf module enable {{ modules_to_enable }}
{% endif -%}
{%- if packages_to_install != nil -%}
# dnf install {{ packages_to_install }}
{% endif -%}
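If you want to confirm that the repository was set up correctly, the @arvados@ repository should now appear in the repository list:

# dnf repolist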
================================================ FILE: doc/_includes/_singularity_mksquashfs_configuration.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {{ mksquashfs_header|default: "h2" }}(#singularity_mksquashfs_configuration). Singularity mksquashfs configuration {% if show_docker_warning != nil %} {% include 'notebox_begin_warning' %} This section is only relevant when using Singularity. Skip this section when using Docker. {% include 'notebox_end' %} {% endif %} Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 256M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory. The default memory allocation in CWL is 256M, so that is also a good choice for the @mksquashfs mem@ setting. ================================================ FILE: doc/_includes/_ssh_addkey.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} You may now proceed to "adding your key to the Arvados Workbench.":#workbench h1(#workbench). Adding your key to Arvados Workbench In the Workbench top navigation menu, click on the dropdown menu icon to access the Account Management menu. Then, click on the menu item *Ssh keys* to go to the *SSH keys* page. Click on the + ADD NEW SSH KEY button in the upper-right on that page. You will see a popup as shown in this screenshot: !{width: 100%;}{{ site.baseurl }}/images/ssh-adding-public-key.png! Paste your _public_ key into the text area labeled *Public Key*, and click on the ADD NEW SSH KEY button in lower-right. You are now ready to "log into an Arvados VM":#login. h1(#login). Using SSH to log into an Arvados VM To see a list of virtual machines that you have access to, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the Account Management menu. Then, click on the menu item *Virtual Machines*. You will then see a page that lists the virtual machines you can access. The *Host name* column lists the name of each available VM. The *Login name* column lists your login name on that VM. The *Command line* column provides a sample @ssh@ command. !{width: 100%;}{{ site.baseurl }}/images/vm-access-with-webshell.png! At the bottom of the page there may be additional instructions for connecting your specific Arvados instance. If so, follow your site-specific instructions. If there are no site-specific instructions, you can probably connect directly with @ssh@. The following are generic instructions. In these examples, the login name will be *_you_* and the host domain will be *_ClusterID.example.com_*. Replace these with your login name and hostname as appropriate. ================================================ FILE: doc/_includes/_ssh_intro.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} Arvados requires a public SSH key in order to securely log in to an Arvados VM instance. 
The three sections below help you get started:

# "Getting your SSH key":#gettingkey
# "Adding your key to Arvados Workbench":#workbench
# "Using SSH to log into an Arvados VM instance":#login

================================================
FILE: doc/_includes/_ssl_config_multi.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2(#certificates). Choose the SSL/TLS configuration (SSL_MODE)

Arvados requires a valid TLS certificate to work correctly. This installer supports these options:

# @lets-encrypt@: "automatically obtain and install SSL certificates for your hostnames":#lets-encrypt
# @bring-your-own@: "supply your own certificates in the @certs@ directory":#bring-your-own

h3(#lets-encrypt). Using a Let's Encrypt certificate

In the default configuration, this installer gets a valid certificate via Let's Encrypt. If you have the CLUSTER.DOMAIN domain in a route53 zone, you can set USE_LETSENCRYPT_ROUTE53 to YES and supply appropriate credentials so that Let's Encrypt can use dns-01 validation to get the appropriate certificates.
SSL_MODE="lets-encrypt"
USE_LETSENCRYPT_ROUTE53="yes"
LE_AWS_REGION="us-east-1"
LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
Please note that when using AWS, EC2 instances can have a default hostname that ends with amazonaws.com. Let's Encrypt has a blacklist of domain names for which it will not issue certificates, and that blacklist includes the amazonaws.com domain, which means the default hostname cannot be used to get a certificate from Let's Encrypt.

h3(#bring-your-own). Bring your own certificates

To supply your own certificates, change the configuration like this:
SSL_MODE="bring-your-own"
{% include 'multi_host_install_custom_certificates' %}

All certificate files will be used by nginx. You may need to include intermediate certificates in your certificate files. See "the nginx documentation":http://nginx.org/en/docs/http/configuring_https_servers.html#chains for more details.

h4(#secure-tls-keys). Securing your TLS certificate keys (AWS specific) (optional)

When using @SSL_MODE=bring-your-own@, you can keep your TLS certificate keys encrypted on the server nodes. This reduces the risk of certificate leaks from node disk volume snapshots or backups.

This feature is currently implemented in AWS by providing the certificate keys' password via Amazon's "Secrets Manager":https://aws.amazon.com/es/secrets-manager/ service, and installing appropriate services on the nodes that provide this password to nginx via a file that only lives in system RAM.

If you use the installer's Terraform code, the secret and related permission cloud resources are created automatically, and you can customize the secret's name by editing @terraform/services/terraform.tfvars@ and setting its suffix in @ssl_password_secret_name_suffix@.

In @local.params@ you need to set @SSL_KEY_ENCRYPTED@ to @yes@ and change the default values for @SSL_KEY_AWS_SECRET_NAME@ and @SSL_KEY_AWS_REGION@ if necessary. Then, if your certificate key file is not yet encrypted, you can generate an encrypted version of it by running the @openssl@ command as follows:
openssl rsa -aes256 -in your.key -out your.encrypted.key
(This will ask you to type the encryption password.)

This encrypted key file, instead of the plain key file, is the one to copy to the @${CUSTOM_CERTS_DIR}@ directory.

To allow the appropriate nodes to decrypt the key file, you should set the password on Amazon Secrets Manager. There are a couple of ways this can be done:

# Through the AWS web interface, which may be the easiest; just make sure to set the secret as "plain text" instead of JSON.
# By using the AWS CLI tools, for example:
aws secretsmanager put-secret-value --secret-id pkey-pwd --secret-string "p455w0rd" --region us-east-1
Here @pkey-pwd@ must match what's set in @SSL_KEY_AWS_SECRET_NAME@, and @us-east-1@ must match what's set in @SSL_KEY_AWS_REGION@.

Note that the AWS secret should be set before running @installer.sh deploy@ to avoid any failures when trying to start the @nginx@ servers.

If you ever need to change the encryption password on a running cluster, you should first change the secret's value on AWS, and only then copy the newly encrypted key file to @${CUSTOM_CERTS_DIR}@ and re-run the deploy command.

================================================
FILE: doc/_includes/_ssl_config_single.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2(#certificates). Choose the SSL configuration (SSL_MODE)

Arvados requires an SSL certificate to work correctly. This installer supports these options:

* @self-signed@: let the installer create a self-signed certificate
* @lets-encrypt@: automatically obtain and install an SSL certificate for your hostname
* @bring-your-own@: supply your own certificate in the @certs@ directory

h3(#self-signed). Using a self-signed certificate

In the default configuration, this installer uses self-signed certificate(s):
SSL_MODE="self-signed"
This works everywhere and does not require that you have a domain name. However, after installation, users will need to "install the self-signed root certificate in the browser":#ca_root_certificate.

h3(#lets-encrypt). Using a Let's Encrypt certificate

To automatically get a valid certificate via Let's Encrypt, change the configuration like this:
SSL_MODE="lets-encrypt"
This requires that you have a "real" hostname that you control. The hostname for your Arvados cluster must be defined in @HOSTNAME_EXT@ and resolve to the public IP address of your Arvados instance, so that Let's Encrypt can validate the domain name ownership and issue the certificate.

When using AWS, EC2 instances can have a default hostname that ends with amazonaws.com. Let's Encrypt has a blacklist of domain names for which it will not issue certificates, and that blacklist includes the amazonaws.com domain, which means the default hostname cannot be used to get a certificate from Let's Encrypt.

h3(#bring-your-own). Bring your own certificate

To supply your own certificate, change the configuration like this:
SSL_MODE="bring-your-own"
Copy your certificate files to the directory specified with the variable @CUSTOM_CERTS_DIR@. The provision script will find them there. The certificate and its key need to be copied to files named after @HOSTNAME_EXT@. For example, if @HOSTNAME_EXT@ is defined as @my-arvados.example.net@, the script will look for
${CUSTOM_CERTS_DIR}/my-arvados.example.net.crt
${CUSTOM_CERTS_DIR}/my-arvados.example.net.key
All certificate files will be used by nginx. You may need to include intermediate certificates in your certificate file. See "the nginx documentation":http://nginx.org/en/docs/http/configuring_https_servers.html#chains for more details. ================================================ FILE: doc/_includes/_start_service.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} h2(#start-service). Start the service
# systemctl enable --now {{arvados_component}}
# systemctl status {{arvados_component}}
[...]
If @systemctl status@ indicates it is not running, use @journalctl@ to check logs for errors:
# journalctl --since -5min -u {{ arvados_component | split: ' ' | join: ' -u ' }}
================================================
FILE: doc/_includes/_supportedlinux.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

table(table table-bordered table-condensed).
|_. *Supported Linux Distributions*|
|AlmaLinux 10 (since 10.0)|
|AlmaLinux 9 (since 9.2)|
|AlmaLinux 8 (since 8.8)|
|Debian 12 ("bookworm")|
|Red Hat Enterprise Linux 10 (since 10.0)|
|Red Hat Enterprise Linux 9 (since 9.2)|
|Red Hat Enterprise Linux 8 (since 8.8)|
|Rocky Linux 10 (since 10.0)|
|Rocky Linux 9 (since 9.2)|
|Rocky Linux 8 (since 8.8)|
|Ubuntu 24.04 ("noble")|
|Ubuntu 22.04 ("jammy")|

================================================
FILE: doc/_includes/_tutorial_expectations.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin' %}
This tutorial assumes that you have access to "Arvados command line tools":{{ site.baseurl }}/user/getting_started/setup-cli.html, configured your "API token":{{site.baseurl}}/user/reference/api-tokens.html, and confirmed a "working environment":{{site.baseurl}}/user/getting_started/check-environment.html.
{% include 'notebox_end' %}

================================================
FILE: doc/_includes/_tutorial_expectations_workstation.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin' %}
This tutorial assumes that you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
{% include 'notebox_end' %}

================================================
FILE: doc/_includes/_tutorial_hello_cwl.liquid
================================================
#!/usr/bin/env cwl-runner
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: CC-BY-SA-3.0

cwlVersion: v1.0
class: CommandLineTool
inputs: []
outputs: []
arguments: ["echo", "hello world!"]

================================================
FILE: doc/_includes/_webring.liquid
================================================
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% assign n = 0 %}
{% assign prev = "" %}
{% assign nx = 0 %}
{% for section in site.navbar[page.navsection] %}
{% for entry in section %}
{% for item in entry[1] %}
{% assign p = site.pages[item] %}
{% if nx == 1 %}
{% if prev != "" %} Previous: {{ prev.title }} {% endif %} Next: {{ p.title }} {% assign nx = 0 %} {% assign n = 1 %} {% endif %} {% if p.url == page.url %} {% assign nx = 1 %} {% else %} {% assign prev = p %} {% endif %} {% endfor %} {% endfor %} {% endfor %} {% if n == 0 && prev != "" %}
Previous: {{ prev.title }} {% assign n = 1 %} {% endif %} ================================================ FILE: doc/_includes/_what_is_cwl.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The "Common Workflow Language (CWL)":http://commonwl.org is a multi-vendor open standard for describing analysis tools and workflows that are portable across a variety of platforms. CWL is the primary way to develop and run workflows for Arvados. Arvados supports versions "v1.0":http://commonwl.org/v1.0, "v1.1":http://commonwl.org/v1.1, and "v1.2":http://commonwl.org/v1.2 of the CWL standard. ================================================ FILE: doc/_layouts/default.html.liquid ================================================ {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% unless page.title == "Arvados | Documentation" %} Arvados {% if page.navmenu %}| {{ page.navmenu }} {% endif %} | {% endunless %}{{ page.title }} {% if site.current_version != site.latest_version %} {% endif %} {% include 'matomo_analytics' %} {% include 'google_analytics' %} {% include 'navbar_top' %} {% if page.navsection == 'top' or page.no_nav_left %} {{ content }} {% else %}
{% include 'navbar_left' %}

{{ page.title }}

{{ content }} {% include 'webring' %}
{% endif %} {% if page.no_nav_left %} {% else %}

The content of this documentation is licensed under the Creative Commons Attribution-Share Alike 3.0 United States license.
Code samples in this documentation are licensed under the Apache License, Version 2.0.

{% endif %}

================================================
FILE: doc/admin/cloudtest.html.textile.liquid
================================================
---
layout: default
navsection: admin
title: Testing cloud configuration
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

The @arvados-server@ package includes a @cloudtest@ tool that checks compatibility between your Arvados configuration, your cloud driver, your cloud provider's API, your cloud provider's VM instances, and the worker image you use with the "cloud dispatcher":../install/crunch2-cloud/install-dispatch-cloud.html.

@arvados-server cloudtest@ performs the following steps:

# Create a new instance
# Wait for it to finish booting
# Run a shell command on the new instance (optional)
# Pause while you log in to the new instance and do other tests yourself (optional)
# Shut down the instance

This is an easy way to expose problems like these:

* Configured cloud credentials don't work
* Configured image types don't work
* Configured driver is not compatible with your cloud API/region
* Newly created instances are not usable due to a network problem or misconfiguration
* Newly created instances do not accept the configured SSH private key
* Selected machine image does not boot properly
* Selected machine image is incompatible with some instance types
* Driver has bugs

h2. Typical uses

Before bringing up the @arvados-dispatch-cloud@ service for the first time, we recommend running @cloudtest@ to check your configuration:
$ arvados-server cloudtest -command "crunch-run --list"
Before updating your configuration to use a new VM image, we recommend running @cloudtest@ with the new image:
$ arvados-server cloudtest -image-id new_image_id -command "crunch-run --list"
After adding an instance type to your configuration, we recommend running @cloudtest@ with the new instance type:
$ arvados-server cloudtest -instance-type new_instance_type_name
For a full list of options, use the @-help@ flag:
$ arvados-server cloudtest -help
Usage:
  -command string
        Run an interactive shell command on the test instance when it boots
  -config file
        Site configuration file (default "/etc/arvados/config.yml")
  -destroy-existing
        Destroy any existing instances tagged with our InstanceSetID, instead of erroring out
  -image-id string
        Image ID to use when creating the test instance (if empty, use cluster config)
  -instance-set-id value
        InstanceSetID tag value to use on the test instance (default "cloudtest-user@hostname.example")
  -instance-type string
        Instance type to create (if empty, use cheapest type in config)
  -pause-before-destroy
        Prompt and wait before destroying the test instance
================================================
FILE: doc/admin/collection-managed-properties.html.textile.liquid
================================================
---
layout: default
navsection: admin
title: Configuring collection's managed properties
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Collections' managed properties allow a cluster administrator to enable some special behaviors regarding properties at creation & update times. This page describes how to enable and configure these behaviors on the API server.

h3. API Server configuration

The @Collections.ManagedProperties@ setting from the @config.yml@ file is used to enable any of the following behaviors:

h4. Pre-assigned property key & value

For every newly created collection, assign a predefined key/value pair if it isn't already passed at creation time:
Collections:
  ManagedProperties:
    foo: {Value: bar}
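A quick way to see this in action, assuming the configuration above is deployed and you have the "command line tools":{{site.baseurl}}/user/getting_started/setup-cli.html set up, is to create a collection and inspect the @properties@ field in the response, which should include @{"foo": "bar"}@:

$ arv collection create --collection '{"name": "managed properties test"}'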
h4. Original owner UUID

This behavior will assign the UUID of the user who owns the collection's containing project to the configured property key.
Collections:
  ManagedProperties:
    responsible_person_uuid: {Function: original_owner}
h4. Protected properties

If there's a need to prevent any non-admin user, including the collection's owner, from modifying a specific property, the @Protected@ attribute can be set to @true@, like so:
Collections:
  ManagedProperties:
    sample_id: {Protected: true}
This configuration won't assign a @sample_id@ property on collection creation, but if the user adds it to any collection, its value is protected from that point on. Another use case would be to protect properties that were automatically assigned by the system:
Collections:
  ManagedProperties:
    responsible_person_uuid: {Function: original_owner, Protected: true}
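For illustration, this sketch (Python SDK, placeholder collection UUID, non-admin token) attempts to overwrite a @Protected@ property; the API server should reject the update:

{% codeblock as python %}
import arvados
import arvados.errors

api = arvados.api('v1')  # assume a non-admin token in ARVADOS_API_TOKEN

try:
    api.collections().update(
        uuid='zzzzz-4zz18-xxxxxxxxxxxxxxx',  # placeholder collection UUID
        body={"collection": {"properties": {"sample_id": "tampered"}}},
    ).execute()
except arvados.errors.ApiError as e:
    print("update rejected, as expected:", e)
{% endcodeblock %}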
If missing, the @Protected@ attribute is assumed to be @false@ by default. h3. Supporting example scripts When enabling this feature, there may be pre-existing collections that won't have the managed properties just configured. The following script examples may be helpful to sync these older collections. For the following examples, we assume that the @responsible_person_uuid@ property is set as @{Function: original_owner, Protected: true}@. h4. List uuid/names of collections without @responsible_person_uuid@ property The collection's managed properties feature assigns the configured properties to newly created collections. This means that previously existing collections won't get the default properties, and if needed, they should be assigned manually. The following example script outputs a listing of collection UUIDs and names of those collections that don't include the @responsible_person_uuid@ property. {% codeblock as python %} {% include 'admin_list_collections_without_property_py' %} {% endcodeblock %} h4. Update the @responsible_person_uuid@ property from nil to X in the project hierarchy rooted at P When enabling @responsible_person_uuid@, new collections will get this property's value set to the user who owns the root project where the collection is placed, but older collections won't have the property set. The following example script allows an administrator to set the @responsible_person_uuid@ property on collections below a certain project hierarchy. {% codeblock as python %} {% include 'admin_set_property_to_collections_under_project_py' %} {% endcodeblock %} h4. Update the @responsible_person_uuid@ property from X to Y on all collections This example can be useful to change responsibility from one user to another. Please note that the following code should run with admin privileges, assuming that the managed property is @Protected@. {% codeblock as python %} {% include 'admin_update_collection_property_py' %} {% endcodeblock %} ================================================ FILE: doc/admin/collection-versioning.html.textile.liquid ================================================ --- layout: default navsection: admin title: Configuring collection versioning ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} This page describes how to enable and configure the collection versioning feature on the API server. h3. Configuration There are two configuration settings in the @Collections@ section of @config.yml@ that control this feature.
    Collections:
      # If true, enable collection versioning.
      # When a collection's preserve_version field is true or the current version
      # is older than the amount of seconds defined on PreserveVersionIfIdle,
      # a snapshot of the collection's previous state is created and linked to
      # the current collection.
      CollectionVersioning: true

      # This setting controls the auto-save aspect of collection versioning, and can be set to:
      #   0s = auto-create a new version on every update.
      #  -1s = never auto-create new versions.
      # > 0s = auto-create a new version when older than the specified number of seconds.
      PreserveVersionIfIdle: 10s
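Once versioning is enabled, past versions can be inspected programmatically. A sketch with the Python SDK, assuming the @include_old_versions@ list parameter and a placeholder collection UUID:

{% codeblock as python %}
import arvados

api = arvados.api('v1')
uuid = 'zzzzz-4zz18-xxxxxxxxxxxxxxx'  # placeholder collection UUID

# List the current and past versions of the given collection.
versions = api.collections().list(
    filters=[["current_version_uuid", "=", uuid]],
    include_old_versions=True,
    order=["version desc"],
).execute()

for v in versions['items']:
    print(v['uuid'], v['version'], v['modified_at'])
{% endcodeblock %}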
Note that if you set @CollectionVersioning@ to @false@ after it has been enabled, old versions will still be accessible, but further changes will not be versioned. h3. Using collection versioning "Discussed in the user guide":{{site.baseurl}}/user/topics/collection-versioning.html ================================================ FILE: doc/admin/config-urls.html.textile.liquid ================================================ --- layout: default navsection: installguide title: InternalURLs and ExternalURL ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The Arvados configuration is stored at @/etc/arvados/config.yml@. See the "Configuration reference":config.html for more detail. The @Services@ section lists a number of Arvados services, each with an @InternalURLs@ and/or @ExternalURL@ configuration key. This document explains the precise meaning of these configuration keys, and how they are used by the Arvados services. The @ExternalURL@ is the address where the service should be reachable by clients, both from inside and from outside the Arvados cluster. Some services do not expose an Arvados API, only Prometheus metrics. In that case, @ExternalURL@ is not used. The keys under @InternalURLs@ are the URLs through which Arvados system components can connect to one another, including the reverse proxy (e.g. Nginx) that fronts Arvados services. The exception is the @Keepstore@ service, where clients on the local network connect directly to @Keepstore.InternalURLs@ (while clients from outside networks connect to @Keepproxy.ExternalURL@). If a service is not fronted by a reverse proxy, e.g. when its endpoint only exposes Prometheus metrics, the intention is that metrics are collected directly from the endpoints defined in @InternalURLs@. Each entry in the @InternalURLs@ section may also indicate a @ListenURL@ to determine the protocol, address/interface, and port where the service process will listen, in case the desired listening address differs from the @InternalURLs@ key itself -- for example, when passing internal traffic through a reverse proxy. If the Arvados service lives behind a reverse proxy (e.g. Nginx), configuring the reverse proxy and the @InternalURLs@ and @ExternalURL@ values must be done in concert. h2. Overview
table(table table-bordered table-condensed). |_.Service |_.ExternalURL required? |_.InternalURLs required?|_.InternalURLs must be reachable from other cluster nodes?|_.Note| |railsapi |no |yes|no ^1^|InternalURLs only used by Controller| |controller |yes |yes|yes ^2,4^|InternalURLs used by reverse proxy and container shell connections| |arvados-dispatch-cloud|no |yes|no ^3^|InternalURLs only used to expose Prometheus metrics| |arvados-dispatch-lsf|no |yes|no ^3^|InternalURLs only used to expose Prometheus metrics| |container web services|yes |no |no |controller's InternalURLs are used by reverse proxy (e.g. Nginx)| |git-ssh |yes |no |no || |keepproxy |yes |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)| |keepstore |no |yes|yes |All clients connect to InternalURLs| |keep-balance |no |yes|no ^3^|InternalURLs only used to expose Prometheus metrics| |keep-web |yes |yes|yes ^5^|InternalURLs used by reverse proxy and container log API| |websocket |yes |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)| |workbench2 |yes |no|no ||
^1^ If @Controller@ runs on a different host than @RailsAPI@, the @InternalURLs@ will need to be reachable from the host that runs @Controller@. ^2^ If the reverse proxy (e.g. Nginx) does not run on the same host as the Arvados service it fronts, the @InternalURLs@ will need to be reachable from the host that runs the reverse proxy. ^3^ If the Prometheus metrics are not collected from the same machine that runs the service, the @InternalURLs@ will need to be reachable from the host that collects the metrics. ^4^ If dispatching containers to HPC (Slurm/LSF) and there are multiple @Controller@ services, they must be able to connect to one another using their InternalURLs, otherwise the "tunnel connections":{{site.baseurl}}/architecture/hpc.html enabling "container shell access":{{site.baseurl}}/install/container-shell-access.html will not work. ^5^ All URLs in @Services.WebDAV.InternalURLs@ must be reachable by all Controller services. Alternatively, each entry in @Services.Controller.InternalURLs@ must have a corresponding entry in @Services.WebDAV.InternalURLs@ with the same hostname. When @InternalURLs@ do not need to be reachable from other nodes, it is most secure to use loopback addresses as @InternalURLs@, e.g. @http://127.0.0.1:9005@. It is recommended to use a split-horizon DNS setup where the hostnames specified in @ExternalURL@ resolve to an internal IP address from inside the Arvados cluster, and a publicly routed external IP address when resolved from outside the cluster. This simplifies firewalling and provides optimally efficient traffic routing. In a cloud environment where traffic that flows via public IP addresses is charged, using split-horizon DNS can also avoid unnecessary expense. h2. Examples The remainder of this document walks through a number of examples to provide more detail. h3. Keep-balance Consider this section for the @Keep-balance@ service: {% codeblock as yaml %} Keepbalance: InternalURLs: "http://ip-10-0-1-233.internal:9005/": {} {% endcodeblock %} @Keep-balance@ has an API endpoint, but it is only used to expose "Prometheus":https://prometheus.io metrics. There is no @ExternalURL@ key because @Keep-balance@ does not have an Arvados API; no Arvados services need to connect to @Keep-balance@. The value for @InternalURLs@ tells the @Keep-balance@ service to start up and listen on port 9005, if it is started on a host where @ip-10-0-1-233.internal@ resolves to a local IP address. If @Keep-balance@ is started on a machine where the @ip-10-0-1-233.internal@ hostname does not resolve to a local IP address, it would refuse to start up, because it would not be able to find a local IP address to listen on. It is also possible to use IP addresses in @InternalURLs@, for example: {% codeblock as yaml %} Keepbalance: InternalURLs: "http://127.0.0.1:9005/": {} {% endcodeblock %} In this example, @Keep-balance@ would start up and listen on port 9005 at the @127.0.0.1@ IP address. Prometheus would only be able to access the @Keep-balance@ metrics if it could reach that IP and port, e.g. if it runs on the same machine. Finally, it is also possible to listen on all interfaces, for example: {% codeblock as yaml %} Keepbalance: InternalURLs: "http://0.0.0.0:9005/": {} {% endcodeblock %} In this case, @Keep-balance@ will listen on port 9005 on all IP addresses local to the machine. h3. 
Keepstore Consider this section for the @Keepstore@ service: {% codeblock as yaml %} Keepstore: InternalURLs: "http://keep0.ClusterID.example.com:25107": {} "http://keep1.ClusterID.example.com:25107": {} {% endcodeblock %} There is no @ExternalURL@ key because @Keepstore@ is only accessed from inside the Arvados cluster. For access from outside, all traffic goes via @Keepproxy@. When @Keepstore@ is installed on the host where @keep0.ClusterID.example.com@ resolves to a local IP address, it will listen on port 25107 on that IP address. Likewise on the @keep1.ClusterID.example.com@ host. On all other systems, @Keepstore@ will refuse to start. h3. Keepproxy Consider this section for the @Keepproxy@ service: {% codeblock as yaml %} Keepproxy: ExternalURL: https://keep.ClusterID.example.com InternalURLs: "http://localhost:25107": {} {% endcodeblock %} The @ExternalURL@ advertised is @https://keep.ClusterID.example.com@. The @Keepproxy@ service will start up on @localhost@ port 25107, however. This is possible because we also configure Nginx to terminate SSL and sit in front of the @Keepproxy@ service:
upstream keepproxy {
  server                127.0.0.1:25107;
}

server {
  listen                  443 ssl;
  server_name             keep.ClusterID.example.com;

  proxy_connect_timeout   90s;
  proxy_read_timeout      300s;
  proxy_set_header        X-Real-IP $remote_addr;
  proxy_http_version      1.1;
  proxy_request_buffering off;
  proxy_max_temp_file_size 0;

  ssl_certificate     /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key /YOUR/PATH/TO/cert.key;

  # Clients need to be able to upload blocks of data up to 64MiB in size.
  client_max_body_size    64m;

  location / {
    proxy_pass            http://keepproxy;
  }
}
When a client connects to the @Keepproxy@ service, it actually talks to Nginx, which reverse proxies the traffic to the @Keepproxy@ service. h3. API server Consider this section for the @RailsAPI@ service: {% codeblock as yaml %} RailsAPI: InternalURLs: "http://localhost:8004": {} {% endcodeblock %} There is no @ExternalURL@ defined because the @RailsAPI@ is not directly accessible and does not need to advertise a URL: all traffic to it flows via @Controller@, which is the only client that talks to it. The @RailsAPI@ service is also a Rails application, and its listening host and port are set in the @arvados-railsapi.service@ unit definition:
# systemctl cat arvados-railsapi.service
[...]
[Service]
Environment=PASSENGER_ADDRESS=localhost
Environment=PASSENGER_PORT=8004
[...]
So then, why is there a need to specify @InternalURLs@ for the @RailsAPI@ service? It is there because this is how the @Controller@ service locates the @RailsAPI@ service it should talk to. Since this connection is internal to the Arvados cluster, @Controller@ uses @InternalURLs@ to find the @RailsAPI@ endpoint. h3. Controller Consider this section for the @Controller@ service: {% codeblock as yaml %} Controller: InternalURLs: "https://ctrl-0.internal": ListenURL: "http://localhost:8003" ExternalURL: "https://ClusterID.example.com" {% endcodeblock %} The @ExternalURL@ advertised to clients is @https://ClusterID.example.com@. The @arvados-controller@ process will listen on @localhost@ port 8003. Other Arvados service processes in the cluster can connect to this specific controller instance, using the URL @https://ctrl-0.internal@. Container web service traffic at @https://*.containers.ClusterID.example.com@ is also handled by the same @arvados-controller@ process. Nginx is configured to sit in front of the @Controller@ service and terminate TLS:

# This is the port where nginx expects to contact arvados-controller.
upstream controller {
  server     localhost:8003  fail_timeout=10s;
}

server {
  # This configures the public https port that clients will actually connect to,
  # the request is reverse proxied to the upstream 'controller'

  listen       443 ssl;
  server_name  ClusterID.example.com
               ctrl-0.internal
               *.containers.ClusterID.example.com;

  ssl_certificate     /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key /YOUR/PATH/TO/cert.key;

  # Refer to the comment about this setting in the passenger (arvados
  # api server) section of your Nginx configuration.
  client_max_body_size 128m;

  location / {
    proxy_pass               http://controller;
    proxy_redirect           off;
    proxy_connect_timeout    90s;
    proxy_read_timeout       300s;
    proxy_max_temp_file_size 0;
    proxy_request_buffering  off;
    proxy_buffering          off;
    proxy_http_version       1.1;

    proxy_set_header      Host              $http_host;
    proxy_set_header      Upgrade           $http_upgrade;
    proxy_set_header      Connection        "upgrade";
    proxy_set_header      X-External-Client $external_client;
    proxy_set_header      X-Forwarded-For   $proxy_add_x_forwarded_for;
    proxy_set_header      X-Forwarded-Proto https;
    proxy_set_header      X-Real-IP         $remote_addr;
  }
}
If the host part of @ListenURL@ is ambiguous, in the sense that more than one system host is able to listen on that address (e.g., @localhost@), configure each host's startup scripts to set the environment variable @ARVADOS_SERVICE_INTERNAL_URL@ to the @InternalURLs@ key that will reach that host. In the example above, this would be @ARVADOS_SERVICE_INTERNAL_URL=https://ctrl-0.internal@. If the cluster has just a single node running all of the Arvados server processes, configuration can be simplified: {% codeblock as yaml %} Controller: InternalURLs: "http://localhost:8003": {} ExternalURL: "https://ClusterID.example.com" {% endcodeblock %} ================================================ FILE: doc/admin/config.html.textile.liquid ================================================ --- layout: default navsection: installguide title: Configuration reference ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The Arvados configuration is stored at @/etc/arvados/config.yml@. {% codeblock as yaml %} {% include 'config_default_yml' %} {% endcodeblock %} ================================================ FILE: doc/admin/controlling-container-reuse.html.textile.liquid ================================================ --- layout: default navsection: admin title: Preventing container reuse ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} Sometimes a container exits successfully but produces bad output, and re-running the workflow will cause it to reuse the bad container instead of running a new one. One way to deal with this is to re-run the entire workflow with reuse disabled. Another way is for the workflow author to tweak the input data or workflow so that on re-run it produces a distinct container request. However, for large or complex workflows, both of these options may be impractical. To prevent an individual container from being reused in later workflows, an admin can manually change the state of the bad container record from @Complete@ to @Cancelled@. The following @arv@ command demonstrates how to change a container's state to @Cancelled@, where @xxxxx-xxxxx-xxxxxxxxxxxxxxx@ is the @UUID@ of the container:
arv container update -u xxxxx-xxxxx-xxxxxxxxxxxxxxx -c '{"state":"Cancelled"}'
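The same state change can be made with the Python SDK, using an admin token (a sketch; the UUID is a placeholder):

{% codeblock as python %}
import arvados

api = arvados.api('v1')  # admin credentials required to edit container records

api.containers().update(
    uuid='xxxxx-dz642-xxxxxxxxxxxxxxx',  # placeholder container UUID
    body={"container": {"state": "Cancelled"}},
).execute()
{% endcodeblock %}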
================================================ FILE: doc/admin/diagnostics.html.textile.liquid ================================================ --- layout: default navsection: admin title: Diagnostics ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The @arvados-client diagnostics@ command exercises basic cluster functionality, and identifies some common installation and configuration problems. Especially after upgrading or reconfiguring Arvados or server/network infrastructure, it can be the quickest way to identify problems. h2. Using system privileges On a server node, it is easiest to run the diagnostics command with system privileges. The word @sudo@ here instructs the @arvados-client@ command to load @Controller.ExternalURL@ and @SystemRootToken@ from @/etc/arvados/config.yml@ and use those credentials to run tests with system privileges. When run this way, diagnostics will also include "health checks":health-checks.html.
# arvados-client sudo diagnostics
h2. Using regular user privileges On any node (server node, shell node, or a workstation outside the system network), you can also run diagnostics by setting the usual @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables. Typically this is done with a regular user account.
$ export ARVADOS_API_HOST=zzzzz.arvadosapi.com
$ export ARVADOS_API_TOKEN=xxxxxxxxxx
$ arvados-client diagnostics
h2. Internal/external client detection The diagnostics output indicates whether its client connection is categorized by the server as internal or external. If you run diagnostics automatically with cron or a monitoring tool, you can use the @-internal-client@ or @-external-client@ flag to specify how you _expect_ the client to be categorized, and the test will fail otherwise. Example:
# arvados-client sudo diagnostics -internal-client
[...]

--- cut here --- error summary ---

ERROR     60: checking internal/external client detection (11 ms): expecting internal=true external=false, but found internal=false external=true
h2(#container-options). Container-running options By default, the @diagnostics@ command builds a custom Docker image containing a copy of its own binary, and uses that image to run diagnostic checks from inside an Arvados container. This can help detect problems like lack of network connectivity between containers and Arvados cluster services. The default approach works well if the client host (i.e., the host where you invoke @arvados-client diagnostics@) meets certain conditions: * Docker is installed and working (so the diagnostics command can run @docker build@ and @docker save@). * Its hardware and kernel are similar to the cluster's compute instances (so the @arvados-client@ binary and the custom-built Docker image are compatible with the compute instances). * Network bandwidth supports uploading the Docker image (about 100 megabytes) in less than a minute. The following options provide flexibility in case the default approach is not suitable. * @-priority=0@ skips the container-running part of the diagnostics suite. * @-docker-image="hello-world"@ uses a tiny "hello world" image that is already embedded in the @arvados-client@ binary. This works even if the client host does not have any docker tools installed, and it minimizes the data transferred during the diagnostics suite. It provides less test coverage than the default option, but it will at least check that it is possible to run a container on the cluster. * @-docker-image=X@ (where @X@ is a Docker image name or a portable data hash) uses a Docker image that has already been uploaded to your Arvados cluster using @arv keep docker@. In this case the diagnostics tool will run a container with the command @echo {timestamp}@. * @-docker-image-from=NAME@ builds a custom Docker image on the fly as described above, but using the specified image as a base instead of the default @debian:slim-stable@ image. Note that the build recipe runs commands like @apt-get install [...] libfuse2 ca-certificates@ so only Debian-based base images are supported. For more flexibility, use one of the above @-docker-image=...@ options. * @-timeout=2m@ extends the time limit for each HTTP request made by the diagnostics suite, including the process of uploading a custom-built Docker image, to 2 minutes (the default HTTP request timeout is 10 seconds, and the default upload time limit is either the HTTP timeout or 1 minute, whichever is longer). h2. Example output
# arvados-client sudo diagnostics
INFO       5: running health check (same as `arvados-server check`)
INFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest
INFO      20: getting exported config from https://zzzzz.arvadosapi.com/arvados/v1/config
INFO      30: getting current user record
INFO      40: connecting to service endpoint https://keep.zzzzz.arvadosapi.com/
INFO      41: connecting to service endpoint https://*.collections.zzzzz.arvadosapi.com/
INFO      42: connecting to service endpoint https://download.zzzzz.arvadosapi.com/
INFO      43: connecting to service endpoint wss://ws.zzzzz.arvadosapi.com/websocket
INFO      44: connecting to service endpoint https://workbench.zzzzz.arvadosapi.com/
INFO      45: connecting to service endpoint https://workbench2.zzzzz.arvadosapi.com/
INFO      50: checking CORS headers at https://zzzzz.arvadosapi.com/
INFO      51: checking CORS headers at https://keep.zzzzz.arvadosapi.com/d41d8cd98f00b204e9800998ecf8427e+0
INFO      52: checking CORS headers at https://download.zzzzz.arvadosapi.com/
INFO      60: checking internal/external client detection
INFO      61: reading+writing via keep service at https://keep.zzzzz.arvadosapi.com:443/
INFO      80: finding/creating "scratch area for diagnostics" project
INFO      90: creating temporary collection
INFO     100: uploading file via webdav
INFO     110: checking WebDAV ExternalURL wildcard (https://*.collections.zzzzz.arvadosapi.com/)
INFO     120: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.zzzzz.arvadosapi.com/foo)
INFO     121: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.zzzzz.arvadosapi.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     122: downloading from webdav (https://download.zzzzz.arvadosapi.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo)
INFO     123: downloading from webdav (https://download.zzzzz.arvadosapi.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     124: downloading from webdav (https://a15a27cbc1c7d2d4a0d9e02529aaec7e-128.collections.zzzzz.arvadosapi.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     125: downloading from webdav (https://download.zzzzz.arvadosapi.com/c=zzzzz-4zz18-twitqma8mbvwydy/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     130: getting list of virtual machines
INFO     150: connecting to webshell service
INFO     160: running a container
INFO      ... container request submitted, waiting up to 10m for container to run
INFO    9990: deleting temporary collection
================================================ FILE: doc/admin/dispatch.html.textile.liquid ================================================ --- layout: default navsection: admin title: Cloud dispatcher ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The @arvados-server@ program provides subcommands for accessing the "cloud dispatcher's management API":{{site.baseurl}}/api/dispatch.html interactively. h2. List instances @arvados-server instance list@ Display a list of instances managed by the dispatcher. A placeholder @-@ in the @instance@ column indicates that an instance has been requested but the cloud provider has not yet returned a response with an instance ID. A placeholder @-@ in the @address@ column indicates that an instance has been requested but has not yet been assigned an IP address by the cloud provider. A placeholder @-@ in the @running-containers@ column indicates that the instance is not running any containers. Use the @-header@ flag to display column names.
# arvados-server instance list -header
instance	address	state	idle-behavior	config-type	provider-type	price	running-containers
i-03d59cfcfacf307ff	10.253.254.184	running	run	c5large	c5.large	0.085000	tordo-dz642-r6fz90awybvywr6
i-0df614e93e4170ae7	10.253.254.157	booting	run	t3small	t3.small	0.020800	-
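The data shown by @instance list@ comes from the dispatcher's management API, which can also be queried directly. A sketch using the Python @requests@ library, assuming the dispatcher listens at the placeholder address below and accepts your management token:

{% codeblock as python %}
import requests

# Placeholder: the arvados-dispatch-cloud InternalURLs address and your
# cluster's management token.
base = "http://localhost:9006"
token = "your_management_token_goes_here"

resp = requests.get(
    base + "/arvados/v1/dispatch/instances",
    headers={"Authorization": "Bearer " + token},
)
resp.raise_for_status()
for item in resp.json().get("items", []):
    print(item)
{% endcodeblock %}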
h2. Drain instances
# arvados-server instance drain  [instance-id ...]
Set the indicated instances' idle behavior to @drain@. Containers currently running will be allowed to continue, but when each instance becomes idle, it will be shut down. h2. Hold instances
# arvados-server instance hold  [instance-id ...]
Set the indicated instances' idle behavior to @hold@. The instances will not be shut down automatically. Containers currently running will be allowed to continue, but no new containers will be scheduled. h2. Run instances
# arvados-server instance run  [instance-id ...]
Set the indicated instances' idle behavior to @run@ (the normal behavior). When the instances become idle, they will be eligible to run new containers. They will be shut down automatically when the configured idle threshold is reached. h2. Kill instances
# arvados-server instance kill [-reason "..."]  [instance-id ...]
Shut down the indicated instances immediately, abandoning/failing any containers they are currently running. The provided reason string will appear in the dispatcher’s log. ================================================ FILE: doc/admin/federation.html.textile.liquid ================================================ --- layout: default navsection: admin title: Configuring federation ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} This page describes how to enable and configure federation capabilities between clusters. An overview of how this feature works is discussed in the "architecture section":{{site.baseurl}}/architecture/federation.html h2. Configuration To enable a cluster to communicate with other clusters, some settings need to be added to the @config.yml@ file. Federated clusters are identified by listing the cluster-to-hostname mapping in the @RemoteClusters@ section. Here is an example of the settings that should be added to the @/etc/arvados/config.yml@ file:
Clusters:
  clsr1:
    RemoteClusters:
      clsr2:
        Host: api.cluster2.example
        Proxy: true
        ActivateUsers: true
      clsr3:
        Host: api.cluster3.example
        Proxy: true
        ActivateUsers: false
Similar settings should be added to @clsr2@ & @clsr3@ hosts, so that all clusters in the federation can talk to each other. The @ActivateUsers@ setting indicates whether users from a given cluster are automatically activated or require manual activation. User activation is covered in more detail in the "user activation section":{{site.baseurl}}/admin/user-management.html. In the current example, users from @clsr2@ would be automatically activated but users from @clsr3@ would require an admin to activate the account. Note: The @Proxy:@ variable is intended for future use, and should always be set to @true@. h2(#LoginCluster). User management A federation of clusters can be configured to use a separate user database per cluster, or to delegate a central cluster to manage the database. h3. Peer federation If clusters belong to separate organizations, each cluster will have its own user database for the members of that organization. Through federation, a user from one organization can be granted access to the cluster of another organization. The admin of the second cluster can control access on an individual basis by choosing to activate or deactivate accounts from other organizations. h3. Centralized (LoginCluster) federation If all clusters belong to the same organization, and users in that organization should have access to all the clusters, user management can be simplified by setting @LoginCluster@, which designates the cluster that manages the user database used by all other clusters in the federation. To do this, choose one cluster in the federation which will be the 'login cluster'. Set the @Login.LoginCluster@ configuration value on all clusters in the federation to the cluster id of the login cluster. After setting @LoginCluster@, restart arvados-api-server and arvados-controller.
Clusters:
  clsr2:
    Login:
      LoginCluster: clsr1
The @LoginCluster@ configuration redirects all user logins to the LoginCluster, and the LoginCluster will issue API tokens which will be accepted by the federation. Users are activated or deactivated across the entire federation based on their status on the login cluster. Note: tokens issued by the login cluster need to be periodically re-validated when used on other clusters in the federation. The period between revalidation attempts is configured with @Login.RemoteTokenRefresh@. The default is 5 minutes. A longer period reduces overhead from validating tokens, but means it may take longer for other clusters to notice when a token has been revoked or a user has changed status (being activated/deactivated, admin flag changed). To migrate users of existing clusters with separate user databases to use a single LoginCluster, a script @arv-federation-migrate@ is available in @contrib/arvados-bootstrap@. h2. Groups In order for a user to see (and be able to share with) other users, the admin needs to create a "can_read" permission link from the user to either the "All users" group, or another group that grants visibility to a subset of users. In a peer federation, this means that for a user that has joined a second cluster, that user needs to be added to the "All users" group on the second cluster as well, to be able to share with other users. In a LoginCluster federation, all visibility of users to share with other users is set by the LoginCluster. It is not necessary to add users to "All users" on the other clusters. h3. Trusted clients When a cluster is configured to use a LoginCluster, the login flow goes to the LoginCluster to log in and issue a token, then returns the user to the starting workbench. In this case, you want to configure the LoginCluster to "trust" the workbench instances associated with the other clusters.
Clusters:
  clsr1:
    Login:
      TrustedClients:
        "https://workbench.cluster2.example": {}
        "https://workbench2.cluster2.example": {}
        "https://workbench.cluster3.example": {}
        "https://workbench2.cluster3.example": {}
h2. Testing Following the above example, let's suppose @clsr1@ is our "home cluster", that is to say, we use our @clsr1@ user account as our federated identity, and both @clsr2@ and @clsr3@ remote clusters are set up to allow users from @clsr1@ and to auto-activate them. The first thing to do would be to log into a remote workbench using the local user token. This can be done following these steps: 1. Log into the local workbench and get the user token 2. Visit the remote workbench specifying the local user token by URL: @https://workbench.cluster2.example?api_token=token_from_clsr1@ 3. You should now be logged into @clsr2@ with your account from @clsr1@ To further test the federation setup, you can create a collection on @clsr2@, upload some files, and copy its UUID. Next, logged into a shell node on your home cluster, you should be able to get that collection by running:
user@clsr1:~$ arv collection get --uuid clsr2-4zz18-xxxxxxxxxxxxxxx
The returned collection metadata should show the local user's UUID in the @owner_uuid@ field. This tests that the @arvados-controller@ service is proxying requests correctly. One last test may be performed to confirm that the @keepstore@ services also recognize remote cluster prefixes and proxy the requests. You can ask for the previously created collection using any of the usual tools, for example:
user@clsr1:~$ arv-get clsr2-4zz18-xxxxxxxxxxxxxxx/uploaded_file .
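The same check can also be scripted with the Python SDK (a sketch; the UUID is a placeholder): requesting a collection whose UUID carries the remote cluster prefix exercises the federation proxying.

{% codeblock as python %}
import arvados

api = arvados.api('v1')  # clsr1 credentials from the environment

# Placeholder UUID: a collection that lives on the remote cluster clsr2.
c = api.collections().get(uuid='clsr2-4zz18-xxxxxxxxxxxxxxx').execute()

# owner_uuid should show the local (clsr1) user's UUID.
print(c['owner_uuid'], c['portable_data_hash'])
{% endcodeblock %}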
================================================ FILE: doc/admin/group-management.html.textile.liquid ================================================ --- layout: default navsection: admin title: Role group management at the CLI ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} This page describes how to manage groups at the command line. You should be familiar with the "permission system":{{site.baseurl}}/api/permission-model.html . h2. Create a role group User groups are entries in the "groups" table with @"group_class": "role"@.
arv group create --group '{"name": "My new group", "group_class": "role"}'
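The equivalent operation with the Python SDK (a sketch):

{% codeblock as python %}
import arvados

api = arvados.api('v1')

group = api.groups().create(body={"group": {
    "name": "My new group",
    "group_class": "role",
}}).execute()
print(group['uuid'])
{% endcodeblock %}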
h2(#add). Add a user to a role group There are two separate permissions associated with group membership. The first link grants the user @can_manage@ permission to manage things that the group can manage. The second link grants permission for other users of the group to see that this user is part of the group.
arv link create --link '{
  "link_class": "permission",
  "name": "can_manage",
  "tail_uuid": "the_user_uuid",
  "head_uuid": "the_group_uuid"}'

arv link create --link '{
  "link_class": "permission",
  "name": "can_read",
  "tail_uuid": "the_group_uuid",
  "head_uuid": "the_user_uuid"}'
A user can also be given read-only access to a group. In that case, the first link should be created with @can_read@ instead of @can_manage@. h2. List role groups
arv group list --filters '[["group_class", "=", "role"]]'
h2. List members of a role group Use the command "jq":https://stedolan.github.io/jq/ to extract the tail_uuid of each permission link which has the user uuid.
arv link list --filters '[["link_class", "=", "permission"],
  ["head_uuid", "=", "the_group_uuid"]]' | jq .items[].tail_uuid
h2(#share-project). Share a project with a role group Members of the role group will have access to the project based on their level of access to the role group.
arv link create --link '{
  "link_class": "permission",
  "name": "can_manage",
  "tail_uuid": "the_group_uuid",
  "head_uuid": "the_project_uuid"}'
A project can also be shared read-only. In that case, the link @name@ should be @can_read@ instead of @can_manage@. h2. List things shared with the group Use the command "jq":https://stedolan.github.io/jq/ to extract the head_uuid of each permission link which has the object uuid.
arv link list --filters '[["link_class", "=", "permission"],
  ["tail_uuid", "=", "the_group_uuid"]]' | jq .items[].head_uuid
h2(#stop-sharing-project). Stop sharing a project with a group This will remove access for members of the group. The first step is to find the permission link objects. The second step is to delete them.
arv --format=uuid link list --filters '[["link_class", "=", "permission"],
  ["tail_uuid", "=", "the_group_uuid"], ["head_uuid", "=", "the_project_uuid"]]'

arv link delete --uuid each_link_uuid
h2. Remove user from a role group The first step is to find the permission link objects. The second step is to delete them.
arv --format=uuid link list --filters '[["link_class", "=", "permission"],
  ["tail_uuid", "in", ["the_user_uuid", "the_group_uuid"]],
  ["head_uuid", "in", ["the_user_uuid", "the_group_uuid"]]'

arv link delete --uuid each_link_uuid
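Both steps can be combined in a short Python SDK script (placeholder UUIDs) that finds the permission links in either direction and deletes them:

{% codeblock as python %}
import arvados

api = arvados.api('v1')
user_uuid = "the_user_uuid"    # placeholder
group_uuid = "the_group_uuid"  # placeholder

links = api.links().list(filters=[
    ["link_class", "=", "permission"],
    ["tail_uuid", "in", [user_uuid, group_uuid]],
    ["head_uuid", "in", [user_uuid, group_uuid]],
]).execute()

for link in links['items']:
    print("deleting", link['uuid'])
    api.links().delete(uuid=link['uuid']).execute()
{% endcodeblock %}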
================================================ FILE: doc/admin/health-checks.html.textile.liquid ================================================ --- layout: default navsection: admin title: Health checks ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} Health check endpoints are found at @/_health/ping@ on many Arvados services. The purpose of the health check is to offer a simple method of determining if a service can be reached and allow the service to self-report any problems, suitable for integrating into operational alert systems. To access health check endpoints, services must be configured with a "management token":management-token.html . Health check endpoints return a JSON object with the field @health@. This has a value of either @OK@ or @ERROR@. On error, it may also include a field @error@ with additional information. Examples:
{
  "health": "OK"
}
{
  "health": "ERROR"
  "error": "Inverted polarity in the warp core"
}
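For scripted monitoring, a single service's health endpoint can be probed directly. A minimal sketch using the Python @requests@ library (placeholder URL and token):

{% codeblock as python %}
import requests

resp = requests.get(
    "https://zzzzz.arvadosapi.com/_health/ping",  # placeholder URL
    headers={"Authorization": "Bearer your_management_token_goes_here"},
)
print(resp.status_code, resp.json().get("health"))
{% endcodeblock %}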
h2. Health check aggregator The service @arvados-health@ performs health checks on all configured services and returns a single value of @OK@ or @ERROR@ for the entire cluster. It exposes the endpoint @/_health/all@ . The healthcheck aggregator uses the @Services@ section of the cluster-wide @config.yml@ configuration file. h2. Health check command The @arvados-server check@ command is another way to perform the same health checks as the health check aggregator service. It does not depend on the aggregator service. If all checks pass, it writes @health check OK@ to stderr (unless the @-quiet@ flag is used) and exits 0. Otherwise, it writes error messages to stderr and exits with error status. @arvados-server check -yaml@ outputs a YAML document on stdout with additional details about each service endpoint that was checked. {% codeblock as yaml %} Checks: "arvados-api-server+http://localhost:8004/_health/ping": ClockTime: "2024-12-13T14:38:25Z" ConfigSourceSHA256: 5a2b21ce0aeeeebcaf623329871b4628772446d4684ab0f89da4a2cbc7b3f17c ConfigSourceTimestamp: "2024-12-12T11:14:06.487848-05:00" HTTPStatusCode: 200 Health: OK Response: health: OK ResponseTime: 0.051136 Server: nginx/1.26.1 + Phusion Passenger(R) 6.0.23 Version: 3.0.0 "arvados-controller+http://localhost:8003/_health/ping": ClockTime: "2024-12-13T14:38:25Z" ConfigSourceSHA256: 5a2b21ce0aeeeebcaf623329871b4628772446d4684ab0f89da4a2cbc7b3f17c ConfigSourceTimestamp: "2024-12-12T11:14:06.487848-05:00" HTTPStatusCode: 200 Health: OK Response: health: OK ResponseTime: 0.014869 Server: "" Version: 3.0.0 (go1.21.10) # ... {% endcodeblock %} ================================================ FILE: doc/admin/index.html.textile.liquid ================================================ --- layout: default navsection: admin title: "Arvados admin overview" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} This section describes how to administer an Arvados cluster. Cluster admins should already be familiar with the "Arvados architecture.":{{site.baseurl}}/architecture/index.html For instructions on installing and configuring an Arvados cluster, see the "install guide.":{{site.baseurl}}/install/index.html ================================================ FILE: doc/admin/inspect.html.textile.liquid ================================================ --- layout: default navsection: admin title: Inspecting active requests ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} Most Arvados services publish a snapshot of HTTP requests currently being serviced at @/_inspect/requests@. This can be useful for troubleshooting slow requests and understanding high server load conditions. To access snapshots, services must be configured with a "management token":management-token.html. When accessing this endpoint, prefix the management token with @"Bearer "@ and supply it in the @Authorization@ request header. In an interactive setting, use the @jq@ tool to format the JSON response.
curl -sfH "Authorization: Bearer your_management_token_goes_here" "https://0.0.0.0:25107/_inspect/requests" | jq .
table(table table-bordered table-condensed table-hover){width:40em}. |_. Component|_. Provides @/_inspect/requests@ endpoint| |arvados-api-server|| |arvados-controller|✓| |arvados-dispatch-cloud|✓| |arvados-dispatch-lsf|✓| |arvados-ws|✓| |composer|| |keepproxy|✓| |keepstore|✓| |keep-balance|✓| |keep-web|✓| |workbench2|| h2. Report fields Most fields are self-explanatory. The @Host@ field reports the virtual host specified in the incoming HTTP request. The @RemoteAddr@ field reports the source of the incoming TCP connection, which is typically a local address associated with the Nginx proxy service. The @XForwardedFor@ field reports the value of the "@X-Forwarded-For@ header":https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/X-Forwarded-For in the request. The @Elapsed@ field reports the number of seconds since the incoming HTTP request headers were received. h2. Example response
[
  {
    "RequestID": "req-1vzzj6nwrki0rd2hj08a",
    "Method": "GET",
    "Host": "tordo.arvadosapi.com",
    "URL": "/arvados/v1/groups?order=name+asc&filters=[[%22owner_uuid%22,%22%3D%22,%22zzzzz-tpzed-aaaaaaaaaaaaaaa%22],[%22group_class%22,%22in%22,[%22project%22,%22filter%22]]]",
    "RemoteAddr": "127.0.0.1:55822",
    "XForwardedFor": "192.168.0.111, 10.0.0.123",
    "Elapsed": 0.006363228
  },
  {
    "RequestID": "req-1wrof2b2wlj5s1rao4u3",
    "Method": "GET",
    "Host": "tordo.arvadosapi.com",
    "URL": "/arvados/v1/users/current",
    "RemoteAddr": "127.0.0.1:55814",
    "XForwardedFor": "192.168.0.222, 10.0.0.123",
    "Elapsed": 0.04796585
  }
]
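For automated monitoring, the endpoint can be polled programmatically. A sketch using the Python @requests@ library (placeholder URL and token) that prints requests running longer than one second, based on the fields shown above:

{% codeblock as python %}
import requests

resp = requests.get(
    "https://zzzzz.arvadosapi.com/_inspect/requests",  # placeholder URL
    headers={"Authorization": "Bearer your_management_token_goes_here"},
)
resp.raise_for_status()

for req in resp.json():
    if req["Elapsed"] > 1.0:
        print(req["RequestID"], req["Method"], req["URL"], req["Elapsed"])
{% endcodeblock %}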
================================================ FILE: doc/admin/keep-balance.html.textile.liquid ================================================ --- layout: default navsection: admin title: Balancing Keep servers ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} This page describes how to balance keepstore servers using keep-balance. Keep-balance creates new copies of under-replicated blocks, deletes excess copies of over-replicated and unreferenced blocks, and moves blocks to better positions (e.g. after adding new keepstore servers) so clients find them faster. See "the Keep-balance install docs":{{site.baseurl}}/install/install-keep-balance.html for installation instructions. h3. Data deletion The keep-balance service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash. When a block is newly written, it is protected from deletion for the duration in @BlobSigningTTL@. During this time, it cannot be trashed or deleted. If keep-balance instructs keepstore to trash a block which is older than @BlobSigningTTL@, and @BlobTrashLifetime@ is non-zero, the block will be moved to "trash". A block which is in the trash is no longer accessible by read requests, but has not yet been permanently deleted. Blocks which are in the trash may be recovered using the "untrash" API endpoint. Blocks are permanently deleted after they have been in the trash for the duration in @BlobTrashLifetime@. Keep-balance is also responsible for balancing the distribution of blocks across keepstore servers by asking servers to pull blocks from other servers (as determined by their "storage class":{{site.baseurl}}/admin/storage-classes.html and "rendezvous hashing order":{{site.baseurl}}/architecture/keep-clients.html#rendezvous). Pulling a block makes a copy. If a block is overreplicated (i.e. there are excess copies) after pulling, it will be subsequently trashed and deleted on the original server, subject to @BlobTrash@ and @BlobTrashLifetime@ settings. h3. Scanning By default, keep-balance operates periodically: it does a scan/balance operation, sleeps, and repeats. The @Collections.BalancePeriod@ value in @/etc/arvados/config.yml@ determines the interval between start times of successive scan/balance operations. If an operation takes longer than the @Collections.BalancePeriod@, the next operation will follow it immediately. If SIGUSR1 is received during an idle period between operations, the next operation will start immediately. Keep-balance can also be run with the @-once@ flag to do a single scan/balance operation and then exit. The exit code will be zero if the operation was successful. h3. Additional configuration For configuring resource usage tuning and lost block reporting, please see the @Collections.BlobMissingReport@, @Collections.BalanceCollectionBatch@, @Collections.BalanceCollectionBuffers@ options in the "default config.yml file":{{site.baseurl}}/admin/config.html. The @Collections.BalancePullLimit@ and @Collections.BalanceTrashLimit@ configuration entries determine the maximum number of pull and trash operations keep-balance will attempt to apply on each keepstore server. If both values are zero, keep-balance will operate in "dry run" mode, where all changes are computed but none are committed. h3. Limitations Keep-balance does not attempt to discover whether committed pull and trash requests ever get carried out -- only that they are accepted by the Keep services. 
If some services are full, new copies of under-replicated blocks might never get made, only repeatedly requested. ================================================ FILE: doc/admin/keep-faster-gc-s3.html.textile.liquid ================================================ --- layout: default navsection: admin title: "Faster garbage collection in S3" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} When there is a large number of unneeded blocks stored in an S3 bucket, particularly when using @PrefixLength: 0@, the speed of garbage collection can be severely limited by AWS API rate limits and Arvados's multi-step trash/delete process. The multi-step trash/delete process can be short-circuited by setting @BlobTrashLifetime@ to zero and enabling @UnsafeDelete@ on S3-backed volumes. However, on an actively used cluster such a configuration *can result in data loss* in the rare case where a given block is trashed and then rewritten soon afterward, and S3 processes the write and delete requests in the opposite order. The following steps can be used to temporarily disable writes on an S3 bucket to enable faster garbage collection without data loss or service interruption. Note that garbage collection on other S3 volumes will be temporarily disabled during this procedure. # Create a new S3 bucket and configure it as an additional volume (this step may be skipped if the configuration already has enough writable volumes that clients will still be able to write blocks while the target volume is read-only). We recommend using @PrefixLength: 3@ for the new volume because this results in a much higher rate limit for I/O and garbage collection operations compared to the default @PrefixLength: 0@. If the target volume configuration specifies @StorageClasses@, use the same values for the new volume. # Shut down the @keep-balance@ service. # Update your configuration as follows:
  Collections:
    BlobTrashLifetime: 0
    BalancePullLimit: 0
  [...]
  Volumes:
    target-volume-uuid:
      ReadOnly: true
      AllowTrashWhenReadOnly: true
      DriverParameters:
        UnsafeDelete: true
Note that @BlobTrashLifetime: 0@ instructs keepstore to delete unneeded blocks outright (bypassing the recoverable trash phase); however, in this mode it will normally not trash any blocks at all on an S3 volume due to the safety issue mentioned above, unless the volume is configured with @UnsafeDelete: true@. # Restart all @keepstore@ services with the updated configuration. # Start the @keep-balance@ service. # Objects will be deleted immediately instead of being first copied to trash on the S3 volume, which should significantly speed up cleanup of trashed objects. Monitor progress by watching @keep-balance@ logs and metrics. When garbage collection is complete, keep-balance logs will show an empty changeset:
zzzzz-bi6l4-0123456789abcdef (keep0.zzzzz.arvadosapi.com:25107, disk): ChangeSet{Pulls:0, Trashes:0}
# Remove the @UnsafeDelete@ configuration entry on the target volume. # Remove the @BlobTrashLifetime@ configuration entry (or restore it to its previous value). # If the target volume has @PrefixLength: 0@ and the new volume has @PrefixLength: 3@, skip the next two steps: new data will be stored on the new volume, some existing data will be moved automatically to other volumes, and some will be left on the target volume as long as it's needed. # If you want to resume writing new data to the target volume, revert to @ReadOnly: false@ and @AllowTrashWhenReadOnly: false@ on the target volume. # If you want to stop writing new data to the newly created volume, set @ReadOnly: true@ and @AllowTrashWhenReadOnly: true@ on the new volume. # Remove the @BalancePullLimit@ configuration entry (or restore its previous value), and restart @keep-balance@. # Restart all @keepstore@ services with the updated configuration. ================================================ FILE: doc/admin/keep-measuring-deduplication.html.textile.liquid ================================================ --- layout: default navsection: admin title: "Measuring deduplication" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The @arvados-client@ tool can be used to generate a deduplication report across an arbitrary number of collections. It can be installed from packages (@apt install arvados-client@ or @dnf install arvados-client@). h2(#syntax). Syntax
~$ arvados-client deduplication-report -h
Usage:
  arvados-client deduplication-report [options ...] <collection-uuid> <collection-uuid> ...

  arvados-client deduplication-report [options ...] <collection-pdh>,<collection-uuid> \
     <collection-pdh>,<collection-uuid> ...

  This program analyzes the overlap in blocks used by 2 or more collections. It
  prints a deduplication report that shows the nominal space used by the
  collections, as well as the actual size and the amount of space that is saved
  by Keep's deduplication.

  The list of collections may be provided in two ways. A list of collection
  uuids is sufficient. Alternatively, the PDH for each collection may also be
  provided. This will greatly speed up operation when the list contains
  multiple collections with the same PDH.

  Exit status will be zero if there were no errors generating the report.

Example:

  Use the 'arv' and 'jq' commands to get the list of the 100
  largest collections and generate the deduplication report:

  arv collection list --order 'file_size_total desc' --limit 100 | \
    jq -r '.items[] | [.portable_data_hash,.uuid] |@csv' | \
    sed -e 's/"//g'|tr '\n' ' ' | \
    xargs arvados-client deduplication-report

Options:
  -log-level string
      logging level (debug, info, ...) (default "info")

The usual environment variables (@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@) need to be set for the deduplication report to be generated. To get cluster-wide results, an admin token will need to be supplied. Users can also run this report, but only collections their token is able to read will be included. Example output (with uuids and portable data hashes obscured) from a small Arvados cluster:
~$ arv collection list --order 'file_size_total desc' --limit 10 | jq -r '.items[] | [.portable_data_hash,.uuid] |@csv' |sed -e 's/"//g'|tr '\n' ' ' |xargs arvados-client deduplication-report
Collection _____-_____-_______________: pdh ________________________________+5003343; nominal size 7382073267640 (6.7 TiB); file count 2796
Collection _____-_____-_______________: pdh ________________________________+4961919; nominal size 6989909625775 (6.4 TiB); file count 5592
Collection _____-_____-_______________: pdh ________________________________+1903643; nominal size 2677933564052 (2.4 TiB); file count 2796
Collection _____-_____-_______________: pdh ________________________________+1903643; nominal size 2677933564052 (2.4 TiB); file count 2796
Collection _____-_____-_______________: pdh ________________________________+137710; nominal size 191858151583 (179 GiB); file count 201
Collection _____-_____-_______________: pdh ________________________________+137636; nominal size 191858101962 (179 GiB); file count 200
Collection _____-_____-_______________: pdh ________________________________+135350; nominal size 191715427388 (178 GiB); file count 201
Collection _____-_____-_______________: pdh ________________________________+135276; nominal size 191715384167 (178 GiB); file count 200
Collection _____-_____-_______________: pdh ________________________________+135350; nominal size 191707276684 (178 GiB); file count 201
Collection _____-_____-_______________: pdh ________________________________+135276; nominal size 191707233463 (178 GiB); file count 200

Collections:                              10
Nominal size of stored data:  20878411596766 bytes (19 TiB)
Actual size of stored data:   17053104444050 bytes (16 TiB)
Saved by Keep deduplication:   3825307152716 bytes (3.5 TiB)


================================================ FILE: doc/admin/keep-recovering-data.html.textile.liquid ================================================ --- layout: default navsection: admin title: "Recovering data" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} Arvados has several features to prevent accidental loss or deletion of data, but accidents can happen. This page lays out the options to recover deleted or overwritten collections. For more detail on the data lifecycle in Arvados, see the "Data lifecycle":{{ site.baseurl }}/architecture/keep-data-lifecycle.html page. h2(#check_the_trash). Check the trash When a collection is deleted, it is moved to the trash. It will remain there for the duration of @Collections.DefaultTrashLifetime@, and it can be untrashed via workbench or with the cli tools, as described in "Recovering trashed collections":{{ site.baseurl }}/user/tutorials/tutorial-keep-collection-lifecycle.html#trash-recovery. h2(#check_other_collections). Check for other collections with the same PDH Multiple collections may share a _portable data hash_, i.e. have the same contents. If another collection exists with the same portable data hash, recovering data is not necessary; everything is still stored in Keep. A new copy of the collection can be made to make the data available in the correct project and with the correct permissions. h2(#check_collection_versioning). Consider collection versioning Arvados supports collection versioning. If it has not been "disabled":{{ site.baseurl }}/admin/collection-versioning.html on your cluster, the deleted collection may be recoverable from an older version. See "Using collection versioning":{{ site.baseurl }}/user/topics/collection-versioning.html for details. h2(#recover_collection). Recovering collections When all the above options fail, it may still be possible to recover a collection that has been deleted. To recover a collection, the manifest is required. Arvados has a built-in audit log, which consists of a row added to the "logs" table in the PostgreSQL database each time an Arvados object is created, modified, or deleted. Collection manifests are included, unless they are listed in the @AuditLogs.UnloggedAttributes@ configuration parameter. The audit log is retained for up to @AuditLogs.MaxAge@. In some cases, it is possible to recover files that have been lost by modifying or deleting a collection. The possibility of recovery depends on many factors, including: * Whether the collection manifest is still available, e.g., in an audit log entry * Whether the data blocks are also referenced by other collections * Whether the data blocks have been unreferenced long enough to be marked for deletion/trash by keep-balance * Blob signature TTL, trash lifetime, trash check interval, and other config settings To attempt recovery of a previous version of a deleted/modified collection, use the @arvados-server recover-collection@ command. It should be run on one of your server nodes where the @arvados-server@ package is installed and the @/etc/arvados/config.yml@ file is up to date. Specify the collection you want to recover by passing either the UUID of an audit log entry, or a file containing the manifest. If recovery is successful, the @recover-collection@ program saves the recovered data to a new collection belonging to the system user, and prints the new collection's UUID on stdout.
# arvados-server recover-collection 9tee4-57u5n-nb5awmk1pahac2t
INFO[2020-06-05T19:52:29.557761245Z] loaded log entry                              logged_event_time="2020-06-05 16:48:01.438791 +0000 UTC" logged_event_type=update old_collection_uuid=9tee4-4zz18-1ex26g95epmgw5w src=9tee4-57u5n-nb5awmk1pahac2t
INFO[2020-06-05T19:52:29.642145127Z] recovery succeeded                            UUID=9tee4-4zz18-5trfp4k4xxg97f1 src=9tee4-57u5n-nb5awmk1pahac2t
9tee4-4zz18-5trfp4k4xxg97f1
INFO[2020-06-05T19:52:29.644699436Z] exiting
In this example, the original data has been restored and saved in a new collection with UUID @9tee4-4zz18-5trfp4k4xxg97f1@. For more options, run @arvados-server recover-collection -help@.

h2(#untrashing_lost_blocks). Untrashing lost blocks

In some cases it is possible to recover data blocks that were trashed erroneously by @keep-balance@ (e.g. due to an install/config error).

If you suspect blocks have been trashed erroneously, you should immediately:
* On all keepstore servers: set @BlobTrashCheckInterval@ to a long time like 2400h
* On all keepstore servers: restart keepstore
* Stop the keep-balance service

When you think you have corrected the underlying problem, you should:
* Set @Collections.BlobMissingReport@ to a suitable value (perhaps "/tmp/keep-balance-lost-blocks.txt").
* Start @keep-balance@
* After @keep-balance@ completes its first sweep, inspect /tmp/keep-balance-lost-blocks.txt. If it's not empty, you can request all keepstores to untrash any blocks that are still recoverable with a script like this:

#!/bin/bash
set -e

# see Client.AuthToken in /etc/arvados/keep-balance/keep-balance.yml
token=xxxxxxx-your-system-auth-token-xxxxxxx

# all keep server hostnames
hosts=(keep0 keep1 keep2 keep3 keep4 keep5)

while read hash pdhs; do
  echo "${hash}"
  for h in "${hosts[@]}"; do
    if curl -fgs -H "Authorization: Bearer $token" -X PUT "http://${h}:25107/untrash/$hash"; then
      echo "${hash} ok ${h}"
    fi
  done
done < /tmp/keep-balance-lost-blocks.txt
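For reference, both settings mentioned above live in the @Collections@ section of the cluster configuration. A minimal sketch of the relevant part of @config.yml@ (the cluster ID and report path are placeholders):

Clusters:
  ClusterID:
    Collections:
      # long interval so keepstore does not delete trashed blocks in the meantime
      BlobTrashCheckInterval: 2400h
      # lost-block report written by keep-balance on its next sweep
      BlobMissingReport: /tmp/keep-balance-lost-blocks.txt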

Any blocks which were successfully untrashed can be removed from the list of blocks and collections which need to be recovered.

h2(#regenerating_lost_blocks). Regenerating lost blocks

For blocks which were trashed long enough ago that they've been deleted, it may be possible to regenerate them by rerunning the workflows which generated them. To do this, the process is:
* Delete the affected collections so that job reuse doesn't attempt to reuse them (it's likely that if one block is missing, they all are, so they're unlikely to contain any useful data)
* Resubmit any container requests for which you want the output collections regenerated

The Arvados repository contains a tool that can be used to generate a report to help with this task at "arvados/tools/keep-xref/keep-xref.py":https://github.com/arvados/arvados/blob/main/tools/keep-xref/keep-xref.py

================================================ FILE: doc/admin/link-accounts.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: "Link user accounts"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

If a user logs in to Arvados with a different upstream account or provider than they used previously, they may end up with two Arvados user accounts. If the user still has the ability to log in with the old account, they can use the "self-serve account linking":{{site.baseurl}}/user/topics/link-accounts.html feature of workbench. However, if the user does not have the ability to log in with both upstream accounts, the admin can also link the accounts using the command line.

bq. NOTE: self-serve account linking is currently not supported on LoginCluster federations and needs to be performed manually by the site admin.

h3. Step 1: Determine user uuids

User uuids can be determined by browsing workbench or using @arv user list@ at the command line.

Account linking works by recording in the database that a login to the "old" account should be redirected and treated as a login to the "new" account. The "old" account is the Arvados account that will be redirected. The "new" account is the user that the "old" account is redirected to. As part of account linking, any Arvados records owned by the "old" account are also transferred to the "new" account.

Counter-intuitively, if you do not want the account uuid of the user to change, the "new" account should be the pre-existing account, and the "old" account should be the redundant second account that was more recently created. This means "old" and "new" are opposite from their expected chronological meaning. In this case, the use of "old" and "new" reflects the direction of transfer of ownership -- the login was associated with the "old" user account, but will be associated with the "new" user account.

In the example below, @zzzzz-tpzed-3kz0nwtjehhl0u4@ is the "old" account (the pre-existing account we want to keep) and @zzzzz-tpzed-fr97h9t4m5jffxs@ is the "new" account (the redundant account we want to merge into the existing account).

h3. Step 2: Create a project

Create a project owned by the "new" account that will hold any data owned by the "old" account.
$ arv --format=uuid group create --group '{"group_class": "project", "name": "Data from old user", "owner_uuid": "zzzzz-tpzed-fr97h9t4m5jffxs"}'
zzzzz-j7d0g-mczqiguhil13083
h3. Step 3: Merge "old" user to "new" user

The @user merge@ method redirects login and reassigns data from the "old" account to the "new" account.
$ arv user merge  --redirect-to-new-user \
  --old-user-uuid=zzzzz-tpzed-3kz0nwtjehhl0u4 \
  --new-user-uuid=zzzzz-tpzed-fr97h9t4m5jffxs \
  --new-owner-uuid=zzzzz-j7d0g-mczqiguhil13083
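If the merge succeeded, the "old" user record should now carry a @redirect_to_user_uuid@ pointing at the "new" account. A quick way to confirm — a sketch, assuming the CLI's default JSON output:

$ arv user get --uuid zzzzz-tpzed-3kz0nwtjehhl0u4 | jq .redirect_to_user_uuid
"zzzzz-tpzed-fr97h9t4m5jffxs"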
Note that authorization credentials (API tokens, ssh keys) are also transferred to the "new" account, so credentials used to access the "old" account work with the "new" account.

================================================ FILE: doc/admin/logging.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Logging
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Most Arvados services write JSON-format structured logs to stderr, which can be parsed by any operational tools that support JSON.

h2. Request ids

In a distributed system with several services working together, it can be difficult to find the root cause of an error, because a single client request usually results in several different requests to more than one service. To deal with this, Arvados creates a request ID that is carried across the different services as the requests take place. This ID has a specific format: the prefix "@req-@" followed by 20 random alphanumeric characters:
req-frdyrcgdh4rau1ajiq5q
This ID gets propagated via an HTTP @X-Request-Id@ header, and gets logged on every service.

h3. API Server error reporting and logging

In addition to providing the request ID on every HTTP response, the API Server adds it to every error message so that all clients show enough information to the user to be able to track a particular issue. As an example, let's suppose that we get the following error when trying to create a collection using the CLI tools:
$ arv collection create --collection '{}'
Error: # (req-ku5ct9ehw0y71f1c5p79)
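Clients can also supply their own request ID, which makes it easy to correlate a known operation with the server logs. A sketch — assuming the services reuse a client-supplied @X-Request-Id@ header, with placeholder credentials:

$ curl -s -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
       -H "X-Request-Id: req-mycustomdebugid12345" \
       "https://$ARVADOS_API_HOST/arvados/v1/collections?limit=0"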
The API Server logs every request in JSON format in the @production.log@ file (usually under @/var/www/arvados-api/current/log/@ when installing from packages), so we can retrieve more information about this request using the @grep@ and @jq@ tools:
# grep req-ku5ct9ehw0y71f1c5p79 /var/www/arvados-api/current/log/production.log | jq .
{
  "method": "POST",
  "path": "/arvados/v1/collections",
  "format": "json",
  "controller": "Arvados::V1::CollectionsController",
  "action": "create",
  "status": 422,
  "duration": 1.52,
  "view": 0.25,
  "db": 0,
  "request_id": "req-ku5ct9ehw0y71f1c5p79",
  "client_ipaddr": "127.0.0.1",
  "client_auth": "zzzzz-gj3su-jllemyj9v3s5emu",
  "exception": "#",
  "exception_backtrace": "/var/www/arvados-api/current/app/controllers/arvados/v1/collections_controller.rb:43:in `create'\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/basic_implicit_render.rb:4:in `send_action'\n ...[snipped]",
  "params": {
    "collection": "{}",
    "_profile": "true",
    "cluster_id": "",
    "collection_given": "true",
    "ensure_unique_name": "false",
    "help": "false"
  },
  "@timestamp": "2019-07-15T16:40:41.726634182Z",
  "@version": "1",
  "message": "[422] POST /arvados/v1/collections (Arvados::V1::CollectionsController#create)"
}
When logging a request that produced an error, the API Server adds @exception@ and @exception_backtrace@ keys to the JSON log. The latter includes the complete error stack trace as a string, and can be displayed in a more readable form like so:
# grep req-ku5ct9ehw0y71f1c5p79 /var/www/arvados-api/current/log/production.log | jq -r .exception_backtrace
/var/www/arvados-api/current/app/controllers/arvados/v1/collections_controller.rb:43:in `create'
/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/basic_implicit_render.rb:4:in `send_action'
/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/abstract_controller/base.rb:188:in `process_action'
/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/rendering.rb:30:in `process_action'
/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/abstract_controller/callbacks.rb:20:in `block in process_action'
/var/lib/gems/ruby/2.3.0/gems/activesupport-5.0.7.2/lib/active_support/callbacks.rb:126:in `call'
...
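The same JSON structure makes broader queries possible when no request ID is known in advance. For example, to list the request IDs of recent failed requests — a sketch; @fromjson?@ makes @jq@ skip any non-JSON lines in the log:

$ jq -R 'fromjson? | select(.status >= 400) | {request_id, status, path}' \
    /var/www/arvados-api/current/log/production.log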
================================================ FILE: doc/admin/logs-table-management.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: "Logs table management"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

This page aims to provide insight about managing the API Server's ever-growing logs table.

h3. Logs table purpose & behavior

This database table is accessed via "the @logs@ endpoint.":../api/methods/logs.html

This table currently serves several purposes:
* Audit logging, permitting admins and users to look up the time and details of past changes to Arvados objects.
* Logging other system events, specifically "file uploads and downloads from keep-web.":restricting-upload-download.html#audit_logs
* The source for cache-invalidation events, published through websockets to Workbench to refresh the view. It can also be monitored by the Python SDK "events module.":../sdk/python/events.html
* Prior to Arvados 2.7, it was used as a staging area for stdout/stderr text coming from users' containers, permitting users to see what their containers are doing while they are still running (i.e., before those text files are written to Keep). Starting with Arvados 2.7, this is superseded by a more efficient mechanism, so these logs are disabled by default. See "2.7.0 upgrade notes":upgrading.html#v2_7_0 for details.

As a result, this table grows indefinitely, even on sites where policy does not require an audit log, making backups, migrations, and upgrades unnecessarily slow and painful.

h3. Configuration

To solve the problem mentioned above, the @AuditLogs@ section of @config.yml@ offers several options to limit the amount of log information stored on the table:
    AuditLogs:
      # Time to keep audit logs. (An audit log is a row added
      # to the "logs" table in the PostgreSQL database each time an
      # Arvados object is created, modified, or deleted.)
      #
      # Currently, websocket event notifications rely on audit logs, so
      # this should not be set lower than 5 minutes.
      MaxAge: 336h

      # Maximum number of log rows to delete in a single SQL
      # transaction. This limit prevents surprises and bad database
      # behavior, especially the first time the cleanup job runs on
      # an existing cluster with a huge backlog.
      #
      # If MaxDeleteBatch is 0, log entries will never be
      # deleted by Arvados. Cleanup can be done by an external process
      # without affecting any Arvados system processes, as long as very
      # recent (<5 minutes old) logs are not deleted.
      #
      # 100000 is a reasonable batch size for most sites.
      MaxDeleteBatch: 0

      # Attributes to suppress in events and audit logs.  Notably,
      # specifying {"manifest_text": {}} here typically makes the database
      # smaller and faster.
      #
      # Warning: Using any non-empty value here can have undesirable side
      # effects for any client or component that relies on event logs.
      # Use at your own risk.
      UnloggedAttributes: {}
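If @MaxDeleteBatch@ is left at @0@ and cleanup is delegated to an external process, batched deletion can be performed directly in PostgreSQL, honoring the caveats in the comments above. A sketch — assuming direct database access, a database named @arvados_production@, and the default 336h retention:

$ psql arvados_production -c "DELETE FROM logs WHERE id IN (
    SELECT id FROM logs WHERE created_at < now() - interval '336 hours'
    ORDER BY id LIMIT 100000)"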
h3. Additional consideration

Depending on the local installation's audit requirements, the cluster admins should plan for an external backup procedure before enabling this feature, as this information is not replicated anywhere else.

================================================ FILE: doc/admin/maintenance-and-upgrading.html.textile.liquid ================================================
---
layout: default
navsection: installguide
title: Maintenance and upgrading
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Commercial support":#commercial_support
# "Maintaining Arvados":#maintaining
## "Modification of the config.yml file":#configchange
## "Distributing the configuration file":#distribution
## "Restart the services affected by the change":#restart
# "Upgrading Arvados":#upgrading

h2(#commercial_support). Commercial support

Arvados is "100% open source software":{{site.baseurl}}/user/copying/copying.html. Anyone can download, install, maintain and upgrade it. However, if this is not something you want to spend your time and energy doing, "Curii Corporation":https://curii.com provides managed Arvados installations as well as commercial support for Arvados. Please contact "info@curii.com":mailto:info@curii.com for more information.

If you'd prefer to do things yourself, a few starting points for maintaining and upgrading Arvados can be found below.

h2(#maintaining). Maintaining Arvados

After Arvados is installed, periodic configuration changes may be required to adapt the software to your needs. Arvados uses a unified configuration file, which is normally found at @/etc/arvados/config.yml@.

Making a configuration change to Arvados typically involves three steps:
* modification of the @config.yml@ file
* distribution of the modified file to the machines in the cluster
* restarting of the services affected by the change

h3(#configchange). Modification of the @config.yml@ file

Consult the "configuration reference":{{site.baseurl}}/admin/config.html or another part of the documentation to identify the change to be made. Preserve a copy of your existing configuration file as a backup, and make the desired modification. Run @arvados-server config-check@ to make sure the configuration file has no errors and no warnings.

h3(#distribution). Distribute the configuration file

It is very important to keep the @config.yml@ file in sync between all the Arvados system nodes, to avoid issues with services running on different versions of the configuration. We provide "installer.sh":../install/install-multi-host.html#installation to distribute config changes. You may also do your own orchestration, e.g. with @scp@, configuration management software, etc.

h3(#restart). Restart the services affected by the change

If you know which Arvados service uses the specific configuration that was modified, restart those services. When in doubt, restart all Arvados system services.

To check for services that have not restarted since the configuration file was updated, run the @arvados-server check@ command on each system node.

To test functionality and check for common problems, run the @arvados-client sudo diagnostics@ command on a system node.
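Tying the three steps together, a sketch of a manual rollout with hypothetical hostnames and @scp@-based distribution:

# verify the edited file before distributing it
$ arvados-server config-check

# copy it to every system node (hostnames are placeholders)
$ for host in api keep0 keep1 shell; do scp /etc/arvados/config.yml root@$host:/etc/arvados/config.yml; done

# restart the affected services, then confirm nothing stale is left running
$ sudo systemctl restart arvados-controller
$ arvados-server check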
h2(#upgrading). Upgrading Arvados

Upgrading Arvados typically involves the following steps:
# Consult the "upgrade notes":{{site.baseurl}}/admin/upgrading.html and the "release notes":https://arvados.org/releases/ for the release you want to upgrade to.
# Wait for the cluster to be idle and stop Arvados services.
# Make a backup of your database, as a precaution.
# Update the configuration file for the new release, if necessary (see "Maintaining Arvados":#maintaining above).
# Update compute nodes:
## (cloud) Rebuild and deploy the "compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html
## (slurm/LSF) Upgrade the @python3-arvados-fuse@ package used on your compute nodes
# Install new packages using @apt upgrade@ or @dnf upgrade@.
# Wait for package installation scripts as they perform any necessary data migrations.
# Run @arvados-server config-check@ to detect configuration errors or deprecated entries.
# Verify that the Arvados services were restarted as part of the package upgrades.
# Run @arvados-server check@ to detect services that did not restart properly.
# Run @arvados-client sudo diagnostics@ to test functionality.

================================================ FILE: doc/admin/management-token.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Management token
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

To enable and collect health checks and metrics, services must be configured with a "management token": set @ManagementToken@ in the cluster configuration. This token is used to authorize access to monitoring endpoints. If @ManagementToken@ is not configured, monitoring endpoints will return the error @404 disabled@.

To access a monitoring endpoint, the requester must provide the HTTP header @Authorization: Bearer (ManagementToken)@.

h2. API server and other services

The following services support monitoring:
* API server
* controller
* keep-balance
* keepproxy
* keepstore
* keep-web
* arvados-ws

Set @ManagementToken@ in the appropriate section of @/etc/arvados/config.yml@.
Clusters:
  ClusterID:
    # Token to be included in all healthcheck requests. Disabled by default.
    # Server expects request header of the format "Authorization: Bearer xxx"
    ManagementToken: xxx
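Once configured, a quick way to verify the token is accepted is to query a service's health endpoint — a sketch, with placeholder hostname, port, and token:

$ curl -sf -H "Authorization: Bearer xxx" "https://keep0.ClusterID.example.com:25107/_health/ping"
{"health":"OK"}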
================================================ FILE: doc/admin/memory-cpu-profiling.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Memory and CPU profiling
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados system services (other than the Rails API server) have an option to provide live profiling data on an HTTP endpoint. This can be analyzed with the @go tool pprof@ program from the Go runtime to help identify memory and CPU usage issues. The @go tool pprof@ program can either connect directly to the profiling endpoint, or read a snapshot from disk.

Enable profiling by choosing a listening address and adding a @-pprof@ argument to the @EXTRA_OPTS@ environment variable in the systemd service. Example:
$ sudo systemctl edit keep-balance
### Editing /etc/systemd/system/keep-balance.service.d/override.conf
### Anything between here and the comment below will become the new contents of the file
[Service]
Environment="EXTRA_OPTS=-pprof 127.0.0.1:3333"
### Lines below this comment will be discarded
[...]
Restart the service.
$ sudo systemctl restart keep-balance
Save a snapshot of the program's active memory usage after garbage collection.
$ curl 'http://localhost:3333/debug/pprof/heap?gc=1' > /tmp/pprof.gz
The following analysis steps can be done on the server itself or on a different machine. To get the @go tool pprof@ command, install the Go runtime from OS packages or from the "Go download page":https://go.dev/doc/install.
$ sudo apt install golang
Run the @go tool pprof@ command to summarize the snapshot.
$ go tool pprof -top /tmp/pprof.gz
File: keep-balance
Build ID: edd0405c97f4235473dba21b7c7fd52c8f755cde
Type: inuse_space
Time: Nov 3, 2025 at 11:12am (EST)
Showing nodes accounting for 443.71MB, 98.02% of 452.67MB total
Dropped 35 nodes (cum <= 2.26MB)
      flat  flat%   sum%        cum   cum%
  217.54MB 48.06% 48.06%   217.54MB 48.06%  git.arvados.org/arvados.git/services/keep-balance.(*BlockState).increaseDesired (inline)
   85.46MB 18.88% 66.94%    85.46MB 18.88%  git.arvados.org/arvados.git/services/keep-balance.(*BlockStateMap).get (inline)
   66.07MB 14.60% 81.53%    76.58MB 16.92%  git.arvados.org/arvados.git/sdk/go/arvados.(*Collection).SizedDigests
   19.87MB  4.39% 85.92%    20.87MB  4.61%  github.com/lib/pq.textDecode
      13MB  2.87% 88.79%       13MB  2.87%  git.arvados.org/arvados.git/services/keep-balance.(*BlockState).addReplica (inline)
   10.51MB  2.32% 91.12%    10.51MB  2.32%  bytes.genSplit
   10.25MB  2.26% 93.38%    10.25MB  2.26%  github.com/lib/pq.(*conn).recvMessage
    7.50MB  1.66% 95.04%    53.63MB 11.85%  git.arvados.org/arvados.git/services/keep-balance.EachCollection
       5MB  1.10% 96.14%    13.50MB  2.98%  encoding/json.Unmarshal
    4.50MB  0.99% 97.14%     4.50MB  0.99%  encoding/json.(*scanner).pushParseState
       4MB  0.88% 98.02%        4MB  0.88%  encoding/json.(*decodeState).literalStore
         0     0% 98.02%    10.51MB  2.32%  bytes.Split (inline)
         0     0% 98.02%    31.13MB  6.88%  database/sql.(*Rows).Next
         0     0% 98.02%    31.13MB  6.88%  database/sql.(*Rows).Next.func1
         0     0% 98.02%    31.13MB  6.88%  database/sql.(*Rows).nextLocked
         0     0% 98.02%    31.13MB  6.88%  database/sql.withLock
         0     0% 98.02%        4MB  0.88%  encoding/json.(*decodeState).array
         0     0% 98.02%        4MB  0.88%  encoding/json.(*decodeState).unmarshal
         0     0% 98.02%        4MB  0.88%  encoding/json.(*decodeState).value
         0     0% 98.02%     4.50MB  0.99%  encoding/json.checkValid
         0     0% 98.02%     4.50MB  0.99%  encoding/json.stateBeginValue
         0     0% 98.02%     2.46MB  0.54%  git.arvados.org/arvados.git/lib/service.(*command).RunCommand.ifCollectionInHost.func9
         0     0% 98.02%     2.46MB  0.54%  git.arvados.org/arvados.git/sdk/go/httpserver.(*metrics).ServeAPI.RequireLiteralToken.func3
...
The @go tool pprof@ command can also connect directly to the profiling endpoint. In this mode, by default it will also save a snapshot in @$HOME/pprof/@. To connect directly to the profiling endpoint and display a sampling of CPU usage over a 2-second interval:
$ go tool pprof -top 'http://localhost:3333/debug/pprof/profile?seconds=2'
Fetching profile over HTTP from http://localhost:3333/debug/pprof/profile?seconds=2
Saved profile in /home/username/pprof/pprof.keep-balance.samples.cpu.001.pb.gz
File: keep-balance
Build ID: edd0405c97f4235473dba21b7c7fd52c8f755cde
Type: cpu
Time: Nov 3, 2025 at 11:12am (EST)
Duration: 2.19s, Total samples = 2.98s (136.17%)
Showing nodes accounting for 2.57s, 86.24% of 2.98s total
Dropped 73 nodes (cum <= 0.01s)
      flat  flat%   sum%        cum   cum%
     0.34s 11.41% 11.41%      0.36s 12.08%  runtime.findObject
     0.15s  5.03% 16.44%      0.32s 10.74%  regexp.(*Regexp).doOnePass
     0.13s  4.36% 20.81%      0.13s  4.36%  runtime.(*mspan).heapBitsSmallForAddr
     0.12s  4.03% 24.83%      0.12s  4.03%  runtime.(*gcBits).bitp (inline)
     0.11s  3.69% 28.52%      0.11s  3.69%  regexp/syntax.(*Inst).MatchRunePos
     0.08s  2.68% 31.21%      0.09s  3.02%  runtime.(*mspan).writeHeapBitsSmall
     0.07s  2.35% 33.56%      0.07s  2.35%  runtime.nextFreeFast (inline)
     0.06s  2.01% 35.57%      0.06s  2.01%  runtime.futex
     0.06s  2.01% 37.58%      0.06s  2.01%  runtime.memclrNoHeapPointers
     0.05s  1.68% 39.26%      0.05s  1.68%  indexbytebody
     0.05s  1.68% 40.94%      0.05s  1.68%  internal/runtime/syscall.Syscall6
     0.05s  1.68% 42.62%      0.43s 14.43%  runtime.mallocgc
     0.05s  1.68% 44.30%      0.05s  1.68%  runtime.memmove
     0.05s  1.68% 45.97%      0.71s 23.83%  runtime.scanobject
     0.05s  1.68% 47.65%      0.05s  1.68%  runtime.usleep
     0.04s  1.34% 48.99%      0.26s  8.72%  runtime.mallocgcSmallScanNoHeader
     0.04s  1.34% 50.34%      0.04s  1.34%  runtime.rand
     0.03s  1.01% 51.34%      0.03s  1.01%  crypto/internal/fips140/aes/gcm.gcmAesDec
     0.03s  1.01% 52.35%      0.57s 19.13%  database/sql.(*Rows).nextLocked
     0.03s  1.01% 53.36%      0.13s  4.36%  database/sql.convertAssignRows
     0.03s  1.01% 54.36%      0.56s 18.79%  git.arvados.org/arvados.git/sdk/go/arvados.(*Collection).SizedDigests
     0.03s  1.01% 55.37%      0.18s  6.04%  git.arvados.org/arvados.git/services/keep-balance.(*BlockStateMap).GetConfirmedReplication
...
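Other profile types can be fetched the same way. For example, to save a dump of all current goroutine stacks in the same format as an unrecovered panic (per the @goroutine@ entry in the list below) — a sketch, using the listening address configured above:

$ curl -s 'http://localhost:3333/debug/pprof/goroutine?debug=2' > /tmp/goroutines.txt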
@http://localhost:3333/debug/pprof/@ serves an HTML page with a list of available profiles:
* @allocs@ -- A sampling of all past memory allocations
* @block@ -- Stack traces that led to blocking on synchronization primitives
* @cmdline@ -- The command line invocation of the current program
* @goroutine@ -- Stack traces of all current goroutines. Use debug=2 as a query parameter to export in the same format as an unrecovered panic.
* @heap@ -- A sampling of memory allocations of live objects. You can specify the gc GET parameter to run GC before taking the heap sample.
* @mutex@ -- Stack traces of holders of contended mutexes
* @profile@ -- CPU profile. You can specify the duration in the seconds GET parameter. After you get the profile file, use the go tool pprof command to investigate the profile.
* @symbol@ -- Maps given program counters to function names. Counters can be specified in a GET raw query or POST body; multiple counters are separated by '+'.
* @threadcreate@ -- Stack traces that led to the creation of new OS threads
* @trace@ -- A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.

Additional resources:
* "pprof tool documentation":https://github.com/google/pprof/blob/main/doc/README.md
* "Go profiling data endpoint documentation":https://pkg.go.dev/net/http/pprof

================================================ FILE: doc/admin/metadata-vocabulary.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Metadata vocabulary
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Many Arvados objects (like collections and projects) can store metadata as properties, which in turn can be used in searches, allowing a flexible way of organizing data inside the system.

Arvados enables the site administrator to set up a formal metadata vocabulary definition, so that users can select from predefined key/value pairs of properties, offering the possibility to use different terms for the same concept in client UIs such as Workbench2.

The Controller service loads and caches the configured vocabulary file in memory at startup time, exporting it on a particular endpoint. From time to time, it'll check for updates in the local copy and refresh its cache if validation passes.

h2. Configuration

The site administrator should place the JSON vocabulary file on the same host as the controller service and set up the config file as follows:
Clusters:
  zzzzz:
    API:
      VocabularyPath: /etc/arvados/vocabulary.json
h2. Definition format

The JSON file describes the available keys and values, and whether the user is allowed to enter free text not defined by the vocabulary.

Keys and values are indexed by identifiers so that the concept of a term is preserved even if vocabulary labels are changed.

The following is an example of a vocabulary definition:

{% codeblock as json %}
{% include 'metadata_vocabulary_example' %}
{% endcodeblock %}

For clients to be able to query the vocabulary definition, a special endpoint is exposed on the @controller@ service: @/arvados/v1/vocabulary@. This endpoint doesn't require authentication and returns the vocabulary definition in JSON format.

If the @strict_tags@ flag at the root level is @true@, it will restrict the users from saving property keys other than the ones defined in the vocabulary. This restriction is enforced at the backend level to ensure consistency across different clients.

Inside the @tags@ member, IDs are defined (@IDTAGANIMALS@, @IDTAGCOMMENT@, @IDTAGIMPORTANCES@) and can have any format that the current application requires. Every key will declare at least a @labels@ list with zero or more label objects.

The @strict@ flag inside a tag definition operates the same as the @strict_tags@ root member, but at the individual tag level. When @strict@ is @true@, a tag's value options are limited to those defined by the vocabulary.

The @values@ member is optional and is used to define valid key/label pairs when applicable. In the example above, @IDTAGCOMMENT@ allows open-ended text by only defining the tag's ID and labels and leaving out @values@.

When any key or value has more than one label option, Workbench2's user interface will allow the user to select any of the options. But because only the IDs are saved in the system, when the property is displayed in the user interface, the label shown will be the first of each group defined in the vocabulary file. For example, the user could select the property key @Species@ and @Homo sapiens@ as its value, but the user interface will display it as @Animal: Human@ because those labels are the first in the vocabulary definition.

Internally, Workbench2 uses the IDs to do property based searches, so if the user searches by @Animal: Human@ or @Species: Homo sapiens@, both will return the same results.

h2. Definition validation

Because the vocabulary definition is prone to syntax or logical errors, the @controller@ service needs to do some validation before answering requests. If the vocabulary validation fails, the service won't start.

The site administrator can make sure the vocabulary file is correct before even trying to start the @controller@ service by running @arvados-server config-check@. When the vocabulary definition isn't correct, the administrator will get a list of issues like the one below:
# arvados-server config-check -config /etc/arvados/config.yml
Error loading vocabulary file "/etc/arvados/vocabulary.json" for cluster zzzzz:
duplicate JSON key "tags.IDTAGFRUITS.values.IDVALFRUITS1"
tag key "IDTAGCOMMENT" is configured as strict but doesn't provide values
tag value label "Banana" for pair ("IDTAGFRUITS":"IDVALFRUITS8") already seen on value "IDVALFRUITS4"
exit status 1
bq. NOTE: These validation checks are performed only on the node that hosts the vocabulary file defined in the configuration. As the same configuration file is shared between different nodes, the nodes that don't host the file won't produce spurious errors when running @arvados-server config-check@.

h2. Live updates

Sometimes it may be necessary to modify the vocabulary definition in a running production environment. When a change is detected, the @controller@ service will automatically attempt to load the new vocabulary and check its validity before making it active. If the new vocabulary has some issue, the last valid one will stay active. The service will export any errors on its health endpoint so that a monitoring solution can send an alert appropriately. With the above mechanisms in place, no outages should occur from making typos or other errors when updating the vocabulary file.

h2. Health status

So that the administrator can guarantee the system's metadata integrity, the @controller@ service exports a specific health endpoint for the vocabulary at @/_health/vocabulary@. As a first measure, the service won't start if the vocabulary file is incorrect. Once running, if there are updates (which may even be periodic), the service needs to keep running while notifying the operator that some fixing is in order. An example of a vocabulary health error is included below:
$ curl --silent -H "Authorization: Bearer xxxtokenxxx" https://controller/_health/vocabulary | jq .
{
  "error": "while loading vocabulary file \"/etc/arvados/vocabulary.json\": duplicate JSON key \"tags.IDTAGSIZES.values.IDVALSIZES3\"",
  "health": "ERROR"
}
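When the active vocabulary is valid, the same endpoint reports a healthy status. A sketch — assuming the response shape matches the other Arvados health endpoints:

$ curl --silent -H "Authorization: Bearer xxxtokenxxx" https://controller/_health/vocabulary | jq .
{
  "health": "OK"
}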
h2. Client support

Workbench2 currently takes advantage of this vocabulary definition by providing an easy-to-use interface for searching and applying metadata to different objects in the system. Because the definition file only resides on the @controller@ node, and Workbench2 is just a static web application run in every user's web browser, there's a mechanism in place that allows Workbench2 and any other client to request the active vocabulary. The @controller@ service provides an unauthenticated endpoint at @/arvados/v1/vocabulary@ where it exports the contents of the vocabulary JSON file:
$ curl --silent https://controller/arvados/v1/vocabulary | jq .
{
  "kind": "arvados#vocabulary",
  "strict_tags": false,
  "tags": {
    "IDTAGANIMALS": {
      "labels": [
        {
          "label": "Animal"
        },
        {
          "label": "Creature"
        }
      ],
      "strict": false,
...
}
Although vocabulary enforcement is done on the backend side, clients can use this information to provide helpful features to users, like doing ID-to-label translations, preemptive error checking, etc.

h2. Properties migration

After installing the new vocabulary definition, it may be necessary to migrate preexisting properties that were set up using literal strings. This can be a big task depending on the number of properties in the vocabulary and the number of collections and projects on the cluster.

To help with this task, we provide below an example migration script that accepts the new vocabulary definition file as input, uses the @ARVADOS_API_TOKEN@ and @ARVADOS_API_HOST@ environment variables to connect to the cluster, searches for every collection and group that has properties with labels defined in the vocabulary file, and migrates them to the corresponding identifiers.

This script will not run if the vocabulary file has duplicated labels for different keys or for different values inside a key; this is a failsafe mechanism to avoid migration errors. Please take into account that this script requires admin credentials. It also offers a @--dry-run@ flag that will report what changes are required without applying them, so it can be reviewed by an administrator. Also, take into consideration that this example script does case-sensitive matching on labels.

{% codeblock as python %}
{% include 'vocabulary_migrate_py' %}
{% endcodeblock %}

================================================ FILE: doc/admin/metrics.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Metrics
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Some Arvados services publish Prometheus/OpenMetrics-compatible metrics at @/metrics@. Metrics can help you understand how components perform under load, find performance bottlenecks, and detect and diagnose problems.

To access metrics endpoints, services must be configured with a "management token":management-token.html. When accessing a metrics endpoint, prefix the management token with @"Bearer "@ and supply it in the @Authorization@ request header.
curl -sfH "Authorization: Bearer your_management_token_goes_here" "https://0.0.0.0:25107/metrics"
The plain text export format includes "help" messages with a description of each reported metric. When configuring Prometheus, use a @bearer_token@ or @bearer_token_file@ option to authenticate requests.
scrape_configs:
  - job_name: keepstore
    bearer_token: your_management_token_goes_here
    static_configs:
    - targets:
      - "keep0.ClusterID.example.com:25107"
table(table table-bordered table-condensed table-hover).
|_. Component|_. Metrics endpoint|
|arvados-api-server|✓|
|arvados-controller|✓|
|arvados-dispatch-cloud|✓|
|arvados-dispatch-lsf|✓|
|arvados-ws|✓|
|keepproxy|✓|
|keepstore|✓|
|keep-balance|✓|
|keep-web|✓|
|workbench2||

================================================ FILE: doc/admin/migrating-providers.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Changing upstream login providers
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

When a user logs in to Arvados, their email address (as returned by the authentication provider) is used as the primary key for their Arvados account.

If you reconfigure Arvados to use a different authentication provider after some users have created accounts, you should either ensure the new provider returns the same email addresses as the old one, or update your Arvados users' @email@ attributes to match the email addresses returned by the new provider. Otherwise, the next time users log in, they will be given new accounts instead of logging in to their existing accounts.

================================================ FILE: doc/admin/reassign-ownership.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: "Reassign user data ownership"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

If a user leaves an organization and stops using their Arvados account, it may be desirable to reassign the data owned by that user to another user to maintain easy access. This is currently a command-line-based, admin-only feature.

h3. Step 1: Determine user uuids

User uuids can be determined by browsing workbench or using @arv user list@ at the command line.

The "old user" is the user that is leaving the organization. The "new user" is the user that will gain ownership of the old user's data. This includes collections, projects, container requests, workflows, and git repositories owned by the old user. It also transfers any permissions granted to the old user, to the new user.

In the example below, @x1u39-tpzed-3kz0nwtjehhl0u4@ is the old user and @x1u39-tpzed-fr97h9t4m5jffxs@ is the new user.

h3. Step 2: Create a project

Create a project owned by the new user that will hold the data from the old user.
$ arv --format=uuid group create --group '{"group_class": "project", "name": "Data from old user", "owner_uuid": "x1u39-tpzed-fr97h9t4m5jffxs"}'
x1u39-j7d0g-mczqiguhil13083
h3. Step 3: Reassign data from the old user to the new user and project

The @user merge@ method reassigns data from the old user to the new user.
$ arv user merge --old-user-uuid=x1u39-tpzed-3kz0nwtjehhl0u4 \
  --new-user-uuid=x1u39-tpzed-fr97h9t4m5jffxs \
  --new-owner-uuid=x1u39-j7d0g-mczqiguhil13083
After reassigning data, use @unsetup@ to deactivate the old user's account.
$ arv user unsetup --uuid=x1u39-tpzed-3kz0nwtjehhl0u4
Note that authorization credentials (API tokens, ssh keys) are *not* transferred to the new user, as this would potentially give the old user access to the new user's account.

================================================ FILE: doc/admin/restricting-upload-download.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Restricting upload or download
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

For some use cases, you may want to limit the ability of users to upload or download data from outside the cluster. (By "outside" we mean from networks other than the cluster's own private network.) For example, this makes it possible to share restricted data sets with users so that they may run their own data analysis on the cluster, while preventing them from easily downloading the data set to their local workstation.

This feature exists in addition to the existing Arvados permission system. Users can only download from collections they have @read@ access to, and can only upload to projects and collections they have @write@ access to.

There are two services involved in accessing data from outside the cluster.

h2. Keepproxy Permissions

Permitting @keepproxy@ makes it possible to use @arv-put@ and @arv-get@. It works in terms of individual 64 MiB keep blocks. It prints a log line each time a user uploads or downloads an individual block. Those logs are usually stored by @journald@ or @syslog@.

The default policy allows anyone to upload or download.
    Collections:
      KeepproxyPermission:
        User:
          Download: true
          Upload: true
        Admin:
          Download: true
          Upload: true
h2. WebDAV and S3 API Permissions

Permitting @WebDAV@ makes it possible to use WebDAV, the S3 API, and upload/download with Workbench 2. It works in terms of individual files. It prints a log each time a user uploads or downloads a file ("subject to throttling discussed below":#throttling). When @WebDAVLogEvents@ (default true) is enabled, it also adds an entry into the API server @logs@ table.

When a user attempts to upload or download from a service without permission, they will receive a @403 Forbidden@ response. This only applies to file content. Denying download permission does not deny access to XML file listings with PROPFIND, or auto-generated HTML documents containing file listings. Denying upload permission does not deny other operations that modify collections without directly accessing file content, such as MOVE and COPY.

The default policy allows anyone to upload or download.
    Collections:
      WebDAVPermission:
        User:
          Download: true
          Upload: true
        Admin:
          Download: true
          Upload: true
      WebDAVLogEvents: true
      WebDAVLogDownloadInterval: 30s
When a user or admin creates a sharing link, a custom scoped token is embedded in that link. This effectively allows anonymous user access to the associated data via that link. These custom scoped tokens are always treated as user tokens for the purposes of restricting download access, even when created by an admin user. In other words, when used in a sharing link, these custom scoped tokens are always subject to the value of the @WebDAVPermission/User/Download@ configuration setting. If that custom scoped token is used with @arv-get@, its use will be subject to the value of the @KeepproxyPermission/User/Download@ configuration setting.

h2. Shell node and container permissions

Be aware that even when upload and download from outside the network are not allowed, a user who has access to a shell node or runs a container still has internal access to Keep. (This is necessary to be able to run workflows.) From the shell node or container, a user could send data outside the network by some other method, although this requires more intent than accidentally clicking on a link and downloading a file. It is possible to set up a firewall to prevent shell and compute nodes from making connections to hosts outside the private network. Exactly how to configure firewalls is out of scope for this page, as it depends on the specific network infrastructure of your cluster.

h2. Choosing a policy

The distinction between WebDAV and Keepproxy is important for auditing. WebDAV records 'upload' and 'download' events on the API server that are included in the "User Activity Report":user-activity.html, whereas @keepproxy@ only logs upload and download of individual blocks, which require a reverse lookup to determine the collection(s) and file(s) a block is associated with.

You set separate permissions for @WebDAV@ and @Keepproxy@, with separate policies for regular users and admin users. These policies apply only to access from outside the cluster, using Workbench or the Arvados CLI tools.

The @WebDAVLogEvents@ option should be enabled if you intend to run the "User Activity Report":user-activity.html. If you don't need audits, or you are running a site that is mostly serving public data to anonymous downloaders, you can disable it to avoid the extra API server request.

h3. Audited downloads

For ease of access auditing, this policy prevents downloads using @arv-get@. Downloads through WebDAV and the S3 API are permitted, but logged. Uploads are allowed.
    Collections:
      WebDAVPermission:
        User:
          Download: true
          Upload: true
        Admin:
          Download: true
          Upload: true

      KeepproxyPermission:
        User:
          Download: false
          Upload: true
        Admin:
          Download: false
          Upload: true
      WebDAVLogEvents: true
h3. Disallow downloads by regular users

This policy prevents regular users (non-admin) from downloading data. Uploading is allowed. This supports the case where restricted data sets are shared with users so that they may run their own data analysis on the cluster, while preventing them from downloading the data set to their local workstation. Be aware that users won't be able to download the results of their analysis, either, requiring an admin in the loop or some other process to release results.
    Collections:
      WebDAVPermission:
        User:
          Download: false
          Upload: true
        Admin:
          Download: true
          Upload: true

      KeepproxyPermission:
        User:
          Download: false
          Upload: true
        Admin:
          Download: true
          Upload: true
      WebDAVLogEvents: true
h3. Disallow uploads by regular users

This policy is suitable for an installation where data is being shared with a group of users who are allowed to download the data, but not permitted to store their own data on the cluster.
    Collections:
      WebDAVPermission:
        User:
          Download: true
          Upload: false
        Admin:
          Download: true
          Upload: true

      KeepproxyPermission:
        User:
          Download: true
          Upload: false
        Admin:
          Download: true
          Upload: true
      WebDAVLogEvents: true
h2(#audit_log). Accessing the audit log

When @WebDAVLogEvents@ is enabled, uploads and downloads of files are logged in the Arvados audit log. These events are included in the "User Activity Report":user-activity.html. The audit log can also be accessed via the API, SDKs or command line. For example, to show the 100 most recent file downloads:
arv log list --filters '[["event_type","=","file_download"]]' -o 'created_at desc' -l 100
For uploads, use the @file_upload@ event type. Note that this only covers upload and download activity via WebDAV, S3, and Workbench 2.

The @arv-get@ and @arv-put@ tools upload via @Keepproxy@, which does not log activity to the audit log because it operates at the block level, not the file level. @Keepproxy@ records the uuid of the user that owns the token used in the request in its system logs. Those logs are usually stored by @journald@ or @syslog@. A typical log line for such a block download looks like this:
Jul 20 15:03:38 keep.xxxx1.arvadosapi.com keepproxy[63828]: {"level":"info","locator":"abcdefghijklmnopqrstuvwxyz012345+53251584","msg":"Block download","time":"2021-07-20T15:03:38.458792300Z","user_full_name":"Albert User","user_uuid":"ce8i5-tpzed-abcdefghijklmno"}
It is possible to do a reverse lookup from the locator to find all matching collections: the @manifest_text@ field of a collection lists all the block locators that are part of the collection. The @manifest_text@ field also provides the relevant filename in the collection. Because this lookup is rather involved and there is no automated tool to do it, we recommend disabling @KeepproxyPermission.User.Download@ and @KeepproxyPermission.User.Upload@ for sites where the audit log is important and @arv-get@ and @arv-put@ are not essential.

h3(#throttling). WebDAV download log throttling

If a client requests partial content past the start of a file, and a request from the same client for the same file was logged within the last time interval configured by @WebDAVLogDownloadInterval@, @keep-web@ will not write a new log. This throttling applies to both printed and API server logs. The default value of 30 seconds reduces log output when clients like @aws s3 cp@ download one file in small chunks in parallel.

Administrators can set this setting to @0@ to disable log throttling. This setting lets administrators choose how they want to balance full auditability against logging overhead: a shorter interval means more download requests are logged, with all the overhead that entails.

================================================ FILE: doc/admin/scoped-tokens.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Securing API access with scoped tokens
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

By default, Arvados API tokens grant unlimited access to a user account, and admin account tokens have unlimited access to the whole system. If you want to grant restricted access to a user account, you can create a "scoped token", which is an Arvados API token that is limited to accessing specific APIs.

One use of token scopes is to grant access to data, such as a collection, to users who do not have an Arvados account on your cluster. This is done by creating a scoped token that only allows getting a specific record. An example of this is "creating a collection sharing link.":{{site.baseurl}}/sdk/python/cookbook.html#sharing_link

Another example is situations where admin access is required but there is risk of the token being compromised. Setting a scope prevents the token from being used for any action other than the specific action the token is intended for. For example, "synchronizing user accounts on a shell node.":{{site.baseurl}}/install/install-shell-server.html#scoped-token

h2. Defining scopes

A "scope" consists of an HTTP method and an API path. A token can have multiple scopes. Token scopes act as a whitelist, and the API server checks the HTTP method and the API path of every request against the scopes of the request token.

Scopes are also described on the "API Authorization":{{site.baseurl}}/api/tokens.html#scopes page of the "API documentation":{{site.baseurl}}/api/index.html.

These examples use @/arvados/v1/collections@, but can be applied to any endpoint. Consult the "API documentation":{{site.baseurl}}/api/index.html to determine the endpoints for specific methods.

The scope @["GET", "/arvados/v1/collections"]@ will allow only GET or HEAD requests for the list of collections.
Any other HTTP method or path (including requests for a specific collection record, e.g. a request with path @/arvados/v1/collections/zzzzz-4zz18-0123456789abcde@) will return a permission error.

A trailing slash in a scope is significant. The scope @["GET", "/arvados/v1/collections/"]@ will allow only GET or HEAD requests *starting with* @/arvados/v1/collections/@. A request for an individual record path (@/arvados/v1/collections/zzzzz-4zz18-0123456789abcde@) is allowed, but a request to list collections (@/arvados/v1/collections@) will be denied because it does not end with @/@ (API requests with a trailing @/@ will have the slash stripped before the scope is checked).

The scope can include an object uuid. The scope @["GET", "/arvados/v1/collections/zzzzz-4zz18-0123456789abcde"]@ only permits requests to read the record @zzzzz-4zz18-0123456789abcde@.

Since a token can have multiple scopes, use @[["GET", "/arvados/v1/collections"], ["GET", "/arvados/v1/collections/"]]@ to allow both listing collections and fetching individual collection records. This will reject requests to create or change collections, or access any other API method.

Object create calls use the @POST@ method. A scope of @["POST", "/arvados/v1/collections"]@ will allow creating collections, but not reading, listing or updating them (or accessing anything else).

Object update calls use the @PATCH@ method. A scope of @["PATCH", "/arvados/v1/collections/"]@ will allow updating collections, but not listing or creating them. (Note: while GET requests are denied, an object can be read indirectly by using an empty PATCH, which will return the unmodified object as the result.) Similarly, you can use a scope of @["PATCH", "/arvados/v1/collections/zzzzz-4zz18-0123456789abcde"]@ to restrict updates to a single collection.

There is one special exception to the scope rules: a valid token is always allowed to issue a request to "@GET /arvados/v1/api_client_authorizations/current@":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current regardless of its scopes. This allows clients to reliably determine whether a request failed because a token is invalid, or because the token is not permitted to perform a particular request. The API server itself needs to be able to do this to validate tokens issued by other clusters in a federation.

h2. Creating a scoped token

A scoped token can be created at the command line:
$ arv api_client_authorization create --api-client-authorization '{"scopes": [["GET", "/arvados/v1/collections"], ["GET", "/arvados/v1/collections/"]]}'
{
 "kind":"arvados#apiClientAuthorization",
 "etag":"9yk144t0v6cvyp0342exoh2vq",
 "uuid":"zzzzz-gj3su-bizbsw0mx5pju3w",
 "owner_uuid":"zzzzz-tpzed-fr97h9t4m5jffxs",
 "created_at":"2020-03-12T20:36:12.517375422Z",
 "modified_by_user_uuid":null,
 "modified_at":null,
 "api_token":"5a74htnoqwkhtfo2upekpfbsg04hv7cy5v4nowf7dtpxer086m",
 "created_by_ip_address":null,
 "expires_at":null,
 "last_used_at":null,
 "last_used_by_ip_address":null,
 "scopes":[
  [
   "GET",
   "/arvados/v1/collections"
  ],
  [
   "GET",
   "/arvados/v1/collections/"
  ]
 ]
}
The response will include the @api_token@ field, which is the newly issued secret token. It can be passed directly to the API server that issued it, or can be used to construct a @v2@ token. A @v2@ format token is required if the token will be used to access other clusters in an Arvados federation. An Arvados @v2@ format token consists of three fields separated by slashes: the prefix @v2@, followed by the token uuid, followed by the token secret. For example: @v2/x1u39-gj3su-bizbsw0mx5pju3w/5a74htnoqwkhtfo2upekpfbsg04hv7cy5v4nowf7dtpxer086m@.

================================================ FILE: doc/admin/spot-instances.html.textile.liquid ================================================
---
layout: default
navsection: admin
title: Using Preemptible instances
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

This page describes how to enable preemptible instances. Preemptible instances typically offer lower cost computation with a tradeoff of lower service guarantees. If a compute node is preempted, Arvados will restart the computation on a new instance.

Currently Arvados supports preemptible instances using AWS and Azure spot instances.

h2. Configuration

First, configure some @InstanceTypes@ that have @Preemptible: true@. For a preemptible instance, @Price@ determines the maximum bid price; the actual price paid is dynamic and will likely be lower.

Typically you want to add both preemptible and non-preemptible entries for each cloud provider VM type. To do this automatically, use @PreemptiblePriceFactor@ to enable a preemptible version of each listed type, using the given factor to set the maximum bid price relative to the non-preemptible price. Alternatively, you can configure preemptible instance types explicitly. For example, the following two configurations are equivalent:
Clusters:
  ClusterID:
    Containers:
      PreemptiblePriceFactor: 0.8
    InstanceTypes:
      m4.large:
        ProviderType: m4.large
        VCPUs: 2
        RAM: 8GiB
        AddedScratch: 32GB
        Price: 0.1
Clusters:
  ClusterID:
    InstanceTypes:
      m4.large:
        ProviderType: m4.large
        VCPUs: 2
        RAM: 8GiB
        AddedScratch: 32GB
        Price: 0.1
      m4.large.preemptible:
        Preemptible: true
        ProviderType: m4.large
        VCPUs: 2
        RAM: 8GiB
        AddedScratch: 32GB
        Price: 0.08
Next, you can choose to enable automatic use of preemptible instances:
Clusters:
  ClusterID:
    Containers:
      AlwaysUsePreemptibleInstances: true
If @AlwaysUsePreemptibleInstances@ is "true", child containers (workflow steps) will always select preemptible instances, regardless of user option. If @AlwaysUsePreemptibleInstances@ is "false" (the default) or unspecified, preemptible instances are "used when requested by the user.":{{site.baseurl}}/user/cwl/cwl-run-options.html#preemptible

Note that regardless of the value of @AlwaysUsePreemptibleInstances@, the top level workflow runner container always runs in a reserved (non-preemptible) instance, to avoid situations where the workflow runner is killed, requiring the entire workflow to be restarted.

No additional configuration is required; "arvados-dispatch-cloud":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html will now start preemptible instances where appropriate.

h3. Cost Tracking

The price of a preemptible instance is declared at instance request time and is defined by the maximum price that the user is willing to pay per hour. By default, this is the same amount as the on-demand price of each instance type, and this is the setting that @arvados-dispatch-cloud@ uses for now, as it doesn't attach any pricing data to the spot instance request.

For AWS, the real price that a spot instance has at any point in time is discovered at the end of each usage hour, depending on instance demand. For this reason, AWS provides a data feed subscription to get hourly logs, as described in "Amazon's User Guide":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html.

h2. Preemptible instances on AWS

For general information, see "using Amazon EC2 spot instances":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html.

h3. Permissions

When requesting spot instances, Amazon's API may return an authorization error depending on how users and permissions are set on the account. If this is the case, check the logs for this error:
BaseHTTPError: AuthFailure.ServiceLinkedRoleCreationNotPermitted: The provided credentials do not have permission to create the service-linked role for EC2 Spot Instances.
The account needs to have the service-linked role created. This can be done by logging into the AWS account, going to _IAM Management_ → _Roles_, and creating the @AWSServiceRoleForEC2Spot@ role: click the @Create@ button, select the @EC2@ service, and choose the @EC2 - Spot Instances@ use case.

h3. Interruption notices

When running a container on a spot instance, Arvados monitors the EC2 metadata endpoint for interruption notices. When an interruption notice is received, it is reported in a log entry in the @crunch-run.txt@ file, as well as in @warning@ and @preemptionNotice@ keys in the @runtime_status@ field of the affected container.

Example excerpt from @crunch-run.txt@:
2023-02-21T21:12:42.350719824Z Cloud provider scheduled instance stop at 2023-02-21T21:14:42Z
Example @runtime_status@:
{
  "warning": "preemption notice",
  "warningDetail": "Cloud provider scheduled instance stop at 2023-02-21T21:14:42Z",
  "preemptionNotice": "Cloud provider scheduled instance stop at 2023-02-21T21:14:42Z"
}
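If you want to check a container for a preemption notice programmatically, one approach is to read @runtime_status@ through the API. Below is a minimal sketch using the Python SDK; the container UUID is a placeholder, and it assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment.

{% codeblock as python %}
import arvados

# Placeholder container UUID; substitute a real one.
container = arvados.api('v1').containers().get(
    uuid='zzzzz-dz642-xxxxxxxxxxxxxxx').execute()
status = container.get('runtime_status') or {}
if 'preemptionNotice' in status:
    print('preempted:', status['preemptionNotice'])
{% endcodeblock %}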
h2. Preemptible instances on Azure

For general information, see "Use Spot VMs in Azure":https://docs.microsoft.com/en-us/azure/virtual-machines/spot-vms.

When starting preemptible instances on Azure, Arvados configures the eviction policy to 'delete', with max price set to '-1'. This has the effect that preemptible VMs will not be evicted for pricing reasons. The price paid for the instance will be the current spot price for the VM type, up to a maximum of the price for a standard, non-spot VM of that type.

Please note that Azure provides no SLA for preemptible instances. Even in this configuration, preemptible instances can still be evicted for capacity reasons. If that happens and a container is aborted, Arvados will try to restart it, subject to the usual retry rules.

Spot pricing is not available on 'B-series' VMs; these should not be defined in the configuration file with the _Preemptible_ flag set to true.

Spot instances have a separate quota pool; make sure you have sufficient quota available.

================================================
FILE: doc/admin/storage-classes.html.textile.liquid
================================================
---
layout: default
navsection: admin
title: Configuring storage classes
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Storage classes (also known as "storage tiers") allow you to control which volumes should be used to store particular collection data blocks. This can be used to implement data storage policies such as moving data to archival storage.

In the default Arvados configuration, with no storage classes specified in the configuration file, all volumes belong to a single implicit storage class called "default". Apart from that, names of storage classes are internal to the cluster and decided by the administrator. Other than the implicit "default" class, Arvados currently does not define any standard storage class names.

To use multiple storage classes, update the @StorageClasses@ and @Volumes@ sections of your configuration file.
* Every storage class you use (including "default") must be defined in the @StorageClasses@ section.
* The @StorageClasses@ section must designate at least one default storage class with @Default: true@. When a client/user does not specify storage classes when creating a new collection, the default storage classes are used implicitly.
* If some storage classes are faster or cheaper to access than others, assign a higher @Priority@ to the faster ones. When reading data, volumes with high priority storage classes are searched first.

Example:
    StorageClasses:

      default:
        # When reading a block that is stored on multiple volumes,
        # prefer a volume with this class.
        Priority: 20

        # When a client does not specify a storage class when saving a
        # new collection, use this one.
        Default: true

      archival:
        Priority: 10

    Volumes:

      ClusterID-nyw5e-000000000000000:
        # This volume is in the "default" storage class.
        StorageClasses:
          default: true

      ClusterID-nyw5e-000000000000001:
        # This volume is in the "archival" storage class.
        StorageClasses:
          archival: true
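With the example configuration above, a user can direct new data to a particular class at upload time. A quick way to exercise the configuration (the @--storage-classes@ option takes a comma-separated list of class names; @myfile.dat@ is a placeholder):

$ arv-put --storage-classes=archival myfile.dat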
Refer to the "configuration reference":{{site.baseurl}}/admin/config.html for more details.

h3. Using storage classes

"Discussed in the user guide":{{site.baseurl}}/user/topics/storage-classes.html

h3. Storage management notes

When uploading data, if a data block cannot be uploaded to all desired storage classes, it will result in a fatal error. Data blocks will not be uploaded to volumes that do not have the desired storage class.

If you change the storage classes for a collection, the data is not moved immediately. The "keep-balance":{{site.baseurl}}/install/install-keep-balance.html service is responsible for deciding which blocks should be placed on which keepstore volumes. As part of the rebalancing behavior, it will determine where a block should go in order to satisfy the desired storage classes, and issue pull requests to copy the block from its original volume to the desired volume. The block will subsequently be moved to trash on the original volume.

If a block is assigned to multiple storage classes, the block will be stored on @desired_replication@ number of volumes for each storage class, even if that results in overreplication.

If a collection has a desired storage class which is not available in any keepstore volume, the collection's blocks will remain in place, and an error will appear in the @keep-balance@ logs.

================================================
FILE: doc/admin/token-expiration-policy.html.textile.liquid
================================================
---
layout: default
navsection: admin
title: Automatic logout and token expiration
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

When a user logs in to Workbench, they receive a newly created token (a long string of random characters) which grants access to the Arvados API on behalf of that user. In the default configuration, this token does not expire until the user explicitly logs out.

Security policies, such as those required to comply with regulations such as HIPAA and GxP, may include policies for "automatic logoff". In order to limit the window of risk associated with unauthorized access to the desktop of an Arvados user, or with a leaked token, Arvados offers options for automatic logout from the web app, and for configuring access tokens to expire by default.

The @Workbench.IdleTimeout@, @Login.TokenLifetime@, and @API.MaxTokenLifetime@ options give the administrator ways to control automatic expiration of tokens granted through the login flow. If you are looking for information on how to expire a token manually, see how to "delete a single token":user-management-cli.html#delete-token and "delete all tokens belonging to a user":user-management-cli.html#delete-all-tokens.

h2. Automatic logout

Use @Workbench.IdleTimeout@ to configure Workbench 2 for automatic logout after a period of idle time. For example, this configuration would log the user out after five minutes of no keyboard or pointer activity:
Clusters:
  zzzzz:
    ...
    Workbench:
      IdleTimeout: 5m
    ...
When idle timeout is set, several behaviors and considerations apply:

* The user will be automatically logged out after a period of inactivity. When the automatic logout happens, the token associated with that session will be revoked.
* Users should use the "open in new tab" functionality of Workbench 2. This will share the same token between tabs without requiring the user to log in again. Logging out will apply to all browser tabs that use the same token.
* If the user closes a Workbench tab without first logging out, the browser will forget the token, but not expire the token (this is desirable if the user has several tabs open).
* If the user closes all Workbench tabs, they will be required to log in again.
* This only affects browser behavior. Automatic logout should be used together with automatic token expiration, described below.

The default value for @Workbench.IdleTimeout@ is zero, which disables auto-logout.

h2. Automatic expiration of login tokens

Use @Login.TokenLifetime@ to set the lifetime for tokens issued through the login process. This is the maximum amount of time a user can maintain a session before having to log in again. This setting applies to both regular and admin user logins. Here is an example configuration that would require the user to log in again after 12 hours:
Clusters:
  zzzzz:
    ...
    Login:
      TokenLifetime: 12h
    ...
This is independent of @Workbench.IdleTimeout@. Even if Workbench auto-logout is disabled, this option will ensure that the user is always required to log in again after the configured amount of time.

The default value of @Login.TokenLifetime@ is zero, meaning login tokens do not expire (unless @API.MaxTokenLifetime@ is set).

h2. Untrusted login tokens
Clusters:
  zzzzz:
    ...
    Login:
      IssueTrustedTokens: false
    ...
When @IssueTrustedTokens@ is @false@, tokens are "untrusted" and cannot be used to list other tokens issued to the same user, nor to grant new tokens. This prevents an attacker from leveraging a leaked token to acquire other tokens, but also interferes with some Workbench features that create new tokens on behalf of the user.

h2. Automatic expiration of all tokens

Use @API.MaxTokenLifetime@ to set the maximum lifetime for any access token created by regular (non-admin) users. For example, this configuration would require that all tokens expire after 24 hours:
Clusters:
  zzzzz:
    ...
    API:
      MaxTokenLifetime: 24h
    ...
Tokens created without an explicit expiration time, or with an expiration time that exceeds the maximum lifetime, will have their expiration set to @API.MaxTokenLifetime@. Similar to @Login.TokenLifetime@, this option ensures that the user is always required to log in again after the configured amount of time.

Unlike @Login.TokenLifetime@, this applies to all API operations that manipulate tokens, regardless of whether the token was created by logging in, or by using the API. If @Login.TokenLifetime@ is greater than @API.MaxTokenLifetime@, @API.MaxTokenLifetime@ takes precedence.

Admin users are permitted to create tokens with expiration times further in the future than @MaxTokenLifetime@.

The default value of @MaxTokenLifetime@ is zero, which means there is no maximum token lifetime.

h2. Choosing a policy

@Workbench.IdleTimeout@ only affects browser behavior. It is strongly recommended that automatic browser logout be used together with @Login.TokenLifetime@, which is enforced on the API side.

@IssueTrustedTokens: true@ (the default value) is less restrictive. Be aware that an unrestricted token can be "refreshed" to gain access for an indefinite period. This means that, during the window that the token is valid, the user is permitted to create a new token, which will have a new expiration further in the future (of course, once the token has expired, this is no longer possible). Unrestricted tokens are required for some Workbench features, as well as for ease of use in other contexts, such as the Arvados command line. This option is recommended if many users will interact with the system through the command line.

@IssueTrustedTokens: false@ is more restrictive. A token obtained by logging into Workbench cannot be "refreshed" to gain access for an indefinite period. However, it interferes with some Workbench features, as well as ease of use in other contexts, such as the Arvados command line. This option is recommended only if most users will only ever interact with the system through Workbench or WebShell. With this configuration, it is still possible to "create a token at the command line":user-management-cli.html#create-token using the @SystemRootToken@.

In every case, admin users may always create tokens with expiration dates far in the future.

These policies do not apply to tokens created by the API server for the purposes of authorizing a container to run, as those tokens are automatically expired when the container is finished.

h2. Applying policy to existing tokens

If you have an existing Arvados installation and want to set a token lifetime policy, there may be long-lived user tokens already granted. The administrator can use the following @rake@ tasks to enforce the new policy.

The @db:check_long_lived_tokens@ task will list which users have tokens with no expiration date.
# bin/rake db:check_long_lived_tokens
Found 6 long-lived tokens from users:
user2,user2@example.com,zzzzz-tpzed-5vzt5wc62k46p6r
admin,admin@example.com,zzzzz-tpzed-6drplgwq9nm5cox
user1,user1@example.com,zzzzz-tpzed-ftz2tfurbpf7xox
To apply the new policy to existing tokens, use the @db:fix_long_lived_tokens@ task.
# bin/rake db:fix_long_lived_tokens
Setting token expiration to: 2020-08-25 03:30:50 +0000
6 tokens updated.
NOTE: These rake tasks adjust the expiration of all tokens except those belonging to the system root user (@zzzzz-tpzed-000000000000000@). If you have tokens used by automated service accounts that need to be long-lived, you can "create tokens that don't expire using the command line":user-management-cli.html#create-token.

================================================
FILE: doc/admin/upgrading.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: "Arvados upgrade notes"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

For Arvados administrators, this page will cover what you need to know and do in order to ensure a smooth upgrade of your Arvados installation. For general release notes covering features added and bugs fixed, see "Arvados releases":https://arvados.org/releases. Upgrade instructions can be found at "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html#upgrading.

h2. Upgrade notes

Some versions introduce changes that require special attention when upgrading: e.g., there is a new service to install, or there is a change to the default configuration that you might need to override in order to preserve the old behavior. These notes are listed below, organized by release version. Scroll down to the version number you are upgrading to.

{% comment %}
Note to developers: Add new items at the top. Include the date, issue number, commit, and considerations/instructions for those about to upgrade.

TODO: extract this information based on git commit messages and generate changelogs / release notes automatically.
{% endcomment %}
h2(#main). development main

"previous: Upgrading to 3.2.1":#v3_2_1

h3. @CUDA@ configuration in @InstanceTypes@ is no longer supported

Arvados 3.1 introduced general "GPU" instance type settings and runtime constraints to replace vendor-specific "CUDA" settings. To support new cloud dispatch features, Arvados 3.3 removes support for the old @CUDA@ configuration sections of the cluster's configured @InstanceTypes@.

Before you upgrade to Arvados 3.3, "update your cluster's configured @InstanceTypes@ with @GPU@ sections":#v3_1_0_instance_type_cuda. Make sure that each @GPU@ section has an accurate @VRAM@ setting. Without this, the cloud dispatcher may not consider your instance types suitable for GPU-accelerated workflows. You can update and test your current configuration before you start with the rest of your upgrade planning.

@cuda@ runtime constraints in container requests are still deprecated but supported in this release.

h3. Cloud dispatcher runs multiple containers per instance

The cloud dispatcher will now run multiple containers at once on an instance if it has enough RAM and VCPUs to do so _and_ the instance type is suitable for each container individually. These conditions can arise in the following cases:

* the configured @MaximumPriceFactor@ is large enough that an instance type with more than the exact number of VCPUs specified by a container is considered suitable; or
* no configured instance type has exactly the number of VCPUs specified by a container, _i.e.,_ every suitable instance type has one or more spare VCPUs available to run another container alongside it.

To disable the new behavior and ensure that each container runs on a dedicated instance even in the above cases, set the new configuration entry @Containers.MaxRunningContainersPerInstance@ to @1@, as in the sketch below.
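This is a minimal excerpt, assuming the usual top-level @Clusters@ layout; replace @ClusterID@ with your actual cluster ID:

Clusters:
  ClusterID:
    Containers:
      MaxRunningContainersPerInstance: 1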
h2(#v3_2_1). v3.2.1 (2026-03-02)

"previous: Upgrading to 3.2.0":#v3_2_0

h3. Configuration URLs are stricter about bracketed addresses

The new version of Go used to build this release has stricter URL parsing: _only_ an IPv6 address can appear inside brackets around the network location of the URL. If your cluster configuration includes URLs with brackets around DNS hostnames or IPv4 addresses, remove those brackets before upgrading.

h2(#v3_2_0). v3.2.0 (2025-11-03)

"previous: Upgrading to 3.1.2":#v3_1_2

h3. Debian 11 and Ubuntu 20.04 are no longer supported

Arvados 3.2 no longer supports some of the older distributions supported by Arvados 3.1: Debian 11 "bullseye" and Ubuntu 20.04 "focal." If you are running Arvados on any of these distributions, you must first upgrade to a supported distribution before you upgrade to Arvados 3.2. Arvados 3.1 supports Debian 12 "bookworm" and Ubuntu 22.04 "jammy." You can upgrade your Arvados cluster to one of those releases, then proceed to upgrade Arvados to 3.2.

The list of distributions supported by Arvados 3.2 can be found on the "planning and prerequisites page":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux of the install guide.

h3. RPMs now require Red Hat/AlmaLinux/Rocky Linux 8.8 or later

Our packages for Red Hat/AlmaLinux/Rocky Linux 8 now depend on appstreams in version 8.8, particularly Python 3.11. Please make sure you are running at least version 8.8 of your distribution before you upgrade to Arvados 3.2.0. If not, you should follow your distributor's instructions to upgrade from your current 8.x release to 8.8 or later before you upgrade Arvados.

h3. New GPG key URL for Red Hat, AlmaLinux, and Rocky Linux

As part of adding support for the RHEL 9 family of distributions, we have started using a new signing key for packages. For these distributions, the key corresponding to your distribution is now available at a URL that includes the release version.

Before you upgrade, on each system where you have the Arvados package repository installed, edit the file with that repository configuration, usually @/etc/yum.repos.d/arvados.repo@. Find the line that defines @gpgkey@:
[arvados]
…
gpgkey=https://rpm.arvados.org/RHEL/RPM-GPG-KEY-arvados
Edit this line to add @$releasever/@ after @RHEL/@, so it looks like this:
gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados
Then save and close the file. The old key URL still works, so this step is not required to upgrade Arvados itself. However, doing it now will help ensure you retain access to the Arvados repositories next time you upgrade your distribution.

h3. SbatchArgumentsList (SLURM) configuration semantics have changed

@Containers.SLURM.SbatchArgumentsList@ must now specify arguments that were previously added implicitly. Also, the @%@ character now invokes template behavior. If your current configuration looks like this:
SbatchArgumentsList: ["--clusters=all"]
You must update it to add the arguments that were previously added implicitly:
SbatchArgumentsList: ["--job-name=%U", "--mem=%M", "--cpus-per-task=%C", "--tmp=%T", "--constraint=instancetype=%I", "--partition=%P", "--clusters=all"]
If your configuration file does not have an @SbatchArgumentsList@ entry, you do not need to add one.

h3. @Users.SendUserSetupNotificationEmail@ is disabled by default

If you want to preserve the old default behavior of sending an email to each user when their account has been set up, update your configuration file accordingly:
Users:
  SendUserSetupNotificationEmail: true
h3. Admin container shell access is enabled by default

"Container shell access":{{ site.baseurl }}/user/debugging/container-shell-access.html by admin users is now enabled by default to make it easier to diagnose workflow issues on new deployments. If you prefer to leave it disabled, update your configuration file accordingly:
Containers:
  ShellAccess:
    Admin: false
Container shell access for non-admin users is still disabled by default.

h3. Configure ExternalURL, DNS, and TLS for container web services

Arvados now allows external clients to connect to HTTP services running in containers. To enable this feature (see the configuration sketch after this list):

* Add a @Services.ContainerWebServices.ExternalURL@ entry to @/etc/arvados/config.yml@ with a wildcard URL, e.g., @https://*.containers.ClusterID.example.com/@
* Add the wildcard name to the @server_name@ directive in the controller section of your Nginx configuration, e.g., @server_name ClusterID.example.com *.containers.ClusterID.example.com;@
* Add wildcard DNS records so @*.containers.ClusterID.example.com@ names resolve to the same address(es) as your controller's external URL
* Update the TLS certificate used by Nginx for @ClusterID.example.com@ so it also validates for @*.containers.ClusterID.example.com@
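A minimal sketch of the first step, assuming the usual top-level @Clusters@ layout (replace @ClusterID@ with your actual cluster ID):

Clusters:
  ClusterID:
    Services:
      ContainerWebServices:
        ExternalURL: "https://*.containers.ClusterID.example.com/"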
h3. Loki credentials in @local.params.secrets@ are no longer needed

The Salt installer's Terraform code replaces the use of an AWS access key and secret for Loki's S3 bucket with equivalent permissions through an instance profile. Once applied, the credentials in the @local.params.secrets@ file will be invalid and can be safely removed.

h3. arvbox and @arvados-server install@ are no longer supported

Arvados 3.2 no longer includes the arvbox Docker image and associated tooling. The @arvados-server install@ subcommand has also been removed from this release.

If you were using arvbox in demo mode, consider installing on a Debian-based virtual machine with our "single-node Ansible installer":{{ site.baseurl }}/install/install-single-host.html.

If you were using arvbox or @arvados-server install@ for development, we now provide an Ansible playbook to install development dependencies on a Debian-based system. Our "Hacking Prerequisites wiki":https://dev.arvados.org/projects/arvados/wiki/Hacking_prerequisites has instructions for how to use it.

h2(#v3_1_2). v3.1.2 (2025-05-27)

"previous: Upgrading to 3.1.1":#v3_1_1

There are no changes that require administrator attention in this release.

h2(#v3_1_1). v3.1.1 (2025-04-14)

"previous: Upgrading to 3.1.0":#v3_1_0

h3. Clusters using cloud dispatch should rebuild a compute node image

Arvados 3.1.1 fixes a handful of bugs in installation tools, particularly for deployments on Ubuntu. If you have already successfully upgraded to 3.1.0, the only thing in this release that affects you is a bug fix in the compute node image builder for cloud deployments. If your cluster uses @arvados-dispatch-cloud@, you should "build a new compute node image following our install guide":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html and configure your cluster to use it. You do not need to upgrade any cluster services; there are no changes to them since 3.1.0.

h2(#v3_1_0). v3.1.0 (2025-03-20)

"previous: Upgrading to 3.0.0":#v3_0_0

h3. Rails API server now runs standalone

The Arvados Rails API server now runs from a standalone Passenger server to simplify deployment. Before upgrading, existing deployments should remove the Rails API server from their nginx configuration, e.g., remove the entire @server@ block with @root /var/www/arvados-api/current/public@ from @/etc/nginx/conf.d/arvados-api-and-controller.conf@. If you customized this deployment at all, the "updated install instructions":{{ site.baseurl }}/install/install-api-server.html#railsapi-config explain how to customize the standalone Passenger server. Finally, you'll need to enable the new service by running:

# systemctl enable --now arvados-railsapi.service
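To confirm the service came up, standard systemd tooling works (this check is optional):

# systemctl status arvados-railsapi.service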
h3. Rails API server needs PowerTools on Red Hat, AlmaLinux, and Rocky Linux

The Arvados Rails API server now needs to be able to build against @libyaml@ development headers. On Red Hat, AlmaLinux, and Rocky Linux, these are provided by the @libyaml-devel@ package in the PowerTools repository. Before you upgrade, make sure you have this repository enabled on the host where you run the Rails API server by running:
# dnf config-manager --set-enabled powertools
h3. "cuda" runtime constraint is deprecated in favor of "gpu" Arvados 3.1.0 adds support for containers that use AMD ROCm alongside our existing support for NVIDIA CUDA. As part of this, the @cuda@ runtime constraint has been deprecated and replaced with a more general @gpu@ constraint. The requested type of GPU is named in the @stack@ field of this object. Other fields have been carried over from @cuda@ and work the same way. Refer to the "runtime constraints reference":{{ site.baseurl }}/api/methods/container_requests.html#runtime_constraints for details. If client software creates or updates a container request with a @cuda@ runtime constraint, the Arvados API server will automatically translate that to a @gpu@ constraint. This client software should still be updated to specify a @gpu@ runtime constraint, but you can safely upgrade to Arvados 3.1.0 and do these updates opportunistically. Client software that reads and reports runtime constraints (like Workbench does) must be updated to read the new @gpu@ constraint. The @cuda@ constraint will no longer appear in API responses. h3(#v3_1_0_instance_type_cuda). Generalized configuration for GPU compute nodes As part of adding support for AMD GPUs in Arvados 3.1, the @CUDA@ section of @InstanceType@ definitions is now deprecated in favor of a new @GPU@ section that is generalized for both CUDA and ROCm. Where previously there would be a @CUDA@ section:
  InstanceTypes:
     gpuInstance:
       CUDA:
         DriverVersion: "11.0"
         HardwareCapability: "9.0"
         DeviceCount: 1
The configuration file should now be updated to use a @GPU@ section:

* Rename the section from @CUDA@ to @GPU@.
* Rename the setting @HardwareCapability@ to @HardwareTarget@. The value can remain unchanged.
* Add the setting @Stack: "cuda"@.
* Add a @VRAM@ setting that defines the amount of RAM available on the GPU.
  InstanceTypes:
     gpuInstance:
       GPU:
         Stack: "cuda"
         DriverVersion: "11.0"
         HardwareTarget: "9.0"
         DeviceCount: 1
         VRAM: 8GiB
To minimize disruption, the config loader will continue to accept the deprecated @CUDA@ field and emit a warning. Admins are advised to update the configuration file, as the legacy field will be removed in a future version.

h3. BsubCUDAArguments renamed to BsubGPUArguments

The configuration item @Containers.LSF.BsubCUDAArguments@ has been renamed to @Containers.LSF.BsubGPUArguments@. There is no change in content. To minimize disruption, the config loader will continue to accept the deprecated @BsubCUDAArguments@ field and emit a warning. Admins are advised to update the configuration file, as the legacy field will be removed in a future version.

h2(#v3_0_0). v3.0.0 (2024-11-12)

"previous: Upgrading to 2.7.4":#v2_7_4

h3. Debian 10 and Ubuntu 18.04 are no longer supported

Arvados 3.0 no longer supports some of the older distributions supported by Arvados 2.7: Debian 10 "buster" and Ubuntu 18.04 "bionic." If you are running Arvados on any of these distributions, you must first upgrade to a supported distribution before you upgrade to Arvados 3.0. Arvados 2.7 supports Debian 11 "bullseye" and Ubuntu 20.04 "focal." You can upgrade your Arvados cluster to one of those releases, then proceed to upgrade Arvados to 3.0.

The list of distributions supported by Arvados 3.0 can be found on "Planning and prerequisites.":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux

h3. Red Hat 8 package dependency on package streams

The Red Hat 8 package of the Rails API server now depends on the Ruby 3.1 stream, and the various Python packages now depend on the Python 3.9 stream. Plan for these streams to be activated and installed automatically during your upgrade.

h3. RVM is no longer supported

Some Arvados packages, most notably the Rails API server package @arvados-api-server@, would check whether RVM is installed on the system, and invoke Ruby commands through it if so. Arvados 3.0 no longer specially supports RVM. Instead, Arvados 3.0 supports all the different versions of Ruby that are packaged in our supported distributions, removing the need to support separate Ruby installations. Package scripts run plain @ruby@ and @gem@ commands and expect that they come from a supported version.

If you have a custom install that requires a different version of Ruby than the one included with your distribution, you must configure your system to ensure package scripts find that version of @ruby@ before any others. For example, you might do this on Debian-based distributions by customizing apt's @DPkg::Path@ setting.

h3. Keep-web requires PostgreSQL database access

The keep-web service now connects directly to the PostgreSQL database. Make sure these connections are supported by your network firewall rules, PostgreSQL connection settings, and PostgreSQL server configuration (in @pg_hba.conf@) as shown in the "PostgreSQL install instructions":{{site.baseurl}}/install/install-postgresql.html.

h3. Slow migration on upgrade

This upgrade includes a database schema update to rebuild full text search indexes, removing UUID and portable data hash column data. This will provide better search results to users and take less space in the database, but plan for the @arvados-api-server@ package upgrade to take longer than usual.

h3. WebDAV service uses @/var/cache@ for file content

When running as root, @keep-web@ now stores copies of recently accessed data blocks in @/var/cache/arvados/keep@ instead of in memory. This directory is created automatically. The default cache size is 10% of the filesystem size.
Use the new @Collections.WebDAVCache.DiskCacheSize@ config to specify a different percentage or an absolute size. If @keep-web@ is not running as root, it will store the cache in @$HOME/.cache/arvados/keep@. If the previously supported @MaxBlockEntries@ config is present, remove it to avoid warning messages at startup.
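For example, a minimal sketch assuming the usual top-level @Clusters@ layout; the 20% value is purely illustrative:

Clusters:
  ClusterID:
    Collections:
      WebDAVCache:
        DiskCacheSize: 20%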
h3. Python SDK reorganization of internal classes and modules

We have reorganized the Python SDK to make it clearer which APIs are intended to be public, and to make it easier to find documentation for them. As part of this work, some modules that only included internal support code have been moved, most notably @arvados.diskcache@, @arvados.http_to_keep@, and @arvados.timer@. If you need immediate access to these modules, you can find them under @arvados._internal@, but we do not intend to support them as part of our public SDK API, so they may change or be removed entirely in future versions. If you've written client software that relies on these modules, please "file an issue":https://github.com/arvados/arvados/issues/new to let us know so we can figure out how best to support you.

h3. Virtual environments inside distribution Python packages have moved

The distribution packages that we publish for Python packages include an entire virtualenv with all required libraries. In Arvados 3.0 these virtualenvs have moved from @/usr/share/python3/dist/PACKAGE_NAME@ to @/usr/lib/PACKAGE_NAME@ to prevent conflicts with distribution packages and to better conform to filesystem standards.

If you only run the executables installed by these packages, you don't need to change anything. Those are still installed under @/usr/bin@ and will use the new location when you upgrade.

If you have written your own scripts or tools that rely on these virtualenvs, you may need to update those with the new location. For example, if you have a shell script that activates the virtualenv by running:

source /usr/share/python3/dist/python3-arvados-python-client/bin/activate
You must update it to:
source /usr/lib/python3-arvados-python-client/bin/activate
If you have a Python script with this shebang line:
#!/usr/share/python3/dist/python3-arvados-python-client/bin/python
You must update it to:
#!/usr/lib/python3-arvados-python-client/bin/python
h3. costanalyzer subcommand replaced by Arvados cluster activity tool

The functionality of @arvados-client costanalyzer@ has been replaced by a new @arvados-cluster-activity@ tool. More information can be found at "Analyzing workflow cost":{{site.baseurl}}/user/cwl/costanalyzer.html.

h3. @arv-migrate-docker19@ tool removed

The @arv-migrate-docker19@ tool that updates images from Docker 1.9 to be used with Docker 1.10+ (released February 2016) has been removed. In the unlikely event you still need to "run this migration":https://doc.arvados.org/v2.7/install/migrate-docker19.html, please do so before you upgrade to Arvados 3.0.

h3. Legacy APIs and response fields have been removed

The following APIs have been removed:

* "api_clients":https://doc.arvados.org/v2.7/api/methods/api_clients.html
* "humans":https://doc.arvados.org/v2.7/api/methods/humans.html
* "jobs":https://doc.arvados.org/v2.7/api/methods/jobs.html
* "job_tasks":https://doc.arvados.org/v2.7/api/methods/job_tasks.html
* "nodes":https://doc.arvados.org/v2.7/api/methods/nodes.html
* "pipeline_instances":https://doc.arvados.org/v2.7/api/methods/pipeline_instances.html
* "pipeline_templates":https://doc.arvados.org/v2.7/api/methods/pipeline_templates.html
* "repositories":https://doc.arvados.org/v2.7/api/methods/repositories.html
* "keep_disks":https://doc.arvados.org/v2.7/api/methods/keep_disks.html
* "specimens":https://doc.arvados.org/v2.7/api/methods/specimens.html
* "traits":https://doc.arvados.org/v2.7/api/methods/traits.html

The following fields are no longer returned in API responses:

* @api_client_id@, @user_id@, @default_owner_uuid@ ("api_client_authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html API)
* @modified_by_client_uuid@ (all APIs)

h3. Configuration entries have been removed or renamed

The following configuration keys have been renamed or removed. Renamed keys will still be loaded if they appear with their old names, but you should update your @/etc/arvados/config.yml@ file to avoid warnings when services start up.

* @API.LogCreateRequestFraction@ has been removed
* @Containers.JobsAPI.Enable@ has been removed
* @Mail.EmailFrom@ has been removed
* @Mail.IssueReporterEmailFrom@ has been removed
* @Mail.IssueReporterEmailTo@ has been removed
* @Mail.MailchimpAPIKey@ has been removed
* @Mail.MailchimpListID@ has been removed
* @Mail.SendUserSetupNotificationEmail@ has moved to @Users.SendUserSetupNotificationEmail@
* @Mail.SupportEmailAddress@ has moved to @Users.SupportEmailAddress@

h3. S3 volume IAMRole configuration entry has been removed

The @Volumes.*.DriverParameters.IAMRole@ configuration entry for S3 volumes has been removed. You should remove it from your @/etc/arvados/config.yml@ file to avoid warnings when services start up.

As before, if @AccessKeyID@ and @SecretAccessKey@ are blank, keepstore will retrieve IAM role credentials from instance metadata. Previously, documentation indicated that keepstore would refuse to use the IAM credentials if @IAMRole@ was specified and did not match the instance metadata, but that check has not been working for some time.

h3. Legacy container logging system has been removed

The following configuration keys are no longer supported. Remove them from your @/etc/arvados/config.yml@ file to avoid warnings when services start up.
* @Containers.Logging.LimitLogBytesPerJob@
* @Containers.Logging.LogBytesPerEvent@
* @Containers.Logging.LogPartialLineThrottlePeriod@
* @Containers.Logging.LogSecondsBetweenEvents@
* @Containers.Logging.LogThrottleBytes@
* @Containers.Logging.LogThrottleLines@
* @Containers.Logging.LogThrottlePeriod@
* @Containers.Logging.MaxAge@
* @Containers.Logging.SweepInterval@

Any container logging content remaining in the database from the legacy system will be deleted.

h2(#v2_7_4). v2.7.4 (2024-07-08)

"previous: Upgrading to 2.7.3":#v2_7_3

Starting from 2.7.4, Arvados no longer supports CentOS. CentOS users should migrate to an Arvados-supported version of Red Hat Enterprise Linux (RHEL), Rocky Linux, or AlmaLinux.

There are no other configuration changes requiring administrator attention in this release.

h2(#v2_7_3). v2.7.3 (2024-05-24)

"previous: Upgrading to 2.7.2":#v2_7_2

There are no configuration changes requiring administrator attention in this release.

h2(#v2_7_2). v2.7.2 (2024-04-09)

"previous: Upgrading to 2.7.1":#v2_7_1

h3. Check MaxGatewayTunnels config

If you use the LSF or Slurm dispatcher, ensure the new @API.MaxGatewayTunnels@ config entry is high enough to support the size of your cluster. See "LSF docs":{{site.baseurl}}/install/crunch2-lsf/install-dispatch.html#MaxGatewayTunnels or "Slurm docs":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html#MaxGatewayTunnels for details.

h3. New LSF dispatcher config items MaxRunTimeOverhead and MaxRunTimeDefault

The LSF dispatcher now supports the configuration parameter @Containers.LSF.MaxRunTimeDefault@, which provides the default value of @max_run_time@ for containers that do not specify a time limit (using CWL @ToolTimeLimit@).

It also supports the configuration parameter @Containers.LSF.MaxRunTimeOverhead@: when @scheduling_constraints.max_run_time@ or @MaxRunTimeDefault@ is non-zero, this adds time to account for crunch-run startup/shutdown overhead.

h2(#v2_7_1). v2.7.1 (2023-12-12)

"previous: Upgrading to 2.7.0":#v2_7_0

h3. Separate configs for MaxConcurrentRequests and MaxConcurrentRailsRequests

The default configuration value @API.MaxConcurrentRequests@ (the number of concurrent requests that will be processed by a single instance of an arvados service process) is raised from 8 to 64. A new configuration key @API.MaxConcurrentRailsRequests@ (default 8) limits the number of concurrent requests processed by a RailsAPI service process.

h3. Remove Workbench1 packages after upgrading the salt installer

If you installed a previous version of Arvados with the Salt installer, and you upgrade your installer to upgrade the cluster, you should uninstall the @arvados-workbench@ package from the workbench instance afterwards.

h3. Remove Workbench1 packages and configuration

The Workbench1 application has been removed from the Arvados distribution. We recommend the following follow-up steps.

* Remove the Workbench1 package from any service node where it is installed (e.g., @apt remove arvados-workbench@).
* In your Nginx configuration, add your Workbench1 URL host (from @Services.Workbench1.ExternalURL@) to the @server_name@ directive in the Workbench2 section. For example:
server {
  listen 443 ssl;
  server_name workbench.ClusterID.example.com workbench2.ClusterID.example.com;
  ...
}
* In your Nginx configuration, remove the @upstream@ and @server@ sections for Workbench1.
* Remove the @Services.Workbench1.InternalURLs@ section of your configuration file. (Do not remove @ExternalURL@.)
* Run @arvados-server config-check@ to identify any Workbench1-specific entries in your configuration file, and remove them.

h3. Check implications of Containers.MaximumPriceFactor 1.5

When scheduling a container, Arvados now considers using instance types other than the lowest-cost type consistent with the container's resource constraints. If a larger instance is already running and idle, or the cloud provider reports that the optimal instance type is not currently available, Arvados will select a larger instance type, provided the cost does not exceed 1.5x the optimal instance type cost.

This will typically reduce overall latency for containers and reduce instance booting/shutdown overhead, but may increase costs depending on workload and instance availability. To avoid this behavior, configure @Containers.MaximumPriceFactor: 1.0@.

h3. Synchronize keepstore and keep-balance upgrades

The internal communication between keepstore and keep-balance about read-only volumes has changed. After keep-balance is upgraded, old versions of keepstore will be treated as read-only. We recommend upgrading and restarting all keepstore services first, then upgrading and restarting keep-balance.

h2(#v2_7_0). v2.7.0 (2023-09-21)

"previous: Upgrading to 2.6.3":#v2_6_3

h3. New system for live container logs

Starting with Arvados 2.7, a new system for fetching live container logs is in place. This system features significantly reduced database load compared to previous releases. When Workbench or another application needs to access the logs of a process (running or completed), it should use the "log endpoint of container_requests":{{ site.baseurl }}/api/methods/container_requests.html, which forwards requests to the running container. This supersedes the previous system, where compute processes would send all of their logs to the database, which produced significant load.

The legacy logging system is now disabled by default for all installations with the setting @Containers.Logging.LimitLogBytesForJob: 0@. If you have an existing Arvados installation where you have customized this value and do not need the legacy container logging system, we recommend removing @LimitLogBytesForJob@ from your configuration.

If you need to re-enable the legacy logging system, set @Containers.Logging.LimitLogBytesForJob@ to a positive value (the previous default was @Containers.Logging.LimitLogBytesForJob: 67108864@).

h3. Workbench 1 deprecated

The original Arvados Workbench application (referred to as "Workbench 1") is deprecated and will be removed in a future major version of Arvados. Users are advised to migrate to "Workbench 2". Starting with this release, new installations of Arvados will only set up Workbench 2 and no longer include Workbench 1 by default.

It is also important to note that Workbench 1 only supports the legacy logging system, which is now disabled by default.
If you need to re-enable the legacy logging system, see above.

h3. Multi-node installer's domain name configuration changes

The @domain_name@ variable at @terraform/vpc/terraform.tfvars@ and the @DOMAIN@ variable at @local.params@ changed their meaning. In previous versions they were used in combination with @cluster_name@ and @CLUSTER@ to build the cluster's domain name (e.g.: @cluster_name@.@domain_name@). To allow the use of an arbitrary cluster domain, we no longer enforce using the cluster prefix as part of the domain, so @domain_name@ and @DOMAIN@ need to hold the entire domain for the given cluster.

For example, if @cluster_name@ is set to @"xarv1"@ and @domain_name@ was previously set to @"example.com"@, it should now be set to @"xarv1.example.com"@ to keep using the same cluster domain.

h3. Crunchstat log format change

The reported number of CPUs available in a container is now formatted in @crunchstat.txt@ log files and @crunchstat-summary@ text reports as a floating-point number rather than an integer (@2.00 cpus@ rather than @2 cpus@). Programs that parse these files may need to be updated accordingly.

h3. arvados-login-sync configuration changes, including ignored groups

In the @Users@ section of your cluster configuration, there are now several options to control what system resources are or are not managed by @arvados-login-sync@. These options all have names that begin with @Sync@. The defaults for all of these options match the previous behavior of @arvados-login-sync@, _except_ for @SyncIgnoredGroups@. This list names groups that @arvados-login-sync@ will never modify by adding or removing members. As a security precaution, the default list names security-sensitive system groups on Debian- and Red Hat-based distributions. If you are using Arvados to manage system group membership on shell nodes, especially @sudo@ or @wheel@, you may want to provide your own list. Set @SyncIgnoredGroups: []@ to restore the original behavior of ignoring no groups.

h3. API clients can always retrieve their current token, regardless of scopes

We have introduced a small exception to the previous behavior of "Arvados API token scopes":{{ site.baseurl }}/admin/scoped-tokens.html in this release. A valid token is now always allowed to issue a request to "@GET /arvados/v1/api_client_authorizations/current@":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current regardless of its scopes. This allows clients to reliably determine whether a request failed because a token is invalid, or because the token is not permitted to perform a particular request. The API server itself needs to be able to do this to validate tokens issued by other clusters in a federation.

h3. Deprecated/legacy APIs slated for removal

The legacy APIs "humans":https://doc.arvados.org/v2.7/api/methods/humans.html, "specimens":https://doc.arvados.org/v2.7/api/methods/specimens.html, "traits":https://doc.arvados.org/v2.7/api/methods/traits.html, "jobs":https://doc.arvados.org/v2.7/api/methods/jobs.html, "job_tasks":https://doc.arvados.org/v2.7/api/methods/job_tasks.html, "pipeline_instances":https://doc.arvados.org/v2.7/api/methods/pipeline_instances.html, "pipeline_templates":https://doc.arvados.org/v2.7/api/methods/pipeline_templates.html, "nodes":https://doc.arvados.org/v2.7/api/methods/nodes.html, "repositories":https://doc.arvados.org/v2.7/api/methods/repositories.html, and "keep_disks":https://doc.arvados.org/v2.7/api/methods/keep_disks.html are deprecated and will be removed in a future major version of Arvados.
In addition, the @default_owner_uuid@, @api_client_id@, and @user_id@ fields of "api_client_authorizations":../api/methods/api_client_authorizations.html are deprecated and will be removed from @api_client_authorization@ responses in a future major version of Arvados. This should not affect clients, as @default_owner_uuid@ was never implemented, and @api_client_id@ and @user_id@ returned internal ids that were not meaningful or usable with any other API call.

h3. UseAWSS3v2Driver option removed

The old "v1" S3 driver for keepstore has been removed. The new "v2" implementation, which has been the default since Arvados 2.5.0, is always used. The @Volumes.*.DriverParameters.UseAWSS3v2Driver@ configuration key is no longer recognized. If your config file uses it, remove it to avoid warning messages at startup.

h2(#v2_6_3). v2.6.3 (2023-06-06)

h3. Python SDK automatically retries failed requests much more

The Python SDK has always provided functionality to retry API requests that fail due to temporary problems like network failures, by passing @num_retries=N@ to a request's @execute()@ method. In this release, API client constructor functions like @arvados.api@ also accept a @num_retries@ argument. This value is stored on the client object and used as a floor for all API requests made with this client. This allows developers to set their preferred retry strategy once, without having to pass it to each @execute()@ call.

The default value for @num_retries@ in API constructor functions is 10. This means that an API request that repeatedly encounters temporary problems may spend up to about 35 minutes retrying in the worst case. We believe this is an appropriate default for most users, where eventual success is a much greater concern than responsiveness. If you have client applications where this is undesirable, update them to pass a lower @num_retries@ value to the constructor function. You can even pass @num_retries=0@ to have the API client act as it did before, like this:

{% codeblock as python %}
import arvados
arv_client = arvados.api('v1', num_retries=0, ...)
{% endcodeblock %}

The first time the Python SDK fetches an Arvados API discovery document, it will ensure that @googleapiclient.http@ logs are handled so you have a way to know about early problems that are being retried. If you prefer to handle these logs your own way, just ensure that the @googleapiclient.http@ logger (or a parent logger) has a handler installed before you call any Arvados API client constructor.

h2(#v2_6_2). v2.6.2 (2023-05-22)

"previous: Upgrading to 2.6.1":#v2_6_1

This version introduces a new API feature which is used by Workbench 2 to improve page loading performance. To avoid any errors using the new Workbench with an old API server, be sure to upgrade the API server before upgrading Workbench 2.

h2(#v2_6_1). v2.6.1 (2023-04-17)

"previous: Upgrading to 2.6.0":#v2_6_0

h3. Performance improvement for permission row de-duplication migration

The migration which de-duplicates permission links has been optimized. We recommend upgrading from 2.5.0 directly to 2.6.1 in order to avoid the slow permission de-duplication migration in 2.6.0.

You should still plan for the arvados-api-server package upgrade to take longer than usual due to the database schema update changing the integer id column in each table from 32-bit to 64-bit.

h2(#v2_6_0). v2.6.0 (2023-04-06)

"previous: Upgrading to 2.5.0":#v2_5_0
h3. WebDAV InternalURLs must be reachable from controller nodes

Ensure your internal keep-web service addresses are listed in the @Services.WebDAV.InternalURLs@ section of your configuration file, and reachable from controller processes, as noted on the "updated install page":{{site.baseurl}}/admin/config-urls.html.

h3. Slow migration on upgrade

Important! This upgrade includes a database schema update changing the integer id column in each table from 32-bit to 64-bit. Because it touches every row in the table, on moderate to large sized installations *this may be very slow* (on the order of hours). Plan for the arvados-api-server package upgrade to take longer than usual.

h3. Default request concurrency, new limit on log requests

The configuration value @API.MaxConcurrentRequests@ (the number of concurrent requests that will be accepted by a single instance of arvados-controller) now has a default value of 64, instead of being unlimited. The new configuration value @API.LogCreateRequestFraction@ (default 0.50) limits requests that post live container logs to the API server, to avoid situations where log messages crowd out other more important requests.

h3. New limit on concurrent workflows

The new configuration option @CloudVMs.SupervisorFraction@ (default 0.30) limits the number of concurrent workflow supervisors, to avoid situations where too many workflow runners crowd out actual workers.

h3. Default limit for cloud VM instances

There is a new configuration entry @CloudVMs.MaxInstances@ (default 64) that limits the number of VMs the cloud dispatcher will run at a time. This may need to be adjusted to suit your anticipated workload. Using the obsolete configuration entry @MaxCloudVMs@, which was previously accepted in config files but not obeyed, will now result in a deprecation warning.

h3. Default frequency for running keep-balance has changed

The frequency at which @keep-balance@ runs (@Collections.BalancePeriod@) has been changed from every 10 minutes to every 6 hours.

h2(#v2_5_0). v2.5.0 (2022-12-22)

"previous: Upgrading to 2.4.4":#v2_4_4

h3. Dispatchers require PostgreSQL database access

All dispatchers (cloud, LSF, and Slurm) now connect directly to the PostgreSQL database. Make sure these connections are supported by your network firewall rules, PostgreSQL connection settings, and PostgreSQL server configuration (in @pg_hba.conf@) as shown in the "PostgreSQL install instructions":{{site.baseurl}}/install/install-postgresql.html.

h3. Google or OpenID Connect login restricted to trusted clients

If you use OpenID Connect or Google login, and your cluster serves as the @LoginCluster@ in a federation _or_ your users log in from a web application other than the Workbench1 and Workbench2 @ExternalURL@ addresses in your configuration file, the additional web application URLs (e.g., the other clusters' Workbench addresses) must be listed explicitly in @Login.TrustedClients@, otherwise login will fail. Previously, login would succeed with a less-privileged token.

h3. New keepstore S3 driver enabled by default

A more actively maintained S3 client library is now enabled by default for keepstore services. The previous driver is still available for use in case of unknown issues. To use the old driver, set @DriverParameters.UseAWSS3v2Driver@ to @false@ on the appropriate @Volumes@ config entries.
h3. Old container logs are automatically deleted from PostgreSQL

Cached copies of log entries from containers that finished more than 1 month ago are now deleted automatically (this only affects the "live" logs saved in the PostgreSQL database, not log collections saved in Keep). If you have an existing cron job that runs @rake db:delete_old_container_logs@, you can remove it. See configuration options @Containers.Logging.MaxAge@ and @Containers.Logging.SweepInterval@.

h3. Fixed salt installer template file to support container shell access

If you manage your cluster using the salt installer, you may want to update it to the latest version, use the appropriate @config_examples@ subdirectory, and re-deploy with your custom @local.params@ file so that @arvados-controller@'s @nginx@ configuration file gets fixed.

h3. Login-sync script requires configuration update on LoginCluster federations

If you have @arvados-login-sync@ running on a satellite cluster, please update the environment variable settings by removing the @LOGINCLUSTER_ARVADOS_API_*@ variables and setting @ARVADOS_API_TOKEN@ to a LoginCluster's admin token, as described on the "updated install page":{{site.baseurl}}/install/install-shell-server.html#arvados-login-sync.

h3. Renamed keep-web metrics and WebDAV configs

Metrics previously reported by keep-web (@arvados_keepweb_collectioncache_requests@, @..._hits@, @..._pdh_hits@, @..._api_calls@, @..._cached_manifests@, and @arvados_keepweb_sessions_cached_collection_bytes@) have been replaced with @arvados_keepweb_cached_session_bytes@.

The config entries @Collections.WebDAVCache.UUIDTTL@, @...MaxCollectionEntries@, and @...MaxUUIDEntries@ are no longer used, and should be removed from your config file.

h2(#v2_4_4). v2.4.4 (2022-11-18)

"previous: Upgrading to 2.4.3":#v2_4_3

This update only consists of improvements to @arvados-cwl-runner@. There are no changes to backend services.

h2(#v2_4_3). v2.4.3 (2022-09-21)

"previous: Upgrading to 2.4.2":#v2_4_2

h3. Fixed PAM authentication security vulnerability

In Arvados 2.4.2 and earlier, when using PAM authentication, if a user presented valid credentials but the account was disabled or otherwise not allowed to access the host, the login would still be accepted for access to Arvados. From 2.4.3 onwards, Arvados also checks that the account is permitted to access the host before completing the PAM login process.

Other authentication methods (LDAP, OpenID Connect) are not affected by this flaw.

h2(#v2_4_2). v2.4.2 (2022-08-09)

"previous: Upgrading to 2.4.1":#v2_4_1

h3. GHSL-2022-063

GitHub Security Lab (GHSL) reported a remote code execution (RCE) vulnerability in the Arvados Workbench that allows authenticated attackers to execute arbitrary code via specially crafted JSON payloads. This vulnerability is fixed in 2.4.2 ("#19316":https://dev.arvados.org/issues/19316). It is likely that this vulnerability exists in all versions of Arvados up to 2.4.1.

This vulnerability is specific to the Ruby on Rails Workbench application ("Workbench 1"). We do not believe any other Arvados components, including the TypeScript browser-based Workbench application ("Workbench 2") or API Server, are vulnerable to this attack.

h3. CVE-2022-31163 and CVE-2022-32224

As a precaution, Arvados 2.4.2 includes security updates for Ruby on Rails and the TZInfo Ruby gem. However, there are no known exploits in Arvados based on these CVEs.
h3. Disable Sharing URLs UI

There is now a configuration option @Workbench.DisableSharingURLsUI@ for admins to disable the user interface for the "sharing link" feature (URLs which can be sent to users to access the data in a specific collection in Arvados without an Arvados account), for organizations where sharing links violate their data sharing policy.

h2(#v2_4_1). v2.4.1 (2022-06-02)

"previous: Upgrading to 2.4.0":#v2_4_0

h3. Slurm dispatcher requires configuration update

If you use the Slurm dispatcher (@crunch-dispatch-slurm@) you must add a @Services.DispatchSLURM.InternalURLs@ section to your configuration file, as shown on the "updated install page":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html.

h3. New proxy parameters for arvados-controller

We now recommend disabling nginx proxy caching for arvados-controller, to avoid truncation of large responses.

In your Nginx configuration file (@/etc/nginx/conf.d/arvados-api-and-controller.conf@), add the following lines to the @location /@ block with @http://controller@ (see "Update nginx configuration":{{site.baseurl}}/install/install-api-server.html#update-nginx for an example) and reload/restart Nginx (@sudo nginx -s reload@).
    proxy_max_temp_file_size 0;
    proxy_request_buffering  off;
    proxy_buffering          off;
    proxy_http_version       1.1;
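To confirm the new directives are active after reloading, one optional check is to dump the running configuration; this is a sketch, assuming your nginx build supports @nginx -T@ (available in modern releases):

    sudo nginx -T | grep -E 'proxy_(buffering|request_buffering|max_temp_file_size|http_version)'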
h3. Now recommending Singularity 3.9.9

The compute image "build script":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html now installs Singularity 3.9.9 instead of 3.7.4. The newer version includes a bugfix that should resolve "intermittent loopback device errors":https://dev.arvados.org/issues/18489 when running containers.

h3. Changes to @arvados-cwl-runner --create-workflow@ and @--update-workflow@

When using @arvados-cwl-runner --create-workflow@ or @--update-workflow@, by default it will now make a copy of all collection and Docker image dependencies in the target project. Running workflows retains the old behavior (use the dependencies wherever they are found). This can be controlled explicitly with @--copy-deps@ and @--no-copy-deps@.

h2(#v2_4_0). v2.4.0 (2022-04-08)

"previous: Upgrading to 2.3.1":#v2_3_1

h3. Default result order changed

When requesting a list of objects without an explicit @order@ parameter, the default order has changed from @modified_at desc, uuid asc@ to @modified_at desc, uuid desc@. This means that if two objects have identical @modified_at@ timestamps, the tiebreaker will now be based on @uuid@ in descending order, where previously it would be ascending order. The practical effect of this should be minor; with microsecond precision it is unusual to have two records with exactly the same timestamp, and order-sensitive queries should already provide an explicit @order@ parameter.

h3. Ubuntu 18.04 Arvados Python packages now depend on python-3.8

Ubuntu 18.04 ships with Python 3.6 as the default version of Python 3. Ubuntu also ships a version of Python 3.8, and the Arvados Python packages (@python3-arvados-cwl-runner@, @python3-arvados-fuse@, @python3-arvados-python-client@, @python3-arvados-user-activity@ and @python3-crunchstat-summary@) now depend on the @python-3.8@ system package. This means that they are now installed under @/usr/share/python3.8@ (before, the path was @/usr/share/python3@). If you rely on the @python3@ executable from the packages (e.g. to load a virtualenv), you may need to update the path to that executable.

h3. Minimum supported Ruby version is now 2.6

The minimum supported Ruby version is now 2.6. If you are running Arvados on Debian 10 or Ubuntu 18.04, you may need to switch to using RVM or upgrade your OS. See "Install Ruby and Bundler":../install/ruby.html for more information.

h3. Anonymous token changes

The anonymous token configured in @Users.AnonymousUserToken@ must now be 32 characters or longer. This was already the suggestion in the documentation; now it is enforced. The @script/get_anonymous_user_token.rb@ script that was needed to register the anonymous user token in the database has been removed. Registration of the anonymous token is no longer necessary.

h3. Preemptible instance support changes

The @Containers.UsePreemptibleInstances@ option has been renamed to @Containers.AlwaysUsePreemptibleInstances@ and has the same behavior when @true@ and one or more preemptible instances are configured. However, a value of @false@ no longer disables support for preemptible instances; instead, users can now enable use of preemptible instances at the level of an individual workflow or workflow step.

In addition, the new configuration option @Containers.PreemptiblePriceFactor@ will automatically add a preemptible instance type corresponding to each regular instance type. See "Using Preemptible instances":spot-instances.html for details.
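For reference, a minimal configuration sketch combining these two options might look like the following (the @zzzzz@ cluster ID and the factor value are illustrative only; as described above, a factor of 0.8 would price each generated preemptible type at 80% of the corresponding regular instance type):

Clusters:
  zzzzz:
    Containers:
      AlwaysUsePreemptibleInstances: false
      PreemptiblePriceFactor: 0.8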
h3. Default LSF arguments have changed

If you use LSF and your configuration specifies @Containers.LSF.BsubArgumentsList@, you should update it to include the new arguments (@"-R", "select[mem>=%MMB]", ...@, see the "configuration reference":{{site.baseurl}}/admin/config.html). Otherwise, containers that are too big to run on any LSF host will remain in the LSF queue instead of being cancelled.

h3. Support for NVIDIA CUDA GPUs

Arvados now supports requesting NVIDIA CUDA GPUs for cloud and LSF deployments (Slurm is currently not supported). To be able to request GPU nodes, some additional configuration is needed:

* "Including GPU support in cloud compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html#nvidia
* "Configure cloud dispatcher for GPU support":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html#GPUsupport
* "LSF GPU configuration":{{site.baseurl}}/install/crunch2-lsf/install-dispatch.html

h3. Role groups are visible to all users by default

The permission model has changed such that all role groups are visible to all active users. This enables users to share objects with groups they don't belong to. To preserve the previous behavior, where role groups are only visible to members and admins, add @RoleGroupsVisibleToAll: false@ to the @Users@ section of your configuration file.

h3. Previously trashed role groups will be deleted

Due to a bug in previous versions, the @DELETE@ operation on a role group caused the group to be flagged as trash in the database, but continue to grant permissions regardless. After upgrading, any role groups that had been trashed this way will be deleted. This might surprise some users if they were relying on permissions that were still in effect due to this bug. Future @DELETE@ operations on a role group will immediately delete the group and revoke the associated permissions.

h3. Dedicated keepstore process for each container

When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.

* If you have containers that can effectively handle multiple I/O threads per VCPU, consider increasing the @Containers.LocalKeepBlobBuffersPerVCPU@ value.
* If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers.
* This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log.

h2(#v2_3_1). v2.3.1 (2021-11-24)

"previous: Upgrading to 2.3.0":#v2_3_0

h3. Users are visible to other users by default

When a new user is set up (either via the @AutoSetupNewUsers@ config or via the Workbench admin interface), the user immediately becomes visible to other users.
To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.

h3. Backend support for vocabulary checking

If your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs, and set the @API.VocabularyPath@ configuration parameter to the local path where the file was placed. This will enable vocabulary checking cluster-wide, including Workbench2. The @Workbench.VocabularyURL@ configuration parameter is deprecated and will be removed in a future release. You can read more about how this feature works on the "admin page":{{site.baseurl}}/admin/metadata-vocabulary.html.

h2(#v2_3_0). v2.3.0 (2021-10-27)

"previous: Upgrading to 2.2.0":#v2_2_0

h3. Ubuntu 18.04 packages for arvados-api-server and arvados-workbench now conflict with ruby-bundler

Ubuntu 18.04 ships with Bundler version 1.16.1, which is no longer compatible with the Gemfiles in the Arvados packages (made with Bundler 2.2.19). The Ubuntu 18.04 packages for arvados-api-server and arvados-workbench now conflict with the ruby-bundler package to work around this issue. The post-install scripts for arvados-api-server and arvados-workbench install the proper version of Bundler as a gem.

h3. Removed unused @update_uuid@ endpoint for users

The @update_uuid@ endpoint was superseded by the "link accounts feature":{{site.baseurl}}/admin/link-accounts.html, so it's no longer available.

h3. Removed deprecated '@@' search operator

The '@@' full text search operator, previously deprecated, has been removed. To perform a string search across multiple columns, use the 'ilike' operator on 'any' column as described in the "available list method filter section":{{site.baseurl}}/api/methods.html#substringsearchfilter of the API documentation.

h3. Storage classes must be defined explicitly

If your configuration uses the StorageClasses attribute on any Keep volumes, you must add a new @StorageClasses@ section that lists all of your storage classes. Refer to the updated documentation about "configuring storage classes":{{site.baseurl}}/admin/storage-classes.html for details.

h3. keep-balance requires access to PostgreSQL

Make sure the keep-balance process can connect to your PostgreSQL server using the settings in your config file. (In previous versions, keep-balance accessed the database through controller instead of connecting to the database server directly.)

h3. crunch-dispatch-local now requires config.yml

The @crunch-dispatch-local@ dispatcher now reads the API host and token from the system-wide @/etc/arvados/config.yml@. It will fail to start if that file is not found or not readable.

h3. Multi-file docker image collections

Typically a docker image collection contains a single @.tar@ file at the top level. Handling of atypical cases has changed. If a docker image collection contains files with extensions other than @.tar@, they will be ignored (previously they could cause errors). If a docker image collection contains multiple @.tar@ files, it will cause an error at runtime, "cannot choose from multiple tar files in image collection" (previously one of the @.tar@ files was selected). Subdirectories are ignored.
The @arv keep docker@ command always creates a collection with a single @.tar@ file, and never uses subdirectories, so this change will not affect most users.

h2(#v2_2_0). v2.2.0 (2021-06-03)

"previous: Upgrading to 2.1.0":#v2_1_0

h3. New spelling of S3 credential configs

If you use the S3 driver for Keep volumes and specify credentials in your configuration file (as opposed to using an IAM role), you should change the spelling of the @AccessKey@ and @SecretKey@ config keys to @AccessKeyID@ and @SecretAccessKey@. If you don't update them, the previous spellings will still be accepted, but warnings will be logged at server startup.

h3. New proxy parameters for arvados-controller

In your Nginx configuration file (@/etc/nginx/conf.d/arvados-api-and-controller.conf@), add the following lines to the @location /@ block with @http://controller@ (see "Update nginx configuration":{{site.baseurl}}/install/install-api-server.html#update-nginx for an example) and reload/restart Nginx (@sudo nginx -s reload@).
    proxy_set_header      Upgrade           $http_upgrade;
    proxy_set_header      Connection        "upgrade";
h3. Changes to the collection's @preserve_version@ attribute semantics

The @preserve_version@ attribute on collections was originally designed to allow clients to persist a preexisting collection version. This forced clients to make 2 requests if the intention was to "make this set of changes in a new version that will be kept", so we have changed the semantics to do just that: when passing @preserve_version=true@ along with other collection updates, the current version is persisted, and the newly created one will also be persisted on the next update.

h3. System token requirements

System services now log a warning at startup if any of the system tokens (@ManagementToken@, @SystemRootToken@, and @Collections.BlobSigningKey@) are less than 32 characters, or contain characters other than a-z, A-Z, and 0-9. After upgrading, run @arvados-server config-check@ and update your configuration file if needed to resolve any warnings.

The @API.RailsSessionSecretToken@ configuration key has been removed. Delete this entry from your configuration file after upgrading.

h3. Centos7 Python 3 dependency upgraded to python3

Now that Python 3 is part of the base repository in CentOS 7, the Python 3 dependency for Centos7 Arvados packages was changed from SCL rh-python36 to python3.

h3. ForceLegacyAPI14 option removed

The ForceLegacyAPI14 configuration option has been removed. In the unlikely event it is mentioned in your config file, remove it to avoid "deprecated/unknown config" warning logs.

h2(#v2_1_0). v2.1.0 (2020-10-13)

"previous: Upgrading to 2.0.0":#v2_0_0

h3. LoginCluster conflicts with other Login providers

A satellite cluster that delegates its user login to a central user database must only have @Login.LoginCluster@ set, or it will return an error. This is a change in behavior; previously it would return an error if another login provider was _not_ configured, even though the provider would never be used.

h3. Minimum supported Python version is now 3.5

We no longer publish Python 2 based distribution packages for our Python components. There are equivalent packages based on Python 3, but their names are slightly different. If you were using the Python 2 based packages, you can install the Python 3 based package as a drop-in replacement. On Debian and Ubuntu:
    apt remove python-arvados-fuse && apt install python3-arvados-fuse
    apt remove python-arvados-python-client && apt install python3-arvados-python-client
    apt remove python-arvados-cwl-runner && apt install python3-arvados-cwl-runner
    apt remove python-crunchstat-summary && apt install python3-crunchstat-summary
    apt remove python-cwltest && apt install python3-cwltest
On CentOS:
    yum remove python-arvados-fuse && yum install python3-arvados-fuse
    yum remove python-arvados-python-client && yum install python3-arvados-python-client
    yum remove python-arvados-cwl-runner && yum install python3-arvados-cwl-runner
    yum remove python-crunchstat-summary && yum install python3-crunchstat-summary
    yum remove python-cwltest && yum install python3-cwltest
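After swapping packages, a quick spot check that the Python 3 based tools are installed and on your @PATH@ (a sketch; the exact version output format may vary by release):

    arv-put --version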
h3. Minimum supported Ruby version is now 2.5

The minimum supported Ruby version is now 2.5. If you are running Arvados on Debian 9 or Ubuntu 16.04, you may need to switch to using RVM or upgrade your OS. See "Install Ruby and Bundler":../install/ruby.html for more information.

h3. Removing libpam-arvados, replaced with libpam-arvados-go

The Python-based PAM package has been replaced with a version written in Go. See "using PAM for authentication":{{site.baseurl}}/install/setup-login.html#pam for details.

h3. Removing sso-provider

The SSO (single sign-on) component is deprecated and will not be supported in future releases. Existing configurations will continue to work in this release, but you should switch to one of the built-in authentication mechanisms as soon as possible. See "setting up web based login":{{site.baseurl}}/install/setup-login.html for details. After migrating your configuration, uninstall the @arvados-sso-provider@ package.

h3. S3 signatures

Keepstore now uses "V4 signatures":https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html by default for S3 requests. If you are using Amazon S3, no action is needed; all regions support V4 signatures. If you are using a different S3-compatible service that does not support V4 signatures, add @V2Signature: true@ to your volume driver parameters to preserve the old behavior. See "configuring S3 object storage":{{site.baseurl}}/install/configure-s3-object-storage.html.

h3. New permission system constraints

Some constraints have been added to the permission system; in particular, @role@ and @project@ group types now have distinct behavior. These constraints were already de-facto imposed by the Workbench UI, so on most installations the only effect of this migration will be to reassign @role@ groups to the system user and create a @can_manage@ permission link for the previous owner.

# The @group_class@ field must be either @role@ or @project@. Groups with an invalid @group_class@ are migrated to @role@.
# A @role@ cannot own things. Anything owned by a role is migrated to a @can_manage@ link and reassigned to the system user.
# Only @role@ and @user@ can have outgoing permission links. Permission links originating from projects are deleted by the migration.
# A @role@ is always owned by the system_user. When a group is created, it creates a @can_manage@ link for the object that would have been assigned to @owner_uuid@. Migration adds @can_manage@ links and reassigns roles to the system user. This also has the effect of requiring that all @role@ groups have unique names on the system. If there is a name collision during migration, roles will be renamed to ensure they are unique.
# A permission link can have the permission level (@name@) updated but not @head_uuid@, @tail_uuid@ or @link_class@.

The @arvados-sync-groups@ tool has been updated to reflect these constraints, so it is important to use the version of @arvados-sync-groups@ that matches the API server version.

Before upgrading, use the following commands to find out which groups and permissions in your database will be automatically modified or deleted during the upgrade.

To determine which groups have an invalid @group_class@ (these will be migrated to @role@ groups):
arv group list --filters '[["group_class", "not in", ["project", "role"]]]'
To list all @role@ groups, which will be reassigned to the system user (unless @owner_uuid@ is already the system user):
arv group list --filters '[["group_class", "=", "role"]]'
To list which @project@ groups have outgoing permission links (such links are now invalid and will be deleted by the migration):
for uuid in $(arv link list --filters '[["link_class", "=", "permission"], ["tail_uuid", "like", "%-j7d0g-%"]]' |
              jq -r .items[].tail_uuid | sort | uniq) ; do
   arv group list --filters '[["group_class", "=", "project"], ["uuid", "=", "'$uuid'"]]' | jq .items
done
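If you only want to preview the @role@ groups that will actually be reassigned, you can additionally filter out roles already owned by the system user. This is a sketch; replace @zzzzz@ with your cluster ID (the system user UUID follows the @ClusterID-tpzed-000000000000000@ convention):

arv group list --filters '[["group_class", "=", "role"], ["owner_uuid", "!=", "zzzzz-tpzed-000000000000000"]]'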
h4. "Public favorites" moved to their own project As a side effect of new permission system constraints, "star" links (indicating shortcuts in Workbench) that were previously owned by "All users" (which is now a "role" and cannot own things) will be migrated to a new system project called "Public favorites" which is readable by the "Anonymous users" role. h2(#v2_0_0). v2.0.0 (2020-02-07) "previous: Upgrading to 1.4.1":#v1_4_1 Arvados 2.0 is a major upgrade, with many changes. Please read these upgrade notes carefully before you begin. h3. Migrating to centralized config.yml See "Migrating Configuration":https://doc.arvados.org/v2.1/admin/config-migration.html for notes on migrating legacy per-component configuration files to the new centralized @/etc/arvados/config.yml@. To ensure a smooth transition, the per-component config files continue to be read, and take precedence over the centralized configuration. Your cluster should continue to function after upgrade but before doing the full configuration migration. However, several services (keepstore, keep-web, keepproxy) require a minimal `/etc/arvados/config.yml` to start:
Clusters:
  zzzzz:
    Services:
      Controller:
        ExternalURL: "https://zzzzz.example.com"
h3. Keep-balance configuration migration (feature "#14714":https://dev.arvados.org/issues/14714 )

The keep-balance service can now be configured using the centralized configuration file at @/etc/arvados/config.yml@. The following command line and configuration options have changed.

You can no longer specify types of keep services to balance via the @KeepServiceTypes@ config option in the legacy config at @/etc/arvados/keep-balance/keep-balance.yml@. If you are still using the legacy config and @KeepServiceTypes@ has a value other than "disk", keep-balance will produce an error.

You can no longer specify individual keep services to balance via the @config.KeepServiceList@ command line option or the @KeepServiceList@ legacy config option. Instead, keep-balance will operate on all keepstore servers with @service_type:disk@ as reported by the @arv keep_service list@ command. If you are still using the legacy config, @KeepServiceList@ should be removed or keep-balance will produce an error.

Please see the "config migration guide":https://doc.arvados.org/v2.1/admin/config-migration.html and "keep-balance install guide":{{site.baseurl}}/install/install-keep-balance.html for more details.

h3. Arv-git-httpd configuration migration (feature "#14712":https://dev.arvados.org/issues/14712 )

The arv-git-httpd package can now be configured using the centralized configuration file at @/etc/arvados/config.yml@. Configuration via individual command line arguments is no longer available. Please see "arv-git-httpd's config migration guide":https://doc.arvados.org/v2.1/admin/config-migration.html#arv-git-httpd for more details.

h3. Keepstore and keep-web configuration migration

keepstore and keep-web no longer support configuration via (previously deprecated) command line configuration flags and environment variables.

keep-web now supports both the legacy @keep-web.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check "keep-web's install guide":{{site.baseurl}}/install/install-keep-web.html for more details.

keepstore now supports both the legacy @keepstore.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check the "keepstore config migration notes":https://doc.arvados.org/v2.1/admin/config-migration.html#keepstore and "keepstore install guide":{{site.baseurl}}/install/install-keepstore.html for more details.

h3. Keepproxy configuration migration (feature "#14715":https://dev.arvados.org/issues/14715 )

Keepproxy can now be configured using the centralized config at @/etc/arvados/config.yml@. Configuration via individual command line arguments is no longer available, and the @DisableGet@, @DisablePut@, and @PIDFile@ configuration options are no longer supported. If you are still using the legacy config and @DisableGet@ or @DisablePut@ are set to true or @PIDFile@ has a value, keepproxy will produce an error and fail to start. Please see "keepproxy's config migration guide":https://doc.arvados.org/v2.1/admin/config-migration.html#keepproxy for more details.

h3. Delete "keep_services" records

After all keepproxy and keepstore configurations have been migrated to the centralized configuration file, all keep_services records you added manually during installation should be removed. System logs from keepstore and keepproxy at startup, as well as the output of @arvados-server config-check@, will remind you to do this.
$ export ARVADOS_API_HOST=...
$ export ARVADOS_API_TOKEN=...
$ arv --format=uuid keep_service list | xargs -n1 arv keep_service delete --uuid
Once these old records are removed, @arv keep_service list@ will instead return the services listed under Services/Keepstore/InternalURLs and Services/Keepproxy/ExternalURL in your centralized configuration file.

h3. Enabling Postgres trigram indexes

Feature "#15106":https://dev.arvados.org/issues/15106 improves the speed and functionality of full text search by introducing trigram indexes on text-searchable database columns via a migration. Prior to updating, you must first install the postgresql-contrib package on your system and subsequently run the @CREATE EXTENSION pg_trgm@ SQL command on the arvados_production database as a postgres superuser.

The "postgresql-contrib package":https://www.postgresql.org/docs/10/contrib.html has been supported since PostgreSQL version 9.4. The version of the contrib package should match the version of your PostgreSQL installation. Using 9.5 as an example, the package can be installed and the extension enabled using the following:

CentOS 7
~$ sudo yum install -y postgresql95-contrib
~$ su - postgres -c "psql -d 'arvados_production' -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm'"
RHEL 7
~$ sudo yum install -y rh-postgresql95-postgresql-contrib
~$ su - postgres -c "psql -d 'arvados_production' -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm'"
Debian or Ubuntu
~$ sudo apt-get install -y postgresql-contrib-9.5
~$ sudo -u postgres psql -d 'arvados_production' -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm'
Subsequently, the @psql -d 'arvados_production' -c '\dx'@ command will display the installed extensions for the arvados_production database. This list should now contain @pg_trgm@.

h3. New Workbench 2

Workbench 2 is now ready for regular use. Follow the instructions to "install workbench 2":../install/install-workbench2-app.html

h3. New property vocabulary format for Workbench2 (feature "#14151":https://dev.arvados.org/issues/14151)

Workbench2 supports a new vocabulary format that isn't compatible with the previous one. Please read the "metadata vocabulary format admin page":{{site.baseurl}}/admin/metadata-vocabulary.html for more information.

h3. Cloud installations only: node manager replaced by arvados-dispatch-cloud

Node manager is deprecated and replaced by @arvados-dispatch-cloud@. No automated config migration is available. Follow the instructions to "install the cloud dispatcher":../install/crunch2-cloud/install-dispatch-cloud.html

*Only one dispatch process should be running at a time.* If you are migrating a system that currently runs Node manager and @crunch-dispatch-slurm@, it is safest to remove the @crunch-dispatch-slurm@ service entirely before installing @arvados-dispatch-cloud@.
~$ sudo systemctl --now disable crunch-dispatch-slurm
~$ sudo apt-get remove crunch-dispatch-slurm
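On Red Hat-based systems, a comparable removal sequence (assuming the same @crunch-dispatch-slurm@ package name) would be:

~$ sudo systemctl --now disable crunch-dispatch-slurm
~$ sudo yum remove crunch-dispatch-slurm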
h3. Jobs API is read-only (task "#15133":https://dev.arvados.org/issues/15133 )

The legacy 'jobs' API is now read-only. It has been superseded since Arvados 1.1 by containers / container_requests (aka crunch v2). Arvados installations since the end of 2017 (v1.1.0) have probably only used containers, and are unaffected by this change. So that older Arvados sites don't lose access to legacy records, the API has been converted to read-only. Creating and updating jobs (and related types job_task, pipeline_template and pipeline_instance) is disabled, and much of the related business logic has been removed, along with various other code specific to the jobs API. Specifically, the following programs associated with the jobs API have been removed: @crunch-dispatch.rb@, @crunch-job@, @crunchrunner@, @arv-run-pipeline-instance@, @arv-run@.

h3. "/" prohibited in collection and project names (issue "#15836":https://dev.arvados.org/issues/15836)

By default, Arvados now rejects new names containing the @/@ character when creating or renaming collections and projects. Previously, these names were permitted, but the resulting objects were invisible in the WebDAV "home" tree. If you prefer, you can restore the previous behavior, and optionally configure a substitution string to make the affected objects accessible via WebDAV. See @ForwardSlashNameSubstitution@ in the "configuration reference":config.html.

h3. No longer stripping ':' from strings in serialized database columns (bug "#15311":https://dev.arvados.org/issues/15311 )

Strings read from serialized columns in the database with a leading ':' would have the ':' stripped after loading the record. This behavior existed due to legacy serialization that stored Ruby symbols with a leading ':'. Unfortunately this corrupted fields where the leading ":" was intentional. This behavior has been removed.

You can test whether any records in your database are affected by going to the API server directory and running @bundle exec rake symbols:check@. This will report which records contain fields with a leading ':' that would previously have been stripped. If there are records to be updated, you can update the database using @bundle exec rake symbols:stringify@.

h3. Scoped tokens should use PATCH for updates

The API server accepts both PUT and PATCH for updates, but they will be normalized to PATCH by arvados-controller. Scoped tokens should be updated accordingly.

h2(#v1_4_1). v1.4.1 (2019-09-20)

"previous: Upgrading to 1.4.0":#v1_4_0

h3. Centos7 Python 3 dependency upgraded to rh-python36

The Python 3 dependency for Centos7 Arvados packages was upgraded from rh-python35 to rh-python36.

h2(#v1_4_0). v1.4.0 (2019-06-05)

"previous: Upgrading to 1.3.3":#v1_3_3

h3. Populating the new file_count and file_size_total columns on the collections table

As part of story "#14484":https://dev.arvados.org/issues/14484, two new columns were added to the collections table in a database migration. If your installation has a large collections table, this migration may take some time. We've seen it take ~5 minutes on an installation with 250k collections, but your mileage may vary.

The new columns are initialized with a zero value. In order to populate them, it is necessary to run a script called populate-file-info-columns-in-collections.rb from the scripts directory of the API server. This can be done out of band, ideally directly after the API server has been upgraded to v1.4.0, as sketched below.
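One plausible invocation, assuming a package-based install under @/var/www/arvados-api/current@ and the production Rails environment (adjust the path, user, and environment for your deployment):

~$ cd /var/www/arvados-api/current
~$ sudo -u www-data RAILS_ENV=production bundle exec ruby script/populate-file-info-columns-in-collections.rb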
h3. Stricter collection manifest validation on the API server

As a consequence of "#14482":https://dev.arvados.org/issues/14482, the Ruby SDK now performs more rigorous collection manifest validation. Collections created after 2015-05 are unlikely to be invalid; however, you may check for invalid manifests using the script below.

You could set up a new rvm gemset and install the specific arvados gem for testing, like so:
~$ rvm gemset create rubysdk-test
~$ rvm gemset use rubysdk-test
~$ gem install arvados -v 1.3.1.20190301212059
Next, you can run the following script using admin credentials; it will scan the whole collection database and report any collection that didn't pass the check:

{% codeblock as ruby %}
require 'arvados'
require 'arvados/keep'

api = Arvados.new
offset = 0
batch_size = 100
invalid = []

while true
  begin
    req = api.collection.index(
      :select => [:uuid, :created_at, :manifest_text],
      :include_trash => true, :include_old_versions => true,
      :limit => batch_size, :offset => offset)
  rescue
    invalid.each {|c| puts "#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}" }
    raise
  end
  req[:items].each do |col|
    begin
      Keep::Manifest.validate! col[:manifest_text]
    rescue Exception => e
      puts "Collection #{col[:uuid]} manifest not valid"
      invalid << {uuid: col[:uuid], error: e, created_at: col[:created_at]}
    end
  end
  puts "Checked #{offset} / #{req[:items_available]} - Invalid: #{invalid.size}"
  offset += req[:limit]
  break if offset > req[:items_available]
end
if invalid.empty?
  puts "No invalid collection manifests found"
else
  invalid.each {|c| puts "#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}" }
end
{% endcodeblock %}

The script will print a final report enumerating any invalid collections by UUID, with creation date and error message, so you can take corrective measures if needed.

h3. Python packaging change

As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.

One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires loading the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).

Python scripts that rely on the distribution Arvados Python SDK packages to import the Arvados SDK will need to be tweaked to load the correct Python environment. This can be done by activating the virtualenv outside of the script:
~$ source /usr/share/python2.7/dist/python-arvados-python-client/bin/activate
(python-arvados-python-client) ~$ path-to-the-python-script
Or alternatively, by updating the shebang line at the start of the script to:
#!/usr/share/python2.7/dist/python-arvados-python-client/bin/python
h3. python-arvados-cwl-runner deb/rpm package now conflicts with python-cwltool deb/rpm package

As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. The python-arvados-cwl-runner package now includes a version of cwltool. If present, the python-cwltool and cwltool distribution packages will need to be uninstalled before the python-arvados-cwl-runner deb or rpm package can be installed.

h3. Centos7 Python 3 dependency upgraded to rh-python35

As part of story "#9945":https://dev.arvados.org/issues/9945, the Python 3 dependency for Centos7 Arvados packages was upgraded from SCL python33 to rh-python35.

h3. Centos7 package for libpam-arvados depends on the python-pam package, which is available from EPEL

As part of story "#9945":https://dev.arvados.org/issues/9945, it was discovered that the Centos7 package for libpam-arvados was missing a dependency on the python-pam package, which is available from the EPEL repository. The dependency has been added to the libpam-arvados package. This means that going forward, the EPEL repository will need to be enabled to install libpam-arvados on Centos7.

h3. New configuration

Arvados is migrating to a centralized configuration file for all components. During the migration, legacy configuration files will continue to be loaded. See "Migrating Configuration":https://doc.arvados.org/v2.1/admin/config-migration.html for details.

h2(#v1_3_3). v1.3.3 (2019-05-14)

"previous: Upgrading to 1.3.0":#v1_3_0

This release corrects a potential data loss issue. If you are running Arvados 1.3.0 or 1.3.1, we strongly recommend disabling @keep-balance@ until you can upgrade to 1.3.3 or 1.4.0. With keep-balance disabled, there is no chance of data loss.

We've put together a "wiki page":https://dev.arvados.org/projects/arvados/wiki/Recovering_lost_data which outlines how to recover blocks which have been put in the trash, but not yet deleted, as well as how to identify any collections which have missing blocks so that they can be regenerated. The keep-balance component has been enhanced to provide a list of missing blocks and affected collections, and we've provided a "utility script":https://github.com/arvados/arvados/blob/main/tools/keep-xref/keep-xref.py which can be used to identify the workflows that generated those collections and who ran those workflows, so that they can be rerun.

h2(#v1_3_0). v1.3.0 (2018-12-05)

"previous: Upgrading to 1.2":#v1_2_0

This release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. We've seen the upgrade take 30 minutes or more on installations with a lot of collections.

The @arvados-controller@ component now requires the /etc/arvados/config.yml file to be present.

Support for the deprecated "jobs" API is broken in this release. Users who rely on it should not upgrade. This will be fixed in an upcoming 1.3.1 patch release; however, users are encouraged to migrate, as support for the "jobs" API will be dropped in an upcoming release. Users who are already using the "containers" API are not affected.

h2(#v1_2_1). v1.2.1 (2018-11-26)

There are no special upgrade notes for this release.

h2(#v1_2_0). v1.2.0 (2018-09-05)

"previous: Upgrading to 1.1.2 or 1.1.3":#v1_1_2

h3. Regenerate Postgres table statistics

It is recommended to regenerate the table statistics for Postgres after upgrading to v1.2.0.
If autovacuum is enabled on your installation, this script would do the trick:
#!/bin/bash

set -e
set -u

tables=`echo "\dt" | psql arvados_production | grep public | awk '{print $3}'`

for t in $tables; do
    echo "echo 'analyze $t' | psql arvados_production"
    time echo "analyze $t" | psql arvados_production
done
If you also need to do the vacuum, you could adapt the script to run 'vacuum analyze' instead of 'analyze'.

h3. New component: arvados-controller

Commit "db5107dca":https://dev.arvados.org/projects/arvados/repository/revisions/db5107dca adds a new system service, arvados-controller. More detail is available in story "#13496":https://dev.arvados.org/issues/13497.

To add the Arvados Controller to your system please refer to the "installation instructions":../install/install-api-server.html after upgrading your system to 1.2.0.

Verify your setup by confirming that API calls appear in the controller's logs (_e.g._, @journalctl -fu arvados-controller@) while loading a workbench page.

h2(#v1_1_4). v1.1.4 (2018-04-10)

"previous: Upgrading to 1.1.3":#v1_1_3

h3. arvados-cwl-runner regressions (2018-04-05)

Secondary files missing from toplevel workflow inputs

This only affects workflows that rely on implicit discovery of secondaryFiles.

If a workflow input does not declare @secondaryFiles@ corresponding to the @secondaryFiles@ of workflow steps which use the input, the workflow would inconsistently succeed or fail depending on whether the input values were specified as local files or referenced an existing collection (and whether the existing collection contained the secondary files or not). To ensure consistent behavior, the workflow is now required to declare in the top level workflow inputs any secondaryFiles that are expected by workflow steps.

As an example, the following workflow will fail because the @toplevel_input@ does not declare the @secondaryFiles@ that are expected by @step_input@:
class: Workflow
cwlVersion: v1.0
inputs:
  toplevel_input: File
outputs: []
steps:
  step1:
    in:
      step_input: toplevel_input
    out: []
    run:
      id: sub
      class: CommandLineTool
      inputs:
        step_input:
          type: File
          secondaryFiles:
            - .idx
      outputs: []
      baseCommand: echo
When run, this produces an error like this:
cwltool ERROR: [step step1] Cannot make job: Missing required secondary file 'hello.txt.idx' from file object: {
    "basename": "hello.txt",
    "class": "File",
    "location": "keep:ade9d0e032044bd7f58daaecc0d06bc6+51/hello.txt",
    "size": 0,
    "nameroot": "hello",
    "nameext": ".txt",
    "secondaryFiles": []
}
To fix this error, add the appropriate @secondaryFiles@ section to @toplevel_input@:
class: Workflow
cwlVersion: v1.0
inputs:
  toplevel_input:
    type: File
    secondaryFiles:
      - .idx
outputs: []
steps:
  step1:
    in:
      step_input: toplevel_input
    out: []
    run:
      id: sub
      class: CommandLineTool
      inputs:
        step_input:
          type: File
          secondaryFiles:
            - .idx
      outputs: []
      baseCommand: echo
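With the declaration in place, an invocation along these lines (a sketch; @workflow.cwl@ and @hello.txt@ are illustrative names, and @hello.txt.idx@ must exist next to the input file) should now pass secondary file discovery:

arvados-cwl-runner workflow.cwl --toplevel_input hello.txt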
This bug has been fixed in Arvados release v1.2.0.

Secondary files on default file inputs

@File@ inputs that have default values and also expect @secondaryFiles@ will fail to upload the default @secondaryFiles@. As an example, the following case will fail:
class: CommandLineTool
inputs:
  step_input:
    type: File
    secondaryFiles:
      - .idx
    default:
      class: File
      location: hello.txt
outputs: []
baseCommand: echo
When run, this produces an error like this:
2018-05-03 10:58:47 cwltool ERROR: Unhandled error, try again with --debug for more information:
  [Errno 2] File not found: u'hello.txt.idx'
To fix this, manually upload the primary and secondary files to keep and explicitly declare @secondaryFiles@ on the default primary file:
class: CommandLineTool
inputs:
  step_input:
    type: File
    secondaryFiles:
      - .idx
    default:
      class: File
      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
      secondaryFiles:
       - class: File
         location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt.idx
outputs: []
baseCommand: echo
This bug has been fixed in Arvados release v1.2.0.

h2(#v1_1_3). v1.1.3 (2018-02-08)

There are no special upgrade notes for this release.

h2(#v1_1_2). v1.1.2 (2017-12-22)

"previous: Upgrading to 1.1.0 or 1.1.1":#v1_1_0

h3. The minimum version for Postgres is now 9.4 (2017-12-08)

As part of story "#11908":https://dev.arvados.org/issues/11908, commit "8f987a9271":https://dev.arvados.org/projects/arvados/repository/revisions/8f987a9271 introduces a dependency on Postgres 9.4. Previously, Arvados required Postgres 9.3.

* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
* Ubuntu 16.04 (pg 9.5) does not require an upgrade
* Ubuntu 14.04 (pg 9.3) requires an upgrade to Postgres 9.4: https://www.postgresql.org/download/linux/ubuntu/
* CentOS 7 and RHEL 7 (pg 9.2) require an upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html
*# Create a database backup using @pg_dump@
*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
*# Restore from the backup using @psql@

h2(#v1_1_1). v1.1.1 (2017-11-30)

There are no special upgrade notes for this release.

h2(#v1_1_0). v1.1.0 (2017-10-24)

h3. The minimum version for Postgres is now 9.3 (2017-09-25)

As part of story "#12032":https://dev.arvados.org/issues/12032, commit "68bdf4cbb1":https://dev.arvados.org/projects/arvados/repository/revisions/68bdf4cbb1 introduces a dependency on Postgres 9.3. Previously, Arvados required Postgres 9.1.

* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
* Ubuntu 16.04 (pg 9.5) does not require an upgrade
* Ubuntu 14.04 (pg 9.3) is compatible; however, upgrading to Postgres 9.4 is recommended: https://www.postgresql.org/download/linux/ubuntu/
* CentOS 7 and RHEL 7 (pg 9.2) should upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html
*# Create a database backup using @pg_dump@
*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
*# Restore from the backup using @psql@

h2(#older). Older versions

h3. Upgrade slower than usual (2017-06-30)

As part of story "#11807":https://dev.arvados.org/issues/11807, commit "55aafbb":https://dev.arvados.org/projects/arvados/repository/revisions/55aafbb converts old "jobs" database records from YAML to JSON, making the upgrade process slower than usual.

* The migration can take some time if your database contains a substantial number of YAML-serialized rows (i.e., you installed Arvados before March 3, 2017 "660a614":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 and used the jobs/pipelines APIs). Otherwise, the upgrade will be no slower than usual.
* The conversion runs as a database migration, i.e., during the deb/rpm package upgrade process, while your API server is unavailable.
* Expect it to take about 1 minute per 20K jobs that have ever been created/run.

h3. Service discovery overhead change in keep-web (2017-06-05)

As part of story "#9005":https://dev.arvados.org/issues/9005, commit "cb230b0":https://dev.arvados.org/projects/arvados/repository/revisions/cb230b0 reduces service discovery overhead in keep-web requests.
* When upgrading keep-web _or keepproxy_ to/past this version, make sure to update the API server as well. Otherwise, a bad token in a request can cause keep-web to fail future requests until either keep-web restarts or the API server gets upgraded.

h3. Node manager now has an http endpoint for management (2017-04-12)

As part of story "#11349":https://dev.arvados.org/issues/11349, commit "2c094e2":https://dev.arvados.org/projects/arvados/repository/revisions/2c094e2 adds a "management" http server to nodemanager.

* To enable it, add to your configuration file:
[Manage]
  address = 127.0.0.1
  port = 8989
* The server responds to @http://{address}:{port}/status.json@ with a summary of how many nodes are in each state (booting, busy, shutdown, etc.)

h3. New websockets component (2017-03-23)

As part of story "#10766":https://dev.arvados.org/issues/10766, commit "e8cc0d7":https://dev.arvados.org/projects/arvados/repository/revisions/e8cc0d7 replaces puma with arvados-ws as the recommended websocket server.

* See http://doc.arvados.org/install/install-ws.html for install/upgrade instructions.
* Remove the old puma server after the upgrade is complete. Example, with runit:
$ sudo sv down /etc/sv/puma
$ sudo rm -r /etc/sv/puma
Example, with systemd:
$ systemctl disable puma
$ systemctl stop puma
h3. Change of database encoding for hashes and arrays (2017-03-06)

As part of story "#11168":https://dev.arvados.org/issues/11168, commit "660a614":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 uses JSON instead of YAML to encode hashes and arrays in the database.

* Aside from a slight performance improvement, this should have no externally visible effect.
* Downgrading past this version is not supported, and is likely to cause errors. If this happens, the solution is to upgrade past this version.
* After upgrading, make sure to restart puma and crunch-dispatch-* processes.

h3. Docker image format compatibility check (2017-02-03)

As part of story "#10969":https://dev.arvados.org/issues/10969, commit "74a9dec":https://dev.arvados.org/projects/arvados/repository/revisions/74a9dec introduces a Docker image format compatibility check: the @arv keep docker@ command prevents users from inadvertently saving docker images that compute nodes won't be able to run.

* If your compute nodes run a version of *docker older than 1.10*, you must override the default by adding to your API server configuration (@/etc/arvados/api/application.yml@):
docker_image_formats: ["v1"]
* Refer to the comments above @docker_image_formats@ in @/var/www/arvados-api/current/config/application.default.yml@ or source:services/api/config/application.default.yml or issue "#10969":https://dev.arvados.org/issues/10969 for more detail.
* *NOTE:* This does *not* include any support for migrating existing Docker images from v1 to v2 format. This will come later: for now, sites running Docker 1.9 or earlier should still *avoid upgrading Docker further than 1.9.*

h3. Debian and RPM packages now have systemd unit files (2016-09-27)

Several Debian and RPM packages -- keep-balance ("d9eec0b":https://dev.arvados.org/projects/arvados/repository/revisions/d9eec0b), keep-web ("3399e63":https://dev.arvados.org/projects/arvados/repository/revisions/3399e63), keepproxy ("6de67b6":https://dev.arvados.org/projects/arvados/repository/revisions/6de67b6), and arvados-git-httpd ("9e27ddf":https://dev.arvados.org/projects/arvados/repository/revisions/9e27ddf) -- now enable their respective components using systemd. These components prefer YAML configuration files over command line flags ("3bbe1cd":https://dev.arvados.org/projects/arvados/repository/revisions/3bbe1cd).

* On Debian-based systems using systemd, services are enabled automatically when packages are installed.
* On RedHat-based systems using systemd, unit files are installed but services must be enabled explicitly: e.g., "sudo systemctl enable keep-web; sudo systemctl start keep-web".
* The new systemd-supervised services will not start up successfully until configuration files are installed in /etc/arvados/: e.g., "Sep 26 18:23:55 62751f5bb946 keep-web[74]: 2016/09/26 18:23:55 open /etc/arvados/keep-web/keep-web.yml: no such file or directory"
* To migrate from runit to systemd after installing the new packages, we recommend the following procedure:
*# Bring down the runit service: "sv down /etc/sv/keep-web"
*# Create a YAML configuration file (e.g., /etc/arvados/keep-web/keep-web.yml -- see "keep-web -help")
*# Ensure the service is running correctly under systemd: "systemctl status keep-web" / "journalctl -u keep-web"
*# Remove the runit service so it doesn't start at next boot
* Affected services:
** keep-balance - /etc/arvados/keep-balance/keep-balance.yml
** keep-web - /etc/arvados/keep-web/keep-web.yml
** keepproxy - /etc/arvados/keepproxy/keepproxy.yml
** arvados-git-httpd - /etc/arvados/arv-git-httpd/arv-git-httpd.yml

h3. Installation paths for Python modules and script changed (2016-05-31)

Commits "ae72b172c8":https://dev.arvados.org/projects/arvados/repository/revisions/ae72b172c8 and "3aae316c25":https://dev.arvados.org/projects/arvados/repository/revisions/3aae316c25 change the filesystem location where Python modules and scripts are installed.

* Previous packages installed these files to the distribution's preferred path under @/usr/local@ (or the equivalent location in a Software Collection). Now they get installed to a path under @/usr@. This improves compatibility with other Python packages provided by the distribution. See "#9242":https://dev.arvados.org/issues/9242 for more background.
* If you simply import Python modules from scripts, or call Python tools relying on $PATH, you don't need to make any changes. If you have hardcoded full paths to some of these files (e.g., in symbolic links or configuration files), you will need to update those paths after this upgrade.
h3. Crunchrunner package is required on compute and shell nodes (2016-04-25)

Commit "eebcb5e":https://dev.arvados.org/projects/arvados/repository/revisions/eebcb5e requires the crunchrunner package to be installed on compute nodes and shell nodes in order to run CWL workflows.

* On each Debian-based compute node and shell node, run: @sudo apt-get install crunchrunner@
* On each Red Hat-based compute node and shell node, run: @sudo yum install crunchrunner@

h3. Keep permission signature algorithm change (2016-04-21)

Commit "3c88abd":https://dev.arvados.org/projects/arvados/repository/revisions/3c88abd changes the Keep permission signature algorithm.

* All software components that generate signatures must be upgraded together. These are: keepstore, API server, keep-block-check, and keep-rsync. For example, if keepstore < 0.1.20160421183420 but API server >= 0.1.20160421183420, clients will not be able to read or write data in Keep.
* Jobs and client operations that are in progress during the upgrade (including arv-put's "resume cache") will fail.

h3. Workbench's "Getting Started" popup disabled by default (2016-01-05)

Commit "e1276d6e":https://dev.arvados.org/projects/arvados/repository/revisions/e1276d6e disables Workbench's "Getting Started" popup by default.

* If you want new users to continue seeing this popup, set @enable_getting_started_popup: true@ in Workbench's @application.yml@ configuration.

h3. Crunch jobs now have access to Keep-backed writable scratch storage (2015-12-03)

Commit "5590c9ac":https://dev.arvados.org/projects/arvados/repository/revisions/5590c9ac makes a Keep-backed writable scratch directory available in crunch jobs (see "#7751":https://dev.arvados.org/issues/7751).

* All compute nodes must be upgraded to arvados-fuse >= 0.1.2015112518060 because crunch-job uses some new arv-mount flags (--mount-tmp, --mount-by-pdh) introduced in merge "346a558":https://dev.arvados.org/projects/arvados/repository/revisions/346a558
* Jobs will fail if the API server (in particular crunch-job from the arvados-cli gem) is upgraded without upgrading arvados-fuse on compute nodes.

h3. Recommended configuration change for keep-web (2015-11-11)

Commit "1e2ace5":https://dev.arvados.org/projects/arvados/repository/revisions/1e2ace5 changes the recommended config for keep-web (see "#5824":https://dev.arvados.org/issues/5824).

* proxy/dns/ssl config should be updated to route "https://download.ClusterID.example.com/" requests to keep-web (alongside the existing "collections" routing)
* keep-web command line adds @-attachment-only-host download.ClusterID.example.com@
* Workbench config adds @keep_web_download_url@
* More info on the (still beta/non-TOC-linked) "keep-web doc page":http://doc.arvados.org/install/install-keep-web.html

h3. Stopped containers are now automatically removed on compute nodes (2015-11-04)

Commit "1d1c6de":https://dev.arvados.org/projects/arvados/repository/revisions/1d1c6de removes stopped containers (see "#7444":https://dev.arvados.org/issues/7444).

* arvados-docker-cleaner removes _all_ docker containers as soon as they exit, effectively making @docker run@ default to @--rm@. If you run arvados-docker-cleaner on a host that does anything other than run crunch-jobs, and you still want to be able to use @docker start@, read the "new doc page":http://doc.arvados.org/install/install-compute-node.html to learn how to turn this off before upgrading.
h3. New keep-web service (2015-11-04)

Commit "21006cf":https://dev.arvados.org/projects/arvados/repository/revisions/21006cf adds a new keep-web service (see "#5824":https://dev.arvados.org/issues/5824).

* Nothing relies on keep-web yet, but early adopters can install it now by following http://doc.arvados.org/install/install-keep-web.html (it is not yet linked in the TOC).
================================================
FILE: doc/admin/user-activity.html.textile.liquid
================================================

---
layout: default
navsection: admin
title: "User activity report"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

The @arv-user-activity@ tool generates a summary report of user activity on an Arvados instance based on the audit logs (the @logs@ table).

h2. Installation

h2. Option 1: Install from a distribution package

This installation method is recommended to make the CLI tools available system-wide. It can coexist with the installation method described in option 2, below.

First, configure the "Arvados package repositories":{{ site.baseurl }}/install/packages.html

{% assign arvados_component = 'python3-arvados-user-activity' %}

{% include 'install_packages' %}

h2. Option 2: Install from source

Step 1: Check out the arvados source code

Step 2: Change directory to @arvados/tools/user-activity@

Step 3: Run @pip install .@ in an appropriate installation environment, such as a @virtualenv@.

Note: this depends on the "Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).

h2. Usage

Set @ARVADOS_API_HOST@ to the API server of the cluster for which the report should be generated. @ARVADOS_API_TOKEN@ needs to be a "v2 token":../admin/scoped-tokens.html for an admin user, or the system root token.

Please note that in a login cluster federation, the token needs to be issued by the login cluster, but the report should be generated against the API server of the cluster for which it is desired. In other words, @ARVADOS_API_HOST@ would point at the satellite cluster for which the report is desired, but @ARVADOS_API_TOKEN@ would be a token that belongs to a login cluster user, or the login cluster's system root token.

Run the tool with the option @--days@ giving the number of days to report on. It will request activity logs from the API and generate a summary report on standard output.

Example run:
$ bin/arv-user-activity --days 14
User activity on pirca between 2020-11-10 16:42 and 2020-11-24 16:42

Peter Amstutz  (https://workbench.pirca.arvadosapi.com/users/jutro-tpzed-a4qnxq3pcfcgtkz)
  organization: "Curii"
  role: "Software Developer"

  2020-11-10 16:51-05:00 to 2020-11-11 13:51-05:00 (21:00) Account activity
  2020-11-13 13:47-05:00 to 2020-11-14 03:32-05:00 (13:45) Account activity
  2020-11-14 04:33-05:00 to 2020-11-15 20:33-05:00 (40:00) Account activity
  2020-11-15 21:34-05:00 to 2020-11-16 13:34-05:00 (16:00) Account activity
  2020-11-16 16:21-05:00 to 2020-11-16 16:28-05:00 (00:07) Account activity
  2020-11-17 15:49-05:00 to 2020-11-17 15:49-05:00 (00:00) Account activity
  2020-11-17 15:51-05:00 Created project "New project" (pirca-j7d0g-7bxvkyr4khfa1a4)
  2020-11-17 15:51-05:00 Updated project "Test run" (pirca-j7d0g-7bxvkyr4khfa1a4)
  2020-11-17 15:51-05:00 Ran container "bwa-mem.cwl container" (pirca-xvhdp-xf2w8dkk17jkk5r)
  2020-11-17 15:51-05:00 to 2020-11-17 15:51-05:00 (0:00) Account activity
  2020-11-17 15:53-05:00 Ran container "WGS processing workflow scattered over samples container" (pirca-xvhdp-u7bm0wdy6lq4r8k)
  2020-11-17 15:53-05:00 to 2020-11-17 15:54-05:00 (00:01) Account activity
  2020-11-17 15:55-05:00 Created collection "output for pirca-dz642-36ffk81c8zzopxz" (pirca-4zz18-np35gw690ndzzk7)
  2020-11-17 15:55-05:00 to 2020-11-17 15:55-05:00 (0:00) Account activity
  2020-11-17 15:55-05:00 Created collection "Output of main" (pirca-4zz18-oiiymetwhnnhhwc)
  2020-11-17 15:55-05:00 Tagged pirca-4zz18-oiiymetwhnnhhwc
  2020-11-17 15:55-05:00 Updated collection "Output of main" (pirca-4zz18-oiiymetwhnnhhwc)
  2020-11-17 15:55-05:00 to 2020-11-17 16:04-05:00 (00:09) Account activity
  2020-11-17 16:04-05:00 Created collection "Output of main" (pirca-4zz18-f6n9n89e3dhtwvl)
  2020-11-17 16:04-05:00 Tagged pirca-4zz18-f6n9n89e3dhtwvl
  2020-11-17 16:04-05:00 Updated collection "Output of main" (pirca-4zz18-f6n9n89e3dhtwvl)
  2020-11-17 16:04-05:00 to 2020-11-17 17:55-05:00 (01:51) Account activity
  2020-11-17 20:09-05:00 to 2020-11-17 20:09-05:00 (00:00) Account activity
  2020-11-17 21:35-05:00 to 2020-11-17 21:35-05:00 (00:00) Account activity
  2020-11-18 10:09-05:00 to 2020-11-18 11:00-05:00 (00:51) Account activity
  2020-11-18 14:37-05:00 Untagged pirca-4zz18-st8yzjan1nhxo1a
  2020-11-18 14:37-05:00 Deleted collection "Output of main" (pirca-4zz18-st8yzjan1nhxo1a)
  2020-11-18 17:44-05:00 to 2020-11-18 17:44-05:00 (00:00) Account activity
  2020-11-19 12:18-05:00 to 2020-11-19 12:19-05:00 (00:01) Account activity
  2020-11-19 13:57-05:00 to 2020-11-19 14:21-05:00 (00:24) Account activity
  2020-11-20 09:48-05:00 to 2020-11-20 22:51-05:00 (13:03) Account activity
  2020-11-20 23:52-05:00 to 2020-11-22 22:32-05:00 (46:40) Account activity
  2020-11-22 23:37-05:00 to 2020-11-23 13:52-05:00 (14:15) Account activity
  2020-11-23 14:53-05:00 to 2020-11-24 11:58-05:00 (21:05) Account activity
  2020-11-24 15:06-05:00 to 2020-11-24 16:38-05:00 (01:32) Account activity

Marc Rubenfield  (https://workbench.pirca.arvadosapi.com/users/jutro-tpzed-v9s9q97pgydh1yf)
  2020-11-11 12:27-05:00 Untagged pirca-4zz18-xmq257bsla4kdco
  2020-11-11 12:27-05:00 Deleted collection "Output of main" (pirca-4zz18-xmq257bsla4kdco)

Ward Vandewege  (https://workbench.pirca.arvadosapi.com/users/jutro-tpzed-9z6foyez9ydn2hl)
  organization: "Curii Corporation, Inc."
  organization_email: "ward@curii.com"
  role: "System Administrator"
  website_url: "https://curii.com"

  2020-11-19 19:30-05:00 to 2020-11-19 19:46-05:00 (00:16) Account activity
  2020-11-20 10:51-05:00 to 2020-11-20 11:26-05:00 (00:35) Account activity
  2020-11-24 12:01-05:00 to 2020-11-24 13:01-05:00 (01:00) Account activity
================================================
FILE: doc/admin/user-management-cli.html.textile.liquid
================================================
---
layout: default
navsection: admin
title: User management at the CLI
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Initial setup
ARVADOS_API_HOST={{ site.arvados_api_host }}
ARVADOS_API_TOKEN=1234567890qwertyuiopasdfghjklzxcvbnm1234567890zzzz
In these examples, @zzzzz-tpzed-3kz0nwtjehhl0u4@ is the sample user account. Replace it with the UUID of the user you wish to manipulate. See "user management":{{site.baseurl}}/admin/user-management.html for an overview of how to use these commands.

h3. Setup a user

This creates a default git repository and VM login, and enables the user to self-activate using Workbench.
$ arv user setup --uuid zzzzz-tpzed-3kz0nwtjehhl0u4
h3. Deactivate user
$ arv user unsetup --uuid zzzzz-tpzed-3kz0nwtjehhl0u4
When deactivating a user, you may also want to "reassign ownership of their data":{{site.baseurl}}/admin/reassign-ownership.html .

h3(#activate-user). Directly activate user
$ arv user update --uuid "zzzzz-tpzed-3kz0nwtjehhl0u4" --user '{"is_active":true}'
Note: this bypasses user agreement checks, and does not set up the user with a default git repository or VM login.

h3(#create-token). Create a token for a user

As an admin, you can create tokens for other users.
$ arv api_client_authorization create --api-client-authorization '{"owner_uuid": "zzzzz-tpzed-fr97h9t4m5jffxs"}'
{
 "kind":"arvados#apiClientAuthorization",
 "etag":"9yk144t0v6cvyp0342exoh2vq",
 "uuid":"zzzzz-gj3su-yyyyyyyyyyyyyyy",
 "owner_uuid":"zzzzz-tpzed-fr97h9t4m5jffxs",
 "created_at":"2020-03-12T20:36:12.517375422Z",
 "modified_by_user_uuid":null,
 "modified_at":null,
 "api_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
 "created_by_ip_address":null,
 "expires_at":null,
 "last_used_at":null,
 "last_used_by_ip_address":null,
 "scopes":["all"]
}
To get the token string, combine the values of @uuid@ and @api_token@ in the form "v2/$uuid/$api_token". In this example the string that goes in @ARVADOS_API_TOKEN@ would be:
ARVADOS_API_TOKEN=v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
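The same operation can be scripted. Below is a minimal sketch using the "Arvados Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html; it assumes admin credentials are present in the environment, and reuses the sample @owner_uuid@ from the example above.

# Minimal sketch: create a token for another user (admin only).
# Assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set for an admin account.
import arvados

api = arvados.api('v1')
auth = api.api_client_authorizations().create(body={
    'api_client_authorization': {'owner_uuid': 'zzzzz-tpzed-fr97h9t4m5jffxs'},
}).execute()
print('ARVADOS_API_TOKEN=v2/%s/%s' % (auth['uuid'], auth['api_token']))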
h3(#delete-token). Delete a single token

As a user or admin, if you need to revoke a specific, known token, for example a token that may have been leaked to an unauthorized party, you can delete it at the command line.

First, determine the token UUID. If it is a "v2" format token (starts with "v2/") then the token UUID is the middle section between the two slashes. For example:
v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
the UUID is "zzzzz-gj3su-yyyyyyyyyyyyyyy" and you can skip to the next step. If you have a "bare" token (only the secret part) then, as an admin, you need to query the token to get the uuid:
$ ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx arv --format=uuid api_client_authorization current
zzzzz-gj3su-yyyyyyyyyyyyyyy
Now you can delete the token:
$ ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx arv api_client_authorization delete --uuid zzzzz-gj3su-yyyyyyyyyyyyyyy
h3(#delete-all-tokens). Delete all tokens belonging to a user

First, "obtain a valid token for the user.":#create-token Then, use that token to get all the user's tokens, and delete each one:
$ ARVADOS_API_TOKEN=xxxxtoken-belonging-to-user-whose-tokens-will-be-deletedxxxxxxxx ; \
for uuid in $(arv --format=uuid api_client_authorization list) ; do \
arv api_client_authorization delete --uuid $uuid ; \
done
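The same cleanup can be scripted with the Python SDK. The following is a rough sketch, assuming the target user's token is set in the environment as above; it re-lists after each batch, so it also handles users with more tokens than one page of list results.

# Rough sketch: delete all tokens belonging to the current user.
# Assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set as described above.
import arvados

api = arvados.api('v1')
while True:
    # When authenticated as a non-admin user, list() returns that user's tokens.
    items = api.api_client_authorizations().list(limit=100).execute()['items']
    if not items:
        break
    for auth in items:
        api.api_client_authorizations().delete(uuid=auth['uuid']).execute()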
h2. Adding Permissions

h3(#vm-login). VM login

Give @$user_uuid@ permission to log in to @$vm_uuid@ as @$target_username@, and make sure that @$target_username@ is a member of the @docker@ group:
user_uuid=xxxxxxxchangeme
vm_uuid=xxxxxxxchangeme
target_username=xxxxxxxchangeme

read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
{
"tail_uuid":"$user_uuid",
"head_uuid":"$vm_uuid",
"link_class":"permission",
"name":"can_login",
"properties":{"username":"$target_username", "groups":["docker"]}
}
EOF
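To confirm the permission was recorded, one option is to query the @links@ table. A small sketch using the Python SDK (the @head_uuid@ filter value is the @$vm_uuid@ placeholder from the snippet above):

# Sketch: verify the can_login permission link exists for this VM.
import arvados

api = arvados.api('v1')
links = api.links().list(filters=[
    ['link_class', '=', 'permission'],
    ['name', '=', 'can_login'],
    ['head_uuid', '=', 'xxxxxxxchangeme'],  # $vm_uuid from above
]).execute()['items']
for link in links:
    print(link['tail_uuid'], link['properties'])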


================================================
FILE: doc/admin/user-management.html.textile.liquid
================================================
---
layout: default
navsection: admin
title: User management
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Authentication":#authentication
## "Federated Authentication":#federated_auth
# "User activation":#user_activation
# "User agreements and self-activation":#user_agreements
# "User profile":#user_profile
# "User visibility":#user_visibility
# "Pre-setup user by email address":#pre-activated
# "Pre-activate federated user":#pre-activated-fed
# "Auto-setup federated users from trusted clusters":#auto_setup_federated
# "Activation flows":#activation_flows
## "Private instance":#activation_flow_private
## "Federated instance":#federated
## "Open instance":#activation_flow_open
# "Service Accounts":#service_accounts

{% comment %}
TODO: Link to relevant workbench documentation when it gets written
{% endcomment %}

This page describes how user accounts are created, set up and activated.

h2(#authentication). Authentication

"Browser login and management of API tokens is described here.":{{site.baseurl}}/api/tokens.html

After completing the log in and authentication process, the API server receives a user record from the upstream identity provider (Google, LDAP, etc) consisting of the user's name, primary email address, alternate email addresses, and optional unique provider identifier (@identity_url@).

If a provider identifier is given, the API server searches for a matching user record.

If a provider identifier is not given, or no match is found, the API server next searches by primary email and then by alternate email address.  This enables "provider migration":migrating-providers.html and "pre-activated accounts.":#pre-activated

If no user account is found, a new user account is created with the information from the identity provider.

If a user account has been "linked":{{site.baseurl}}/user/topics/link-accounts.html or migrated the API server may follow internal redirects (@redirect_to_user_uuid@) to select the linked or migrated user account.

h3(#federated_auth). Federated Authentication

A federated user follows a slightly different flow.  The client presents a token issued by the remote cluster.  The local API server contacts the remote cluster to verify the user's identity.  This results in a user object (representing the remote user) being created on the local cluster.  If the user cannot be verified, the token will be rejected.  If the user is inactive on the remote cluster, a user record will be created, but it will also be inactive.

h2(#user_activation). User activation

This section describes the different user account states.

!(side){{site.baseurl}}/images/user-account-states.svg!

# A new user record is not set up, and not active. An inactive user cannot create or update any object, but can read Arvados objects that the user account has permission to read (such as publicly available items readable by the "anonymous" user).
# Using Workbench or the "command line":{{site.baseurl}}/admin/user-management-cli.html, the admin invokes @setup@ on the user. The setup method adds the user to the "All users" group.
- If "Users.AutoSetupNewUsers":config.html is true, this happens automatically during user creation, so in that case new users start at step (3).
- If "Users.AutoSetupNewUsersWithVmUUID":config.html is set, the user is given login permission to the specified shell node
# The user is set up, but not yet active. The browser presents "user agreements":#user_agreements (if any) and then invokes the user @activate@ method on the user's behalf.
# The user @activate@ method checks that all "user agreements":#user_agreements are signed. If so, or if there are no user agreements, the user is activated.
# The user is active. The user has normal access to the system.
# From steps (1) and (3), an admin user can directly update the @is_active@ flag. This bypasses enforcement that user agreements are signed. If the user was not yet set up (still in step (1)), this adds the user to the "All users" group, but bypasses creating a default git repository and assigning default VM access.
# An existing user can have their access revoked using @unsetup@ and "ownership reassigned":reassign-ownership.html . Unsetup removes the user from the "All users" group and makes them inactive, preventing them from re-activating themselves. "Ownership reassignment":reassign-ownership.html moves any objects or permissions from the old user to a new user and deletes any credentials for the old user.
User management can be performed through the web using Workbench or the command line. See "user management at the CLI":{{site.baseurl}}/admin/user-management-cli.html for specific examples.

h2(#user_agreements). User agreements and self-activation

The @activate@ method of the users controller checks if the user account is part of the "All Users" group and whether the user has "signed" all the user agreements.

User agreements are accessed through the "user_agreements API":{{site.baseurl}}/api/methods/user_agreements.html . This returns a list of collection records.

The user agreements that users are required to sign should be added to the @links@ table this way:
$ arv link create --link '{
  "link_class": "signature",
  "name": "require",
  "tail_uuid": "*system user uuid*",
  "head_uuid: "*collection uuid*"
}'
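Equivalently, here is a quick sketch of the same link creation using the Python SDK (both UUID values are placeholders):

# Sketch: create a "signature require" link for a user agreement collection.
import arvados

api = arvados.api('v1')
api.links().create(body={'link': {
    'link_class': 'signature',
    'name': 'require',
    'tail_uuid': 'zzzzz-tpzed-000000000000000',  # system user UUID (placeholder)
    'head_uuid': 'zzzzz-4zz18-xxxxxxxxxxxxxxx',  # agreement collection (placeholder)
}}).execute()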
The collection should contain a single HTML file with the user agreement text. Workbench displays the clickthrough agreements which the user can "sign". The @user_agreements/sign@ endpoint creates a Link object:
{
  "link_class": "signature"
  "name": "click",
  "tail_uuid": "*user uuid*",
  "head_uuid: "*collection uuid*"
}
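For clients that need to check agreement status programmatically, here is a rough sketch using the Python SDK. It queries the signature links directly, as an alternative to the @user_agreements/signatures@ endpoint:

# Sketch: compare required agreements against the current user's signatures.
import arvados

api = arvados.api('v1')
required = api.user_agreements().list().execute()['items']
signed = api.links().list(filters=[
    ['link_class', '=', 'signature'],
    ['name', '=', 'click'],
]).execute()['items']
signed_uuids = {link['head_uuid'] for link in signed}
for agreement in required:
    print(agreement['uuid'],
          'signed' if agreement['uuid'] in signed_uuids else 'unsigned')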
The @user_agreements/signatures@ endpoint returns the list of Link objects that represent signatures by the current user (created by @sign@).

h2(#user_profile). User profile

The fields making up the user profile are described in @Workbench.UserProfileFormFields@ . See "Configuration reference":config.html .

The user profile is checked by Workbench after checking if user agreements need to be signed. The values entered are stored in the @properties@ field on the user object. Unlike user agreements, the requirement to fill out the user profile is not enforced by the API server.

h2(#user_visibility). User visibility

Initially, a user is not part of any groups and will not be able to interact with other users on the system. The admin should determine who the user is permitted to interact with and use Workbench or the "command line":group-management.html#add to create and add the user to the appropriate group(s).

h2(#pre-activated). Pre-setup user by email address

You may create a user account for a user that has not yet logged in, and identify the user by email address.

1. As an admin, create a user object:
$ arv --format=uuid user create --user '{"email": "foo@example.com", "username": "foo"}'
clsr1-tpzed-1234567890abcdf
$ arv user setup --uuid clsr1-tpzed-1234567890abcdf
2. When the user logs in for the first time, the email address will be recognized and the user will be associated with the existing user object.

h2(#pre-activated-fed). Pre-activate federated user

1. As admin, create a user object with the @uuid@ of the federated user (this is the user's UUID on their home cluster, called @clsr2@ in this example):
$ arv user create --user '{"uuid": "clsr2-tpzed-1234567890abcdf", "email": "foo@example.com", "username": "foo", "is_active": true}'
2. When the user logs in, they will be associated with the existing user object.

h2(#auto_setup_federated). Auto-setup federated users from trusted clusters

By setting @ActivateUsers: true@ for each federated cluster in @RemoteClusters@, a federated user from one of the listed clusters will be automatically set up and activated on this cluster. See the configuration example in "Federated instance":#federated .

h2(#activation_flows). Activation flows

h3(#activation_flow_private). Private instance

Policy: users must be manually set up by the admin.

Here is the configuration for this policy. This is also the default if not provided.
Users:
  AutoSetupNewUsers: false
# The user is created. Not set up. @is_active@ is false.
# Workbench checks @is_invited@ and finds it is false. The user gets an "inactive user" page.
# The admin goes to the user page and clicks "setup user" or sets @is_active@ to true.
# On refreshing Workbench, the user is able to self-activate after signing clickthrough agreements (if any).
# Alternatively, directly setting @is_active@ to true also sets up the user, but skips clickthrough agreements (because the user is already active).

h3(#federated). Federated instance

Policy: users from other clusters in the federation are activated; users from outside the federation must be manually approved.

Here is the configuration for this policy and an example remote cluster @clsr2@.
Users:
  AutoSetupNewUsers: false
RemoteClusters:
  clsr2:
    ActivateUsers: true
# A federated user arrives claiming to be from cluster @clsr2@.
# The API server authenticates the user as being from cluster @clsr2@.
# Because @clsr2@ has @ActivateUsers: true@, the user is set up and activated.
# The user can immediately start using Workbench.

h3(#activation_flow_open). Open instance

Policy: anybody who shows up and signs the agreements is activated.
Users:
  AutoSetupNewUsers: true
"Set up user agreements":#user_agreements by creating "signature" "require" links as described earlier. # User is created and auto-setup. At this point, @is_active@ is false, but user has been added to "All users" group. # Workbench checks @is_invited@ and finds it is true, because the user is a member of "All users" group. # Workbench presents user with list of user agreements, user reads and clicks "sign" for each one. # Workbench tries to activate user. # User is activated. h2(#service_accounts). Service Accounts For automation purposes, you can create service accounts that aren't tied to an external authorization system. These kind of accounts don't really differ much from standard user accounts, they just cannot be accessed through a normal login mechanism. As an admin, you can create accounts like described in the "user pre-setup section above":#pre-activated and then "activate them by updating its @is_active@ field":{{site.baseurl}}/admin/user-management-cli.html#activate-user. Once a service account is created you can "use an admin account to set up a token":{{site.baseurl}}/admin/user-management-cli.html#create-token for it, so that the required automations can authenticate. Note that these tokens support having a limited lifetime by using the @expires_at@ field and also "limited scope":{{site.baseurl}}/admin/scoped-tokens.html, if required by your security policies. You can read more about them at "the API reference page":{{site.baseurl}}/api/methods/api_client_authorizations.html. ================================================ FILE: doc/api/dispatch.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "cloud dispatcher" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The cloud dispatcher provides several management/diagnostic APIs, intended to be used by a system administrator. {% include 'notebox_begin' %} The "@arvados-server instance@ subcommand":{{site.baseurl}}/admin/dispatch.html provides a command-line interface for the most commonly used parts of this API. {% include 'notebox_end' %} These APIs are not normally exposed to external clients. To use them, connect directly to the dispatcher's internal URL (see Services.DispatchCloud.InternalURLs in the cluster config file). All requests must include the cluster's management token (@ManagementToken@ in the cluster config file). Example:
curl -H "Authorization: Bearer $management_token" http://localhost:9006/arvados/v1/dispatch/containers
These APIs are not available via the @arv@ CLI tool.

Note: the term "instance" here refers to a virtual machine provided by a cloud computing service. The alternate terms "cloud VM", "compute node", and "worker node" are sometimes used as well in config files, documentation, and log messages.

h3. List containers

@GET /arvados/v1/dispatch/containers@

Return a list of containers that are either ready to dispatch, or being started/monitored by the dispatcher.

Each entry in the returned list of @items@ includes:
* an @instance_type@ entry with the name and attributes of the instance type that will be used to schedule the container (chosen from the @InstanceTypes@ section of your cluster config file);
* a @container@ entry with selected attributes of the container itself, including @uuid@, @priority@, @runtime_constraints@, and @state@. Other fields of the container records are not loaded by the dispatcher, and will have empty/zero values here (e.g., @{...,"created_at":"0001-01-01T00:00:00Z","command":[],...}@); and
* a @scheduling_status@ field with a brief explanation of the container's status in the dispatch queue, or an empty string if scheduling is not applicable, e.g., the container has already started running.

Example response:
{
  "items": [
    {
      "container": {
        "uuid": "zzzzz-dz642-xz68ptr62m49au7",
        ...
        "priority": 562948375092493200,
        ...
        "state": "Locked",
        ...
      },
      "instance_type": {
        "Name": "Standard_E2s_v3",
        "ProviderType": "Standard_E2s_v3",
        "VCPUs": 2,
        "RAM": 17179869184,
        "Scratch": 32000000000,
        "IncludedScratch": 32000000000,
        "AddedScratch": 0,
        "Price": 0.146,
        "Preemptible": false
      },
      "scheduling_status": "Waiting for a Standard_E2s_v3 instance to boot and be ready to accept work."
    },
    ...
  ]
}
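For reference, here is a small Python sketch that queries the same endpoint. The base URL matches the curl example above, and @ARVADOS_MANAGEMENT_TOKEN@ is a hypothetical environment variable holding the cluster's management token:

# Sketch: list dispatch queue entries via the management API.
import os
import requests

base = 'http://localhost:9006'  # the dispatcher's InternalURL (see above)
headers = {'Authorization': 'Bearer ' + os.environ['ARVADOS_MANAGEMENT_TOKEN']}

resp = requests.get(base + '/arvados/v1/dispatch/containers', headers=headers)
resp.raise_for_status()
for item in resp.json()['items']:
    print(item['container']['uuid'],
          item['container']['state'],
          item.get('scheduling_status', ''))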
h3. Get specified container

@GET /arvados/v1/dispatch/container?container_uuid={uuid}@

Return the same information as "list containers" above, but for a single specified container.

Example response:
{
  "container": {
    ...
  },
  "instance_type": {
    ...
  },
  "scheduling_status": "Waiting for a Standard_E2s_v3 instance to boot and be ready to accept work."
}
h3. Terminate a container

@POST /arvados/v1/dispatch/containers/kill?container_uuid={uuid}&reason={string}@

Make a single attempt to terminate the indicated container on the relevant instance. (The caller can implement a delay-and-retry loop if needed.)

A container terminated this way will end with state @Cancelled@ if its docker container had already started, or @Queued@ if it was terminated while setting up the runtime environment.

The provided @reason@ string will appear in the dispatcher's log, but not in the user-visible container log.

If the provided @container_uuid@ is not scheduled/running on an instance, the response status will be 404.

h3. List instances

@GET /arvados/v1/dispatch/instances@

Return a list of cloud instances.

Example response:
{
  "items": [
    {
      "instance": "/subscriptions/abcdefab-abcd-abcd-abcd-abcdefabcdef/resourceGroups/zzzzz/providers/Microsoft.Compute/virtualMachines/compute-abcdef0123456789abcdef0123456789-abcdefghijklmno",
      "address": "10.23.45.67",
      "price": 0.073,
      "arvados_instance_type": "Standard_DS1_v2",
      "provider_instance_type": "Standard_DS1_v2",
      "last_container_uuid": "zzzzz-dz642-vp7scm21telkadq",
      "last_busy": "2020-01-13T15:20:21.775019617Z",
      "running_container_uuids": ["zzzzz-dz642-vp7scm21telkadq"],
      "worker_state": "running",
      "idle_behavior": "run"
    },
    ...
}
The @instance@ value is the instance's identifier, assigned by the cloud provider. It can be used with the instance APIs below.

The @last_container_uuid@ value indicates the most recently started container, if any. (It does not necessarily indicate that the container is still running.)

The @worker_state@ value indicates the instance's capability to run containers.
* @unknown@: instance was not created by this dispatcher, and a boot probe has not yet succeeded (this state typically appears briefly after the dispatcher restarts).
* @booting@: cloud provider says the instance exists, but a boot probe has not yet succeeded.
* @idle@: instance is idle and ready to run a container.
* @running@: instance is running one or more containers.
* @shutdown@: cloud provider has been instructed to terminate the instance.

The @idle_behavior@ value determines what the dispatcher will do with the instance when it is idle; see the hold/drain/run APIs below.

h3. Hold an instance

@POST /arvados/v1/dispatch/instances/hold?instance_id={instance}@

Set the indicated instance's idle behavior to @hold@. The instance will not be shut down automatically. If any containers are currently running, they will be allowed to continue, but no new containers will be scheduled.

h3. Drain an instance

@POST /arvados/v1/dispatch/instances/drain?instance_id={instance}@

Set the indicated instance's idle behavior to @drain@. If any containers are currently running, they will be allowed to continue, but when the instance becomes idle, it will be shut down.

h3. Resume an instance

@POST /arvados/v1/dispatch/instances/run?instance_id={instance}@

Set the indicated instance's idle behavior to @run@ (the normal behavior). When it becomes idle, it will be eligible to run new containers. It will be shut down automatically when the configured idle threshold is reached.

h3. Shut down an instance

@POST /arvados/v1/dispatch/instances/kill?instance_id={instance}&reason={string}@

Terminate the indicated instance. If any containers are running on the instance, they will be killed too; no effort is made to wait for them to end gracefully. The provided @reason@ string will appear in the dispatcher's log.

================================================
FILE: doc/api/execution.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Computing with Crunch
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Crunch is the name for the Arvados system for managing computation. It provides an abstract API to various clouds and HPC resource allocation and scheduling systems, and integrates closely with Keep storage and the Arvados permission system.

h2. Container API

# To submit work, create a "container request":{{site.baseurl}}/api/methods/container_requests.html in the @Committed@ state.
# The system will fulfill the container request by creating or reusing a "Container object":{{site.baseurl}}/api/methods/containers.html and assigning it to the @container_uuid@ field. If the same request has been submitted in the past, it may reuse an existing container. The reuse behavior can be suppressed with @use_existing: false@ in the container request.
# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as Slurm).
# The container executes. Upon termination the container goes into the @Complete@ state. If the container execution was interrupted or lost due to system failure, it will go into the @Cancelled@ state.
# When the container associated with the container request is completed, the container request will go into the @Final@ state.
# The @output_uuid@ field of the container request contains the UUID of the output collection produced by the container request.

!(full-width){{site.baseurl}}/images/Crunch_dispatch.svg!

h2(#RAM). Understanding RAM requests for containers

The @runtime_constraints@ section of a container specifies working RAM (@ram@) and Keep cache (@keep_cache_ram@). If not specified, containers get a default Keep cache (@container_default_keep_cache_ram@, default 256 MiB). The total RAM requested for a container is the sum of working RAM, Keep cache, and an additional RAM reservation configured by the admin (@ReserveExtraRAM@ in the dispatcher configuration, default zero).

The total RAM request is used to schedule containers onto compute nodes. RAM allocation limits are enforced using kernel controls such as cgroups. A container which requests 1 GiB RAM will only be permitted to allocate up to 1 GiB of RAM, even if scheduled on a 4 GiB node. On HPC systems, a multi-core node may run multiple containers at a time.

When running on the cloud, the memory request (along with CPU and disk) is used to select (and possibly boot) an instance type with adequate resources to run the container. Instance type RAM is derated 5% from the published specification to accommodate virtual machine, kernel and system services overhead.

h3. Calculate minimum instance type RAM for a container

(RAM request + Keep cache + ReserveExtraRAM) * (100/95)

For example, for a 3 GiB request, default Keep cache, and no extra RAM reserved:

(3072 + 256) * (100/95) = 3503 MiB

To run this container, the instance type must have a published RAM size of at least 3503 MiB.

h3. Calculate the maximum requestable RAM for an instance type

(Instance type RAM * (95/100)) - Keep cache - ReserveExtraRAM

For example, for a 3.75 GiB node, default Keep cache, and no extra RAM reserved:

(3840 * 0.95) - 256 = 3392 MiB

To run on this instance type, the container can request at most 3392 MiB of working RAM.

================================================
FILE: doc/api/index.html.textile.liquid
================================================
---
layout: default
navsection: api
title: API Reference
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

This reference describes the semantics of Arvados resources and how to programmatically access Arvados via its REST API. Each resource listed in this section is exposed on the Arvados API server under the @/arvados/v1/@ path prefix, for example, @https://{{ site.arvados_api_host }}/arvados/v1/collections@.

h2. Discovery document

The API server publishes a machine-readable description of its endpoints and some additional site configuration values via a JSON-formatted discovery document. This is available at @/discovery/v1/apis/arvados/v1/rest@, for example @https://{{ site.arvados_api_host }}/discovery/v1/apis/arvados/v1/rest@. Some Arvados SDKs use the discovery document to generate language bindings.

h2. Exported configuration

The Controller exposes a subset of the cluster's configuration and makes it available to clients in JSON format. This public config includes valuable information like several services' URLs, timeout settings, etc., and it is available at @/arvados/v1/config@, for example @https://{{ site.arvados_api_host }}/arvados/v1/config@.
Workbench is one example of a client using this information, as it's a client-side application and doesn't have access to the cluster's config file.

h2. Exported vocabulary definition

When configured, the Controller also exports the "metadata vocabulary definition":{{site.baseurl}}/admin/metadata-vocabulary.html in JSON format. This functionality is useful for clients like Workbench and the Python SDK to translate between identifiers and human-readable labels when reading and writing objects on the system. This is available at @/arvados/v1/vocabulary@, for example @https://{{ site.arvados_api_host }}/arvados/v1/vocabulary@.

================================================
FILE: doc/api/keep-s3.html.textile.liquid
================================================
---
layout: default
navsection: api
navmenu: API Methods
title: "S3 API"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

The Simple Storage Service (S3) API is a de-facto standard for object storage originally developed by Amazon Web Services. Arvados supports accessing files in Keep using the S3 API. S3 is supported by many "cloud native" applications, and client libraries exist in many languages for programmatic access.

h3. Endpoints and Buckets

To access Arvados S3 using an S3 client library, you must tell it to use the URL of the keep-web server (this is @Services.WebDAVDownload.ExternalURL@ in the public configuration) as the custom endpoint. The keep-web server will decide to treat it as an S3 API request based on the presence of an AWS-format Authorization header. Requests without an Authorization header, or with a differently formatted Authorization header, will be treated as "WebDAV":keep-webdav.html .

The "bucket name" is an Arvados collection uuid, portable data hash, or project uuid. Path-style and virtual host-style requests are supported.

* A path-style request uses the hostname indicated by @Services.WebDAVDownload.ExternalURL@, with the bucket name in the first path segment: @https://download.example.com/zzzzz-4zz18-asdfgasdfgasdfg/@.
* A virtual host-style request uses the hostname pattern indicated by @Services.WebDAV.ExternalURL@, with a bucket name in place of the leading @*@: @https://zzzzz-4zz18-asdfgasdfgasdfg.collections.example.com/@.

If you have wildcard DNS, TLS, and routing set up, an S3 client configured with endpoint @collections.example.com@ should work regardless of which request style it uses.

h3. Supported Operations

h4. ListObjects

Supports the following request query parameters:
* delimiter
* marker
* max-keys
* prefix

h4. GetObject

Supports the @Range@ header.

h4. PutObject

Can be used to create or replace a file in a collection. An empty PUT with a trailing slash and @Content-Type: application/x-directory@ will create a directory within a collection if the Arvados configuration option @Collections.S3FolderObjects@ is true. Missing parent/intermediate directories within a collection are created automatically. Cannot be used to create a collection or project.

h4. DeleteObject

Can be used to remove files from a collection. If used on a directory marker, it will delete the directory only if the directory is empty.

h4. HeadBucket

Can be used to determine if a bucket exists and if the client has read access to it.

h4. HeadObject

Can be used to determine if an object exists and if the client has read access to it.

h4. GetBucketVersioning
Bucket versioning is presently not supported, so this will always respond that bucket versioning is not enabled.

h3. Accessing collection/project properties as metadata

GetObject, HeadObject, and HeadBucket return Arvados object properties as S3 metadata headers, e.g., @X-Amz-Meta-Foo: bar@.

If the requested path indicates a file or directory placeholder inside a collection, or the top level of a collection, GetObject and HeadObject return the collection properties. If the requested path indicates a directory placeholder corresponding to a project, GetObject and HeadObject return the properties of the project. HeadBucket returns the properties of the collection or project corresponding to the bucket name.

Non-string property values are returned in a JSON representation, e.g., @["foo","bar"]@.

As in Amazon S3, property values containing non-ASCII characters are returned in BASE64-encoded form as described in RFC 2047, e.g., @=?UTF-8?b?4pu1?=@.

The GetBucketTagging and GetObjectTagging APIs are _not_ supported.

It is not possible to modify collection or project properties using the S3 API.

h3. Authorization mechanisms

Keep-web accepts AWS Signature Version 4 (AWS4-HMAC-SHA256) as well as the older V2 AWS signature.

If your client uses V4 signatures exclusively _and_ your Arvados token was issued by the same cluster you are connecting to, you can use the Arvados token's UUID part as your S3 Access Key, and its secret part as your S3 Secret Key. This is preferred, where applicable.

Example using cluster @zzzzz@:
* Arvados token: @v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
* Access Key: @zzzzz-gj3su-yyyyyyyyyyyyyyy@
* Secret Key: @xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@

In all other cases, replace every @/@ character in your Arvados token with @_@, and use the resulting string as both Access Key and Secret Key.

Example using a cluster other than @zzzzz@ _or_ an S3 client that uses V2 signatures:
* Arvados token: @v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
* Access Key: @v2_zzzzz-gj3su-yyyyyyyyyyyyyyy_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
* Secret Key: @v2_zzzzz-gj3su-yyyyyyyyyyyyyyy_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@

================================================
FILE: doc/api/keep-web-urls.html.textile.liquid
================================================
---
layout: default
navsection: api
navmenu: API Methods
title: "Keep-web URL patterns"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Files served by @keep-web@ can be rendered directly in the browser, or @keep-web@ can instruct the browser to only download the file.

When serving files that will render directly in the browser, it is important to properly configure the keep-web service to mitigate cross-site-scripting (XSS) attacks. An HTML page can be stored in a collection. If an attacker causes a victim to visit that page through Workbench, the HTML will be rendered by the browser. If all collections are served at the same domain, the browser will consider collections as coming from the same origin, which will grant access to the same browsing data (cookies and local storage). This would enable malicious JavaScript on that page to access Arvados on behalf of the victim.
This can be mitigated by having separate domains for each collection, or limiting preview to circumstances where the collection is not accessed with the user's regular full-access token.

For clusters where this risk is acceptable, this protection can also be turned off by setting the @Collections.TrustAllContent@ configuration flag to true; see the "configuration reference":../admin/config.html for more detail.

The following "same origin" URL patterns are supported for public collections and collections shared anonymously via secret links (i.e., collections which can be served by keep-web without making use of any implicit credentials like cookies). See "Same-origin URLs" below.
http://collections.example.com/c=uuid_or_pdh/path/file.txt
http://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt
The following "multiple origin" URL patterns are supported for all collections:
http://uuid_or_pdh--collections.example.com/path/file.txt
http://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt
In the "multiple origin" form, the string @--@ can be replaced with @.@ with identical results (assuming the downstream proxy is configured accordingly). These two are equivalent:
http://uuid_or_pdh--collections.example.com/path/file.txt
http://uuid_or_pdh.collections.example.com/path/file.txt
The first form (with @--@ instead of @.@) avoids the cost and effort of deploying a wildcard TLS certificate for @*.collections.example.com@ at sites that already have a wildcard certificate for @*.example.com@ . The second form is likely to be easier to configure, and more efficient to run, on a downstream proxy.

In all of the above forms, the @collections.example.com@ part can be anything at all: keep-web itself ignores everything after the first @.@ or @--@. (Of course, in order for clients to connect at all, DNS and any relevant proxies must be configured accordingly.)

In all of the above forms, the @uuid_or_pdh@ part can be either a collection UUID or a portable data hash with the @+@ character optionally replaced by @-@ . (When @uuid_or_pdh@ appears in the domain name, replacing @+@ with @-@ is mandatory, because @+@ is not a valid character in a domain name.)

In all of the above forms, a top level directory called @_@ is skipped. In cases where the @path/file.txt@ part might start with @t=@ or @c=@ or @_/@, links should be constructed with a leading @_/@ to ensure the top level directory is not interpreted as a token or collection ID.

Assuming there is a collection with UUID @zzzzz-4zz18-znfnqtbbv4spc3w@ and portable data hash @1f4b0bc7583c2a7f9102c395f4ffc5e3+45@, the following URLs are interchangeable:
http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
The following URLs are read-only, but will return the same content as above:
http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
If the collection is named "MyCollection" and located in a project called "MyProject" which is in the home project of a user with username is "bob", the following read-only URL is also available when authenticating as bob: pre. http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt An additional form is supported specifically to make it more convenient to maintain support for existing Workbench download links: pre. http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt A regular Workbench "download" link is also accepted, but credentials passed via cookie, header, etc. are ignored. Only public data can be served this way: pre. http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt h2(#same-site). Same-site requirements for requests with tokens Although keep-web doesn't care about the domain part of the URL, the clients do: especially when rendering inline content. When a client passes a token in the URL, keep-web sends a redirect response placing the token in a @Set-Cookie@ header with the @SameSite=Lax@ attribute. The browser will ignore the cookie if it's not coming from a _same-site_ request, and thus its subsequent request will fail with a @401 Unauthorized@ error. This mainly affects Workbench's ability to show inline content, so it should be taken into account when configuring both services' URL schemes. You can read more about the definition of a _same-site_ request at the "RFC 6265bis-03 page":https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-5.2 ================================================ FILE: doc/api/keep-webdav.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "WebDAV" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} "Web Distributed Authoring and Versioning (WebDAV)":https://tools.ietf.org/html/rfc4918 is an IETF standard set of extensions to HTTP to manipulate and retrieve hierarchical web resources, similar to directories in a file system. Arvados supports accessing files in Keep using WebDAV. Most major operating systems include built-in support for mounting WebDAV resources as network file systems, see user guide sections for "Windows":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-windows.html, "macOS":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-os-x.html, or "Linux (GNOME)":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html#gnome. WebDAV is also supported by various standalone storage browser applications such as "Cyberduck":https://cyberduck.io/ and client libraries exist in many languages for programmatic access. Keep-web provides read/write HTTP (WebDAV) access to files stored in Keep. It serves public data to anonymous and unauthenticated clients, and serves private data to clients that supply Arvados API tokens. h3. Supported Operations Supports WebDAV HTTP methods @GET@, @PUT@, @DELETE@, @PROPFIND@, @COPY@, and @MOVE@. Does not support @LOCK@ or @UNLOCK@. These methods will be accepted, but are no-ops. h3. Browsing Requests can be authenticated a variety of ways as described below in "Authentication mechanisms":#auth . An unauthenticated request will return a 401 Unauthorized response with a @WWW-Authenticate@ header indicating "support for RFC 7617 Basic Authentication":https://tools.ietf.org/html/rfc7617 . 
Getting a listing from keep-web starting at the root path @/@ will return two folders, @by_id@ and @users@.

The @by_id@ folder will return an empty listing. However, a path which starts with @/by_id/@ followed by a collection uuid, portable data hash, or project uuid will return the listing of that object.

The @users@ folder will return a listing of the users for whom the client has permission to read the "home" project of that user. Browsing an individual user will return the collections and projects directly owned by that user. Browsing those collections and projects returns listings of the files, directories, collections, and subprojects they contain, and so forth.

In addition to the @/by_id/@ path prefix, the collection or project can be specified using a path prefix of @/c=uuid_or_pdh/@ or (if the cluster is properly configured) as a virtual host. This is described on "Keep-web URLs":keep-web-urls.html

It is possible for a project or a "filter group":methods/groups.html#filter to appear as its own descendant in the @by_id@ and @users@ tree (a filter group may match itself, its own ancestor, another filter group that matches its ancestor, etc). When this happens, the descendant appears as an empty read-only directory. For example, if filter group @f@ matches its own parent @p@:
* @/users/example/p/f@ will show the filter group's contents (matched projects and collections).
* @/users/example/p/f/p@ will appear as an empty directory.
* @/by_id/uuid_of_f/p@ will show the parent project's contents, including @f@.
* @/by_id/uuid_of_f/p/f@ will appear as an empty directory.

h3(#zip). Downloading ZIP archives

Keep-web can produce an uncompressed ZIP archive of a collection, or a subset of a collection.

To request a ZIP archive:
* The request must include an @Accept: application/zip@ header _or_ @?accept=application/zip&disposition=attachment@ in the query.
* The request URI must specify the root directory of a collection, e.g., @/by_id/uuid_or_pdh/@. See "Keep-web URLs":keep-web-urls.html for more examples.

To download a subset of a collection, the request can specify one or more pathnames relative to the collection directory:
* A @files@ parameter in the query of a @GET@ request, e.g., @https://uuid_or_pdh.collections.example.com/?files=file1&files=file2@,
* A @files@ parameter in the body of a @POST@ request with a @Content-Type: application/x-www-form-urlencoded@ header, or
* The value of a @files@ key in a JSON object in the body of a @POST@ request with a @Content-Type: application/json@ header, e.g., @{"files":["file1","file2"]}@.

Keep-web returns an error if one of the specified paths does not exist in the requested collection.

The ZIP archive comment will include a download URL with the collection UUID or portable data hash, e.g., "Downloaded from https://collections.example.com/by_id/zzzzz-4zz18-0pg114rezrbz46u/".

The ZIP archive will also include collection metadata if the request sets an @include_collection_metadata@ parameter, e.g., @https://uuid_or_pdh.collections.example.com/?include_collection_metadata=true@. The resulting ZIP archive will include a file named @collection.json@ containing the collection's metadata (UUID, name, description, portable data hash, properties, creation time, modification time) and information about the user who last modified it (UUID, full name, username, and email). If the collection is specified by portable data hash rather than name or UUID, @collection.json@ will contain only the portable data hash.

Example @collection.json@ content:
{
  "created_at":"2025-04-28T19:50:49.046969000Z",
  "description":"Description of test collection\n",
  "modified_at":"2025-04-28T19:50:49.093166000Z",
  "modified_by_user":{
    "email":"example@example.com",
    "full_name":"Example Name",
    "username":"example",
    "uuid":"zzzzz-tpzed-xurymjxw79nv3jz"
  },
  "name":"collection name",
  "portable_data_hash":"6acf043b102afcf04e3be2443e7ea2ba+223",
  "properties":{
    "key":"value"
  },
  "uuid":"zzzzz-4zz18-0pg114rezrbz46u"
}
The request can also include a @download_filename@ parameter with a desired name for the downloaded zip file. This filename will be included in the @Content-Disposition@ response header. If this parameter is not provided, the filename suggested in the response header will be based on the collection name or portable data hash:
* @{collection name}.zip@ if downloading an entire collection
* @{collection name} - {file name}.zip@ if a single file was specified in the request
* @{collection name} - 3 files.zip@ if a directory or multiple files were specified in the request
* @{portable data hash}.zip@, @{portable data hash} - {file name}.zip@, etc., if the source collection was specified by portable data hash rather than name or UUID

Example request:
GET /by_id/zzzzz-4zz18-0pg114rezrbz46u
Accept: application/zip
Content-Type: application/json

{
  "download_filename": "odd-numbered files and directories.zip",
  "files": [
    "file1.txt",
    "file3.bin",
    "dir5"
  ],
  "include_collection_metadata": true
}
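For reference, here is a rough Python sketch of an equivalent request using the @requests@ library. The hostname is a placeholder, and the token is assumed to be in @ARVADOS_API_TOKEN@:

# Sketch: download selected files from a collection as a ZIP archive.
import os
import requests

resp = requests.post(
    'https://collections.example.com/by_id/zzzzz-4zz18-0pg114rezrbz46u',
    headers={
        'Authorization': 'Bearer ' + os.environ['ARVADOS_API_TOKEN'],
        'Accept': 'application/zip',
    },
    # requests sets Content-Type: application/json for the json= body.
    json={
        'download_filename': 'odd-numbered files and directories.zip',
        'files': ['file1.txt', 'file3.bin', 'dir5'],
        'include_collection_metadata': True,
    },
)
resp.raise_for_status()
with open('odd-numbered files and directories.zip', 'wb') as f:
    f.write(resp.content)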
h3(#auth). Authentication mechanisms

A token can be provided in an Authorization header as a @Bearer@ token:
Authorization: Bearer o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
A token can also be provided with "RFC 7617 Basic Authentication":https://tools.ietf.org/html/rfc7617 . In this case, the payload is formatted as @username:token@ and encoded with base64. The username must be non-empty, but is ignored. In this example, the username is "user":
Authorization: Basic dXNlcjpvMDdqNHB4N1JsSks0Q3VNWXA3QzBMRFQ0Q3pSMUoxcUJFNUF2bzdlQ2NVak9UaWt4Swo=
A base64-encoded token can be provided in a cookie named "api_token":
Cookie: api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=
A token can be provided in an URL-encoded query string:
GET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
A token can be provided in a URL-encoded path (as described in the previous section):
GET /t=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK/_/foo/bar.txt
A suitably encoded token can be provided in a POST body if the request has a content type of application/x-www-form-urlencoded or multipart/form-data:
POST /foo/bar.txt
Content-Type: application/x-www-form-urlencoded
[...]
api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
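As an illustration, here is a minimal Python sketch that fetches a file using the @Bearer@ header mechanism described above (the URL and token are placeholders):

# Sketch: fetch a file from keep-web with a Bearer token.
import requests

token = 'o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK'
resp = requests.get(
    'http://collections.example.com/foo/bar.txt',
    headers={'Authorization': 'Bearer ' + token},
)
resp.raise_for_status()
print(resp.text)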
If a token is provided in a query string or in a POST request, the response is an HTTP 303 redirect to an equivalent GET request, with the token stripped from the query string and added to a cookie instead.

h3. Indexes

Keep-web returns a generic HTML index listing when a directory is requested with the GET method. It does not serve a default file like "index.html". Directory listings are also returned for WebDAV PROPFIND requests.

h3. Range requests

Keep-web supports partial resource reads using the HTTP @Range@ header as specified in "RFC 7233":https://tools.ietf.org/html/rfc7233 .

h3. Compatibility

Client-provided authorization tokens are ignored if the client does not provide a @Host@ header.

In order to use the query string or POST form authorization mechanisms, the client must follow 303 redirects; the client must accept cookies with a 303 response and send those cookies when performing the redirect; and either the client or an intervening proxy must resolve a relative URL ("//host/path") if given in a response Location header.

h3. Intranet mode

Normally, Keep-web accepts requests for multiple collections using the same host name, provided the client's credentials are not being used. This provides insufficient XSS protection in an installation where the "anonymously accessible" data is not truly public, but merely protected by network topology.

In such cases -- for example, a site which is not reachable from the internet, where some data is world-readable from Arvados's perspective but is intended to be available only to users within the local network -- the downstream proxy should be configured to return 401 for all paths beginning with "/c=".

h3. Same-origin URLs

Without the same-origin protection outlined above, a web page stored in collection X could execute JavaScript code that uses the current viewer's credentials to download additional data from collection Y -- data which is accessible to the current viewer, but not to the author of collection X -- from the same origin ("https://collections.example.com/") and upload it to some other site chosen by the author of collection X.

================================================
FILE: doc/api/methods/api_client_authorizations.html.textile.liquid
================================================
---
layout: default
navsection: api
navmenu: API Methods
title: "api_client_authorizations"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations@

Object type: @gj3su@

Example UUID: @zzzzz-gj3su-0123456789abcde@

h2. Resource

The @api_client_authorizations@ resource stores the API tokens that have been issued to permit access to the API server.

An ApiClientAuthorization is *not* a generic Arvados resource. The full list of properties that belong to an ApiClientAuthorization is:

table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|uuid|string|An identifier used to refer to the token without exposing the actual token.||
|api_token|string|The actual token string that is expected in the Authorization header.||
|created_by_ip_address|string|-||
|last_used_by_ip_address|string|The network address of the most recent client using this token.||
|last_used_at|datetime|Timestamp of the most recent request using this token.||
|expires_at|datetime|Time at which the token is no longer valid. May be set to a time in the past in order to immediately expire a token.||
|owner_uuid|string|The user associated with the token. All operations using this token are checked against the permissions of this user.||
|scopes|array|A list of resources this token is allowed to access. A scope of @["all"]@ allows all resources. See "API Authorization":{{site.baseurl}}/api/tokens.html#scopes for details.||

h2. Methods

See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.

Required arguments are displayed in %{background:#ccffcc}green%.

h3(#create). create

Create a new ApiClientAuthorization.

Regular users may only create self-owned API tokens, but may provide a restricted "scope":{{site.baseurl}}/api/tokens.html#scopes . Administrators may create API tokens corresponding to any user.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|api_client_authorization|object||query||

h3. create_system_auth

create_system_auth api_client_authorizations

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|scopes|array||query||

h3(#current). current

Return the full record associated with the provided API token. This endpoint is often used to check the validity of a given token.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |

h3. delete

Delete an existing ApiClientAuthorization.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||

h3. get

Gets an ApiClientAuthorization's metadata by UUID.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||

h3. list

List api_client_authorizations.

See "common resource list method.":{{site.baseurl}}/api/methods.html#index

h3. update

Update attributes of an existing ApiClientAuthorization.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
|api_client_authorization|object||query||

================================================
FILE: doc/api/methods/authorized_keys.html.textile.liquid
================================================
---
layout: default
navsection: api
navmenu: API Methods
title: "authorized_keys"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/authorized_keys@

Object type: @fngyi@

Example UUID: @zzzzz-fngyi-0123456789abcde@

h2. Resource

The authorized_keys resource stores SSH public keys which grant access to virtual machines or git repositories on the Arvados cluster as the user in @authorized_user_uuid@.

Each AuthorizedKey has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:

table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|A name to help the user manage their keys.||
|key_type|string|Public key type; currently only "SSH" is supported.||
|authorized_user_uuid|string|The user to which this key belongs. Authentication using this key authenticates as this user.||
|public_key|text|The actual public key material, e.g., from @~/.ssh/id_rsa.pub@||
|expires_at|datetime|Expiration date after which the key is no longer valid.||

h2. Methods

See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.

Required arguments are displayed in %{background:#ccffcc}green%.

h3. create

Create a new AuthorizedKey.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|authorized_key|object||query||

h3. delete

Delete an existing AuthorizedKey.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||

h3. get

Gets an AuthorizedKey's metadata by UUID.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||

h3. list

List authorized_keys.

See "common resource list method.":{{site.baseurl}}/api/methods.html#index

h3. update

Update attributes of an existing AuthorizedKey.

Arguments:

table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
|authorized_key|object||query||

================================================
FILE: doc/api/methods/collections.html.textile.liquid
================================================
---
layout: default
navsection: api
navmenu: API Methods
title: "collections"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/collections@

Object type: @4zz18@

Example UUID: @zzzzz-4zz18-0123456789abcde@

h2. Resource

Collections describe sets of files in terms of data blocks stored in Keep. See "Keep - Content-Addressable Storage":{{site.baseurl}}/architecture/storage.html and "using collection versioning":../../user/topics/collection-versioning.html for details.

Each collection has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:

table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|||
|description|text|Free text description of the collection. Allows "HTML formatting.":{{site.baseurl}}/api/resources.html#descriptions||
|properties|hash|User-defined metadata, may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
|portable_data_hash|string|The MD5 sum of the manifest text stripped of block hints other than the size hint.||
|manifest_text|text|The manifest describing how to assemble blocks into files, in the "Arvados manifest format":{{site.baseurl}}/architecture/manifest-format.html||
|replication_desired|number|Minimum storage replication level desired for each data block referenced by this collection. A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@|
A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@| |replication_confirmed|number|Replication level most recently confirmed by the storage system. This field is null when a collection is first created, and is reset to null when the manifest_text changes in a way that introduces a new data block. An integer value indicates the replication level of the _least replicated_ data block in the collection.|@2@, null| |replication_confirmed_at|datetime|When @replication_confirmed@ was confirmed. If @replication_confirmed@ is null, this field is also null.|| |storage_classes_desired|list|An optional list of storage class names where the blocks should be saved. If not provided, the cluster's default storage class(es) will be set.|@['archival']@| |storage_classes_confirmed|list|Storage classes most recently confirmed by the storage system. This field is an empty list when a collection is first created.|@['archival']@, @[]@| |storage_classes_confirmed_at|datetime|When @storage_classes_confirmed@ was confirmed. If @storage_classes_confirmed@ is @[]@, this field is null.|| |trash_at|datetime|If @trash_at@ is non-null and in the past, this collection will be hidden from API calls. May be untrashed.|| |delete_at|datetime|If @delete_at@ is non-null and in the past, the collection may be permanently deleted.|| |is_trashed|boolean|True if @trash_at@ is in the past, false if not.|| |current_version_uuid|string|UUID of the collection's current version. On new collections, it'll be equal to the @uuid@ attribute.|| |version|number|Version number, starting at 1 on new collections. This attribute is read-only.|| |preserve_version|boolean|When set to true on a current version, it will be persisted. When passing @true@ as part of a bigger update call, both current and newly created versions are persisted.|| |file_count|number|The total number of files in the collection. This attribute is read-only.|| |file_size_total|number|The sum of the file sizes in the collection. This attribute is read-only.|| h3. Conditions of creating a Collection If a new @portable_data_hash@ is specified when creating or updating a Collection, it must match the cryptographic digest of the supplied @manifest_text@. h3. Side effects of creating a Collection Referenced blocks are protected from garbage collection in Keep. Data can be shared with other users via the Arvados permission model. h3(#trashing). Trashing collections Collections can be trashed by updating the record and setting the @trash_at@ field, or with the "delete":#delete method. The delete method sets @trash_at@ to "now". The value of @trash_at@ can be set to a time in the future as a feature to automatically expire collections. When @trash_at@ is set, @delete_at@ will also be set. Normally @delete_at = trash_at + Collections.DefaultTrashLifetime@. When the @trash_at@ time is past but @delete_at@ is in the future, the trashed collection is invisible to most API calls unless the @include_trash@ parameter is true. Collections in the trashed state can be "untrashed":#untrash so long as @delete_at@ has not passed. Collections are also trashed if they are contained in a "trashed group":groups.html#trashing. Once @delete_at@ is past, the collection and all of its previous versions will be deleted permanently and can no longer be untrashed. h3(#replace_files).
Using "replace_files" to create or update a collection The @replace_files@ option can be used with the "create":#create and "update":#update APIs to efficiently and atomically copy individual files and directory trees from other collections, copy/rename/delete items within an existing collection, and add new items to a collection. @replace_files@ keys indicate target paths in the new collection, and values specify sources that should be copied to the target paths. * Each target path must be an absolute canonical path beginning with @/@. It must not contain @.@ or @..@ components, consecutive @/@ characters, or a trailing @/@ after the final component. * Each source must be one of the following: ** an empty string (signifying that the target path is to be deleted), ** @/@ where @@ is the portable data hash of a collection on the cluster and @@ is a file or directory in that collection, ** @manifest_text/@ where @@ is an existing file or directory in a collection supplied in the @manifest_text@ attribute in the request, or ** @current/@ where @@ is an existing file or directory in the collection being updated. In an @update@ request, sources may reference the current portable data hash of the collection being updated. However, in many cases it is more appropriate to use a @current/@ source instead, to ensure the latest content is used even if the collection has been updated since the PDH was last retrieved. h4(#replace_files-delete). Delete a file Delete @foo.txt@.
"replace_files": {
  "/foo.txt": ""
}
h4(#replace_files-rename). Rename a file Rename @foo.txt@ to @bar.txt@.
"replace_files": {
  "/foo.txt": "",
  "/bar.txt": "current/foo.txt"
}
h4(#replace_files-swap). Swap files Swap contents of files @foo@ and @bar@.
"replace_files": {
  "/foo": "current/bar",
  "/bar": "current/foo"
}
h4(#replace_files-add). Add a file
"replace_files": {
  "/new_directory/new_file.txt": "manifest_text/new_file.txt"
},
"collection": {
  "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
}
h4(#replace_files-replace). Replace all content with new content Note this is equivalent to omitting the @replace_files@ argument.
"replace_files": {
  "/": "manifest_text/"
},
"collection": {
  "manifest_text": "./new_directory acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
}
h4(#replace_files-rename-and-replace). Atomic rename and replace Rename @current_file.txt@ to @old_file.txt@ and replace @current_file.txt@ with new content, all in a single atomic operation.
"replace_files": {
  "/current_file.txt": "manifest_text/new_file.txt",
  "/old_file.txt": "current/current_file.txt"
},
"collection": {
  "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
}
h4(#replace_files-combine). Combine collections Delete all current content, then copy content from other collections into new subdirectories.
"replace_files": {
  "/": "",
  "/copy of collection 1": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/",
  "/copy of collection 2": "ea10d51bcf88862dbcc36eb292017dfd+45/"
}
h4(#replace_files-extract-subdirectory). Extract a subdirectory Replace all current content with a copy of a subdirectory from another collection.
"replace_files": {
  "/": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/subdir"
}
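The fragments above are partial JSON request bodies. For illustration, here is one way the "combine collections" example might be submitted using the Python SDK. This is a minimal sketch, not taken from this page: it assumes the @replace_files@ parameter is accepted as a keyword argument by the SDK's generated @update@ method, and it reuses the example UUID and portable data hashes shown on this page.
import arvados

# Sketch: combine two collections into subdirectories of an existing
# collection, deleting its current content first. All of the paths in
# replace_files are applied as a single atomic update.
api = arvados.api("v1")
updated = api.collections().update(
    uuid="zzzzz-4zz18-0123456789abcde",
    body={"collection": {}},
    replace_files={
        "/": "",
        "/copy of collection 1": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/",
        "/copy of collection 2": "ea10d51bcf88862dbcc36eb292017dfd+45/",
    },
).execute()
print(updated["portable_data_hash"])
Because the whole @replace_files@ mapping is applied atomically, concurrent readers never observe a half-combined collection.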
h4(#replace_files-usage-restrictions). Usage restrictions A target path with a non-empty source cannot be the ancestor of another target path in the same request. For example, the following request is invalid:
"replace_files": {
  "/foo": "fa7aeb5140e2848d39b416daeef4ffc5+45/",
  "/foo/this_will_return_an_error": ""
}
It is an error to supply a non-empty @manifest_text@ that is unused, i.e., the @replace_files@ argument does not contain any values beginning with @"manifest_text/"@. For example, the following request is invalid:
"replace_files": {
  "/foo": "current/bar"
},
"collection": {
  "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\n"
}
Collections on other clusters in a federation cannot be used as sources. Each source must exist on the current cluster and be readable by the current user. Similarly, if @manifest_text@ is provided, it must only reference data blocks that are stored on the current cluster. This API does not copy data from other clusters in a federation. h3(#replace_segments). Using "replace_segments" to repack file data The @replace_segments@ option can be used with the "create":#create or "update":#update API to atomically apply a new file packing, typically with the goal of replacing a number of small blocks with one larger block. The repacking is specified in terms of _block segments_: a block segment is a portion of a stored block that is referenced by a file in a manifest. @replace_segments@ keys indicate existing block segments in the collection, and values specify replacement segments. * Each segment is specified as space-separated tokens: @"locator offset length"@ where @locator@ is a signed block locator and @offset@ and @length@ are decimal-encoded integers specifying a portion of the block that is referenced in the collection. * Each replacement block locator must be properly signed (just as if it appeared in a @manifest_text@). * Each existing block segment must correspond to an entire contiguous portion of a block referenced by a single file (splitting existing segments is not supported). * If a segment to be replaced does not match any existing block segment in the manifest, that segment _and all other @replace_segments@ entries referencing the same replacement block_ will be skipped. Other replacements will still be applied. Replacements that are skipped for this reason do not cause the request to fail. This rule ensures that when concurrent clients compute different repackings and request similar replacements such as @a,b,c,d,e → X@ and @a,b,c,d,e,f → Y@, the resulting manifest references @X@ or @Y@ but not both. Otherwise, the effect could be @a,b,c,d,e → X, f → Y@ where @Y@ is just an inefficient way to reference the same data as @f@. The @replace_files@ and @manifest_text@ options, if present, are applied before @replace_segments@. This means @replace_segments@ can apply to blocks from @manifest_text@ and/or other collections referenced by @replace_files@. In the following example, two files were originally saved by writing two small blocks (@c410@ and @693e@). After concatenating the two small blocks and writing a single larger block @ca9c@, the manifest is being updated to reference the larger block.
"collection": {
  "manifest_text": ". c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0:2:file1.txt 2:3:file2.txt\n"
},
"replace_segments": {
  "c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 0 2": "ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0 2",
  "693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0 3": "ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 2 3"
}
Resulting manifest:
. ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0:2:file1.txt 2:3:file2.txt
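For illustration, the repacking above might be submitted with the Python SDK as follows. This is a sketch only: it assumes @replace_segments@ is accepted as a keyword argument by the SDK's generated @update@ method, reuses this page's example UUID, and carries over the illustrative block signatures from the example (a real cluster would require valid signatures).
import arvados

api = arvados.api("v1")
# Sketch: replace two small block segments with portions of one larger
# block, leaving the file contents unchanged.
api.collections().update(
    uuid="zzzzz-4zz18-0123456789abcde",
    body={"collection": {}},
    replace_segments={
        "c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 0 2":
            "ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0 2",
        "693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0 3":
            "ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 2 3",
    },
).execute()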
h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. Supports federated @get@ only, which may be called with either a uuid or a portable data hash. When requesting a portable data hash which is not available on the home cluster, the query is forwarded to all the clusters listed in @RemoteClusters@ and returns the first successful result. h3(#create). create Create a new Collection. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |collection|object||query|| |replace_files|object|Initialize files and directories with new content and/or content from other collections|query|| |replace_segments|object|Repack the collection by substituting data blocks|query|| The new collection's content can be initialized by providing a @manifest_text@ key in the provided @collection@ object, or by "using the @replace_files@ option":#replace_files. An alternative file packing can be applied atomically "using the @replace_segments@ option":#replace_segments. h3(#delete). delete Put a Collection in the trash. This sets the @trash_at@ field to @now@ and @delete_at@ field to @now@ + token TTL. A trashed collection is invisible to most API calls unless the @include_trash@ parameter is true. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path|| h3. get Gets a Collection's metadata by UUID or portable data hash. When making a request by portable data hash, attributes other than @portable_data_hash@, @manifest_text@, and @trash_at@ are not returned, even when requested explicitly using the @select@ parameter. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID or portable data hash of the Collection in question.|path|| h3. list List collections. See "common resource list method.":{{site.baseurl}}/api/methods.html#index table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |include_trash|boolean (default false)|Include trashed collections.|query|| |include_old_versions|boolean (default false)|Include past versions of the collection(s) being listed, if any.|query|| Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default. If you need it, pass a @select@ parameter that includes @manifest_text@. h4. Searching Collections for names of file or directories You can search collections for specific file or directory names (whole or part) using the following filter in a @list@ query.
filters: [["file_names", "ilike", "%sample1234.fastq%"]]
Note: @file_names@ is a hidden field used for indexing. It is not returned by any API call. On the client, you can programmatically enumerate all the files in a collection using @arv-ls@, the Python SDK @Collection@ class, Go SDK @FileSystem@ struct, the WebDAV API, or the S3-compatible API. As of this writing (Arvados 2.4), you can also search for directory paths, but _not_ complete file paths. In other words, this will work (when @dir3@ is a directory):
filters: [["file_names", "ilike", "%dir1/dir2/dir3%"]]
However, this will _not_ return the desired results (where @sample1234.fastq@ is a file):
filters: [["file_names", "ilike", "%dir1/dir2/dir3/sample1234.fastq%"]]
As a workaround, you can search for both the directory path and file name separately, and then filter on the client side.
filters: [["file_names", "ilike", "%dir1/dir2/dir3%"], ["file_names", "ilike", "%sample1234.fastq%"]]
h3(#update). update Update attributes of an existing Collection. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path|| |collection|object||query|| |replace_files|object|Add, delete, and replace files and directories with new content and/or content from other collections|query|| |replace_segments|object|Repack the collection by substituting data blocks|query|| The collection's existing content can be replaced entirely by providing a @manifest_text@ key in the provided @collection@ object, or updated in place by "using the @replace_files@ option":#replace_files. An alternative file packing can be applied atomically "using the @replace_segments@ option":#replace_segments. h3(#untrash). untrash Remove a Collection from the trash. This sets the @trash_at@ and @delete_at@ fields to @null@. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection to untrash.|path|| |ensure_unique_name|boolean (default false)|Rename collection uniquely if untrashing it would fail with a unique name conflict.|query|| h3. provenance Returns a list of objects in the database that directly or indirectly contributed to producing this collection, such as the container request that produced this collection as output. The general algorithm is: # Visit the container request that produced this collection (via @output_uuid@ or @log_uuid@ attributes of the container request) # Visit the input collections to that container request (via @mounts@ and @container_image@ of the container request) # Iterate until there are no more objects to visit Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection to get provenance.|path|| h3. used_by Returns a list of objects in the database this collection directly or indirectly contributed to, such as containers that take this collection as input. The general algorithm is: # Visit containers that take this collection as input (via @mounts@ or @container_image@ of the container) # Visit collections produced by those containers (via @output@ or @log@ of the container) # Iterate until there are no more objects to visit Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection to get usage.|path|| ================================================ FILE: doc/api/methods/computed_permissions.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "computed_permissions" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/computed_permissions@ h2. Resource Computed permissions are entries from the internal cache of the highest permission level each user has on each permission target. Each entry has the following attributes: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description| |user_uuid|string|An individual user.| |target_uuid|string|An object (role group, project group, collection, etc.)
on which the user has implicit or explicit permission.| |perm_level|string|@can_read@, @can_write@, or @can_manage@| There is only one row for a given (@user_uuid@, @target_uuid@) pair. Computed permissions cannot be created or updated directly. To change permissions, use the "groups":groups.html and "links":links.html APIs as described in the "permission model":../permission-model.html. h2. Method h3. list @GET /arvados/v1/computed_permissions@ List computed permissions. The computed permissions API accepts the arguments described in the "common resource list method":{{site.baseurl}}/api/methods.html#index with the following exceptions: * It is an error to supply a non-zero @offset@ argument. * The default value for @order@ is @["user_uuid", "target_uuid"]@. * The default value for @count@ is @"none"@ and no other values are accepted. ================================================ FILE: doc/api/methods/container_requests.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "container_requests" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/container_requests@ Object type: @xvhdp@ Example UUID: @zzzzz-xvhdp-0123456789abcde@ h2. Resource A container request is a request for the Arvados cluster to perform some computational work. See "computing with Crunch":{{site.baseurl}}/api/execution.html for details. Each ContainerRequest offers the following attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: All attributes are optional, unless otherwise marked as required. table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Notes| |name|string|The name of the container_request.|| |description|string|The description of the container_request. Allows "HTML formatting.":{{site.baseurl}}/api/resources.html#descriptions || |properties|hash|User-defined metadata that does not affect how the container is run. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters|| |state|string|The allowed states are "Uncommitted", "Committed", and "Final".|Once a request is Committed, the only attributes that can be modified are priority, container_uuid, and container_count_max. A request in the "Final" state cannot have any of its functional parts modified (i.e., only name, description, and properties fields can be modified).| |requesting_container_uuid|string|The uuid of the parent container that created this container_request, if any. Represents a process tree.|The priority of this container_request is inherited from the parent container; if the parent container is cancelled, this container_request will be cancelled as well.| |container_uuid|string|The uuid of the container that satisfies this container_request. The system may return a preexisting Container that matches the container request criteria.
See "Container reuse":#container_reuse for more details.|Container reuse is the default behavior, but may be disabled with @use_existing: false@ to always create a new container.| |container_count_max|integer|Maximum number of containers to start, i.e., the maximum number of "attempts" to be made.|| |mounts|hash|Objects to attach to the container's filesystem and stdin/stdout.|See "Mount types":#mount_types for more details.| |secret_mounts|hash|Objects to attach to the container's filesystem. Only "json" or "text" mount types allowed.|Not returned in API responses. Reset to empty when state is "Complete" or "Cancelled".| |runtime_constraints|hash|Restrict the container's access to compute resources and the outside world.|Required when in "Committed" state. e.g.,
{
  "ram":12000000000,
  "vcpus":2,
  "API":true
}
See "Runtime constraints":#runtime_constraints for more details.| |scheduling_parameters|hash|Parameters to be passed to the container scheduler when running this container.|e.g.,
{
"partitions":["fastcpu","vfastcpu"]
}
See "Scheduling parameters":#scheduling_parameters for more details.| |container_image|string|Portable data hash of a collection containing the docker image to run the container.|Required.| |environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.|| |cwd|string|Initial working directory, given as an absolute path (in the container) or a path relative to the WORKDIR given in the image's Dockerfile.|Optional. If omitted or blank, @"."@ is implied.| |command|array of strings|Command to execute in the container.|Required. e.g., @["echo","hello"]@| |output_path|string|Path to a directory or file inside the container that should be preserved as container's output when it finishes. This path must be one of the mount targets. For best performance, point output_path to a writable collection mount. See "Pre-populate output using Mount points":#pre-populate-output for details regarding optional output pre-population using mount points and "Symlinks in output":#symlinks-in-output for additional details.|Required.| |output_glob|array of strings|Glob patterns determining which files (of those present in the output directory when the container finishes) will be included in the output collection. If multiple patterns are given, files that match any pattern are included. If null or empty, all files will be included.|e.g., @["**/*.vcf", "**/*.vcf.gz"]@ See "Glob patterns":#glob_patterns for more details.| |output_name|string|Desired name for the output collection. If null or empty, a name will be assigned automatically.|| |output_ttl|integer|Desired lifetime for the output collection, in seconds. If zero, the output collection will not be deleted automatically.|| |priority|integer|Range 0-1000. Indicate scheduling order preference.|Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed". See "priority below for more details.":#priority | |expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.| |use_existing|boolean|If possible, use an existing (non-failed) container to satisfy the request instead of creating a new one.|Default is true.| |log_uuid|string|Log collection containing log messages provided by the scheduler and crunch processes.|Null if the container has not yet started running. To retrieve logs in real time while the container is running, use the log API (see below).| |output_uuid|string|Output collection created when the container finished successfully.|Null if the container has failed or not yet completed.| |filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.|This attribute is not implemented yet. The value should always be null.| |runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc. |Not returned in API responses. 
Reset to null when state is "Complete" or "Cancelled".| |runtime_user_uuid|string|The user permission that will be granted to this container.|| |runtime_auth_scopes|array of string|The scopes associated with the auth token used to run this container.|| |output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container request|If omitted, the cluster's configured default storage classes are used.| |output_properties|hash|User metadata properties to set on the output collection. The output collection will also have default properties "type" ("intermediate" or "output") and "container_request" (the uuid of container request that produced the collection).| |cumulative_cost|number|Estimated cost of the cloud VMs used to satisfy the request, including retried attempts and completed subrequests, but not including reused containers.|0 if container was reused or VM price information was not available.| |service|boolean|Indicates that this container is a long-lived service rather than a once-through batch job. Incompatible with @use_existing@|| |published_ports|hash|Web service ports that are published by this container. See "published ports":#published_ports below.|| h2(#lifecycle). Container request lifecycle A container request may be created in the Committed state, or created in the Uncommitted state and then moved into the Committed state. Once a request is in the Committed state, Arvados locates a suitable existing container or schedules a new one. When the assigned container finishes, the request state changes to Final. A client may cancel a committed request early (before the assigned container finishes) by setting the request priority to zero. !{max-width:60em;}{{site.baseurl}}/api/methods/container_request_lifecycle.svg! 
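For illustration, the client-driven transitions in this lifecycle might look like the following with the Python SDK. This is a sketch; the UUID is this page's example placeholder and the priority value is arbitrary.
import arvados

api = arvados.api("v1")
req_uuid = "zzzzz-xvhdp-0123456789abcde"
# Commit the request so Arvados will reuse or schedule a container.
api.container_requests().update(
    uuid=req_uuid,
    body={"container_request": {"state": "Committed", "priority": 500}},
).execute()
# Later, cancel early by setting priority to zero (see "Canceling a
# container request" below).
api.container_requests().update(
    uuid=req_uuid,
    body={"container_request": {"priority": 0}},
).execute()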
{% comment %} # svg generated using `graphviz -Tsvg -O` digraph { graph [nojustify=true] [labeljust=l] invisiblestart [label = ""] [color=white] [group=lifecycle]; node [color=black] [fillcolor=white] [style=filled] [shape=box] [nojustify=true]; uncommitted [label = "container request:\l state=Uncommitted\l"] [fillcolor=lightgrey] [group=lifecycle]; { rank=same; committed [label = "container request:\l state=Committed\l priority>0\l"] [group=lifecycle]; reused [label = "container request:\l state=Final\lcontainer:\l state=Complete\l(reused existing container)\l"] [fillcolor=lightblue] [group=endstate]; } invisiblestart -> uncommitted [label = " user creates container request\l"] [color=navy] [fontcolor=navy]; uncommitted -> committed [label = " user updates to\l state=Committed, priority>0\l"] [color=navy] [fontcolor=navy]; queued [label = "container request:\l state=Committed\l priority>0\lcontainer:\l state=Queued\l"] [group=lifecycle]; committed -> queued [label = " Arvados creates a new container\l"]; { rank=same; locked [label = "container request:\l state=Committed\l priority>0\lcontainer:\l state=Locked\l"] [group=lifecycle]; latecancelled [label = "container request:\l state=Final\lcontainer:\l state=Cancelled\l"] [fillcolor=lightblue] [group=endstate]; } queued -> locked [label = " Arvados is ready to dispatch the container\l"]; { rank=same; running [label = "container request:\l state=Committed\l priority>0\lcontainer:\l state=Running\l"] [group=lifecycle]; containerfailed [label = "container request:\l state=Final\lcontainer:\l state=Complete\l exit_code≠0\l"] [fillcolor=lightblue] [group=endstate]; } locked -> running [label = " Arvados starts the container process\l"]; containerfinished [label = "container request:\l state=Final\lcontainer:\l state=Complete\l exit_code=0\l"] [fillcolor=lightblue] [group=lifecycle]; committed -> reused [label = "Arvados selects an existing container"] [constraint=false] [labeldistance=0.5]; queued -> latecancelled [label = "user updates to priority=0"] [color=navy] [fontcolor=navy]; locked -> latecancelled [label = "user updates to priority=0"] [color=navy] [fontcolor=navy] [constraint=false]; running -> latecancelled [label = "user updates to priority=0"] [color=navy] [fontcolor=navy] [constraint=false]; running -> containerfailed [label = "container process fails"]; running -> containerfinished [label = " container process succeeds\l"]; # layout hacks reused -> latecancelled [style=invis]; latecancelled -> containerfailed [style=invis]; } {% endcomment %} h2(#priority). Priority The @priority@ field has a range of 0-1000. Priority 0 means no container should run on behalf of this request, and containers already running will be terminated (setting container priority to 0 is the cancel operation.) Priority 1 is the lowest priority. Priority 1000 is the highest priority. The actual order that containers execute is determined by the underlying scheduling software (e.g. Slurm) and may be based on a combination of container priority, submission time, available resources, and other factors. In the current implementation, the magnitude of difference in priority between two containers affects the weight of priority vs age in determining scheduling order. If two containers have only a small difference in priority (for example, 500 and 501) and the lower priority container has a longer queue time, the lower priority container may be scheduled before the higher priority container. 
Use a greater magnitude difference (for example, 500 and 600) to give higher weight to priority over queue time. h2(#mount_types). {% include 'mount_types' %} h2(#runtime_constraints). {% include 'container_runtime_constraints' %} h2(#scheduling_parameters). {% include 'container_scheduling_parameters' %} h2(#glob_patterns). {% include 'container_glob_patterns' %} h2(#published_ports). {% include 'container_published_ports' %} h2(#container_reuse). Container reuse When a container request is "Committed", the system will try to find and reuse an existing Container with the same command, cwd, environment, output_path, container_image, mounts, secret_mounts, runtime_constraints, runtime_user_uuid, and runtime_auth_scopes being requested. * The serialized fields environment, mounts, and runtime_constraints are normalized when searching. * The system will also search for containers with minor variations in the keep_cache_disk and keep_cache_ram runtime_constraints that should not affect the result. This searches for other common values for those constraints, so a container that used a non-default value for these constraints may not be reused by later container requests that use a different value. In order of preference, the system will use: * The first matching container to have finished successfully (i.e., reached state "Complete" with an exit_code of 0) whose log and output collections are still available. * The oldest matching "Running" container with the highest progress, i.e., the container that is most likely to finish first. * The oldest matching "Locked" container with the highest priority, i.e., the container that is most likely to start first. * The oldest matching "Queued" container with the highest priority, i.e., the container that is most likely to start first. * A new container. h2(#cancel_container). Canceling a container request A container request may be canceled by setting its priority to 0, using an update call. When a container request is canceled, it will still reflect the state of the Container it is associated with via the container_uuid attribute. If that Container is being reused by any other container_requests that are still active, i.e., not yet canceled, that Container may continue to run or be scheduled to run by the system in the future. However, if no other container_requests are using that Container, then the Container will get canceled as well. h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. Supports federated @create@, @delete@, @get@, @list@, and @update@. h2(#create). create Create a new container request. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|container_request|object|Container request resource.|request body|| |cluster_id|string|The federated cluster to submit the container request.|query|| The request body must include the required attributes @command@, @container_image@, @mounts@, and @output_path@. It can also include other attributes such as @environment@, @published_ports@, @runtime_constraints@, and @scheduling_parameters@. h3. delete Delete an existing container request. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path|| h3.
get Get a container request's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path|| h3. list List container requests. See "common resource list method.":{{site.baseurl}}/api/methods.html#index The @filters@ argument can also filter on attributes of the container referenced by @container_uuid@. For example, @[["container.state", "=", "Running"]]@ will match any container request whose container is running now. h3. update Update attributes of an existing container request. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path|| |container_request|object||query|| {% include 'notebox_begin' %} Setting the priority of a committed container_request to 0 may cancel a running container assigned for it. See "Canceling a container request":{{site.baseurl}}/api/methods/container_requests.html#cancel_container for further details. {% include 'notebox_end' %} h3(#container_status). container_status Get container status. table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location | {background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path| Example request: @GET /arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/container_status@ Response attributes: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Examples| |uuid|string|The UUID of the container assigned to this request.|| |state|string|The state of the container assigned to this request (see "container resource attributes":containers.html).|| |scheduling_status|string|A brief explanation of the container's status in the dispatch queue, or an empty string if scheduling is not applicable, e.g., the container is running or finished.|@waiting for cloud resources: queue position 3@ @creating new instance@ @preparing runtime environment@| h3(#log). log Get container log data using WebDAV methods. This API retrieves data from the container request's log collection. It can be used at any time in the container request lifecycle. * Before a container has been assigned (the request is @Uncommitted@) it returns an empty directory. * While the container is @Queued@ or @Locked@, it returns an empty directory. * While the container is @Running@, @.../log/{container_uuid}/@ returns real-time logging data. * While the container is @Complete@ or @Cancelled@, @.../log/{container_uuid}/@ returns the final log collection. If a request results in multiple containers being run (see @container_count_max@ above), the logs from prior attempts remain available at @.../log/{old_container_uuid}/@. Currently, this API has a limitation that a directory listing at the top level @/arvados/v1/container_requests/{uuid}/log/@ does not reveal the per-container subdirectories. Instead, clients should look up the container request record and use the @container_uuid@ attribute to request files and directory listings under the per-container directory, as in the examples below. This API supports the @Range@ request header, so it can be used to poll for and retrieve logs incrementally while the container is running. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_.
Example | {background:#ccffcc}.|method|string|Read-only WebDAV method|HTTP method|@GET@, @OPTIONS@, @PROPFIND@| {background:#ccffcc}.|uuid|string|The UUID of the container request.|path|zzzzz-xvhdp-0123456789abcde| {background:#ccffcc}.|path|string|Path to a file in the log collection.|path|@/zzzzz-dz642-0123456789abcde/stderr.txt@| Examples: * @GET /arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/log/zzzzz-dz642-0123456789abcde/stderr.txt@ * @PROPFIND /arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/log/zzzzz-dz642-0123456789abcde/@ ================================================ FILE: doc/api/methods/containers.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "containers" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/containers@ Object type: @dz642@ Example UUID: @zzzzz-dz642-0123456789abcde@ h2. Resource A container is a work order to be dispatched to an Arvados cluster to perform some computational work. A container is created in response to a container request. See "computing with Crunch":{{site.baseurl}}/api/execution.html for details. Each Container offers the following attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Notes| |state|string|The allowed states are "Queued", "Locked", "Running", "Cancelled" and "Complete".|See "Container states":#container_states for more details.| |started_at|datetime|When this container started running.|Null if container has not yet started.| |finished_at|datetime|When this container finished.|Null if container has not yet finished.| |log|string|Portable data hash of a collection containing the log messages produced when executing the container.|Null if container has not yet started. The Crunch system will periodically update this field for a running container.| |environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.|Must be equal to a ContainerRequest's environment in order to satisfy the ContainerRequest.| |cwd|string|Initial working directory.|Must be equal to a ContainerRequest's cwd in order to satisfy the ContainerRequest.| |command|array of strings|Command to execute.| Must be equal to a ContainerRequest's command in order to satisfy the ContainerRequest.| |output_path|string|Path to a directory or file inside the container that should be preserved as this container's output when it finishes.|Must be equal to a ContainerRequest's output_path in order to satisfy the ContainerRequest.| |output_glob|array of strings|Glob patterns determining which files will be included in the output collection. See corresponding attribute in the "container_requests resource":container_requests.html.|Must be equal to a ContainerRequest's output_glob in order to satisfy the ContainerRequest. See "Glob patterns":#glob_patterns for more details.| |mounts|hash|Must contain the same keys as the ContainerRequest being satisfied.
Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|See "Mount types":#mount_types for more details.| |secret_mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|Not returned in API responses. Reset to empty when state is "Complete" or "Cancelled".| |runtime_constraints|hash|Compute resources, and access to the outside world, that are / were available to the container. Generally this will contain additional keys that are not present in any corresponding ContainerRequests: for example, even if no ContainerRequests specified constraints on the number of CPU cores, the number of cores actually used will be recorded here.|e.g.,
{
  "ram":12000000000,
  "vcpus":2,
  "API":true
}
See "Runtime constraints":#runtime_constraints for more details.| |runtime_status|hash|Information related to the container's run, including its steps. Some keys have specific meaning and are described later in this page.|e.g.,
{
  "error": "This container won't be successful because at least one step has already failed."
}
See "Runtime status":#runtime_status for more details.| |scheduling_parameters|hash|Parameters to be passed to the container scheduler when running this container.|e.g.,
{
"partitions":["fastcpu","vfastcpu"]
}
See "Scheduling parameters":#scheduling_parameters for more details.| |output|string|Portable data hash of the output collection.|Null if the container is not yet finished.| |container_image|string|Portable data hash of a collection containing the docker image used to run the container.|| |progress|number|A number between 0.0 and 1.0 describing the fraction of work done.|| |priority|integer|Range 0-1000. Indicate scheduling order preference.|Currently assigned by the system as the max() of the priorities of all associated ContainerRequests. See "container request priority":container_requests.html#priority.| |exit_code|integer|Process exit code.|Null if container process has not exited yet.| |auth_uuid|string|UUID of a token to be passed into the container itself, used to access Keep-backed mounts, etc. Automatically assigned.|Null if state∉{"Locked","Running"} or if @runtime_token@ was provided.| |locked_by_uuid|string|UUID of a token, indicating which dispatch process changed state to Locked. If null, any token can be used to lock. If not null, only the indicated token can modify this container.|Null if state∉{"Locked","Running"}| |runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc.|Not returned in API responses. Reset to null when state is "Complete" or "Cancelled".| |gateway_address|string|Address (host:port) of gateway server.|Internal use only.| |interactive_session_started|boolean|Indicates whether @arvados-client shell@ has been used to run commands in the container, which may have altered the container's behavior and output.|| |output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container|| |output_properties|hash|User metadata properties to set on the output collection.| |cost|number|Estimated cost of the cloud VM used to run the container.|0 if not available.| |subrequests_cost|number|Total estimated cumulative cost of container requests submitted by this container.|0 if not available.| |service|boolean|Indicates that this container is a long-lived service rather than a once-through batch job. Incompatible with @use_existing@|| |published_ports|hash|Web service ports that are published by this container. See "published ports":#published_ports below.|| h2(#container_states). Container states table(table table-bordered table-condensed). |_. State value|_. Description|_. Allowed next| |Queued|Waiting for a dispatcher to lock it and try to run the container.|Locked, Cancelled| |Locked|A dispatcher has "taken" the container and is allocating resources for it. The container has not started yet.|Queued, Running, Cancelled| |Running|Resources have been allocated and the contained process has been started (or is about to start). Crunch-run _must_ set state to Running _before_ there is any possibility that user code will run in the container.|Complete, Cancelled| |Complete|Container was running, and the contained process/command has exited.|Cancelled| |Cancelled|The container did not run long enough to produce an exit code. 
This includes cases where the container didn't even start, cases where the container was interrupted/killed before it exited by itself (e.g., priority changed to 0), and cases where some problem prevented the system from capturing the contained process's exit status (exit code and output).|-| See "Controlling container reuse":{{site.baseurl}}/admin/controlling-container-reuse.html for details about changing state from @Complete@ to @Cancelled@. h2(#mount_types). {% include 'mount_types' %} h2(#runtime_constraints). {% include 'container_runtime_constraints' %} h2(#runtime_status). Runtime status Runtime status provides relevant information about a container's progress even while it is still in the Running state. This is used to avoid reusing containers that have not yet failed but will definitely fail, and also to make workflow debugging easier. The following keys have well-known meanings: table(table table-bordered table-condensed). |_. Key|_. Type|_. Description|_. Notes| |error|string|The existence of this key indicates the container will definitely fail, or has already failed.|Optional.| |warning|string|Indicates something unusual happened or is currently happening, but isn't considered fatal.|Optional.| |activity|string|A message for the end user about what state the container is currently in.|Optional.| |errorDetail|string|Additional structured error details.|Optional.| |warningDetail|string|Additional structured warning details.|Optional.| |preemptionNotice|string|Details about any cloud provider scheduled interruption to the instance running this container.|Existence of this key indicates the container likely was (or will soon be) @Cancelled@ due to an instance interruption.| h2(#scheduling_parameters). {% include 'container_scheduling_parameters' %} h2(#glob_patterns). {% include 'container_glob_patterns' %} h2(#published_ports). {% include 'container_published_ports' %} h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. Supports federated @get@ and @list@. h3(#create). create Create a new Container. This API requires admin privileges. In normal operation, it should not be used at all. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|container|object|Container resource|request body|| h3. delete Delete a Container. This API requires admin privileges. In normal operation, it should not be used at all. API clients like Workbench might not work correctly when a container request references a container that has been deleted. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path|| h3. get Get a Container's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path|| h3. list List containers. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. update Update attributes of an existing Container. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_.
Example | {background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path|| |container|object||query|| h3. auth Get the api_client_authorization record indicated by this container's auth_uuid, which belongs to the container's locked_by_uuid. table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string||path|| ================================================ FILE: doc/api/methods/credentials.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "credentials" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/credentials@ Object type: @oss07@ Example UUID: @zzzzz-oss07-0123456789abcde@ h2. Resource Stores a credential, such as a username/password or API token, for use by running containers to access an external resource on the user's behalf. Each Credential offers the following attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description| |name|string|Name for the credential, unique by owner.| |description|string|(optional) Free text description of this credential.| |credential_class|string|The type of credential stored in this record. See below for more information.| |scopes|array of string|(optional) One or more specific resources this credential applies to.| |external_id|string|The non-secret part of the credential.| |secret|string|The secret part of the credential that should be kept hidden where possible.| |expires_at|timestamp|Date at which the @secret@ field is no longer valid and can no longer be accessed (and may be scrubbed from the database). If @expires_at@ has passed, any attempts to access the @secret@ endpoint (see below) also return an error.| The @secret@ field can be set when the record is created or updated by users with @can_write@ permission, however the value of @secret@ is not returned in the regular @get@ or @list@ API calls, and cannot be used in queries. Credentials can be read using an Arvados token issued to a container running on behalf of a user who has @can_read@ permission to the credential, using the @secret@ API call (see below). Calling the @secret@ API with a regular Arvados token (i.e. not associated with a running container) will return a permission denied error. This design is intended to minimize accidental exposure of the secret material, but does not inherently protect it from users who have been given @can_read@ access, since it is necessary for code running on those users' behalf to access the secret in order to make use of it. As of Arvados 3.2, all credentials are owned by the system user and the @name@ field must be unique on a given Arvados instance. Credentials are shared using normal permission links. h2. Credential classes The @credential_class@ field is used to identify what kind of credential is stored and how to interpret the other fields of the record. Some credential classes, like @aws_access_key@, are reserved and must be prefixed with @arv:@. Being reserved means that each scope in the associated @scopes@ field is checked to ensure that it is valid for that credential class. h3. aws_access_key table(table table-bordered table-condensed). |_. Attribute|_.
Description| |credential_class|String "arv:aws_access_key"| |scopes|A list of S3 buckets (in the form "s3://bucketname") to which these credentials grant access. The special value "s3://*" means this credential can be used for any bucket.| |external_id|The value of "aws_access_key_id" from @~/.aws/credentials@| |secret|The value of "aws_secret_access_key" from @~/.aws/credentials@| h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3. create Create a new Credential. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|credential|object|Credential resource|request body|| h3. delete Delete an existing Credential. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path|| h3. get Get a credential by UUID. The @secret@ field is not returned in @get@ API calls. To get the value of @secret@, use the @secret@ API call. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path|| h3. list List credentials. The @secret@ field is not returned in @list@ API calls, and cannot be used in queries. To get the value of @secret@, use the @secret@ API call. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. update Update attributes of an existing credential. May be used to update the value of @secret@. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path|| |credential|object||query|| h3. secret Get the value of @secret@. Returns a JSON object in the form @{"external_id": "...", "secret": "..."}@. Only permitted when called with an Arvados token issued to a container running on behalf of a user who has @can_read@ permission to the credential. Calling this API with a regular Arvados token (i.e. not associated with a running container) will return a permission denied error. If @expires_at@ has passed, this endpoint will return an error. Calls to the @secret@ API endpoint are logged as @event_type: secret_access@ in the audit log table. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path|| ================================================ FILE: doc/api/methods/groups.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "groups" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/groups@ Object type: @j7d0g@ Example UUID: @zzzzz-j7d0g-0123456789abcde@ h2. Resource Groups provide a way to apply the same permissions to a set of Arvados objects. See "permission model":{{site.baseurl}}/api/permission-model.html for details.
Each Group has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Example| |name|string||| |group_class|string|Type of group. @project@ and @filter@ indicate that the group should be displayed by Workbench and arv-mount as a project for organizing and naming objects. @role@ is used as part of the "permission system":{{site.baseurl}}/api/permission-model.html. |@"filter"@ @"project"@ @"role"@| |description|text|Free text description of the group. Allows "HTML formatting.":{{site.baseurl}}/api/resources.html#descriptions || |properties|hash|User-defined metadata, may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters || |can_write|boolean|True if the current user has write permission on this group.|| |can_manage|boolean|True if the current user has manage permission on this group.|| |trash_at|datetime|If @trash_at@ is non-null and in the past, this group and all objects directly or indirectly owned by the group will be hidden from API calls. May be untrashed as long as @delete_at@ is in the future.|| |delete_at|datetime|If @delete_at@ is non-null and in the past, the group and all objects directly or indirectly owned by the group may be permanently deleted.|| |is_trashed|boolean|True if @trash_at@ is in the past, false if not.|| |frozen_by_uuid|string|For a frozen project, indicates the user who froze the project; null in all other cases. When a project is frozen, no further changes can be made to the project or its contents, even by admins. Attempting to add new items or modify, rename, move, trash, or delete the project or its contents, including any subprojects, will return an error.|| h2. Group types and states h3(#project). Project groups Groups with @group_class: project@ are used to organize objects and subprojects through ownership. When "trashed or deleted":#trashing, all items owned by the project (including subprojects, collections, or container requests) as well as permissions (permission links) granted to the project are also trashed or deleted. h3(#role). Role groups Groups with @group_class: role@ are used to grant permissions to users (or other groups) through permission links. Role groups can confer "can_manage" permission but cannot directly own objects. When "trashed and deleted":#trashing, group membership and permission grants (expressed as permission links) are deleted as well. h3(#filter). Filter groups Groups with @group_class: filter@ are virtual groups; they cannot own other objects, but instead their contents (as returned by the "contents":#contents API method) are defined by a query. Filter groups have a special @properties@ field named @filters@, which must be an array of filter conditions. See "list method filters":{{site.baseurl}}/api/methods.html#filters for details on the syntax of valid filters, but keep in mind that the attributes must include the object type (@collections@, @container_requests@, @groups@, @workflows@), separated with a dot from the field to be filtered on. Filters are applied with an implied *and* between them, but each filter only applies to the object type specified. The results are subject to the usual access controls - they are a subset of all objects the user can see. Here is an example:
 "properties":{
  "filters":[
   [
    "groups.name",
    "like",
    "Public%"
   ]
  ]
 },
This @filter@ group will return all groups (projects) that have a name starting with the word @Public@ and are visible to the user issuing the query. Because groups can contain many types of object, it will also return all objects of other types that the user can see. The @is_a@ filter operator is particularly useful for limiting the @filter@ group's contents to the desired object types. When the @is_a@ operator is used, the attribute must be @uuid@. The operand may be a string or an array; when it is an array, objects matching any of the listed types will match the filter. This example will return all groups (projects) that have a name starting with the word @Public@, as well as all collections that are in the project with uuid @zzzzz-j7d0g-0123456789abcde@.
 "properties":{
  "filters":[
   [
    "groups.name",
    "like",
    "Public%"
   ],
   [
    "collections.owner_uuid",
    "=",
    "zzzzz-j7d0g-0123456789abcde"
   ],
   [
    "uuid",
    "is_a",
    [
     "arvados#group",
     "arvados#collection"
    ]
   ]
  ]
 },
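As an illustration, a filter group with the properties above could be created from the command line. This is a minimal sketch using the generic @arv@ CLI pattern shown elsewhere in this documentation; the group name is a placeholder:
 $ arv --format=uuid group create --group '{
     "group_class": "filter",
     "name": "Public projects and collections",
     "properties": {
       "filters": [
         ["groups.name", "like", "Public%"],
         ["collections.owner_uuid", "=", "zzzzz-j7d0g-0123456789abcde"],
         ["uuid", "is_a", ["arvados#group", "arvados#collection"]]
       ]
     }}'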
 
"Trashed or deleting":#trashing a filter group causes the group itself to be hidden or deleted, but has no effect on the items returned in "contents", i.e. the database objects in "contents" are not hidden or deleted and may be accessed by other means. h3(#trashing). Trashing groups Groups can be trashed by updating the record and setting the @trash_at@ field, or with the "delete":#delete method. The delete method sets @trash_at@ to "now". The value of @trash_at@ can be set to a time in the future as a feature to automatically expire groups. When @trash_at@ is set, @delete_at@ will also be set. Normally @delete_at = trash_at + Collections.DefaultTrashLifetime@ for projects and filter groups, and @delete_at = trash_at@ for role groups. When the @trash_at@ time is past but @delete_at@ is in the future, the trashed group is invisible to most API calls unless the @include_trash@ parameter is true. All objects directly or indirectly owned by the group (including subprojects, collections, or container requests) are considered trashed as well. Groups in the trashed state can be "untrashed":#untrash so long as @delete_at@ has not past. Once @delete_at@ is past, the group will be deleted permanently and can no longer be untrashed. Different group types have different behavior when deleted, described above. Note: like other groups, "role" groups may have @trash_at@ set to date in the future, however roles groups are required to have @delete_at = trash_at@, so the trash time and delete time expire at the same time. This means once @trash_at@ expires the role group is deleted immediately. Role groups with @trash_at@ set can only be "untrashed":#untrash before they expire. h3(#frozen). Frozen projects A user with @manage@ permission can set the @frozen_by_uuid@ attribute of a @project@ group to their own user UUID. Once this is done, no further changes can be made to the project or its contents, including subprojects. The @frozen_by_uuid@ attribute can be cleared by an admin user. It can also be cleared by a user with @manage@ permission, unless the @API.UnfreezeProjectRequiresAdmin@ configuration setting is active. The optional @API.FreezeProjectRequiresDescription@ and @API.FreezeProjectRequiresProperties@ configuration settings can be used to prevent users from freezing projects that have empty @description@ and/or empty @properties@ entries. h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3(#contents). contents Retrieve a list of items owned by the group or user. Use "recursive" to list objects within subprojects as well. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the group or user to enumerate. If this is a user UUID, this method returns the contents of that user's home project.|path|| |limit|integer (default 100)|Maximum number of items to return.|query|| |order|array|Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order. 
Sort within a resource type by prefixing the attribute with the resource name and a period.|query|@["collections.modified_at desc"]@| |filters|array|Conditions for filtering items.|query|@[["uuid", "is_a", "arvados#job"]]@| |recursive|boolean (default false)|Include items owned by subprojects.|query|@true@| |exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project. Use this to get a list of items that are shared with the user. Uses the logic described under the "shared" endpoint.|query|@true@| |include|array|Look up objects referenced by the indicated fields and include them in the response. Only "owner_uuid", "container_uuid" and "collection_uuid" are supported. If "owner_uuid" is given, the parent project or user will be returned. If "container_uuid" is given and container requests are returned in the response, the corresponding container records will also be returned. If "collection_uuid" is given and workflows are returned in the response, the collection records will also be returned. These referenced objects will be returned in the "included" field of the response. For compatibility, a string @"owner_uuid"@ is accepted as equivalent to @["owner_uuid"]@.|query|@"owner_uuid"@ @["owner_uuid","container_uuid"]@| |include_trash|boolean (default false)|Include trashed objects.|query|@true@| |include_old_versions|boolean (default false)|Include past versions of the collections being listed.|query|@true@| |select|array|Attributes of each object to return in the response. Specify an unqualified name like @uuid@ to select that attribute on all object types, or a qualified name like @collections.name@ to select that attribute on objects of the specified type. By default, all available attributes are returned, except on collections, where @manifest_text@ is not returned and cannot be selected due to an implementation limitation. This limitation may be removed in the future.|query|@["uuid", "collections.name"]@| Notes: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections. If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter. Use filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @["container_requests.state", "=", "Final"]@ to filter @container_requests@ where @state@ is @Final@. All other types of items owned by this group are unaffected by this filter and will still be included. When called with @include=owner_uuid@, the @included@ field of the response is populated with users, projects, or other groups that own the objects returned in @items@. This can be used to fetch an object and its parent with a single API call. When called with @include=container_uuid@, the @included@ field of the response is populated with the container associated with each container request in the response. h3. create Create a new Group. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |group|object||query|| |async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@| h3(#delete). delete Put a Group in the trash.
See "Trashing groups":#trashing for details. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path|| h3. get Gets a Group's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path|| h3. list List groups. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. show show groups Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string||path|| h3. update Update attributes of an existing Group. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path|| |group|object||query|| |async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@| h3(#untrash). untrash Remove a Group from the trash. Only valid when @delete_at@ is in the future. This sets the @trash_at@ and @delete_at@ fields to @null@. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Group to untrash.|path|| |ensure_unique_name|boolean (default false)|Rename project uniquely if untrashing it would fail with a unique name conflict.|query|| h3(#shared). shared This endpoint returns the toplevel set of groups to which access is granted through a chain of one or more permission links rather than through direct ownership by the current user account. This is useful for clients which wish to browse the list of projects the user has permission to read which are not part of the "home" project tree. Similar behavior is also available with the @exclude_home_project@ option of the "contents" endpoint. Specifically, the logic is:
select groups that are readable by current user AND
    (the owner_uuid is a user (but not the current user) OR
     the owner_uuid is not readable by the current user OR
     the owner_uuid is a group but group_class is not a project)
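A minimal sketch of calling this endpoint over HTTP, assuming the standard @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set:
 $ curl -s -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
     "https://$ARVADOS_API_HOST/arvados/v1/groups/shared?include=owner_uuid"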
table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |include|string|If provided with the value "owner_uuid", this will return owner objects in the @included@ field of the response.|query|| Notes: When called with @include=owner_uuid@, the @included@ field of the response is populated with users and non-project groups that own the objects returned in @items@. In addition to the "include" parameter, this endpoint also supports the same parameters as the "list method.":{{site.baseurl}}/api/methods.html#index ================================================ FILE: doc/api/methods/keep_services.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "keep_services" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_services@ Object type: @bi6l4@ Example UUID: @zzzzz-bi6l4-0123456789abcde@ h2. Resource The keep_services resource allows Keep clients to discover storage servers and proxies available on the cluster for persistent storage and retrieval of keep blocks. Each KeepService has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Example| |service_host|string|hostname of the server|| |service_port|integer|TCP port of the service|| |service_ssl_flag|boolean|if the server uses SSL|| |service_type|string|The service type, one of "disk", "blob" (cloud object store) or "proxy" (keepproxy)|| h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3. accessible Get a list of keep services that are accessible to the requesting client. Unlike @list@, this is context-sensitive based on the requester, for example providing the list of actual Keep servers when inside the cluster, but providing a proxy service if the client contacts Arvados from outside the cluster. h3. create Create a new KeepService. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |keep_service|object||query|| h3. delete Delete an existing KeepService. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path|| h3. get Gets a KeepService's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path|| h3. list List keep_services. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. update Update attributes of an existing KeepService. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path|| |keep_service|object||query|| ================================================ FILE: doc/api/methods/links.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "links" ...
{% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@ Object type: @o0j2j@ Example UUID: @zzzzz-o0j2j-0123456789abcde@ h2. Resource Links are an extensible way to describe relationships between Arvados objects and metadata about individual objects. Each link has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description| |head_uuid|string|The object being described or acted on.| |tail_uuid|string|The origin or actor in the description or action (may be null).| |link_class|string|Type of link| |name|string|Primary value of the link.| |properties|hash|Additional information, expressed as a key→value hash. Key: string. Value: string, number, array, or hash. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters| h2. Link classes Some classes are pre-defined by convention and have standard meanings attached to names. h3. permission The significance of permission links is discussed in the "permission links":{{site.baseurl}}/api/permission-model.html#links section of the permission model documentation. h3. star A **star** link is a shortcut to a project that is displayed in the user interface (Workbench) as "favorites". Users can mark their own favorites (implemented by creating or deleting **star** links). An admin can also create **star** links owned by the "Public favorites" project. These favorites will be displayed to all users that have permission to read the project that has been favorited. The schema for a star link is: table(table table-bordered table-condensed). |_. Field|_. Value|_. Description| |owner_uuid|user or group uuid|Either the user that owns the favorite, or the "Public favorites" group.| |tail_uuid|user or group uuid|Should be the same as owner_uuid| |head_uuid|project uuid|The project being favorited| |link_class|string of value "star"|Indicates this represents a link to a user favorite| h4. Creating a public favorite @owner_uuid@ is either an individual user, or the "Public favorites" group. The @head_uuid@ is the project being favorited.
$ linkuuid=$(arv --format=uuid link create --link '{
    "link_class": "star",
    "owner_uuid": "zzzzz-j7d0g-publicfavorites",
    "tail_uuid": "zzzzz-j7d0g-publicfavorites",
    "head_uuid":  "zzzzz-j7d0g-theprojectuuid"}')
h4. Removing a favorite
$ arv link delete --uuid zzzzz-o0j2j-thestarlinkuuid
h4. Listing favorites To list all 'star' links that will be displayed for a user:
$ arv link list --filters '[
  ["link_class", "=", "star"],
  ["tail_uuid", "in", ["zzzzz-j7d0g-publicfavorites", "zzzzz-tpzed-currentuseruuid"]]]'
h3. tag A **tag** link describes an object using an unparsed plain text string. Tags can be used to annotate objects that are not directly editable by the user, like collections and objects shared as read-only. table(table table-bordered table-condensed). |_. tail_type→head_type|_. name→head_uuid {properties}| |→Collection | _tag name_ → _collection uuid_| |→Job | _tag name_ → _job uuid_| h3. published_port A **published_port** link enables external access to container ports via user-defined domain names. If the cluster is configured as follows to forward HTTP requests from external clients to container ports:
Services:
  ContainerWebServices:
    ExternalURL: https://*.containers.zzzzz.example.com/
A user can create the following link to route HTTP requests like @https://servicename.containers.zzzzz.example.com/@ to port 12345 in the container running for container request @zzzzz-xvhdp-012340123401234@:
{
  "link_class" "published_port",
  "head_uuid": "zzzzz-xvhdp-012340123401234",
  "name": "servicename",
  "properties": {
    "port": 12345
  }
}
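The same link could also be created from the command line, following the pattern of the **star** link examples above (the container request UUID and service name here are placeholders):
 $ arv link create --link '{
     "link_class": "published_port",
     "head_uuid": "zzzzz-xvhdp-012340123401234",
     "name": "servicename",
     "properties": {"port": 12345}}'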
Refer to the "documentation about published ports":container_requests.html#published_ports for additional information. h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3. create Create a new Link. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |link|object||query|| When you create a new permission link with the same @head_uuid@ and @tail_uuid@ as an existing permission link, the API returns the existing link instead of creating a new one. If the requested permission level is higher than the existing link, the existing link is updated accordingly. Otherwise the existing link is returned unchanged. h3. delete Delete an existing Link. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path|| When you delete a permission link, any other existing permission links that have the same @head_uuid@ and @tail_uuid@ are also deleted. h3. get Gets a Link's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path|| h3. list List links. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. update Update attributes of an existing Link. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path|| |link|object||query|| When you update a permission link such that it has the same @head_uuid@ and @tail_uuid@ as one or more existing permission links, the API deletes the other links. If the highest permission level among the deleted links was higher than the newly updated link, the updated link's permission level is increased accordingly. h3. get_permissions Get all permission links that point directly to given UUID (in the head_uuid field). The requesting user must have @can_manage@ permission or be an admin. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the object.|path|| ================================================ FILE: doc/api/methods/logs.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "logs" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/logs@ Object type: @57u5n@ Example UUID: @zzzzz-57u5n-0123456789abcde@ h2. Resource Each Log has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Example| |object_uuid|string|The arvados object that is the subject of the log.|| |event_at|datetime||| |event_type|string|A user-defined category or type for this event.|@LOGIN@| |summary|text||| |properties|hash||| h3. Creation Any user may create Log entries for any event they find useful. 
User-generated Logs have no intrinsic meaning to other users or to the Arvados system itself; it is up to each user to choose appropriate log event types and summaries for their project. h3. System Logs Arvados uses Logs to record creation, deletion, and updates of other Arvados resources. h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3. create Create a new log entry. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |log|object||query|| h3. delete Delete an existing log entry. This method can only be used by privileged (system administrator) users. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path|| h3. get Retrieve a log entry. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path|| h3. list List log entries. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. update Update attributes of an existing log entry. This method can only be used by privileged (system administrator) users. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path|| |log|object||query|| ================================================ FILE: doc/api/methods/user_agreements.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "user_agreements" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/user_agreements@ h2. Resource This provides an API for inactive users to sign clickthrough agreements prior to being activated. h2. Methods Required arguments are displayed in %{background:#ccffcc}green%. h3. list List user agreements. This is a list of collections containing HTML files with the text of the clickthrough agreement(s), which can be rendered by Workbench. table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | h3. signatures List user agreements that have already been signed. These are recorded as link objects of @{"link_class": "signature", "name": "click"}@. table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | h3. sign Sign a user agreement. table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the user agreement collection.|path|| ================================================ FILE: doc/api/methods/users.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "users" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/users@ Object type: @tpzed@ Example UUID: @zzzzz-tpzed-0123456789abcde@ h2. Resource Users represent individuals with access to the Arvados cluster. Each User has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Example| |email|string||| |username|string|The username used for the user's git repositories and virtual machine logins. Usernames must start with a letter, and contain only alphanumerics. When a new user is created, a default username is set from their e-mail address. Only administrators may change the username.|| |first_name|string||| |last_name|string||| |identity_url|string||| |is_admin|boolean||| |prefs|hash||| |is_active|boolean||| h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3. create Create a new User. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |user|object||query|| h3(#current). current Get the user associated with the provided API token. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | h3. delete Delete an existing User. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the User in question.|path|| h3. get Gets a User's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the User in question.|path|| h3. list List users. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. system Get the user record for the "system user.":{{site.baseurl}}/api/permission-model.html#system Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | h3. update Update attributes of an existing User. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the User in question.|path|| |user|object|The new attributes.|query|| h3. setup Set up a user. Adds the user to the "All users" group. Enables the user to invoke @activate@. See "user management":{{site.baseurl}}/admin/user-management.html for details. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the User in question.|query|| h3. activate Check that a user is set up and has signed all the user agreements. If so, activate the user. Users can invoke this for themselves. See "user agreements":{{site.baseurl}}/admin/user-management.html#user_agreements for details. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the User in question.|query|| h3.
unsetup Remove the user from the "All users" group and deactivate the user. See "user management":{{site.baseurl}}/admin/user-management.html for details. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the User in question.|path|| h3. merge Transfer ownership of data from the "old" user account to the "new" user account. When @redirect_to_new_user@ is @true@ this also causes logins to the "old" account to be redirected to the "new" account. The "old" user account that was redirected becomes invisible in user listings. See "Merge user accounts":{{site.baseurl}}/admin/link-accounts.html, "Reassign user data ownership":{{site.baseurl}}/admin/reassign-ownership.html, and "Linking alternate login accounts":{{site.baseurl}}/user/topics/link-accounts.html for examples of how this method is used. Must supply either @new_user_token@ (the currently authorized user will be the "old" user), or both @new_user_uuid@ and @old_user_uuid@ (the currently authorized user must be an admin). Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |new_user_token|string|A valid token for the "new" user|query|| |new_user_uuid|uuid|The uuid of the "new" account|query|| |old_user_uuid|uuid|The uuid of the "old" account|query|| |new_owner_uuid|uuid|The uuid of a project to which objects owned by the "old" user will be reassigned.|query|| |redirect_to_new_user|boolean|If true, also redirect login and reassign authorization credentials from "old" user to the "new" user|query|| h3. authenticate Create a new API token based on username/password credentials. Returns an "API client authorization":api_client_authorizations.html object containing the API token, or an "error object.":../requests.html#errors Valid credentials are determined by the choice of "configured login backend.":{{site.baseurl}}/install/setup-login.html Note: this endpoint cannot be used with login backends that use web-based third party authentication, such as Google or OpenID Connect. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|username|string|The username.|body|| {background:#ccffcc}.|password|string|The password.|body|| ================================================ FILE: doc/api/methods/virtual_machines.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "virtual_machines" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/virtual_machines@ Object type: @2x53u@ Example UUID: @zzzzz-2x53u-0123456789abcde@ h2. Resource The virtual_machines resource lists compute resources in the Arvados cluster to which a user may log in to get an interactive shell (via ssh or webshell). Each VirtualMachine has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. Example| |hostname|string||| h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. h3. create Create a new VirtualMachine. 
Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |virtual_machine|object||query|| h3. delete Delete an existing VirtualMachine. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path|| h3. get Gets a VirtualMachine's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path|| h3(#logins). logins Get a list of SSH keys and account names that should be able to log in to a given virtual machine. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string||path|| The response is an object with the field @items@ containing an array of objects in the following format: table(table table-bordered table-condensed). |_. Key|_. Value type|_. Description|_. Example| |username|string|Name of the Unix login account to which the user should be able to log in|@"jsmith"@| |hostname|string|Hostname of the virtual machine|@"shell.xyzzy.arvadosapi.com"@| |public_key|string|SSH public key|@"ssh-rsa AAAAB3NzaC1yc2E..."@| |user_uuid|string|UUID of the user who should be able to log in|@"xyzzy-tpzed-mv4d7dy7n91te11"@| |virtual_machine_uuid|string|UUID of the "virtual machine resource":{{site.baseurl}}/api/methods/virtual_machines.html|@"zzzzz-2x53u-kvszmclnbjuv8xc"@| |authorized_key_uuid|string|UUID of the "authorized key resource":{{site.baseurl}}/api/methods/authorized_keys.html|@"zzzzz-fngyi-v9p0cyfmjxbio64"@| h3. get_all_logins Get a list of SSH keys and account names that should be able to log in for every virtual machine in the system. Arguments: none. The response has the same format as the response to the "logins method":#logins above. h3. list List virtual_machines. See "common resource list method.":{{site.baseurl}}/api/methods.html#index h3. update Update attributes of an existing VirtualMachine. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path|| |virtual_machine|object||query|| ================================================ FILE: doc/api/methods/workflows.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: API Methods title: "workflows" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/workflows@ Object type: @7fd4e@ Example UUID: @zzzzz-7fd4e-0123456789abcde@ h2. Resource Stores a "Common Workflow Language":http://commonwl.org (CWL) computational workflow that can be searched for, browsed and executed (submitted to Crunch) from the workbench. Each Workflow offers the following optional attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html: table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description|_. 
Example| |name|string|If not specified, will be set to any "name" from the "definition" attribute.|| |description|string|If not specified, will be set to any "description" from the "definition" attribute.|| |definition|string|A "Common Workflow Language" document.|Visit "Common Workflow Language":http://www.commonwl.org/ for details.| |collection_uuid|string|This attribute is always null. It is reserved for future development. {% comment until 23057 %} If non-null, a linked workflow definition stored in a Collection. See below. {% endcomment %}|| {% comment until 23057 %} h2. Workflows linked to Collections If @collection_uuid@ is set, this significantly changes the behavior of the workflow record. The linked Collection must have the following properties. These are extracted from and must be synchronized with the workflow in @arv:workflowMain@. They are copied into the workflow collection's @properties@ for ease of processing by client tools such as Workbench. table(table table-bordered table-condensed). |_. Attribute|_. Type|_. Description| |type|string|Value must be 'workflow'| |arv:workflowMain|string|The file path within the collection that is the top-level workflow that will be launched.| |arv:cwl_inputs|array of object|Array of "workflow input parameters":https://www.commonwl.org/v1.2/Workflow.html#WorkflowInputParameter in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing | |arv:cwl_outputs|array of object|Array of "workflow output parameters":https://www.commonwl.org/v1.2/Workflow.html#WorkflowOutputParameter in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing | |arv:cwl_requirements|array of object|Array of "workflow process requirements":https://www.commonwl.org/v1.2/Workflow.html#Workflow in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing (in particular, this must list requirements that affect initial launching of the workflow such as "WorkflowRunnerResources":{{site.baseurl}}/user/cwl/cwl-extensions.html ).| |arv:cwl_hints|array of object|Array of "workflow process hints":https://www.commonwl.org/v1.2/Workflow.html#Workflow in "fully expanded form":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing (in particular, this must list hints that affect initial launching of the workflow such as "WorkflowRunnerResources":{{site.baseurl}}/user/cwl/cwl-extensions.html ).| When @collection_uuid@ is set, the workflow record @name@, @description@, @definition@ and @owner_uuid@ are all set from the linked collection. The workflow record can no longer be updated directly, but changes to the linked collection will be reflected in the workflow record. Trashing the linked collection will cause the workflow record to become trashed and eventually deleted as well. The workflow record cannot be un-linked from a collection, only deleted and re-created. When a workflow is linked to a collection, the collection can be queried and fetched together with the workflow. The @filters@ argument can filter on attributes of the collection referenced by @collection_uuid@. For example, @[["collection.properties.category", "=", "WGS"]]@ will match workflow definitions linked to collections that have a "category" property with the value "WGS". 
When using the "group contents":groups.html#contents API to fetch workflow records, in addition the previously-described filters, you can use @include=["collection_uuid"]@ to include the collection records corresponding to the @collection_uuid@ of the workflow records in the response. {% endcomment %} h2. Methods See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@. Required arguments are displayed in %{background:#ccffcc}green%. Supports federated @create@, @delete@, @get@, @list@, and @update@. h3. create Create a new Workflow. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|workflow|object|Workflow resource|request body|| h3. delete Delete an existing Workflow. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path|| h3. get Get a Workflow's metadata by UUID. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path|| h3. list List workflows. See "common resource list method.":{{site.baseurl}}/api/methods.html#index {% comment until 23057 %} The @filters@ argument can filter on attributes of the collection referenced by @collection_uuid@. For example, @[["collection.properties.category", "=", "WGS"]]@ will match workflow definitions linked to collections that have a "category" property with the value "WGS". {% endcomment %} h3. update Update attributes of an existing Workflow. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path|| |workflow|object||query|| ================================================ FILE: doc/api/methods.html.textile.liquid ================================================ --- layout: default navsection: api navmenu: Concepts title: Common resource methods ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} The following methods are available for most resources. Some resources may limit who can perform certain operations. Consult documentation for individual resource types for details. The methods are relative to the base URI, e.g., @/arvados/v1/resource_type@. For arguments specifying a *Location* of @path@, the value of the argument is incorporated into the path portion of the URI. For example, a @uuid@ of @aaaaa-bbbbb-ccccccccccccccc@ in a path position yields a URI of @/arvados/v1/resource_type/aaaaa-bbbbb-ccccccccccccccc@. Arguments specifying a *Location* of "query" are incorporated into the query portion of the URI or request body. For example, @/arvados/v1/resource_type?count=none@. Certain method calls on certain object types support "federation":{{site.baseurl}}/architecture/federation.html: the ability to operate on objects owned by different clusters. API pages for specific object types list which federated operations are supported for that type (if any) in the "Methods" section. Methods which implicitly include a cluster ID (such as @GET@ on a specific UUID, using the UUID prefix) will be directed to the appropriate cluster. 
Methods that don't implicitly include the cluster ID (such as @create@) use the @cluster_id@ query parameter to specify the cluster to which the request should be directed. * "create":#create * "delete":#delete * "get":#get * "list":#index ** "Available list method filters":#filters *** "Filtering using substring search":#substringsearchfilter *** "Filtering on subproperties":#subpropertyfilters *** "Filtering using boolean expressions":#filterexpression ** "Federated listing":#federated-list ** "Results of list method":#list-results * "update":#update h2(#create). create The @create@ method creates a new object of the specified type. Note that: * Only the listed attributes (and "standard metadata":resources.html) are set * Unset attributes will get default values * The attributes of a given resource type are fixed (you cannot introduce new top-level attributes) This method corresponds to the HTTP request @POST /arvados/v1/resource_type@. A successful create call returns a copy of the new object. To create an object on a remote cluster (federated create), provide the @cluster_id@ of the target cluster. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location | |{resource_type}|object|Name is the singular form of the resource type, e.g., for the "collections" resource, this argument is "collection"|body| |{cluster_id}|string|Optional, the cluster on which to create the object if not the current cluster.|query| |select |array |Attributes of the new object to return in the response (by default, all available attributes are returned). Example: @["uuid","name","modified_at"]@|query| h2(#delete). delete The @delete@ method deletes an object of the specified type. It corresponds to the HTTP request @DELETE /arvados/v1/resource_type/uuid@. A successful delete call returns a copy of the deleted object. The cluster ID portion of the @uuid@ is used to determine which cluster owns the object; a federated delete request will be routed to that cluster. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location | {background:#ccffcc}.|uuid|string|The UUID of the object in question.|path| |select |array |Attributes of the deleted object to return in the response (by default, all available attributes are returned). Example: @["uuid","name","modified_at"]@|query| h2(#get). get The @get@ method gets a single object with the specified @uuid@. It corresponds to the HTTP request @GET /arvados/v1/resource_type/uuid@. The cluster ID portion of the @uuid@ is used to determine which cluster owns the object; a federated get request will be routed to that cluster. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location | {background:#ccffcc}.|uuid|string|The UUID of the object in question.|path| |select |array |Attributes of the object to return in the response (by default, all available attributes are returned). Example: @["uuid","name","modified_at"]@|query| h2(#index). list The @list@ method requests a list of resources of that type. It corresponds to the HTTP request @GET /arvados/v1/resource_type@. All resources support the @list@ method unless otherwise noted. Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location | |limit |integer|Maximum number of resources to return. If not provided, the server will provide a default limit.
Server may also impose a maximum number of records that can be returned in a single request.|query| |offset |integer|Skip the first 'offset' number of resources that would be returned under the given filter conditions.|query| |filters |array |"Conditions for selecting resources to return.":#filters|query| |order |array |Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order. (If not specified, it will be ascending). Example: @["head_uuid asc","modified_at desc"]@ Default: @["modified_at desc", "uuid asc"]@|query| |select |array |Attributes of each object to return in the response (by default, all available attributes are returned, except collections, which do not return @manifest_text@ unless explicitly selected). Example: @["uuid","name","modified_at"]@|query| |distinct|boolean|When returning multiple records whose selected attributes (see @select@) are equal, return them as a single response entry. Default is @false@.|query| |count|string|@"exact"@ (default): Include an @items_available@ response field giving the number of distinct matching items that can be retrieved (irrespective of @limit@ and @offset@ arguments). @"none"@: Omit the @items_available@ response field. This option will produce a faster response.|query| h3(#filters). Available list method filters The value of the @filters@ parameter is an array of conditions. The @list@ method returns only the resources that satisfy all of the given conditions. In other words, the conjunction @AND@ is implicit. Each condition is expressed as an array with three elements: @[attribute, operator, operand]@. table(table table-bordered table-condensed). |_. Index|_. Element|_. Type|_. Description|_. Examples| |0|attribute|string|Name of the attribute to compare (or "any" to return resources with any matching attribute)|@script_version@, @head_uuid@, @any@| |1|operator|string|Comparison operator|@>@, @>=@, @like@, @not in@| |2|operand|string, array, or null|Value to compare with the resource attribute|@"d00220fb%"@, @"1234"@, @["foo","bar"]@, @nil@| The following operators are available. table(table table-bordered table-condensed). |_. Operator|_. Operand type|_. Description|_. Example| |@=@, @!=@, @<>@|string, number, timestamp, JSON-encoded array, JSON-encoded object, or null|Equality comparison|@["tail_uuid","=","xyzzy-j7d0g-fffffffffffffff"]@ @["tail_uuid","!=",null]@ @["storage_classes_desired","=","[\"default\"]"]@| |@<@, @<=@, @>=@, @>@|string, number, or timestamp|Ordering comparison|@["script_version",">","123"]@| |@like@, @ilike@|string|SQL pattern match. Single character match is @_@ and wildcard is @%@. The @ilike@ operator is case-insensitive|@["script_version","like","d00220fb%"]@| |@in@, @not in@|array of strings or integers|Set membership|@["script_version","in",["main","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@| |@is_a@|string|Arvados object type|@["head_uuid","is_a","arvados#collection"]@| |@exists@|string|Presence of subproperty|@["properties","exists","my_subproperty"]@| |@contains@|string, array of strings|Presence of one or more keys or array elements|@["storage_classes_desired", "contains", ["foo", "bar"]]@ (matches both @["foo", "bar"]@ and @["foo", "bar", "baz"]@) (note @[..., "contains", "foo"]@ is also accepted, and is equivalent to @[..., "contains", ["foo"]]@)| h4(#substringsearchfilter). 
Filtering using substring search Resources can also be filtered by searching for a substring in attributes of type @string@, @array of strings@, @text@, and @hash@, which are indexed in the database specifically for search. To use substring search, the filter must: * Specify @any@ as the attribute * Use either the @like@ or @ilike@ operator * Have an operand of type @string@ that is wrapped in the SQL pattern match wildcard character @%@ For example, the @["any", "like", "%foo%"]@ filter will return all resources that contain @foo@ in the content of at least one attribute of the previously defined types. This is the recommended way to do keyword and file name search across the entire database. Note that only exact substring matches are returned and results are unranked and returned in the order specified by the @list@ @order@ argument. h4(#subpropertyfilters). Filtering on subproperties Some record types have an additional @properties@ attribute that allows recording and filtering on additional key-value pairs. To filter on a subproperty, the value in the @attribute@ position has the form @properties.user_property@. You may also use JSON-LD / RDF style URIs for property keys by enclosing them in @<...>@, for example @properties.<http://example.com/my_property>@. Alternatively, you may also provide a JSON-LD "@context" field; however, at this time JSON-LD contexts are not interpreted by Arvados. table(table table-bordered table-condensed). |_. Operator|_. Operand type|_. Description|_. Example| |@=@, @!=@|string, number or boolean|Equality comparison|@["properties.my_subproperty", "=", "fizzy whizy sparkle pop"]@| |@<@, @<=@, @>=@, @>@|string or number|Ordering comparison|@["properties.my_subproperty", "<", 3]@| |@like@, @ilike@|string|SQL pattern match, single character match is @_@ and wildcard is @%@, ilike is case-insensitive|@["properties.my_subproperty", "like", "d00220fb%"]@| |@in@, @not in@|array of strings|Set membership|@["properties.my_subproperty", "in", ["fizz", "buzz"]]@| |@exists@|boolean|Test if a subproperty is present or not (determined by operand).|@["properties.my_subproperty", "exists", true]@| |@contains@|string, number|Filter where subproperty has a value either by exact match or value is element of subproperty list.|@["properties.foo", "contains", "bar"]@ will find both @{"foo": "bar"}@ and @{"foo": ["bar", "baz"]}@.| Note that exclusion filters @!=@ and @not in@ will return records for which the property is not defined at all. To restrict filtering to records on which the subproperty is defined, combine with an @exists@ filter. h4(#filterexpression). Filtering using boolean expressions In addition to the three-element array form described above, a string containing a boolean expression is also accepted. The following restrictions apply: * The expression must contain exactly one operator. * The operator must be @=@, @<@, @<=@, @>@, or @>=@. * There must be exactly one pair of parentheses, surrounding the entire expression. * Each operand must be the name of a numeric attribute like @replication_desired@ (literal values like @3@ and non-numeric attributes like @uuid@ are not accepted). * The expression must not contain whitespace other than an ASCII space (newline and tab characters are not accepted). Examples: * @(replication_desired > replication_confirmed)@ * @(replication_desired = replication_confirmed)@ Both types of filter (boolean expressions and @[attribute, operator, operand]@ filters) can be combined in the same API call.
Example: * @{"filters": ["(replication_desired > replication_confirmed)", ["replication_desired", "<", 2]]}@ h3(#federated-list). Federated listing Federated listing forwards a request to multiple clusters and combines the results. Currently only a very restricted form of the "list" method is supported. To query multiple clusters, the list request must: * Have filters only matching @[["uuid", "in", [...]]@ or @["uuid", "=", "..."]@ * Specify @count=none@ * Not specify @limit@, @offset@ or @order@ * Not request more items than the maximum response size This form may be used to request a specific list of objects by UUID which are owned by multiple clusters. h3(#list-results). Results of list method A successful call to list will return the following object. table(table table-bordered table-condensed). |_. Attribute |_. Type |_. Description | |kind|string|type of objects returned| |offset|integer|query offset in effect| |limit|integer|query limit in effect| |items|array|actual query payload, an array of resource objects| |items_available|integer|total items available matching query| h2(#update). update The @update@ method updates fields on the object with the specified @uuid@. It corresponds to the HTTP request @PUT /arvados/v1/resource_type/uuid@. Note that only the listed attributes (and "standard metadata":resources.html) are updated, unset attributes will retain their previous values, and the attributes of a given resource type are fixed (you cannot introduce new toplevel attributes). Also note that updates replace the value of the attribute, so if an attribute has an object value, the entire object is replaced. A successful update call returns the updated copy of the object. The cluster ID portion of the @uuid@ is used to determine which cluster owns the object, a federated update request will be routed to that cluster. table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location | {background:#ccffcc}.|uuid|string|The UUID of the resource in question.|path|| |{resource_type}|object||query|| |select |array |Attributes of the updated object to return in the response (by default, all available attributes are returned). Example: @["uuid","name","modified_at"]@|query| ================================================ FILE: doc/api/permission-model.html.textile.liquid ================================================ --- layout: default navsection: architecture navmenu: Concepts title: "Permission model" ... {% comment %} Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} There are four levels of permission: *none*, *can_read*, *can_write*, and *can_manage*. * *none* is the default state when there are no other permission grants. ** the object is not included in any list query response. ** direct queries of the object by uuid return 404 Not Found. ** Link objects require valid identifiers in @head_uuid@ and @tail_uuid@, so an attempt to create a Link that references an unreadable object will return an error indicating the object is not found. * *can_read* grants read-only access to the record. Attempting to update or delete the record returns an error. ** *can_read* does not allow a reader to see any permission grants on the object except the object's owner_uuid and the reader's own permissions. * *can_write* permits changes to the record, including changing ownership and deleting the object. ** *can_write* cannot read, create, update or delete permission links associated with the object. 
** *can_write* also implies *can_read*. * *can_manage* permits the user to read, create, update and delete permission links whose @head_uuid@ is this object's @uuid@. ** *can_manage* also implies *can_write* and *can_read*. h2. Ownership All Arvados objects have an @owner_uuid@ field. Valid uuid types for @owner_uuid@ are "User" and "Group". In the case of a Group, the @group_class@ must be "project". The User or Group specified by @owner_uuid@ has *can_manage* permission on the object. This permission is one way: an object that is owned does not get any special permissions on the User or Group that owns it. To change the @owner_uuid@ field, it is necessary to have @can_write@ permission on both the current owner and the new owner. h2(#links). Permission links A permission link is a link object with: * @owner_uuid@ of the system user. * @link_class@ "permission" * @name@ one of *can_read*, *can_write*, *can_manage* or *can_login* * @head_uuid@ of some Arvados object * @tail_uuid@ of a User or Group. For a Group, the @group_class@ must be "role". This grants the permission in @name@ for @tail_uuid@ accessing @head_uuid@. If a User has *can_manage* permission on some object, the user has the ability to read, create, update and delete permission links with @head_uuid@ of the managed object. In other words, the user has the ability to modify the permission grants on the object. The *can_login* @name@ is only meaningful on a permission link with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the specified user permission to log into the specified virtual machine. The username for the VM is specified in the @properties@ field. Optionally, group membership can be specified in the same way. See the "VM login section on the 'User management at the CLI' page":{{ site.baseurl }}/admin/user-management-cli.html#vm-login for an example. h3. Transitive permissions Permissions can be obtained indirectly through nested ownership (*can_manage*) or by following multiple permission links. * If a User X owns project A, and project A owns project B, then User X *can_manage* project B. * If a User X *can_read* role A, and role A *can_read* Object B, then User X *can_read* Object B. * Permissions are narrowed to the least powerful permission on the path. ** If User X *can_write* role A, and role A *can_read* Object B, then User X *can_read* Object B. ** If User X *can_read* role A, and role A *can_write* Object B, then User X *can_read* Object B. h2. Projects and Roles A "project" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount@. * A project can own things (appear in @owner_uuid@) * A project can be owned by a user or another project. * The name of a project is unique only among projects and filters with the same owner_uuid. * Projects can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@). Putting a project in a @tail_uuid@ field is an error. A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount@. See "the groups API documentation":{{ site.baseurl }}/api/methods/groups.html for more information. * A filter group cannot own things (cannot appear in @owner_uuid@). Putting a filter group in an @owner_uuid@ field is an error. * A filter group can be owned by a user or a project. * The name of a filter is unique only among projects and filters with the same owner_uuid.
* Filters can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@). Putting a filter in a @tail_uuid@ field is an error.

A "role" is a subtype of Group that is treated in Workbench as a group of users who have permissions in common (typically an organizational group).

* A role cannot own things (cannot appear in @owner_uuid@). Putting a role in an @owner_uuid@ field is an error.
* All roles are owned by the system user.
* The name of a role is unique across a single Arvados cluster.
* Roles can be both targets (@head_uuid@) and origins (@tail_uuid@) of permission links.
* By default, all roles are visible to all active users. However, if the configuration entry @Users.RoleGroupsVisibleToAll@ is @false@, visibility is determined by normal permission rules, _i.e._, a role is only visible to users who have that role, and to admins.
* By default, any user can create a new role. However, if the configuration entry @Users.CanCreateRoleGroups@ is @false@, only admins can create roles.

h3. Access through Roles

A "role" consists of a set of users or other roles that have that role, and a set of permissions (primarily read/write/manage access to projects) the role grants.

If there is a permission link stating that user A *can_write* role R, then we say A has role R. This means user A has up to *can_write* access to everything the role has access to.

Because permissions are one-way, the links A *can_write* R and B *can_write* R do not imply that users A and B will be able to see each other. For users in a role to see each other, read permission should be added going in the opposite direction: R *can_read* A and R *can_read* B.

If a user needs to be able to manipulate permissions of objects that are accessed through the role (for example, to share project P with a user outside the role), then role R must have *can_manage* permission on project P (R *can_manage* P) and the user must be granted *can_manage* permission on R (A *can_manage* R).

h2. Special cases

Log table objects are additionally readable based on whether the User has *can_read* permission on @object_uuid@ (User can access log history about objects it can read). To retain the integrity of the log, the log table denies all update or delete operations.

Permission links where @tail_uuid@ is a User allow *can_read* on the link record by that user (a user can discover their own permission grants).

At least *can_read* on a Collection grants permission to read the blocks that make up the collection (API server returns signed blocks).

A user can only read a container record if the user has read permission on a container_request with that container_uuid.

*can_read* and *can_write* access on a user grants access to the user record, but not anything owned by the user. *can_manage* access to a user grants *can_manage* access to the user, _and everything owned by that user_. If a user A *can_read* role R, and role R *can_manage* user B, then user A *can_read* user B _and everything owned by that user_.

Modifying a role group requires *can_manage* permission (by contrast, *can_write* is sufficient to modify project groups and other object types).

h2(#system). System user and group

A privileged user account exists for use by internal Arvados components. This user manages system objects which should not be "owned" by any particular user. The system user uuid is @{siteprefix}-tpzed-000000000000000@.

h2. Anonymous user and group

An Arvados site may be configured to allow users to browse resources without requiring a login.
In this case, permissions for non-logged-in users are associated with the "anonymous" user. To make objects visible to anyone (both logged-in and non-logged-in users), they can be shared with the "anonymous" role. Note that objects shared with the "anonymous" user will only be visible to non-logged-in users!

The anonymous user uuid is @{siteprefix}-tpzed-anonymouspublic@. The anonymous group uuid is @{siteprefix}-j7d0g-anonymouspublic@.

h2. Example

!(full-width){{site.baseurl}}/images/Arvados_Permissions.svg!

================================================
FILE: doc/api/projects.html.textile.liquid
================================================

---
layout: default
navsection: api
title: "Projects and filter groups"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados @projects@ are used to organize objects. Projects can contain @collections@, @container requests@, @workflows@, etc. Projects can also contain other projects. An object is part of a project if the @owner_uuid@ of the object is set to the uuid of the project.

Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":{{ site.baseurl }}/api/methods/groups.html.

Projects can be manipulated via Workbench, the cli tools, the SDKs, and the Arvados APIs.

h2. The home project

Each user has a @home project@, which is implemented differently from other projects. It is a virtual project composed of all objects owned by the user, in other words, all objects with the @owner_uuid@ set to the @uuid@ of the user.

The home project is accessible via Workbench, which makes it easy to view its contents and to move objects into and out of the home project. The home project is also accessible via FUSE, WebDAV and the S3 interface.

The same thing can be done via the APIs. To put something in a user's home project via the cli or SDKs, one would set the @owner_uuid@ of the object to the user's @uuid@. This also implies that this user now has full ownership and control over that object.

The contents of the home project can be accessed with the @group contents@ API, e.g. via the cli with this command:
arv group contents --uuid zzzzz-tpzed-123456789012345
In this command, @zzzzz-tpzed-123456789012345@ is a @user@ uuid, which is unusual because we are using it as the argument to a @groups@ API. The @group contents@ API is normally used with a @group@ uuid. Because the home project is a virtual project, other operations via the @groups@ API are not supported.

h2(#filtergroups). Filter groups

Filter groups are another type of virtual project. They are implemented as an Arvados @group@ object with @group_class@ set to the value "filter".

Filter groups define one or more filters that are applied to all objects the current user can see; the matching objects are returned as the contents of the @group@. Filter groups are described in more detail in the "groups API reference":{{site.baseurl}}/api/methods/groups.html, and the rules for creating valid filters are the same as for "list method filters":{{site.baseurl}}/api/methods.html#filters.

Filter groups are accessible (read-only) via Workbench, the Arvados FUSE mount, and the WebDAV and S3 interfaces. Filter groups must currently be defined via the API, SDK or cli; there is no Workbench support yet.

As an example, create a filter group with the @arv@ cli:
~$  FILTER_GROUP_UUID=`arv -s group create --group '{
    "group_class":"filter",
    "name":"my filter group",
    "properties":{
      "filters":
        [
          ["collections.name","ilike","%test%"],
          ["uuid","is_a","arvados#collection"]
        ]
      }
    }'`

This filter group will contain all collections visible to the current user whose name matches the word @test@ (case insensitive). To see how this works via the Keep FUSE mount, create a few matching (and non-matching) collections:
~$ arv collection create --collection '{"name":"empty test collection 1"}'
~$ arv collection create --collection '{"name":"another empty collection"}'
~$ arv collection create --collection '{"name":"empty Test collection 2"}'
~$ mkdir -p keep
~$ arv-mount keep
~$ ls keep/by_id/$FILTER_GROUP_UUID/ -C1
'empty test collection 1'
'empty Test collection 2'
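
The same listing can be retrieved through the API, without a FUSE mount, using the @group contents@ call shown earlier for home projects. A minimal sketch, reusing the @FILTER_GROUP_UUID@ variable set above:

~$ arv group contents --uuid $FILTER_GROUP_UUID

The response is a standard list object, with the matching collections in the @items@ field.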
================================================
FILE: doc/api/properties.html.textile.liquid
================================================

---
layout: default
navsection: api
title: "Metadata properties"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados allows you to attach arbitrary properties to "collection":methods/collections.html, "container_request":methods/container_requests.html, "link":methods/links.html and "group":methods/groups.html records that have a @properties@ field. These are key-value pairs, where the value is a valid JSON type (string, number, null, boolean, array, object).

Searching for records using properties is described in "Filtering on subproperties":methods.html#subpropertyfilters .

h2. Controlling user-supplied properties

Arvados can be configured with a vocabulary file that lists valid properties and the range of valid values for those properties. This is described in "Metadata vocabulary":{{site.baseurl}}/admin/metadata-vocabulary.html .

Arvados offers options to set properties automatically and/or prevent certain properties, once set, from being changed by non-admin users. This is described in "Configuring collection's managed properties":{{site.baseurl}}/admin/collection-managed-properties.html .

The admin can require that certain properties be non-empty before "freezing a project":methods/groups.html#frozen .

h2. Reserved properties

Components that ship with Arvados may automatically set properties on objects. These usually help track provenance or provide additional link metadata. These properties usually have a key that starts with @arv:@, and can always be set even when the system is configured with a strict vocabulary.

table(table table-bordered table-condensed).
|_. Property name|_. Appears on|_. Value type|_.Description|
{% comment %}
The arv:git* container properties, and the associated Git commands, primarily come from arvados_cwl.executor.ArvCwlExecutor.get_git_info.
{% endcomment -%}
|arv:gitBranch|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the name of the branch checked out (the output of @git rev-parse --abbrev-ref HEAD@)|
|arv:gitCommitter|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the name and email address of the committer of the most recent commit (the output of @git log --format='%cn <%ce>' -n1 HEAD@)|
|arv:gitCommit|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the full checksum of the most recent commit (the output of @git log --format='%H' -n1 HEAD@)|
|arv:gitDate|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the commit date of the most recent commit in RFC 2822 format (the output of @git log --format='%cD' -n1 HEAD@)|
|arv:gitDescribe|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the name of the most recent tag that is reachable from the most recent commit (the output of @git describe --always --tags@)|
|arv:gitOrigin|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the URL of the remote named @origin@, if set (the output of @git remote get-url origin@)|
|arv:gitPath|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the absolute path of the checkout on the filesystem|
|arv:gitStatus|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with a machine-readable summary of files modified in the checkout since the most recent commit (the output of @git status --untracked-files=no --porcelain@)|
|arv:workflowMain|collection of type=workflow|string|Set on a collection containing a workflow created by @arvados-cwl-runner --create-workflow@, this is a relative reference inside the collection to the entry point of the workflow.|
|arv:failed_container_resubmitted|container request|uuid|Set on container requests that were automatically resubmitted by the workflow runner with modified run options, such as when using the @PreemptionBehavior@ or @OutOfMemoryRetry@ CWL extensions. Set to the uuid of the new, resubmitted container request.|

The following system properties predate the @arv:@ key prefix, but are still reserved and can always be set.

table(table table-bordered table-condensed).
|_. Property name|_. Appears on|_. Value type|_.Description|
|type|collection|string|Appears on collections to indicate the contents or usage. See "Collection type values":#collectiontype below for details.|
|container_request|collection|string|The UUID of the container request that produced an output or log collection.|
|docker-image-repo-tag|collection|string|For collections containing a Docker image, the repo/name:tag identifier|
|container_uuid|collection|string|The UUID of the container that produced a collection (set on collections with type=log)|
|container|collection|string|(legacy) The UUID of the container that produced a collection. Set on intermediate collections created by arvados-cwl-runner. Starting with Arvados 2.6.0, arvados-cwl-runner uses @container_uuid@ instead, but older versions may still set the @container@ property.|
|cwl_input|container_request|object|On an intermediate container request, the CWL workflow-level input parameters used to generate the container request|
|cwl_output|container_request|object|On an intermediate container request, the CWL workflow-level output parameters collected from the container request|
|template_uuid|container_request|string|For a workflow runner container request, the workflow record that was used to launch it.|
|workflowName|container_request|string|For a workflow runner container request, the "name" of the workflow record in @template_uuid@ at the time of launch (used for display only).|
|username|link|string|For a "can_login":permission-model.html#links permission link, the unix username on the VM that the user will have.|
|groups|link|array of string|For a "can_login":permission-model.html#links permission link, the unix groups on the VM that the user will be added to.|
|image_timestamp|link|string|When resolving a Docker image name and multiple links are found with @link_class=docker_image_repo+tag@ and same @link_name@, the @image_timestamp@ is used to determine precedence (most recent wins).|
|filters|group|array of array of string|Used to define "filter groups":projects.html#filtergroups|

h3(#collectiontype). Collection "type" values

Meaningful values of the @type@ property. These are recognized by Workbench when filtering on types of collections from the project content listing.

table(table table-bordered table-condensed).
|_. Type|_.Description|
|log|The collection contains log files from a container run.|
|output|The collection contains the output of a top-level container run (this is a container request where @requesting_container_uuid@ is null).|
|intermediate|The collection contains the output of a child container run (this is a container request where @requesting_container_uuid@ is non-empty).|
|workflow|A collection created by @arvados-cwl-runner --create-workflow@ containing a workflow definition.|

================================================
FILE: doc/api/requests.html.textile.liquid
================================================

---
layout: default
navsection: api
navmenu: Concepts
title: REST API syntax
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados exposes a REST API using standard HTTP requests.

h3. HTTP Method

Use @GET@ to request individual resources or lists of resources.

Use @POST@ to create new resources.

Use @PUT@ to update an existing resource.

Use @DELETE@ to remove an existing resource.

As a special case, a @POST@ with the query parameter @_method=GET@ will be treated as a GET request. This makes it possible to issue @GET@ requests where the query string exceeds the maximum request URI length, by putting the query string in the body of the request.

h3. Request URI

The URI portion of the request identifies the specific resource to operate on. For example, operations on "collections":{{site.baseurl}}/api/methods/collections.html use the @https://{{ site.arvados_api_host }}/arvados/v1/collections@ request URI prefix.

h3. Authorization header

Every request must include an API token. This identifies the user making the request for the purposes of access control.
In addition, tokens may be further "restricted in scope":{{site.baseurl}}/api/methods/api_client_authorizations.html#scope to only access certain API endpoints. API requests must provide the API token using the @Authorization@ header in the following format:
$ curl -v -H "Authorization: Bearer xxxxapitokenxxxx" https://192.168.5.2:8000/arvados/v1/collections
> GET /arvados/v1/collections HTTP/1.1
> ...
> Authorization: Bearer xxxxapitokenxxxx
> ...
On a cluster configured to use an OpenID Connect provider (other than Google) as a login backend, Arvados can be configured to accept an OpenID Connect access token in place of an Arvados API token. OIDC access tokens are also accepted by a cluster that delegates login to another cluster (LoginCluster) which in turn has this feature configured. See @Login.OpenIDConnect.AcceptAccessTokenScope@ in the "default config.yml file":{{site.baseurl}}/admin/config.html for details.
$ curl -v -H "Authorization: Bearer xxxx-openid-connect-access-token-xxxx" https://192.168.5.2:8000/arvados/v1/collections
h3. Parameters

Request parameters may be provided in one of two ways. They may be provided in the "query" section of the request URI, or they may be provided in the body of the request with application/x-www-form-urlencoded encoding. If parameters are provided in both places, their values will be merged. Parameter names must be unique. If a parameter appears multiple times, the behavior is undefined.

Structured and nested parameter values must be provided as urlencoded JSON.

h3. Result

Results are returned JSON-encoded in the response body.

h3(#errors). Errors

If a request cannot be fulfilled, the API will return a 4xx or 5xx HTTP status code. Be aware that the API server may return a 404 (Not Found) status for resources that exist but for which the client does not have read access. The API will also return an error record:

table(table table-bordered table-condensed).
|*Parameter name*|*Value*|*Description*|
|errors|array|An array of one or more error messages|
|error_token|string|A unique identifier used to correlate the error in the API server logs|

h2. Examples

h3. Create a new record
$ curl -v -X POST --data-urlencode 'collection={"name":"empty collection"}' -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections | jq .
> POST /arvados/v1/collections HTTP/1.1
> User-Agent: curl/7.38.0
> Host: 192.168.5.2:8000
> Accept: */*
> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
> Content-Length: 54
> Content-Type: application/x-www-form-urlencoded
>
} [data not shown]
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=utf-8
< Transfer-Encoding: chunked
< Connection: keep-alive
< Status: 200 OK
< Access-Control-Allow-Origin: *
< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
< Access-Control-Allow-Headers: Authorization
< Access-Control-Max-Age: 86486400
< X-UA-Compatible: IE=Edge,chrome=1
< ETag: "2ec9ef5151c1f7a1486ad169c33ae462"
< Cache-Control: max-age=0, private, must-revalidate
< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJTIwMjQ1NTE5YmEwMzU1MGZkMTBmYmY1YzllY2ZiMjFlBjsAVA%3D%3D--653bc9c20899d48ee8523e18d9a4c1cde0702577; path=/; HttpOnly
< X-Request-Id: 56aa10bc49097f3b44d3ed946bf0e61e
< X-Runtime: 0.049951
< X-Powered-By: Phusion Passenger 4.0.41
< Date: Fri, 28 Oct 2016 19:20:09 GMT
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
  "kind": "arvados#collection",
  "etag": "c5ifrv1ox2tu6alb559ymtkb7",
  "uuid": "962eh-4zz18-m1ma0mxxfg3mbcc",
  "owner_uuid": "962eh-tpzed-000000000000000",
  "created_at": "2016-10-28T19:20:09.320771531Z",
  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
  "modified_at": "2016-10-28T19:20:09.319661000Z",
  "name": "empty collection",
  "description": null,
  "properties": {},
  "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
  "manifest_text": "",
  "replication_desired": null,
  "replication_confirmed": null,
  "replication_confirmed_at": null,
  "expires_at": null
}
h3. Delete a record
$ curl -X DELETE -v -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-m1ma0mxxfg3mbcc | jq .
> DELETE /arvados/v1/collections/962eh-4zz18-m1ma0mxxfg3mbcc HTTP/1.1
> User-Agent: curl/7.38.0
> Host: 192.168.5.2:8000
> Accept: */*
> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
>
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=utf-8
< Transfer-Encoding: chunked
< Connection: keep-alive
< Status: 200 OK
< Access-Control-Allow-Origin: *
< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
< Access-Control-Allow-Headers: Authorization
< Access-Control-Max-Age: 86486400
< X-UA-Compatible: IE=Edge,chrome=1
< ETag: "1e8f72802cf1a6d0a5c4a1ebbfcc46a9"
< Cache-Control: max-age=0, private, must-revalidate
< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJTc2NDYyY2M0NTNlNmU3M2Y2M2E3YmFiMWQ1MTEyZGZkBjsAVA%3D%3D--d28c7dd640bd24e2b12f01e77088072138dcf145; path=/; HttpOnly
< X-Request-Id: e66fd3ab825bdb87301f5456161fb641
< X-Runtime: 0.028788
< X-Powered-By: Phusion Passenger 4.0.41
< Date: Fri, 28 Oct 2016 19:33:31 GMT
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
  "kind": "arvados#collection",
  "etag": "c5ifrv1ox2tu6alb559ymtkb7",
  "uuid": "962eh-4zz18-m1ma0mxxfg3mbcc",
  "owner_uuid": "962eh-tpzed-000000000000000",
  "created_at": "2016-10-28T19:20:09.320771000Z",
  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
  "modified_at": "2016-10-28T19:20:09.319661000Z",
  "name": "empty collection",
  "description": null,
  "properties": {},
  "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
  "manifest_text": "",
  "replication_desired": null,
  "replication_confirmed": null,
  "replication_confirmed_at": null,
  "expires_at": null
}
h3. Get a specific record
$ curl -v -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .
> GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km HTTP/1.1
> User-Agent: curl/7.38.0
> Host: 192.168.5.2:8000
> Accept: */*
> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
>
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=utf-8
< Transfer-Encoding: chunked
< Connection: keep-alive
< Status: 200 OK
< Access-Control-Allow-Origin: *
< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
< Access-Control-Allow-Headers: Authorization
< Access-Control-Max-Age: 86486400
< X-UA-Compatible: IE=Edge,chrome=1
< ETag: "fec2ddf433a352e5a2b5d356abd6d3d4"
< Cache-Control: max-age=0, private, must-revalidate
< X-Request-Id: 40b447507ff202ae9a0b0b3e0ebe98da
< X-Runtime: 0.011404
< X-Powered-By: Phusion Passenger 4.0.41
< Date: Fri, 28 Oct 2016 18:59:09 GMT
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
  "kind": "arvados#collection",
  "etag": "3mmn0s9e1z5s5opfofmtb9k8p",
  "uuid": "962eh-4zz18-xi32mpz2621o8km",
  "owner_uuid": "962eh-tpzed-000000000000000",
  "created_at": "2016-10-27T14:47:43.792587000Z",
  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
  "modified_at": "2016-10-27T14:47:43.792166000Z",
  "name": "Saved at 2016-10-27 14:47:43 UTC by peter@debian",
  "description": null,
  "properties": {},
  "portable_data_hash": "93a45073511646a5c3e2f4953fcf6f61+116",
  "manifest_text": ". eff999f3b5158331eb44a9a93e3b36e1+67108864+Aad3839bea88bce22cbfe71cf4943de7dab3ea52a@5826180f db141bfd11f7da60dce9e5ee85a988b8+34038725+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f 0:101147589:rna.SRR948778.bam\n",
  "replication_desired": null,
  "replication_confirmed": null,
  "replication_confirmed_at": null,
  "expires_at": null
}
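h3. Get a specific record, returning selected attributes

If only a few attributes are needed, the @select@ parameter (see the "common resource methods":{{site.baseurl}}/api/methods.html page) limits which attributes are returned, reducing the response size. A minimal sketch, reusing the token and collection from the previous example:

$ curl -G --data-urlencode 'select=["uuid","name","portable_data_hash"]' -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .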
h3. List records and filter by date (note: the response shown below is truncated).
$ curl -v -G --data-urlencode 'filters=[["created_at",">","2016-11-08T21:38:24.124834000Z"]]' -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections | jq .
> GET /arvados/v1/collections?filters=%5B%5B%22created_at%22%2C%22%3E%22%2C%222016-11-08T21%3A38%3A24.124834000Z%22%5D%5D HTTP/1.1
> User-Agent: curl/7.38.0
> Host: 192.168.5.2:8000
> Accept: */*
> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
>
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=utf-8
< Transfer-Encoding: chunked
< Connection: keep-alive
< Status: 200 OK
< Access-Control-Allow-Origin: *
< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
< Access-Control-Allow-Headers: Authorization
< Access-Control-Max-Age: 86486400
< X-UA-Compatible: IE=Edge,chrome=1
< ETag: "76345ef24952f073acc3a0c550241d4e"
< Cache-Control: max-age=0, private, must-revalidate
< X-Request-Id: d34b8ede4ffc707d8ed172dc2f47ff5e
< X-Runtime: 0.012727
< X-Powered-By: Phusion Passenger 4.0.41
< Date: Fri, 28 Oct 2016 19:08:52 GMT
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
  "kind": "arvados#collectionList",
  "etag": "",
  "self_link": "",
  "offset": 0,
  "limit": 100,
  "items": [
    {
      "kind": "arvados#collection",
      "etag": "bvgrrsg63zsenb9wnpnp0nsgl",
      "uuid": "962eh-4zz18-ybggo9im899vv60",
      "owner_uuid": "962eh-tpzed-000000000000000",
      "created_at": "2016-11-08T21:47:36.937106000Z",
      "modified_by_user_uuid": "962eh-tpzed-000000000000000",
      "modified_at": "2016-11-08T21:47:36.936625000Z",
      "name": "Log from cwl-runner job 962eh-8i9sb-45jww0k15fi5ldd",
      "description": null,
      "properties": {},
      "portable_data_hash": "a7820b94717eff86229927565fedbd72+85",
      "replication_desired": null,
      "replication_confirmed": null,
      "replication_confirmed_at": null,
      "expires_at": null
    },
   ...
    {
      "kind": "arvados#collection",
      "etag": "2fa07dx52lux8wa1loehwyrc5",
      "uuid": "962eh-4zz18-37i1tfl5de5ild9",
      "owner_uuid": "962eh-tpzed-000000000000000",
      "created_at": "2016-11-08T21:38:46.717798000Z",
      "modified_by_user_uuid": "962eh-tpzed-000000000000000",
      "modified_at": "2016-11-08T21:38:46.717409000Z",
      "name": null,
      "description": null,
      "properties": {},
      "portable_data_hash": "9d43d4c8328640446f6e252cda584e7e+54",
      "replication_desired": null,
      "replication_confirmed": null,
      "replication_confirmed_at": null,
      "expires_at": null
    }
  ],
  "items_available": 99
}
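The @offset@ and @limit@ fields in the response describe the page that was returned, and @items_available@ reports the total number of matching records. When @items_available@ exceeds @limit@, the remaining records can be fetched by repeating the query with an explicit @offset@. A sketch of requesting a second page of the query above:

$ curl -G --data-urlencode 'filters=[["created_at",">","2016-11-08T21:38:24.124834000Z"]]' --data-urlencode 'offset=100' -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections | jq .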
h3. Update a field
$ curl -v -X PUT --data-urlencode 'collection={"name":"rna.SRR948778.bam"}' -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .
> PUT /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km HTTP/1.1
> User-Agent: curl/7.38.0
> Host: 192.168.5.2:8000
> Accept: */*
> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
> Content-Length: 53
> Content-Type: application/x-www-form-urlencoded
>
} [data not shown]
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=utf-8
< Transfer-Encoding: chunked
< Connection: keep-alive
< Status: 200 OK
< Access-Control-Allow-Origin: *
< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
< Access-Control-Allow-Headers: Authorization
< Access-Control-Max-Age: 86486400
< X-UA-Compatible: IE=Edge,chrome=1
< ETag: "fbb50d2847426eab793e3fcf346ca9eb"
< Cache-Control: max-age=0, private, must-revalidate
< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJWI3NjFjMzVjMGI5OGExYmNjZDg0ZTg5MjZhMzcwMDE1BjsAVA%3D%3D--0e005d71fad15cb366e47361c38474b7447ba155; path=/; HttpOnly
< X-Request-Id: 76d3cb3c0995af6133b0a73a64f57354
< X-Runtime: 0.030756
< X-Powered-By: Phusion Passenger 4.0.41
< Date: Fri, 28 Oct 2016 19:15:16 GMT
< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
<
{
  "kind": "arvados#collection",
  "etag": "51509hhxo9qqjxqewnoz1b7og",
  "uuid": "962eh-4zz18-xi32mpz2621o8km",
  "owner_uuid": "962eh-tpzed-000000000000000",
  "created_at": "2016-10-27T14:47:43.792587000Z",
  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
  "modified_at": "2016-10-28T19:15:16.137814000Z",
  "name": "rna.SRR948778.bam",
  "description": null,
  "properties": {},
  "portable_data_hash": "93a45073511646a5c3e2f4953fcf6f61+116",
  "manifest_text": ". eff999f3b5158331eb44a9a93e3b36e1+67108864+Acca57af82cc18c5dfa47bdfd16e335fccd09dfa5@582618c4 db141bfd11f7da60dce9e5ee85a988b8+34038725+A7764f122f41f92c2d5bde1852fcdd1bea5f8bd78@582618c4 0:101147589:rna.SRR948778.bam\n",
  "replication_desired": null,
  "replication_confirmed": null,
  "replication_confirmed_at": null,
  "expires_at": null
}
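h3. List records using a POST request

As described under "HTTP Method" above, a @POST@ request with the query parameter @_method=GET@ is treated as a @GET@ request, which is useful when a filter expression is too long to fit in the request URI. A sketch of the date filter example above, rewritten this way:

$ curl -v "https://192.168.5.2:8000/arvados/v1/collections?_method=GET" --data-urlencode 'filters=[["created_at",">","2016-11-08T21:38:24.124834000Z"]]' -H "Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" | jq .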
================================================
FILE: doc/api/resources.html.textile.liquid
================================================

---
layout: default
navsection: api
navmenu: Concepts
title: Common resource fields
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

This page describes the common attributes shared by most or all Arvados resources.

h2(#resource). Resource

table(table table-bordered table-condensed).
|_. Attribute |_. Type |_. Description |_. Example|
|uuid|string|universally unique object identifier. Set on @create@.|@mk2qn-4zz18-w3anr2hk2wgfpuo@|
|owner_uuid|string|UUID of owner (must be a User or Group), set on @create@. Controls who may access the resource. Ownership may be changed explicitly with @update@, see "permission model":{{site.baseurl}}/api/permission-model.html for details.|@mk2qn-tpzed-a4lcehql0dv2u25@|
|name|string|Human-assigned name. Not present on all object types; check the individual API page. The uniqueness constraint varies by object type.||
|description|string|Free text description of the object. Not present on all object types; check the individual API page. May be HTML formatted, "see below for valid HTML tags and attributes":#descriptions .||
|created_at|datetime|When resource was created. Set on @create@.|@2013-01-21T22:17:39Z@|
|modified_at|datetime|When resource was last modified. Set on @create@ and @update@.|@2013-01-25T22:29:32Z@|
|modified_by_user_uuid|string|The owner of the API token used to authenticate the @create@ or @update@ request.|@mk2qn-tpzed-a4lcehql0dv2u25@|
|kind|string|@arvados#{resource_type}@|@arvados#collection@|
|etag|string|The ETag[1] of the resource|@1xlmizzjq7wro3dlb2dirf505@|

h2. Object UUID

Each object is assigned a UUID. This has the format @aaaaa-bbbbb-ccccccccccccccc@.

# The first field (@aaaaa@ in the example) is the site prefix. This is unique to a specific Arvados installation.
# The second field (@bbbbb@ in the example) is the object type.
# The third field (@ccccccccccccccc@ in the example) uniquely identifies the object.

h2(#descriptions). Descriptions

{% include 'html_tags' %}

h2. Timestamps

All Arvados timestamps follow ISO 8601 datetime format with fractional seconds (microsecond precision). All timestamps are UTC. Date format: @YYYY-mm-ddTHH:MM:SS.SSSSZ@. Example: @2016-11-08T21:38:24.124834000Z@.

h2. ETags

fn1. Each response includes an ETag, a string which changes when the resource changes. Clients can use this to check whether a resource has changed since they last retrieved it. If a previous ETag is provided along with a request, and the resource has not changed since, the server may return a "not modified" response.

================================================
FILE: doc/api/tokens.html.textile.liquid
================================================

---
layout: default
navsection: api
title: API Authorization
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

All requests to the API server must have an API token. API tokens can be issued by going through the login flow, or created via the API. At this time, only browser-based applications can perform login using email/password. Command line applications and services must use an API token provided via the @ARVADOS_API_TOKEN@ environment variable or configuration file.

h2. Login

Browser-based applications can log in using one of the two possible flows:

h3. Authenticate via a third party

# The web application instructs the user to click on a link to the @/login@ endpoint on the API server. This link should include the @return_to@ parameter in the query portion of the URL. For example @https://{{ site.arvados_api_host }}/login?return_to=XXX@ where @return_to=XXX@ is a page in the web application.
# The @/login@ endpoint redirects the user to the configured third party authentication provider (e.g. Google or other OpenID Connect provider).
# The user logs in to the third party provider, then they are redirected back to the API server.
# The API server authenticates the user, issues a new API token, and redirects the browser to the URL provided in @return_to=XXX@ with the addition of @?api_token=xxxxapitokenxxxx@.
# The web application gets the authorization token from the query and uses it to access the API server on the user's behalf.

h3. Direct username/password authentication

# The web application presents username and password fields.
# When the submit button is pressed, using Javascript, the browser sends a POST request to @/arvados/v1/users/authenticate@
** The request payload type is @application/javascript@
** The request body is a JSON object with @username@ and @password@ fields.
# The API server receives the username and password, authenticates them with the upstream provider (such as LDAP or PAM), and responds with the @api_client_authorization@ object for the new API token.
# The web application receives the authorization token in the response and uses it to access the API server on the user's behalf.

h3. Using an OpenID Connect access token

A cluster that uses OpenID Connect as a login provider can be configured to accept OIDC access tokens as well as Arvados API tokens (this is disabled by default; see @Login.OpenIDConnect.AcceptAccessToken@ in the "default config.yml file":{{site.baseurl}}/admin/config.html).

# The client obtains an access token from the OpenID Connect provider via some method outside of Arvados.
# The client presents the access token with an Arvados API request (e.g., request header @Authorization: Bearer xxxxaccesstokenxxxx@).
# Depending on configuration, the API server decodes the access token (which must be a signed JWT) and confirms that it includes the required scope (see @Login.OpenIDConnect.AcceptAccessTokenScope@ in the "default config.yml file":{{site.baseurl}}/admin/config.html).
# The API server uses the provider's UserInfo endpoint to validate the presented token.
# If the token is valid, it is cached in the Arvados database and accepted in subsequent API calls for the next 10 minutes.

h3. Diagram

!{{site.baseurl}}/images/Session_Establishment.svg!

h2. User activation

"Creation and activation of new users is described here.":{{site.baseurl}}/admin/user-management.html

h2. Creating tokens via the API

The browser login method above issues a new token. Using that token, it is possible to make API calls to create additional tokens. To do so, use the @create@ method of the "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html resource.

h2(#scopes). Scopes

Scopes can restrict a token so it may only access certain resources. This is in addition to normal permission checks for the user associated with the token.

Each entry in scopes consists of a @request_method@ and @request_path@. The @request_method@ is an HTTP method (one of @GET@, @POST@, @PATCH@ or @DELETE@) and @request_path@ is the request URI.
A given request is permitted if it matches a scope exactly, or if the scope ends with @/@ and is a prefix of the request string. As a special case, a scope of @["all"]@ allows all resources. This is the default if no scope is given.

A valid token is always allowed to issue a request to "@GET /arvados/v1/api_client_authorizations/current@":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current regardless of its scopes.

Using scopes is also described on the "Securing API access with scoped tokens":{{site.baseurl}}/admin/scoped-tokens.html page of the admin documentation.

h3. Scope examples

A scope of @GET /arvados/v1/collections@ permits listing collections.

* Requests with different methods, such as creating a new collection using @POST /arvados/v1/collections@, will be rejected.
* Requests to access other resources, such as @GET /arvados/v1/groups@, will be rejected (except "@GET /arvados/v1/api_client_authorizations/current@":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current, which is always allowed).
* Be aware that requests for specific records, such as @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@, will also be rejected. This is because the scope @GET /arvados/v1/collections@ does not end in @/@.

A scope of @GET /arvados/v1/collections/@ (with @/@ suffix) will permit access to individual collections.

* The request @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@ will succeed.
* Be aware that listing requests @GET /arvados/v1/collections@ (no @/@ suffix) will be rejected, because they do not match the rule @GET /arvados/v1/collections/@.
* A listing request @GET /arvados/v1/collections/@ will have the trailing @/@ trimmed before the scope check; as a result, it will not match the rule @GET /arvados/v1/collections/@ either.

To allow both listing objects and requesting individual objects, include both in the scope: @["GET /arvados/v1/collections", "GET /arvados/v1/collections/"]@

A narrow scope such as @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@ will disallow listing objects as well as disallow requesting any object other than those listed in the scope.
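As a sketch, a token restricted to that combined scope could be created using the @create@ method described above, passing the scopes as part of the @api_client_authorization@ object (the @Authorization@ header must carry an existing, valid token):

$ curl -X POST --data-urlencode 'api_client_authorization={"scopes":["GET /arvados/v1/collections","GET /arvados/v1/collections/"]}' -H "Authorization: Bearer xxxxapitokenxxxx" https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations

The response includes the new token in the @api_token@ field.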
{% comment %} # svg generated using https://dreampuf.github.io/ digraph {     subgraph cluster_cloudvm {         node [color=black] [fillcolor=white] [style=filled];         style = filled;         color = lightgrey;         label = "cloud instance (VM)";         "SSH server" -> "crunch-run" [label = "start crunch-run"];         "crunch-run" -> docker [label = "create container"];         "crunch-run" -> docker [label = "shell"] [color = blue] [fontcolor = blue];         "crunch-run" -> container [label = "tcp/http"] [color = blue] [fontcolor = blue];         docker -> container;     }     "cloud provider" [shape=box] [style=dashed];     dispatcher -> controller [label = "get container queue"];     dispatcher -> "cloud provider" [label = "create/destroy/list VMs"];     "cloud provider" -> "SSH server" [label = "add authorized_keys"];     "crunch-run" -> controller [label = "update\ngateway ip:port,\ncontainer state,\noutput, ..."]; client -> controller [label = "shell/tcp/http (https tunnel)"] [color = blue] [fontcolor = blue];     controller -> "crunch-run" [label = "shell/tcp/http (https tunnel)"] [color = blue] [fontcolor = blue];     dispatcher -> "SSH server" [label = "start crunch-run"]; } {% endcomment %} h2. Scheduling The dispatcher periodically polls the "containers API":{{site.baseurl}}/api/methods/containers.html to get a list of containers that are ready to run. Whenever this list changes, the dispatcher runs a scheduling loop that determines the set of suitable instance types for each container, allocates the highest priority containers to instances with sufficient unused resources, requests new instances if needed, and shuts down instances that have been idle for longer than the configured idle timeout. It will run multiple containers on an instance if it has enough RAM and CPUs to do so _and_ the instance type is suitable for each container individually. The lowest-priced instance type with enough resources to run a given container is always suitable. Other instance types that have enough resources to run the container, and whose prices are within @MaximumPriceFactor@ of that lowest-priced type, are also suitable. The dispatcher will select a suitable instance type other than the lowest-priced one when: * the lowest-priced instance that is _already running or requested,_ and has sufficient resources, is one of the suitable types (_e.g.,_ it just finished running a container that needed a higher-priced type), whereas in order to use the lowest-priced type the dispatcher would need to request a new instance, or * the cloud provider indicates that the lowest-priced suitable type is not available (_e.g.,_ due to a per-instance-type quota restriction). h2. Creating instances When creating a new instance, the dispatcher uses the cloud provider’s metadata feature to add a tag with key “InstanceSetID” and a value derived from its Arvados authentication token. This enables the dispatcher to recognize and reconnect to existing instances that belong to it, and continue monitoring existing containers, after a restart or upgrade. When using the Azure cloud service, the dispatcher needs to first create a new network interface, then attach it to a new instance. The network interface is also tagged with “InstanceSetID”. 
If the cloud provider returns a rate-limiting error when creating a new instance, the dispatcher avoids requesting new instances for a short period, and shuts down idle nodes more aggressively (i.e., without waiting for the usual idle timeout to elapse) until a new instance is successfully created.

h2. Recovering state after a restart

Restarting the dispatcher does not interrupt containers that are already running. When the dispatcher starts up, it gets the cloud provider’s current list of instances that have the expected InstanceSetID tag value. It ignores instances without that tag, so it won’t interfere with other VM instances in the same cloud account. It runs the boot probe command on each instance, checks for containers that were started by a previous invocation and are still running, and resumes monitoring. Before dispatching any new containers to a previously existing instance, it ensures the crunch-run program is updated if needed.

h2. Instance boot process

When the cloud provider indicates that a new instance has been created, the dispatcher connects to the instance’s SSH service (see “instance control channel” below) and executes the configured boot probe command. If this fails, the dispatcher retries until the configured boot timeout is reached, then shuts down the instance. When the boot probe succeeds, the dispatcher copies the crunch-run program to the instance, and runs it to check for running containers before reporting the instance’s state as “idle” or “busy”. (Normally of course a freshly booted instance has no containers running, but this covers the case where the dispatcher itself has restarted and containers submitted by the previous dispatcher process are still running.)

The dispatcher and crunch-run programs are both packaged in a single executable file: when the dispatcher copies crunch-run to an instance, it is really copying itself. This ensures the dispatcher is always using the version of crunch-run that it expects.

h2. Boot probe command

The purpose of the boot probe command is to ensure the dispatcher does not try to schedule containers on an instance before the instance is ready, even if its SSH daemon comes up early in the boot process. The default boot probe command, @systemctl is-system-running@, is appropriate for images that use @systemd@ to manage the boot process. Another approach is to use a custom startup script in the VM image that writes a file when it finishes, and a boot probe command that checks for that file, such as @cat /var/run/boot.complete@.

h2. Automatic instance shutdown

Normally, the dispatcher shuts down any instance that has remained idle for 1 minute (see the @TimeoutIdle@ configuration), but there are some exceptions to this rule. If the cloud provider returns a quota error when trying to create a new instance, the dispatcher shuts down idle nodes right away, in case the idle nodes are contributing to the quota.

Also, the operator can use the management API to set an instance’s idle behavior to “drain” or “hold”. “Drain” shuts down the instance as soon as it becomes idle, which can be used to recycle a suspect node without interrupting a running container. “Hold” keeps the instance alive indefinitely without scheduling additional containers on it, which can be used to investigate problems like a failed startup script. Each instance is tagged with its current idle behavior (using the tag name “IdleBehavior”), which makes it visible in the cloud provider’s console and ensures the behavior is retained if the dispatcher restarts.

h2. Management API

The dispatcher provides an HTTP management interface, which provides the operator with more visibility and control for purposes of troubleshooting and monitoring. APIs are provided to return details of current VM instances and running/scheduled containers as seen by the dispatcher, immediately terminate containers and instances, and control the on-idle behavior of instances. This interface also provides Prometheus metrics. See the "cloud dispatcher management API":{{site.baseurl}}/api/dispatch.html documentation for details.

h2. Instance control channel (SSH)

The dispatcher uses a multiplexed SSH connection to monitor instance boot progress, install the crunch-run supervisor program, start and stop containers, and detect crashed containers and failing instances. It establishes a persistent SSH connection to each cloud instance when the instance first appears, retrying/reconnecting as needed.

Cloud VMs typically generate a random SSH host key at boot time, making host key verification impossible. To provide some assurance the dispatcher is connecting to the intended instance, when it creates a new instance the dispatcher generates a random “instance secret”, uses the cloud provider’s bootstrap command feature to save it in @/var/run/arvados-instance-secret@ on the new instance, and executes @cat /var/run/arvados-instance-secret@ to verify the instance’s identity when first connecting to its SSH server. Each instance is also tagged with its instance secret, so it can still be verified after a dispatcher restart.

h2. Container communication channel (https tunnel)

The crunch-run program runs a gateway server which facilitates the “container shell” feature without sending traffic through the dispatcher process. The gateway server accepts TLS connections from arvados-controller on a dynamic TCP port (typically in the range 32768-60999, see @sysctl net.ipv4.ip_local_port_range@). Crunch-run saves the selected port, along with the external IP address of the VM instance as seen by the dispatcher, in the @gateway_address@ field in the container record so arvados-controller can connect to it.

On the client host (typically a shell node or a user’s workstation) the @arvados-client shell@ command sends an https “connect” request to arvados-controller, which sends an https “connect” request to the gateway server. These tunnels convey SSH protocol traffic between the user’s SSH client and crunch-run’s built-in SSH server, which uses @docker exec@ to run commands inside the container.

Arvados-controller and the crunch-run gateway server authenticate each other using a self-signed certificate and a shared secret based on the cluster-wide @SystemRootToken@. If that token changes (and the dispatcher restarts to load the new token) while a container is running, the container will stop accepting container shell traffic.

h2. Scaling

Architecturally, the dispatcher is _designed_ to accommodate multiple concurrent dispatcher processes on multiple hosts, each using a different authorization token, but such a configuration is not yet supported. Currently, each cluster should run a single dispatcher process. A single process can support thousands of concurrent VM instances.

================================================
FILE: doc/architecture/federation.html.textile.liquid
================================================

---
layout: default
navsection: architecture
title: "Federation"
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados federation enables clients to transparently read, create and manipulate objects and collections across clusters in different regions or organizations. Federation supports workflows that integrate and analyze data across multiple clusters by sending computation to where the data is, reducing the technical and legal barriers to analyzing large, sensitive data sets.

_This feature is under development. Support for federation is limited to certain types of requests. The behaviors described here should not be interpreted as a stable API._

Detailed configuration information is available on the "federation admin section":{{site.baseurl}}/admin/federation.html.

h2(#cluster_id). Cluster identifiers

Clusters are identified by a five-digit alphanumeric id (numbers and lowercase letters). There are 36 ^5^ = 60466176 possible cluster identifiers.

* For automated test purposes, use "z****"
* For experimental/local-only/private clusters that won't ever be visible on the public Internet, use "x****"
* For long-lived clusters, we recommend reserving a cluster id. Contact "info@curii.com":mailto:info@curii.com for more information.

Cluster identifiers are mapped to API server hosts in one of two ways:

* Through DNS resolution, under the @arvadosapi.com@ domain. For example, the API server for the cluster @pirca@ can be found at @pirca.arvadosapi.com@. To register a cluster id for free under @arvadosapi.com@, contact "info@curii.com":mailto:info@curii.com
* Through explicit configuration: The @RemoteClusters@ section of @/etc/arvados/config.yml@ (for arvados-controller)
Clusters:
  clsr1:
    RemoteClusters:
      clsr2:
        Host: api.cluster2.example
        Proxy: true
      clsr3:
        Host: api.cluster3.example
        Proxy: true
In this example, the cluster @clsr1@ is configured to contact @api.cluster2.example@ for requests involving @clsr2@ and @api.cluster3.example@ for requests involving @clsr3@.

h2(#identity). Identity

The goal is for a federated user to have a single identity across the cluster federation. This identity is a user account on a specific "home cluster". When arvados-controller contacts a remote cluster, the remote cluster verifies the user's identity (see below) and then creates a mirror of the user account with the same uuid as on the user's home cluster. On the remote cluster, permissions can then be granted to the federated user, and the federated user can create and own objects.

h3. Peer federation: Authenticating remote users with salted tokens

When making a request to the home cluster, authorization is established by looking up the API token in the @api_client_authorizations@ table to determine the user identity.

When making a request to a remote cluster, we need to provide an API token which can be used to establish the user's identity. The remote cluster will connect back to the home cluster to determine whether the token is valid and which user it corresponds to. However, we do not want to send along the same API token used for the original request. If the remote cluster is malicious or compromised, sending along the user's regular token would compromise the user account on the home cluster.

Instead, the controller sends a "salted token". The salted token is restricted to fetching only the user account and group membership. The salted token consists of the uuid of the token in @api_client_authorizations@ and the SHA1 HMAC of the original token and the cluster id of the remote cluster.

To verify the token, the remote cluster contacts the home cluster and provides the token uuid, the hash, and its cluster id. The home cluster uses the uuid to look up the token and re-computes the SHA1 HMAC of the original token and cluster id. If that hash matches, then the token is valid. To avoid having to re-validate the token on every request, it is cached for a short period.

The security properties of this scheme are:

* The salted token does not grant access on the home cluster beyond what is needed to verify user identity
* Revoking a token on the home cluster also revokes it for remote clusters (after the cache period)
* A salted token given to a malicious/compromised cluster cannot be used to gain access to the user account on another remote cluster

h3. LoginCluster federation: Centralized user database

In a LoginCluster federation, there is a central "home" cluster, called the LoginCluster, and one or more "satellite" clusters. The satellite clusters delegate their user management to the LoginCluster. Unlike the peer federation, satellite clusters implicitly trust the home cluster, so the "salted token" scheme is not used.

Users arriving at a satellite cluster are redirected to the home cluster for login, the user token is issued by the LoginCluster, and then the user is sent back to the satellite cluster. Tokens issued by the LoginCluster are accepted by all clusters in the federation. All requests for user records on a satellite cluster are forwarded to the LoginCluster.

h2(#retrieval). Federated records

!(full-width){{site.baseurl}}/images/arvados_federation.svg!

h3. Retrieving and updating records

In the REST API, GET and PUT/PATCH requests are used to fetch and update records.
# The client begins by making a request to the home arvados-controller to retrieve or update a specific record owned by a remote cluster
# arvados-controller determines the five-digit cluster id from the first part of the uuid string
# arvados-controller determines the API server host corresponding to the cluster id
# arvados-controller creates a "salted" token by combining the API token used for the request and the target cluster id
# arvados-controller contacts the remote cluster to request the desired record, providing the salted token
# The remote cluster verifies the salted token
# The remote cluster processes the request and returns a response
# arvados-controller forwards the response to the client

h3. Creating records

In the REST API, POST requests create new records, so there is no uuid to use for the cluster id. In this case, to create an object on a remote cluster, the request includes the @cluster_id@ parameter. The flow is otherwise the same as described above.

h3. Collections and Keep block retrieval

Each collection record has @manifest_text@, which describes how to reassemble Keep blocks into files as described in the "Manifest format":{{site.baseurl}}/architecture/manifest-format.html. Each block identifier in the manifest has an added signature which is used to confirm permission to read the block. To read a block from a keepstore server, the client must provide the block identifier, the signature, and the same API token used to retrieve the collection record.

See "Federation signatures":{{site.baseurl}}/architecture/manifest-format.html#federationsignatures for details on how federation affects block signatures.

================================================
FILE: doc/architecture/hpc.html.textile.liquid
================================================

---
layout: default
navsection: architecture
title: Dispatching containers to HPC
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados can be configured to run containers on an HPC cluster using Slurm or LSF, as an alternative to "dispatching to cloud VMs":dispatchcloud.html. In this configuration, the appropriate Arvados dispatcher service -- @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@ -- picks up each container as it appears in the Arvados queue and submits a short shell script as a batch job to the HPC job queue. The shell script executes the @crunch-run@ container supervisor, which retrieves the container specification from the Arvados controller, starts an arv-mount process, runs the container using @docker exec@ or @singularity exec@, and sends updates (logs, outputs, exit code, etc.) back to the Arvados controller.

h2. Container communication channel (reverse https tunnel)

The crunch-run program runs a gateway server to facilitate the “container shell” feature. However, depending on the site's network topology, the Arvados controller may not be able to connect directly to the compute node where a given crunch-run process is running. Instead, in the HPC configuration, crunch-run connects to the Arvados controller at startup and sets up a multiplexed tunnel, allowing the controller process to connect to crunch-run's gateway server without initiating a connection to the compute node, or even knowing the compute node's IP address.

This means that when a client requests a container shell connection, the traffic goes through two or three servers:

# The client connects to a controller host C1.
h3. Creating records

In the REST API, POST requests create new records, so there is no uuid to use for the cluster id. In this case, to create an object on a remote cluster, the request includes the @cluster_id@ parameter. The flow is otherwise the same as described above.

h3. Collections and Keep block retrieval

Each collection record has @manifest_text@, which describes how to reassemble keep blocks into files as described in the "Manifest format":{{site.baseurl}}/architecture/manifest-format.html. Each block identifier in the manifest has an added signature which is used to confirm permission to read the block. To read a block from a keepstore server, the client must provide the block identifier, the signature, and the same API token used to retrieve the collection record.

See "Federation signatures":{{site.baseurl}}/architecture/manifest-format.html#federationsignatures for details on how federation affects block signatures.

================================================
FILE: doc/architecture/hpc.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Dispatching containers to HPC
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados can be configured to run containers on an HPC cluster using Slurm or LSF, as an alternative to "dispatching to cloud VMs":dispatchcloud.html. In this configuration, the appropriate Arvados dispatcher service -- @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@ -- picks up each container as it appears in the Arvados queue and submits a short shell script as a batch job to the HPC job queue. The shell script executes the @crunch-run@ container supervisor, which retrieves the container specification from the Arvados controller, starts an arv-mount process, runs the container using @docker exec@ or @singularity exec@, and sends updates (logs, outputs, exit code, etc.) back to the Arvados controller.

h2. Container communication channel (reverse https tunnel)

The crunch-run program runs a gateway server to facilitate the "container shell" feature. However, depending on the site's network topology, the Arvados controller may not be able to connect directly to the compute node where a given crunch-run process is running. Instead, in the HPC configuration, crunch-run connects to the Arvados controller at startup and sets up a multiplexed tunnel, allowing the controller process to connect to crunch-run's gateway server without initiating a connection to the compute node, or even knowing the compute node's IP address.

This means that when a client requests a container shell connection, the traffic goes through two or three servers:

# The client connects to a controller host C1.
# If the multiplexed tunnel is connected to a different controller host C2, then C1 proxies the incoming request to C2, using C2's InternalURL.
# The controller host (C1 or C2) uses the multiplexed tunnel to connect to crunch-run's container gateway.

h2. Scaling

The @API.MaxConcurrentRequests@ configuration should not be set too low, or the long-lived tunnel connections can starve other clients.
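For example, the limit lives under the cluster's @API@ section in @config.yml@; the value below is illustrative, not a recommendation:

<pre><code>
Clusters:
  clsr1:
    API:
      # Long-lived tunnel connections count against this limit, so leave
      # headroom for ordinary API traffic. Illustrative value only.
      MaxConcurrentRequests: 64
</code></pre>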
================================================
FILE: doc/architecture/index.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: "Arvados components"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Services":#Services
# "Arvados-server":#Arvados-server
# "SDK":#SDK
# "Tools":#Tools
# "Arvados-client":#Arvados-client

!(full-width){{site.baseurl}}/images/Arvados_arch.svg!

h3(#Services). Services

Located in @arvados/services@. Many services have been incorporated into @arvados-server@; see below.

table(table table-bordered table-condensed).
|_. Component|_. Description|
|api|Along with Controller, the API server is the core of Arvados. It is backed by a Postgres database and manages information such as metadata for storage, a record of submitted compute jobs, users, groups, and associated permissions.|
|crunch-dispatch-local|Get compute requests submitted to the API server and execute them locally.|
|dockercleaner|Daemon for cleaning up Docker containers and images.|
|fuse|Filesystem in Userspace (FUSE) enabling users to mount Keep collections as a filesystem.|
|login-sync|Synchronize virtual machine users with Arvados users and permissions.|
|workbench2|Web application providing the user interface to Arvados services.|

h3(#Arvados-server). Arvados-server

Located in @cmd/arvados-server@. It consists of a single @arvados-server@ binary with a number of different subcommands. Although the binary itself is monolithic, each subcommand is a standalone service and only handles requests for that specific service, i.e. an @arvados-server controller@ process will not respond to requests intended for an @arvados-server keep-web@.

table(table table-bordered table-condensed).
|_. Subcommand|_. Description|
|boot|Boot an Arvados cluster from source, used by automated testing.|
|check|Contact the health check endpoint on services and print a report.|
|cloudtest|Diagnostic tool which attempts to start a cloud instance using the current settings in the config file.|
|config-check|Check that the config file is valid.|
|config-defaults|Dump the default config options.|
|config-dump|Dump the active config options that would be used by the other @arvados-server@ commands.|
|controller|Controller works with the API server to make up the core of Arvados. It intercepts requests and implements additional features such as federation.|
|crunch-run|Dispatched by crunch-dispatch, executes a single compute run: setting up a Docker container, running it, and collecting the output.|
|crunchstat|Run a program and collect resource usage stats using cgroups.|
|dispatch-cloud|Get compute requests submitted to the API server and schedule them on elastic cloud compute, creating and destroying cloud based virtual machines on demand.|
|dispatch-lsf|Get compute requests submitted to the API server and submit them to the LSF HPC scheduler.|
|dispatch-slurm|Get compute requests submitted to the API server and submit them to the Slurm HPC scheduler.|
|health|Service that aggregates the other health check results to provide a single cluster-wide health status.|
|install|Install development dependencies to be able to build and run Arvados from source.|
|init|Create an initial configuration file for a new cluster and perform database setup.|
|keep-balance|Perform storage utilization reporting, optimization and garbage collection. Moves data blocks to their optimum location, ensures correct replication and storage class, and trashes unreferenced blocks.|
|keep-web|Provides high-level access to files in collections as either a WebDAV or S3-compatible API endpoint.|
|keepproxy|Provides low-level access to keepstore services (block-level data access) for clients outside the internal (private) network.|
|keepstore|Provides access to underlying storage (filesystem or object storage such as Amazon S3 or Azure Blob) with Arvados permissions.|
|recover-collection|Recovers deleted collections. Recovery is possible when the collection's manifest is still available and all of its data blocks are still available or recoverable.|
|workbench2|Serve the HTML/Javascript for the single-page Workbench application.|
|ws|Publishes API server change events over websockets.|

h3(#SDK). SDK

The @arv@ command is located in @arvados/sdk/ruby@; the @arv-*@ tools are located in @arvados/sdk/python@.

table(table table-bordered table-condensed).
|_. Component|_. Description|
|arv|Provides command line access to the API, and also provides some general-purpose utilities.|
|arv-copy|Copy a collection from one cluster to another.|
|arv-get|Get files from a collection.|
|arv-keepdocker|Upload Docker images from the local Docker daemon to Keep.|
|arv-ls|List files in a collection.|
|arv-put|Upload files to a collection.|
|arv-ws|Print events from the Arvados websocket event source.|

h3(#Tools). Tools

Located in @arvados/tools@.
table(table table-bordered table-condensed).
|_. Component|_. Description|
|cluster-activity|Generate an HTML and/or CSV report of cluster activity over a time period.|
|crunchstat-summary|Read execution metrics (cpu %, ram, network, etc) collected from a compute container and produce a report.|
|keep-block-check|Given a list of keep block locators, check that each block exists on one of the configured keepstore servers and verify the block hash.|
|keep-exercise|Benchmarking tool to test throughput and reliability of keepstores under various usage patterns.|
|keep-rsync|Get lists of blocks from two clusters, and copy blocks which exist on the source cluster but are missing from the destination cluster.|
|sync-groups|Takes a CSV file with rows in the form (group, user, permission) and synchronizes membership in Arvados groups.|
|sync-users|Takes a CSV file with rows in the form (email, first name, last name, active, admin) and synchronizes Arvados users.|
|user-activity|Generate a text report of user activity over a time period.|

h3(#Arvados-client). Arvados-client

Located in @cmd/arvados-client@. It consists of a single @arvados-client@ binary with a number of different subcommands.

table(table table-bordered table-condensed).
|_. Subcommand|_. Description|
|connect-ssh|Connects stdin/stdout to a container's gateway server. It is intended to be invoked with OpenSSH client's ProxyCommand config.|
|deduplication-report|Analyzes the overlap in blocks used by 2 or more collections. It prints a deduplication report that shows the nominal space used by the collections, as well as the actual size and the amount of space that is saved by Keep's deduplication.|
|diagnostics|Perform cluster diagnostics to check that all the services are available and responding normally to requests.|
|logs|Prints live streaming logs for a container.|
|mount|Alternate Keep FUSE mount written in Go.|
|shell|Connects the terminal to an interactive shell on a running container.|
|sudo|Runs another command using API connection info and SystemRootToken from the system config file instead of the caller's environment vars.|

================================================
FILE: doc/architecture/keep-clients.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Keep clients
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Keep clients are applications such as @arv-get@, @arv-put@ and @arv-mount@ which store and retrieve data from Keep. In doing so, these programs interact with both the API server (which stores file metadata in the form of @collection@ objects) and individual @keepstore@ servers (which store the actual data blocks).

!(full-width){{site.baseurl}}/images/Keep_reading_writing_block.svg!

h2. Storing a file

# The client discovers keep servers (or proxies) using the @accessible@ method on "keep_services":{{site.baseurl}}/api/methods/keep_services.html.
# Data is split into 64 MiB blocks and the MD5 hash is computed for each block.
# The client uploads each block to one or more Keep servers, based on the number of desired replicas. The priority order is determined using rendezvous hashing, described below.
# The Keep server returns a block locator (the MD5 sum of the block) and a "signed token" which the client can use as proof of knowledge for the block.
# The client constructs a @manifest@ which lists the blocks by MD5 hash and how to reassemble them into the original files.
# The client creates a "collection":{{site.baseurl}}/api/methods/collections.html and provides the @manifest_text@.
# The API server accepts the collection after validating the signed tokens (proof of knowledge) for each block.

h2. Fetching a file

# The client requests a @collection@ object including @manifest_text@ from the API server.
# The server adds "token signatures" to the @manifest_text@ and returns it to the client.
# The client discovers keep servers (or proxies) using the @accessible@ method on "keep_services":{{site.baseurl}}/api/methods/keep_services.html.
# For each data block, the client chooses the highest priority server using rendezvous hashing, described below.
# The client sends the data block request to the keep server, along with the token signature from the API server, which proves to Keep servers that the client is permitted to read a given block.
# The server provides the block data after validating the token signature for the block. (If the server does not have the block, it returns a 404 and the client tries the next highest priority server.)

h2(#rendezvous). Rendezvous hashing

!(full-width){{site.baseurl}}/images/Keep_rendezvous_hashing.svg!

Each @keep_service@ resource has an assigned uuid. To determine the priority assignment of a block to servers, compute, for each keep service, the MD5 sum of the string concatenation of the block locator (hex-coded hash part only) and the service uuid, then sort this list in descending order. Blocks are preferentially placed on the servers that sort first, i.e. those with the highest weight.
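A minimal sketch of this priority computation, assuming a plain string sort of the hex-encoded MD5 values; the service uuids here are hypothetical:

<pre><code>
package main

import (
	"crypto/md5"
	"fmt"
	"sort"
)

// rendezvousOrder returns the service uuids in priority order for one
// block: weight(service) = MD5(block hash + service uuid), sorted
// descending, per the description above.
func rendezvousOrder(blockHash string, serviceUUIDs []string) []string {
	weight := make(map[string]string, len(serviceUUIDs))
	for _, svc := range serviceUUIDs {
		weight[svc] = fmt.Sprintf("%x", md5.Sum([]byte(blockHash+svc)))
	}
	order := append([]string(nil), serviceUUIDs...)
	sort.Slice(order, func(i, j int) bool {
		return weight[order[i]] > weight[order[j]] // highest weight first
	})
	return order
}

func main() {
	services := []string{
		"zzzzz-bi6l4-aaaaaaaaaaaaaaa",
		"zzzzz-bi6l4-bbbbbbbbbbbbbbb",
		"zzzzz-bi6l4-ccccccccccccccc",
	}
	fmt.Println(rendezvousOrder("d41d8cd98f00b204e9800998ecf8427e", services))
}
</code></pre>

Because every client computes the same ordering independently, readers and writers agree on block placement without a central directory.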
================================================
FILE: doc/architecture/keep-components-overview.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Keep components overview
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Keep has a number of components. This page describes each component and the role it plays.

h3. Keep clients for data access

In order to access data in Keep, a client is needed to store data in and retrieve data from Keep. Different types of Keep clients exist:

* a command line client like "@arv-get@":{{ site.baseurl }}/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":{{ site.baseurl }}/user/tutorials/tutorial-keep.html#upload-using-command
* a FUSE mount provided by "@arv-mount@":{{ site.baseurl }}/user/tutorials/tutorial-keep-mount-gnu-linux.html
* a WebDAV mount provided by @keep-web@
* an S3-compatible endpoint provided by @keep-web@
* programmatic access via the "Arvados SDKs":{{ site.baseurl }}/sdk/index.html

In essence, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":{{ site.baseurl }}/architecture/keep-clients.html.

For example, when a request comes in to read a file from Keep, the client will:

* request the collection object (including its manifest) from the API server
* look up the file in the collection manifest, and retrieve the hashes of the block(s) that contain its content
* request those blocks from the keepstore(s)
* return the contents of the file to the requestor

All of those steps are subject to access control, which applies at the level of the collection: in the example above, the API server and the keepstore daemons verify that the client has permission to read the collection, and will reject the request if it does not.

h3. API server

The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":{{ site.baseurl }}/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.

h3. Keepstore

The @keepstore@ daemon is Keep's workhorse, the storage server that stores and retrieves data from an underlying storage system. Keepstore exposes an HTTP REST API. Keepstore only handles requests for blocks. Because blocks are content-addressed, they can be written and deleted, but there is no _update_ operation: blocks are immutable.

So what happens if the content of a file changes? When a client changes a file, it first writes any new blocks to the keepstore(s). Then, it updates the manifest for the collection the file belongs to with the references to the new blocks.

A keepstore can store its blocks in object storage (S3 or an S3-compatible system, or Azure Blob Storage). It can also store blocks on a POSIX file system. A keepstore can be configured with multiple storage volumes. Each keepstore volume is configured with a replication number; e.g. a POSIX file system backed by a single disk would have a replication factor of 1, while an Azure 'LRS' storage volume could be configured with a replication factor of 3 (that is how many copies LRS stores under the hood, according to the Azure documentation).

By default, Arvados uses a replication factor of 2. See the @DefaultReplication@ configuration parameter in "the configuration reference":https://doc.arvados.org/admin/config.html. Additionally, each collection can be configured with its own replication factor.
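As a hedged illustration of a per-volume replication setting in @config.yml@ (the volume uuid and driver details are hypothetical; see the configuration reference above for the authoritative schema):

<pre><code>
Clusters:
  clsr1:
    Volumes:
      clsr1-nyw5e-000000000000000:
        # Azure 'LRS' stores three copies under the hood, so one volume
        # can be counted as replication 3. Illustrative values only.
        Driver: Azure
        Replication: 3
</code></pre>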
It's worth noting that it is the responsibility of the Keep clients to make sure that all blocks are stored subject to their desired replica count, which is derived from the collections the blocks belong to. @keepstore@ itself does not provide replication; all it does is store blocks on the volumes it knows about. The @keepproxy@ and @keep-balance@ processes (see below) make sure that blocks are replicated properly.

The maximum block size for @keepstore@ is 64 MiB, and keep clients typically combine small files into larger blocks. In a typical Arvados installation, the majority of blocks stored in Keep will be 64 MiB, though some fraction will be smaller.

h3. Keepproxy

The @keepproxy@ server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is suitable for clients located elsewhere on the internet. A client writing through Keepproxy only writes one copy of each block; the Keepproxy server will write additional copies of the data to the Keepstore servers, to fulfill the requested replication factor. Keepproxy also checks API token validity before processing requests.

h3. Keep-web

The @keep-web@ server provides read/write access to files stored in Keep using the HTTP, WebDAV and S3 protocols. This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens.

h3. Keep-balance

Keep is a garbage-collected system. When a block is no longer referenced in any collection manifest in the system, it becomes eligible for garbage collection. When the desired replication factor for a block (derived from the default replication factor, in addition to the replication factor of any collection(s) the block belongs to) does not match reality, the number of copies stored in the available Keepstore servers needs to be adjusted.

The @keep-balance@ program takes care of these things. It runs as a service, and wakes up periodically to do a scan of the system and send instructions to the Keepstore servers. That process is described in more detail at "Balancing Keep servers":https://doc.arvados.org/admin/keep-balance.html.

================================================
FILE: doc/architecture/keep-data-lifecycle.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: "Data lifecycle"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2(#overview). Overview

Arvados collections consist of a "manifest":{{site.baseurl}}/architecture/manifest-format.html and the data blocks referenced in that manifest. Manifests are stored in the PostgreSQL database; @data blocks@ are stored by a @keepstore@. Data blocks are frequently shared between collections. Each collection has its own @manifest@. Collection manifests and data blocks have separate lifecycles, which are described in detail below.

h2(#collection_lifecycle). Collection lifecycle

During its lifetime, a collection can be in various states. These states are *persisted*, *expiring*, *trashed* and *permanently deleted*.

The nominal state is *persisted*, which means the data can be accessed normally and will be retained indefinitely.

A collection is *expiring* when it has a *trash_at* time in the future. An expiring collection can be accessed as normal, but is scheduled to be trashed automatically at the *trash_at* time.

A collection is *trashed* when it has a *trash_at* time in the past. The *is_trashed* attribute will also be "true". The delete operation immediately puts the collection in the trash by setting the *trash_at* time to "now", and *delete_at* defaults to "now" + @Collections.DefaultTrashLifetime@. Once trashed, the collection is no longer readable through normal data access APIs. The collection will have *delete_at* set to some time in the future. The trashed collection is recoverable until the *delete_at* time passes, at which point the collection is permanently deleted. See "Recovering trashed collections":{{ site.baseurl }}/user/tutorials/tutorial-keep-collection-lifecycle.html#trash-recovery for instructions to recover trashed collections.
h3(#collection_attributes). Collection lifecycle attributes

As listed above, the attributes that are used to manage a collection's lifecycle are *is_trashed*, *trash_at*, and *delete_at*. The table below lists the values of these attributes and how they influence the state of a collection and its accessibility.

table(table table-bordered table-condensed).
|_. collection state|_. is_trashed|_. trash_at|_. delete_at|_. get|_. list|_. list?include_trash=true|_. can be modified|
|persisted collection|false|null|null|yes|yes|yes|yes|
|expiring collection|false|future|future|yes|yes|yes|yes|
|trashed collection|true|past|future|no|no|yes|only is_trashed, trash_at and delete_at attributes|
|deleted collection|true|past|past|no|no|no|no|

h2(#block_lifecycle). Block lifecycle

During its lifetime, a data block can be in various states. These states are *persisted*, *unreferenced*, *trashed* and *permanently deleted*.

The nominal state is *persisted*, which means the block can be retrieved normally from a @keepstore@ process.

A block is *unreferenced* when there are no collection manifests in the PostgreSQL collections table that reference it. The block can still be retrieved normally from a @keepstore@ process, e.g. by creating a new collection with a manifest that references the hash of the block. Unreferenced blocks will be moved to the *trashed* state by @keep-balance@ after @BlobSigningTTL@, if @BlobTrash@ is enabled and @keep-balance@ is running and configured to send trash lists to the keepstores.

A block is *trashed* when @keep-balance@ has asked a @keepstore@ to move it to its trash and @BlobTrash@ is enabled. It will stay there for a period of time, subject to the @BlobTrashLifetime@ setting.

A block is *permanently deleted* on the first wakeup of its @keepstore@ trash process after the block has spent @BlobTrashLifetime@ in that keepstore's trash. The trash process wakes up with a frequency defined by @BlobTrashCheckInterval@.

table(table table-bordered table-condensed).
|_. block state|_. duration|_. retrievable via Keep|_. can be recovered|
|persisted block|indefinitely|yes|n/a|
|unreferenced block|@BlobSigningTTL@ + up to @BalancePeriod@ + duration of keep-balance run|yes|n/a|
|trashed block|@BlobTrashLifetime@ + up to @BlobTrashCheckInterval@|no|yes|
|deleted block||no|no|

================================================
FILE: doc/architecture/manifest-format.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Manifest format
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Each collection record has a @manifest_text@ field, which describes how to reassemble keep blocks into files. Each block identifier in the manifest has an added signature which is used to confirm permission to read the block. To read a block from a keepstore server, the client must provide the block identifier, the signature, and the same API token used to retrieve the collection record.

!(full-width){{site.baseurl}}/images/Keep_manifests.svg!

h2. Manifest v1

A manifest is utf-8 encoded text, consisting of zero or more newline-terminated streams.
manifest       ::= stream*
stream         ::= stream-name (" " locator)+ (" " file-segment)+ "\n"
stream-name    ::= "." ("/" path-component)*
path-component ::= <printable characters other than whitespace and "/">+
file-segment   ::= position ":" size ":" filename
position       ::= [0-9]+
size           ::= [0-9]+
filename       ::= path-component ("/" path-component)*
Notes:

* The first token is the stream name, consisting of one or more path components, delimited by @"/"@.
** The first path component is always @"."@.
** No path component is empty.
** No path component following the first one can be @"."@ or @".."@.
** The stream name never begins or ends with @"/"@.
* The next N tokens are "keep locators":#locator.
** These describe the "data stream". By logically concatenating the blocks in the order that they appear, we can refer to "positions" in the data stream.
* File tokens come after the sequence of keep locators.
** A file token has three parts, delimited by @":"@: position, size, filename.
** Position and size are given in decimal.
** The position is the position in the data stream.
** The size is the count of bytes following the position in the data stream. A file may span multiple blocks in the data stream.
** Filename may contain @"/"@ characters, but must not start or end with @"/"@, and must not contain @"//"@.
** Filename components (delimited by @"/"@) must not be @"."@ or @".."@.
** There may be multiple file tokens.

It is legal to have multiple file tokens in the manifest (possibly across different streams) with the same combined path name @stream name + "/" + filename@. This must be interpreted as a concatenation of file content, in the order that the file tokens appear in the manifest.

Spaces are represented by the escape sequence @\040@. Spaces in stream names and filenames must be translated when reading and writing manifests. A manifest may not contain TAB characters, nor other ASCII whitespace characters or control codes other than the spaces or newlines used as delimiters specified above.

A manifest always ends with a newline -- except the empty (zero-length) string, which is a valid manifest.

h3. Normalized manifest v1

A normalized manifest is a manifest that meets the following additional restrictions:

* Streams are in alphanumeric order.
* Each stream name is unique within the manifest.
* Files within a stream are listed in alphanumeric order.
* Blocks within a stream are ordered based on the order of the file tokens in the stream. A given block is listed at most once in a stream.
* Filename must not contain @"/"@ (the stream name represents the path prefix).

h3. Estimating manifest size

Here's a formula for estimating manifest size as stored in the database, assuming efficiently packed blocks.
manifest_size =
   + (total data size / 64 MB) * 40
   + sum(number of files * 20)
   + sum(size of all directory paths)
   + sum(size of all file names)
Here is the size when including block signatures. The block signatures authorize access to fetch each block from a Keep server, as described below. The signed manifest text is what is actually transferred to/from the API server and stored in RAM by @arv-mount@. The effective upper limit on how large a collection manifest can be is determined by @API.MaxRequestSize@ in @config.yml@ as well as the maximum request size configuration in your reverse proxy or load balancer (e.g. @client_max_body_size@ in Nginx).
manifest_size =
   + (total data size / 64 MB) * 94
   + sum(number of files * 20)
   + sum(size of all directory paths)
   + sum(size of all file names)
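As a worked example under the signed-size formula above: a collection holding 1 TiB of efficiently packed data (16,384 blocks of 64 MiB) spread over 10,000 files comes to roughly 16,384 x 94 + 10,000 x 20 ≈ 1.7 MB of manifest text, before adding the sizes of the directory paths and file names.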
h3. Example manifests

A manifest with four files in two directories:
. 930625b054ce894ac40596c3f5a0d947+33 0:0:a 0:0:b 0:33:output.txt
./c d41d8cd98f00b204e9800998ecf8427e+0 0:0:d
The same manifest with permission signatures on each block:
. 930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc 0:0:a 0:0:b 0:33:output.txt
./c d41d8cd98f00b204e9800998ecf8427e+0+A27117dcd30c013a6e85d6d74c9a50179a1446efa@5835c8bc 0:0:d
A manifest containing a file consisting of multiple blocks and a space in the file name:
. c449ed86671e4a34a8b8b9430850beba+67108864 09fcfea01c3a141b89dd0dcfa1b7768e+22534144 0:89643008:Docker\040image.tar
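To tie the grammar together, here is a hedged sketch of tokenizing one stream line like the examples above. It checks only the token shapes; it does not unescape @\040@ or validate path components.

<pre><code>
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	locatorRE = regexp.MustCompile(`^[0-9a-f]{32}\+[0-9]+(\+[A-Z][-A-Za-z0-9@_]*)*$`)
	segmentRE = regexp.MustCompile(`^[0-9]+:[0-9]+:.+$`)
)

// parseStream splits one manifest stream into its stream name, block
// locators, and file segments, per the grammar above.
func parseStream(line string) (name string, locators, segments []string, err error) {
	tokens := strings.Split(line, " ")
	if len(tokens) < 3 || !strings.HasPrefix(tokens[0], ".") {
		return "", nil, nil, fmt.Errorf("malformed stream: %q", line)
	}
	name = tokens[0]
	i := 1
	// Locators come first, then everything remaining must be a file segment.
	for ; i < len(tokens) && locatorRE.MatchString(tokens[i]); i++ {
		locators = append(locators, tokens[i])
	}
	for ; i < len(tokens); i++ {
		if !segmentRE.MatchString(tokens[i]) {
			return "", nil, nil, fmt.Errorf("bad file segment: %q", tokens[i])
		}
		segments = append(segments, tokens[i])
	}
	if len(locators) == 0 || len(segments) == 0 {
		return "", nil, nil, fmt.Errorf("stream needs at least one locator and one file segment")
	}
	return name, locators, segments, nil
}

func main() {
	name, locs, segs, err := parseStream(". 930625b054ce894ac40596c3f5a0d947+33 0:0:a 0:0:b 0:33:output.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(name, locs, segs)
}
</code></pre>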
h2(#locator). Keep locator format

BNF notation for a valid Keep locator string (with hints). For example:

*d41d8cd98f00b204e9800998ecf8427e+0+Z+Ada39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294*
locator          ::= sized-digest hint*
sized-digest     ::= digest size-hint
digest           ::= <32 lowercase hexadecimal digits>
size-hint        ::= "+" [0-9]+
hint             ::= "+" hint-type hint-content
hint-type        ::= [A-Z]+
hint-content     ::= [A-Za-z0-9@_-]*
sign-hint        ::= "+A" <40 lowercase hexadecimal digits> "@" sign-timestamp
remote-sign-hint ::= "+R" [A-Za-z0-9]{5} "-" <40 lowercase hexadecimal digits> "@" sign-timestamp
sign-timestamp   ::= <8 lowercase hexadecimal digits>
h3. Regular expression to validate locator
/^([0-9a-f]{32})\+([0-9]+)(\+[A-Z][-A-Za-z0-9@_]*)*$/
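A small sketch applying this expression verbatim; note it checks the overall shape only, not whether any signature hint is actually valid.

<pre><code>
package main

import (
	"fmt"
	"regexp"
)

// The validation regexp from above, verbatim.
var locatorRE = regexp.MustCompile(`^([0-9a-f]{32})\+([0-9]+)(\+[A-Z][-A-Za-z0-9@_]*)*$`)

func main() {
	for _, loc := range []string{
		"d41d8cd98f00b204e9800998ecf8427e+0+Z", // valid
		"d41d8cd98f00b204e9800998ecf8427e",    // invalid: no size hint
	} {
		fmt.Printf("%-40s %v\n", loc, locatorRE.MatchString(loc))
	}
}
</code></pre>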
h3. Valid locators

table(table table-bordered table-condensed).
|@d41d8cd98f00b204e9800998ecf8427e+0@|
|@d41d8cd98f00b204e9800998ecf8427e+0+Z@|
|d41d8cd98f00b204e9800998ecf8427e+0+Z+Ada39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294|
|930625b054ce894ac40596c3f5a0d947+33+Rzzzzz-1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc|

h3. Invalid locators

table(table table-bordered table-condensed).
||Why|
|@d41d8cd98f00b204e9800998ecf8427e@|No size hint|
|@d41d8cd98f00b204e9800998ecf8427e+Z+0@|Other hint before size hint|
|@d41d8cd98f00b204e9800998ecf8427e+0+0@|Multiple size hints|
|@d41d8cd98f00b204e9800998ecf8427e+0+z@|Hint does not start with uppercase letter|
|@d41d8cd98f00b204e9800998ecf8427e+0+Zfoo*bar@|Hint contains invalid character @*@|

h3(#token_signatures). Token signatures

A token signature (sign-hint) provides proof-of-access for a data block. It is computed by taking a SHA1 HMAC of the blob signing token (a shared secret between the API server and keep servers), block digest, current API token, expiration timestamp, and blob signature TTL.

When communicating with the @keepstore@ to fetch a block, or with the API server to create or update a collection, the service computes the expected token signature for each block and compares it to the token signature that was presented by the client.

Keep clients receive valid block signatures when uploading a block to a keep store (getting back a signed token as proof of knowledge) or, from the API server, when getting the manifest text of a collection on which the user has read permission.

Security of a token signature is derived from the following characteristics:

# Valid signatures can only be generated by entities that know the shared secret (the "blob signing token").
# A signature can only be used by an entity that also knows the API token that was used to generate it.
# It expires after a set date (the expiration time, based on the "blob signature time-to-live (TTL)").

h3(#federationsignatures). Federation and signatures

When a collection record is returned through a federation request, the keep blocks listed in the manifest may not be available on the local cluster, and the keep block signatures returned by the remote cluster are not valid for the local cluster. To solve this, @arvados-controller@ rewrites the signatures in the manifest to "remote cluster" signatures.

A local signature comes after the block identifier and block size, and starts with @+A@:

930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc

A remote cluster signature starts with @+R@, then the cluster id of the cluster it originated from (@zzzzz@ in this example), a dash, and then the original signature:

930625b054ce894ac40596c3f5a0d947+33+Rzzzzz-1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc

When the client provides a remote-signed block locator to keepstore, the keepstore proxies the request to the remote cluster:

# keepstore determines the cluster id to contact from the first part of the @+R@ signature
# creates a salted token using the API token and cluster id
# contacts the "accessible" endpoint on the remote cluster to determine the remote cluster's keepstore or keepproxy hosts
# converts the remote signature @+R@ back to a local signature @+A@
# contacts the remote keepstore or keepproxy host and requests the block using the local signature
# returns the block contents back to the client
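For orientation, here is a heavily hedged sketch of producing a local @+A@ hint: an HMAC-SHA1 keyed with the blob signing token over the other inputs listed under "Token signatures" above. The concatenation order and expiry encoding below are assumptions for illustration, not the exact algorithm used by the services.

<pre><code>
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"fmt"
)

// signLocator appends an assumed-format +A permission hint to a locator.
// blobSigningToken, apiToken, and expiry are combined in an illustrative
// order; the real services define the exact byte layout.
func signLocator(blobSigningToken, locator, apiToken string, expiry uint32) string {
	mac := hmac.New(sha1.New, []byte(blobSigningToken))
	fmt.Fprintf(mac, "%s@%08x@%s", locator, expiry, apiToken)
	return fmt.Sprintf("%s+A%x@%08x", locator, mac.Sum(nil), expiry)
}

func main() {
	// Hypothetical shared secret and API token, for illustration only.
	fmt.Println(signLocator(
		"hypotheticalsharedsecret",
		"930625b054ce894ac40596c3f5a0d947+33",
		"hypotheticalapitoken",
		0x5835c8bc))
}
</code></pre>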
h3(#example). Example

This example uses @c1bad4b39ca5a924e481008009d94e32+210@, which is the content hash of a @collection@ that was added to Keep in "how to upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html.

Get the collection manifest using @arv-get@:
~$ arv-get c1bad4b39ca5a924e481008009d94e32+210
. 204e43b8a1185621ca55a94839582e6f+67108864+Aasignatureforthisblockaaaaaaaaaaaaaaaaaa@5f612ee6 b9677abbac956bd3e86b1deb28dfac03+67108864+Aasignatureforthisblockbbbbbbbbbbbbbbbbbb@5f612ee6 fc15aff2a762b13f521baf042140acec+67108864+Aasignatureforthisblockcccccccccccccccccc@5f612ee6 323d2a3ce20370c4ca1d3462a344f8fd+25885655+Aasignatureforthisblockdddddddddddddddddd@5f612ee6 0:227212247:var-GS000016015-ASM.tsv.bz2
This collection includes a single file @var-GS000016015-ASM.tsv.bz2@ which is 227212247 bytes long. It is stored using four sequential data blocks with hashes @204e43b8a1185621ca55a94839582e6f+67108864@, @b9677abbac956bd3e86b1deb28dfac03+67108864@, @fc15aff2a762b13f521baf042140acec+67108864@, and @323d2a3ce20370c4ca1d3462a344f8fd+25885655@. Each block hash is followed by the rest of its "locator":#locator.

Use @arv-get@ to download the first data block:

notextile.
~$ arv-get 204e43b8a1185621ca55a94839582e6f+67108864+Aasignatureforthisblockaaaaaaaaaaaaaaaaaa@5f612ee6 > block1
Inspect the size and compute the MD5 hash of @block1@:
~$ ls -l block1
-rw-r--r-- 1 you group 67108864 Dec  9 20:14 block1
~$ md5sum block1
204e43b8a1185621ca55a94839582e6f  block1
As expected, the md5sum of the contents of the block matches the @digest@ part of the "locator":#locator, and the size of the contents matches the @size-hint@.

================================================
FILE: doc/architecture/singularity.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Singularity
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of a long-running daemon process and special system users/groups.

For on-premises Slurm/LSF clusters, see the "Set up a compute node with Singularity":{{ site.baseurl }}/install/crunch2/install-compute-node-singularity.html page. For cloud compute clusters, see the "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html page.

h2. Design overview

When Arvados is configured to use Singularity as the runtime engine for Crunch, containers are executed by Singularity. The images specified in workflows and tool definitions must be Docker images uploaded via @arv-keepdocker@ or @arvados-cwl-runner@. When Singularity is the runtime engine, these images are converted to Singularity format (@.sif@) at runtime, as needed.

To avoid repeating this conversion work unnecessarily, the @.sif@ files are cached in @Keep@. This is done on a per-user basis. If it does not exist yet, a new Arvados project named @.cache@ is automatically created in the user's home project. Similarly, a subproject named @auto-generated singularity images@ will be created in the @.cache@ project. The automatically generated @.sif@ files are stored in collections in that project, with an expiration date two weeks in the future. If the cached image exists when Crunch runs a new container, the expiration date will be pushed out, so that it is always two weeks in the future from the most recent start of a container using the image. It is safe to empty out or even remove the @.cache@ project or any of its contents; if necessary the cache projects and the @.sif@ files will automatically be regenerated.

h2. Notes

* Programs running in Singularity containers may behave differently than when run in Docker, due to differences between Singularity and Docker. For example, the root (image) filesystem is read-only in a Singularity container. Programs that attempt to write outside a designated output or temporary directory are likely to fail.
* When using Singularity as the runtime engine, the compute node needs to have a compatible Singularity executable installed, as well as the @mksquashfs@ program used to convert Docker images to Singularity's @.sif@ format. The Arvados "compute node image build script":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html includes these executables since Arvados 2.3.0.

h2. Limitations

Arvados @Singularity@ support is a work in progress. These are the current limitations of the implementation:

* Even when using the Singularity runtime, users' container images are expected to be saved in Docker format. Specifying a @.sif@ file as an image when submitting a container request is not yet supported.
* Arvados' Singularity implementation does not yet limit the amount of memory available in a container.
Each container will have access to all memory on the host where it runs, unless memory use is restricted by Slurm/LSF.
* The Docker ENTRYPOINT instruction is ignored.
* Arvados is tested with Singularity version 3.10.4. Other versions may not work.

================================================
FILE: doc/architecture/storage.html.textile.liquid
================================================
---
layout: default
navsection: architecture
title: Introduction to Keep
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Keep is a content-addressable storage system that yields high performance for I/O-bound workloads. Keep is designed to run on low-cost commodity hardware or cloud services and is tightly integrated with the rest of the Arvados system. It provides high fault tolerance and high aggregate performance to a large number of clients.

h2. Design goals and core features

* *Scale* - Keep installations are managing petabytes of data today. Keep scales horizontally.
* *Data deduplication* - Keep automatically deduplicates data through its use of content addressing.
* *Flexibility* - Keep can store data in S3, S3-compatible storage systems (e.g. Ceph) and Azure blob storage. Keep can also store data on POSIX file systems.
* *Fault-Tolerance* - Errors and failures are expected. Keep has redundancy and recovery capabilities at its core.
* *Optimized for Aggregate Throughput* - Like S3 and Azure blob storage, Keep is optimized for aggregate throughput. This is optimal in a scenario with many reader/writer processes.
* *Complex Data Management* - Keep operates well in environments where there are many independent users accessing the same data or users who want to organize data in many different ways. Keep facilitates data sharing without expecting users either to agree with one another about directory structures or to create redundant copies of the data.
* *Security* - Keep works well combined with encryption at rest and transport encryption. All data is managed through @collection@ objects, which implement a rich "permission model":{{site.baseurl}}/api/permission-model.html.

h2. How Keep works

Keep is a content-addressable file system. This means that files are managed using special unique identifiers derived from the _contents_ of the file (specifically, the MD5 hash), rather than human-assigned file names. This has a number of advantages:

* Files can be stored and replicated across a cluster of servers without requiring a central name server.
* Both the server and client systematically validate data integrity because the checksum is built into the identifier.
* Data duplication is minimized; two files with the same contents will have the same identifier, and will not be stored twice.
* It avoids data race conditions, since an identifier always points to the same data.

In Keep, information is stored in @data blocks@. Data blocks are normally between 1 byte and 64 megabytes in size. If a file exceeds the maximum size of a single data block, the file will be split across multiple data blocks until the entire file can be stored. These data blocks may be stored and replicated across multiple disks, servers, or clusters. Each data block has its own identifier for the contents of that specific data block.

In order to reassemble the file, Keep stores a @collection@ manifest which lists in sequence the data blocks that make up the original file.
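A minimal sketch of this content addressing: split a byte stream into 64 MiB blocks and derive each block's bare identifier (MD5 digest plus size, without any permission hints).

<pre><code>
package main

import (
	"crypto/md5"
	"fmt"
)

const maxBlockSize = 64 << 20 // 64 MiB

// blockLocators returns the locator for each 64 MiB chunk of data.
func blockLocators(data []byte) []string {
	var locators []string
	for len(data) > 0 {
		n := len(data)
		if n > maxBlockSize {
			n = maxBlockSize
		}
		locators = append(locators, fmt.Sprintf("%x+%d", md5.Sum(data[:n]), n))
		data = data[n:]
	}
	return locators
}

func main() {
	fmt.Println(blockLocators([]byte("hello"))) // [5d41402abc4b2a76b9719d911017c592+5]
}
</code></pre>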
A @manifest@ may store the information for multiple files, including a directory structure. See "manifest format":{{site.baseurl}}/architecture/manifest-format.html for more information on how manifests are structured. ================================================ FILE: doc/css/R.css ================================================ body { background: white; color: black; } a:link { background: white; color: blue; } a:visited { background: white; color: rgb(50%, 0%, 50%); } h1 { background: white; color: rgb(55%, 55%, 55%); font-family: monospace; font-size: x-large; text-align: center; } h2 { background: white; color: rgb(40%, 40%, 40%); font-family: monospace; font-size: large; text-align: center; } h3 { background: white; color: rgb(40%, 40%, 40%); font-family: monospace; font-size: large; } h4 { background: white; color: rgb(40%, 40%, 40%); font-family: monospace; font-style: italic; font-size: large; } h5 { background: white; color: rgb(40%, 40%, 40%); font-family: monospace; } h6 { background: white; color: rgb(40%, 40%, 40%); font-family: monospace; font-style: italic; } img.toplogo { width: 4em; vertical-align: middle; } img.arrow { width: 30px; height: 30px; border: 0; } span.acronym { font-size: small; } span.env { font-family: monospace; } span.file { font-family: monospace; } span.option{ font-family: monospace; } span.pkg { font-weight: bold; } span.samp{ font-family: monospace; } div.vignettes a:hover { background: rgb(85%, 85%, 85%); } ================================================ FILE: doc/css/badges.css ================================================ /* Copyright (C) The Arvados Authors. All rights reserved. SPDX-License-Identifier: CC-BY-SA-3.0 */ /* Colors * Contextual variations of badges * Bootstrap 3.0 removed contexts for badges, we re-introduce them, based on what is done for labels */ .badge.badge-error { background-color: #b94a48; } .badge.badge-warning { background-color: #f89406; } .badge.badge-success { background-color: #468847; } .badge.badge-info { background-color: #3a87ad; } .badge.badge-inverse { background-color: #333333; } .badge.badge-alert { background: red; } ================================================ FILE: doc/css/bootstrap-theme.css ================================================ /*! * Bootstrap v3.1.0 (http://getbootstrap.com) * Copyright 2011-2014 Twitter, Inc. 
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ .btn-default, .btn-primary, .btn-success, .btn-info, .btn-warning, .btn-danger { text-shadow: 0 -1px 0 rgba(0, 0, 0, .2); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075); } .btn-default:active, .btn-primary:active, .btn-success:active, .btn-info:active, .btn-warning:active, .btn-danger:active, .btn-default.active, .btn-primary.active, .btn-success.active, .btn-info.active, .btn-warning.active, .btn-danger.active { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); } .btn:active, .btn.active { background-image: none; } .btn-default { text-shadow: 0 1px 0 #fff; background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%); background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #dbdbdb; border-color: #ccc; } .btn-default:hover, .btn-default:focus { background-color: #e0e0e0; background-position: 0 -15px; } .btn-default:active, .btn-default.active { background-color: #e0e0e0; border-color: #dbdbdb; } .btn-primary { background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%); background-image: linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #2b669a; } .btn-primary:hover, .btn-primary:focus { background-color: #2d6ca2; background-position: 0 -15px; } .btn-primary:active, .btn-primary.active { background-color: #2d6ca2; border-color: #2b669a; } .btn-success { background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%); background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #3e8f3e; } .btn-success:hover, .btn-success:focus { background-color: #419641; background-position: 0 -15px; } .btn-success:active, .btn-success.active { background-color: #419641; border-color: #3e8f3e; } .btn-info { background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%); background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #28a4c9; } .btn-info:hover, .btn-info:focus { background-color: #2aabd2; background-position: 0 -15px; } .btn-info:active, .btn-info.active { background-color: #2aabd2; border-color: #28a4c9; } .btn-warning { background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%); background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = 
false); background-repeat: repeat-x; border-color: #e38d13; } .btn-warning:hover, .btn-warning:focus { background-color: #eb9316; background-position: 0 -15px; } .btn-warning:active, .btn-warning.active { background-color: #eb9316; border-color: #e38d13; } .btn-danger { background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%); background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-color: #b92c28; } .btn-danger:hover, .btn-danger:focus { background-color: #c12e2a; background-position: 0 -15px; } .btn-danger:active, .btn-danger.active { background-color: #c12e2a; border-color: #b92c28; } .thumbnail, .img-thumbnail { -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075); box-shadow: 0 1px 2px rgba(0, 0, 0, .075); } .dropdown-menu > li > a:hover, .dropdown-menu > li > a:focus { background-color: #e8e8e8; background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); background-repeat: repeat-x; } .dropdown-menu > .active > a, .dropdown-menu > .active > a:hover, .dropdown-menu > .active > a:focus { background-color: #357ebd; background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%); background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); background-repeat: repeat-x; } .navbar-default { background-image: -webkit-linear-gradient(top, #fff 0%, #f8f8f8 100%); background-image: linear-gradient(to bottom, #fff 0%, #f8f8f8 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; border-radius: 4px; -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075); } .navbar-default .navbar-nav > .active > a { background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%); background-image: linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0); background-repeat: repeat-x; -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075); box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075); } .navbar-brand, .navbar-nav > li > a { text-shadow: 0 1px 0 rgba(255, 255, 255, .25); } .navbar-inverse { background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%); background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0); filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); background-repeat: repeat-x; } .navbar-inverse .navbar-nav > .active > a { background-image: -webkit-linear-gradient(top, #222 0%, #282828 100%); background-image: linear-gradient(to bottom, #222 0%, #282828 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', 
endColorstr='#ff282828', GradientType=0); background-repeat: repeat-x; -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25); box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25); } .navbar-inverse .navbar-brand, .navbar-inverse .navbar-nav > li > a { text-shadow: 0 -1px 0 rgba(0, 0, 0, .25); } .navbar-static-top, .navbar-fixed-top, .navbar-fixed-bottom { border-radius: 0; } .alert { text-shadow: 0 1px 0 rgba(255, 255, 255, .2); -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05); } .alert-success { background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0); background-repeat: repeat-x; border-color: #b2dba1; } .alert-info { background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%); background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0); background-repeat: repeat-x; border-color: #9acfea; } .alert-warning { background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0); background-repeat: repeat-x; border-color: #f5e79e; } .alert-danger { background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0); background-repeat: repeat-x; border-color: #dca7a7; } .progress { background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0); background-repeat: repeat-x; } .progress-bar { background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%); background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0); background-repeat: repeat-x; } .progress-bar-success { background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%); background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); background-repeat: repeat-x; } .progress-bar-info { background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); background-repeat: repeat-x; } .progress-bar-warning { background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); background-repeat: repeat-x; } .progress-bar-danger { 
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%); background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); background-repeat: repeat-x; } .list-group { border-radius: 4px; -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075); box-shadow: 0 1px 2px rgba(0, 0, 0, .075); } .list-group-item.active, .list-group-item.active:hover, .list-group-item.active:focus { text-shadow: 0 -1px 0 #3071a9; background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%); background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0); background-repeat: repeat-x; border-color: #3278b3; } .panel { -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05); box-shadow: 0 1px 2px rgba(0, 0, 0, .05); } .panel-default > .panel-heading { background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); background-repeat: repeat-x; } .panel-primary > .panel-heading { background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%); background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0); background-repeat: repeat-x; } .panel-success > .panel-heading { background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0); background-repeat: repeat-x; } .panel-info > .panel-heading { background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0); background-repeat: repeat-x; } .panel-warning > .panel-heading { background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0); background-repeat: repeat-x; } .panel-danger > .panel-heading { background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%); background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0); background-repeat: repeat-x; } .well { background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0); background-repeat: repeat-x; border-color: #dcdcdc; -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1); box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1); } /*# sourceMappingURL=bootstrap-theme.css.map */ ================================================ FILE: 
doc/css/bootstrap.css ================================================ /*! * Bootstrap v3.1.0 (http://getbootstrap.com) * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ /*! normalize.css v3.0.0 | MIT License | git.io/normalize */ html { font-family: sans-serif; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; } body { margin: 0; } article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } audio, canvas, progress, video { display: inline-block; vertical-align: baseline; } audio:not([controls]) { display: none; height: 0; } [hidden], template { display: none; } a { background: transparent; } a:active, a:hover { outline: 0; } abbr[title] { border-bottom: 1px dotted; } b, strong { font-weight: bold; } dfn { font-style: italic; } h1 { margin: .67em 0; font-size: 2em; } mark { color: #000; background: #ff0; } small { font-size: 80%; } sub, sup { position: relative; font-size: 75%; line-height: 0; vertical-align: baseline; } sup { top: -.5em; } sub { bottom: -.25em; } img { border: 0; } svg:not(:root) { overflow: hidden; } figure { margin: 1em 40px; } hr { height: 0; -moz-box-sizing: content-box; box-sizing: content-box; } pre { overflow: auto; } code, kbd, pre, samp { font-family: monospace, monospace; font-size: 1em; } button, input, optgroup, select, textarea { margin: 0; font: inherit; color: inherit; } button { overflow: visible; } button, select { text-transform: none; } button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; cursor: pointer; } button[disabled], html input[disabled] { cursor: default; } button::-moz-focus-inner, input::-moz-focus-inner { padding: 0; border: 0; } input { line-height: normal; } input[type="checkbox"], input[type="radio"] { box-sizing: border-box; padding: 0; } input[type="number"]::-webkit-inner-spin-button, input[type="number"]::-webkit-outer-spin-button { height: auto; } input[type="search"] { -webkit-box-sizing: content-box; -moz-box-sizing: content-box; box-sizing: content-box; -webkit-appearance: textfield; } input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } fieldset { padding: .35em .625em .75em; margin: 0 2px; border: 1px solid #c0c0c0; } legend { padding: 0; border: 0; } textarea { overflow: auto; } optgroup { font-weight: bold; } table { border-spacing: 0; border-collapse: collapse; } td, th { padding: 0; } @media print { * { color: #000 !important; text-shadow: none !important; background: transparent !important; box-shadow: none !important; } a, a:visited { text-decoration: underline; } a[href]:after { content: " (" attr(href) ")"; } abbr[title]:after { content: " (" attr(title) ")"; } a[href^="javascript:"]:after, a[href^="#"]:after { content: ""; } pre, blockquote { border: 1px solid #999; page-break-inside: avoid; } thead { display: table-header-group; } tr, img { page-break-inside: avoid; } img { max-width: 100% !important; } p, h2, h3 { orphans: 3; widows: 3; } h2, h3 { page-break-after: avoid; } select { background: #fff !important; } .navbar { display: none; } .table td, .table th { background-color: #fff !important; } .btn > .caret, .dropup > .btn > .caret { border-top-color: #000 !important; } .label { border: 1px solid #000; } .table { border-collapse: collapse !important; } .table-bordered th, .table-bordered td { border: 1px solid #ddd !important; } } * { -webkit-box-sizing: 
border-box; -moz-box-sizing: border-box; box-sizing: border-box; } *:before, *:after { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } html { font-size: 62.5%; -webkit-tap-highlight-color: rgba(0, 0, 0, 0); } body { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 14px; line-height: 1.428571429; color: #333; background-color: #fff; } input, button, select, textarea { font-family: inherit; font-size: inherit; line-height: inherit; } a { color: #428bca; text-decoration: none; } a:hover, a:focus { color: #2a6496; text-decoration: underline; } a:focus { outline: thin dotted; outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } figure { margin: 0; } img { vertical-align: middle; } .img-responsive { display: block; max-width: 100%; height: auto; } .img-rounded { border-radius: 6px; } .img-thumbnail { display: inline-block; max-width: 100%; height: auto; padding: 4px; line-height: 1.428571429; background-color: #fff; border: 1px solid #ddd; border-radius: 4px; -webkit-transition: all .2s ease-in-out; transition: all .2s ease-in-out; } .img-circle { border-radius: 50%; } hr { margin-top: 20px; margin-bottom: 20px; border: 0; border-top: 1px solid #eee; } .sr-only { position: absolute; width: 1px; height: 1px; padding: 0; margin: -1px; overflow: hidden; clip: rect(0, 0, 0, 0); border: 0; } h1, h2, h3, h4, h5, h6, .h1, .h2, .h3, .h4, .h5, .h6 { font-family: inherit; font-weight: 500; line-height: 1.1; color: inherit; } h1 small, h2 small, h3 small, h4 small, h5 small, h6 small, .h1 small, .h2 small, .h3 small, .h4 small, .h5 small, .h6 small, h1 .small, h2 .small, h3 .small, h4 .small, h5 .small, h6 .small, .h1 .small, .h2 .small, .h3 .small, .h4 .small, .h5 .small, .h6 .small { font-weight: normal; line-height: 1; color: #999; } h1, .h1, h2, .h2, h3, .h3 { margin-top: 20px; margin-bottom: 10px; } h1 small, .h1 small, h2 small, .h2 small, h3 small, .h3 small, h1 .small, .h1 .small, h2 .small, .h2 .small, h3 .small, .h3 .small { font-size: 65%; } h4, .h4, h5, .h5, h6, .h6 { margin-top: 10px; margin-bottom: 10px; } h4 small, .h4 small, h5 small, .h5 small, h6 small, .h6 small, h4 .small, .h4 .small, h5 .small, .h5 .small, h6 .small, .h6 .small { font-size: 75%; } h1, .h1 { font-size: 36px; } h2, .h2 { font-size: 30px; } h3, .h3 { font-size: 24px; } h4, .h4 { font-size: 18px; } h5, .h5 { font-size: 14px; } h6, .h6 { font-size: 12px; } p { margin: 0 0 10px; } .lead { margin-bottom: 20px; font-size: 16px; font-weight: 200; line-height: 1.4; } @media (min-width: 768px) { .lead { font-size: 21px; } } small, .small { font-size: 85%; } cite { font-style: normal; } .text-left { text-align: left; } .text-right { text-align: right; } .text-center { text-align: center; } .text-justify { text-align: justify; } .text-muted { color: #999; } .text-primary { color: #428bca; } a.text-primary:hover { color: #3071a9; } .text-success { color: #3c763d; } a.text-success:hover { color: #2b542c; } .text-info { color: #31708f; } a.text-info:hover { color: #245269; } .text-warning { color: #8a6d3b; } a.text-warning:hover { color: #66512c; } .text-danger { color: #a94442; } a.text-danger:hover { color: #843534; } .bg-primary { color: #fff; background-color: #428bca; } a.bg-primary:hover { background-color: #3071a9; } .bg-success { background-color: #dff0d8; } a.bg-success:hover { background-color: #c1e2b3; } .bg-info { background-color: #d9edf7; } a.bg-info:hover { background-color: #afd9ee; } .bg-warning { background-color: #fcf8e3; } 
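/* [editor's annotation, not part of upstream Bootstrap 3.1.0]
   The .text-{muted,primary,success,info,warning,danger} and
   .bg-{primary,success,info,warning,danger} helpers in this region recolor
   text and backgrounds; the a.text-*:hover and a.bg-*:hover rules darken the
   color when the helper sits on a link. Usage sketch:
   <p class="text-danger bg-danger">Something went wrong.</p> */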
a.bg-warning:hover { background-color: #f7ecb5; } .bg-danger { background-color: #f2dede; } a.bg-danger:hover { background-color: #e4b9b9; } .page-header { padding-bottom: 9px; margin: 40px 0 20px; border-bottom: 1px solid #eee; } ul, ol { margin-top: 0; margin-bottom: 10px; } ul ul, ol ul, ul ol, ol ol { margin-bottom: 0; } .list-unstyled { padding-left: 0; list-style: none; } .list-inline { padding-left: 0; list-style: none; } .list-inline > li { display: inline-block; padding-right: 5px; padding-left: 5px; } .list-inline > li:first-child { padding-left: 0; } dl { margin-top: 0; margin-bottom: 20px; } dt, dd { line-height: 1.428571429; } dt { font-weight: bold; } dd { margin-left: 0; } @media (min-width: 768px) { .dl-horizontal dt { float: left; width: 160px; overflow: hidden; clear: left; text-align: right; text-overflow: ellipsis; white-space: nowrap; } .dl-horizontal dd { margin-left: 180px; } } abbr[title], abbr[data-original-title] { cursor: help; border-bottom: 1px dotted #999; } .initialism { font-size: 90%; text-transform: uppercase; } blockquote { padding: 10px 20px; margin: 0 0 20px; font-size: 17.5px; border-left: 5px solid #eee; } blockquote p:last-child, blockquote ul:last-child, blockquote ol:last-child { margin-bottom: 0; } blockquote footer, blockquote small, blockquote .small { display: block; font-size: 80%; line-height: 1.428571429; color: #999; } blockquote footer:before, blockquote small:before, blockquote .small:before { content: '\2014 \00A0'; } .blockquote-reverse, blockquote.pull-right { padding-right: 15px; padding-left: 0; text-align: right; border-right: 5px solid #eee; border-left: 0; } .blockquote-reverse footer:before, blockquote.pull-right footer:before, .blockquote-reverse small:before, blockquote.pull-right small:before, .blockquote-reverse .small:before, blockquote.pull-right .small:before { content: ''; } .blockquote-reverse footer:after, blockquote.pull-right footer:after, .blockquote-reverse small:after, blockquote.pull-right small:after, .blockquote-reverse .small:after, blockquote.pull-right .small:after { content: '\00A0 \2014'; } blockquote:before, blockquote:after { content: ""; } address { margin-bottom: 20px; font-style: normal; line-height: 1.428571429; } code, kbd, pre, samp { font-family: Menlo, Monaco, Consolas, "Courier New", monospace; } code { padding: 2px 4px; font-size: 90%; color: #c7254e; white-space: nowrap; background-color: #f9f2f4; border-radius: 4px; } kbd { padding: 2px 4px; font-size: 90%; color: #fff; background-color: #333; border-radius: 3px; box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25); } pre { display: block; padding: 9.5px; margin: 0 0 10px; font-size: 13px; line-height: 1.428571429; color: #333; word-break: break-all; word-wrap: break-word; background-color: #f5f5f5; border: 1px solid #ccc; border-radius: 4px; } pre code { padding: 0; font-size: inherit; color: inherit; white-space: pre-wrap; background-color: transparent; border-radius: 0; } .pre-scrollable { max-height: 340px; overflow-y: scroll; } .container { padding-right: 15px; padding-left: 15px; margin-right: auto; margin-left: auto; } @media (min-width: 768px) { .container { width: 750px; } } @media (min-width: 992px) { .container { width: 970px; } } @media (min-width: 1200px) { .container { width: 1170px; } } .container-fluid { padding-right: 15px; padding-left: 15px; margin-right: auto; margin-left: auto; } .row { margin-right: -15px; margin-left: -15px; } .col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, 
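/* [editor's annotation, not part of upstream Bootstrap 3.1.0: the selector
   list this note interrupts continues below, which CSS permits.]
   The .col-{xs,sm,md,lg}-{1..12} classes implement the 12-column grid:
   every column carries 15px side gutters, columns float left at or above
   their tier's breakpoint (xs always, sm >=768px, md >=992px, lg >=1200px),
   and .col-*-N spans N/12 of the row's width. Usage sketch:
   <div class="container"><div class="row">
     <div class="col-sm-8">main</div><div class="col-sm-4">sidebar</div>
   </div></div> */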
.col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 { position: relative; min-height: 1px; padding-right: 15px; padding-left: 15px; } .col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 { float: left; } .col-xs-12 { width: 100%; } .col-xs-11 { width: 91.66666666666666%; } .col-xs-10 { width: 83.33333333333334%; } .col-xs-9 { width: 75%; } .col-xs-8 { width: 66.66666666666666%; } .col-xs-7 { width: 58.333333333333336%; } .col-xs-6 { width: 50%; } .col-xs-5 { width: 41.66666666666667%; } .col-xs-4 { width: 33.33333333333333%; } .col-xs-3 { width: 25%; } .col-xs-2 { width: 16.666666666666664%; } .col-xs-1 { width: 8.333333333333332%; } .col-xs-pull-12 { right: 100%; } .col-xs-pull-11 { right: 91.66666666666666%; } .col-xs-pull-10 { right: 83.33333333333334%; } .col-xs-pull-9 { right: 75%; } .col-xs-pull-8 { right: 66.66666666666666%; } .col-xs-pull-7 { right: 58.333333333333336%; } .col-xs-pull-6 { right: 50%; } .col-xs-pull-5 { right: 41.66666666666667%; } .col-xs-pull-4 { right: 33.33333333333333%; } .col-xs-pull-3 { right: 25%; } .col-xs-pull-2 { right: 16.666666666666664%; } .col-xs-pull-1 { right: 8.333333333333332%; } .col-xs-pull-0 { right: 0; } .col-xs-push-12 { left: 100%; } .col-xs-push-11 { left: 91.66666666666666%; } .col-xs-push-10 { left: 83.33333333333334%; } .col-xs-push-9 { left: 75%; } .col-xs-push-8 { left: 66.66666666666666%; } .col-xs-push-7 { left: 58.333333333333336%; } .col-xs-push-6 { left: 50%; } .col-xs-push-5 { left: 41.66666666666667%; } .col-xs-push-4 { left: 33.33333333333333%; } .col-xs-push-3 { left: 25%; } .col-xs-push-2 { left: 16.666666666666664%; } .col-xs-push-1 { left: 8.333333333333332%; } .col-xs-push-0 { left: 0; } .col-xs-offset-12 { margin-left: 100%; } .col-xs-offset-11 { margin-left: 91.66666666666666%; } .col-xs-offset-10 { margin-left: 83.33333333333334%; } .col-xs-offset-9 { margin-left: 75%; } .col-xs-offset-8 { margin-left: 66.66666666666666%; } .col-xs-offset-7 { margin-left: 58.333333333333336%; } .col-xs-offset-6 { margin-left: 50%; } .col-xs-offset-5 { margin-left: 41.66666666666667%; } .col-xs-offset-4 { margin-left: 33.33333333333333%; } .col-xs-offset-3 { margin-left: 25%; } .col-xs-offset-2 { margin-left: 16.666666666666664%; } .col-xs-offset-1 { margin-left: 8.333333333333332%; } .col-xs-offset-0 { margin-left: 0; } @media (min-width: 768px) { .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 { float: left; } .col-sm-12 { width: 100%; } .col-sm-11 { width: 91.66666666666666%; } .col-sm-10 { width: 83.33333333333334%; } .col-sm-9 { width: 75%; } .col-sm-8 { width: 66.66666666666666%; } .col-sm-7 { width: 58.333333333333336%; } .col-sm-6 { width: 50%; } .col-sm-5 { width: 41.66666666666667%; } .col-sm-4 { width: 33.33333333333333%; } .col-sm-3 { width: 25%; } .col-sm-2 { width: 16.666666666666664%; } .col-sm-1 { width: 8.333333333333332%; } .col-sm-pull-12 { right: 100%; } .col-sm-pull-11 { right: 91.66666666666666%; } .col-sm-pull-10 { right: 83.33333333333334%; } .col-sm-pull-9 { right: 
75%; } .col-sm-pull-8 { right: 66.66666666666666%; } .col-sm-pull-7 { right: 58.333333333333336%; } .col-sm-pull-6 { right: 50%; } .col-sm-pull-5 { right: 41.66666666666667%; } .col-sm-pull-4 { right: 33.33333333333333%; } .col-sm-pull-3 { right: 25%; } .col-sm-pull-2 { right: 16.666666666666664%; } .col-sm-pull-1 { right: 8.333333333333332%; } .col-sm-pull-0 { right: 0; } .col-sm-push-12 { left: 100%; } .col-sm-push-11 { left: 91.66666666666666%; } .col-sm-push-10 { left: 83.33333333333334%; } .col-sm-push-9 { left: 75%; } .col-sm-push-8 { left: 66.66666666666666%; } .col-sm-push-7 { left: 58.333333333333336%; } .col-sm-push-6 { left: 50%; } .col-sm-push-5 { left: 41.66666666666667%; } .col-sm-push-4 { left: 33.33333333333333%; } .col-sm-push-3 { left: 25%; } .col-sm-push-2 { left: 16.666666666666664%; } .col-sm-push-1 { left: 8.333333333333332%; } .col-sm-push-0 { left: 0; } .col-sm-offset-12 { margin-left: 100%; } .col-sm-offset-11 { margin-left: 91.66666666666666%; } .col-sm-offset-10 { margin-left: 83.33333333333334%; } .col-sm-offset-9 { margin-left: 75%; } .col-sm-offset-8 { margin-left: 66.66666666666666%; } .col-sm-offset-7 { margin-left: 58.333333333333336%; } .col-sm-offset-6 { margin-left: 50%; } .col-sm-offset-5 { margin-left: 41.66666666666667%; } .col-sm-offset-4 { margin-left: 33.33333333333333%; } .col-sm-offset-3 { margin-left: 25%; } .col-sm-offset-2 { margin-left: 16.666666666666664%; } .col-sm-offset-1 { margin-left: 8.333333333333332%; } .col-sm-offset-0 { margin-left: 0; } } @media (min-width: 992px) { .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 { float: left; } .col-md-12 { width: 100%; } .col-md-11 { width: 91.66666666666666%; } .col-md-10 { width: 83.33333333333334%; } .col-md-9 { width: 75%; } .col-md-8 { width: 66.66666666666666%; } .col-md-7 { width: 58.333333333333336%; } .col-md-6 { width: 50%; } .col-md-5 { width: 41.66666666666667%; } .col-md-4 { width: 33.33333333333333%; } .col-md-3 { width: 25%; } .col-md-2 { width: 16.666666666666664%; } .col-md-1 { width: 8.333333333333332%; } .col-md-pull-12 { right: 100%; } .col-md-pull-11 { right: 91.66666666666666%; } .col-md-pull-10 { right: 83.33333333333334%; } .col-md-pull-9 { right: 75%; } .col-md-pull-8 { right: 66.66666666666666%; } .col-md-pull-7 { right: 58.333333333333336%; } .col-md-pull-6 { right: 50%; } .col-md-pull-5 { right: 41.66666666666667%; } .col-md-pull-4 { right: 33.33333333333333%; } .col-md-pull-3 { right: 25%; } .col-md-pull-2 { right: 16.666666666666664%; } .col-md-pull-1 { right: 8.333333333333332%; } .col-md-pull-0 { right: 0; } .col-md-push-12 { left: 100%; } .col-md-push-11 { left: 91.66666666666666%; } .col-md-push-10 { left: 83.33333333333334%; } .col-md-push-9 { left: 75%; } .col-md-push-8 { left: 66.66666666666666%; } .col-md-push-7 { left: 58.333333333333336%; } .col-md-push-6 { left: 50%; } .col-md-push-5 { left: 41.66666666666667%; } .col-md-push-4 { left: 33.33333333333333%; } .col-md-push-3 { left: 25%; } .col-md-push-2 { left: 16.666666666666664%; } .col-md-push-1 { left: 8.333333333333332%; } .col-md-push-0 { left: 0; } .col-md-offset-12 { margin-left: 100%; } .col-md-offset-11 { margin-left: 91.66666666666666%; } .col-md-offset-10 { margin-left: 83.33333333333334%; } .col-md-offset-9 { margin-left: 75%; } .col-md-offset-8 { margin-left: 66.66666666666666%; } .col-md-offset-7 { margin-left: 58.333333333333336%; } .col-md-offset-6 { margin-left: 50%; } .col-md-offset-5 { margin-left: 
41.66666666666667%; } .col-md-offset-4 { margin-left: 33.33333333333333%; } .col-md-offset-3 { margin-left: 25%; } .col-md-offset-2 { margin-left: 16.666666666666664%; } .col-md-offset-1 { margin-left: 8.333333333333332%; } .col-md-offset-0 { margin-left: 0; } } @media (min-width: 1200px) { .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 { float: left; } .col-lg-12 { width: 100%; } .col-lg-11 { width: 91.66666666666666%; } .col-lg-10 { width: 83.33333333333334%; } .col-lg-9 { width: 75%; } .col-lg-8 { width: 66.66666666666666%; } .col-lg-7 { width: 58.333333333333336%; } .col-lg-6 { width: 50%; } .col-lg-5 { width: 41.66666666666667%; } .col-lg-4 { width: 33.33333333333333%; } .col-lg-3 { width: 25%; } .col-lg-2 { width: 16.666666666666664%; } .col-lg-1 { width: 8.333333333333332%; } .col-lg-pull-12 { right: 100%; } .col-lg-pull-11 { right: 91.66666666666666%; } .col-lg-pull-10 { right: 83.33333333333334%; } .col-lg-pull-9 { right: 75%; } .col-lg-pull-8 { right: 66.66666666666666%; } .col-lg-pull-7 { right: 58.333333333333336%; } .col-lg-pull-6 { right: 50%; } .col-lg-pull-5 { right: 41.66666666666667%; } .col-lg-pull-4 { right: 33.33333333333333%; } .col-lg-pull-3 { right: 25%; } .col-lg-pull-2 { right: 16.666666666666664%; } .col-lg-pull-1 { right: 8.333333333333332%; } .col-lg-pull-0 { right: 0; } .col-lg-push-12 { left: 100%; } .col-lg-push-11 { left: 91.66666666666666%; } .col-lg-push-10 { left: 83.33333333333334%; } .col-lg-push-9 { left: 75%; } .col-lg-push-8 { left: 66.66666666666666%; } .col-lg-push-7 { left: 58.333333333333336%; } .col-lg-push-6 { left: 50%; } .col-lg-push-5 { left: 41.66666666666667%; } .col-lg-push-4 { left: 33.33333333333333%; } .col-lg-push-3 { left: 25%; } .col-lg-push-2 { left: 16.666666666666664%; } .col-lg-push-1 { left: 8.333333333333332%; } .col-lg-push-0 { left: 0; } .col-lg-offset-12 { margin-left: 100%; } .col-lg-offset-11 { margin-left: 91.66666666666666%; } .col-lg-offset-10 { margin-left: 83.33333333333334%; } .col-lg-offset-9 { margin-left: 75%; } .col-lg-offset-8 { margin-left: 66.66666666666666%; } .col-lg-offset-7 { margin-left: 58.333333333333336%; } .col-lg-offset-6 { margin-left: 50%; } .col-lg-offset-5 { margin-left: 41.66666666666667%; } .col-lg-offset-4 { margin-left: 33.33333333333333%; } .col-lg-offset-3 { margin-left: 25%; } .col-lg-offset-2 { margin-left: 16.666666666666664%; } .col-lg-offset-1 { margin-left: 8.333333333333332%; } .col-lg-offset-0 { margin-left: 0; } } table { max-width: 100%; background-color: transparent; } th { text-align: left; } .table { width: 100%; margin-bottom: 20px; } .table > thead > tr > th, .table > tbody > tr > th, .table > tfoot > tr > th, .table > thead > tr > td, .table > tbody > tr > td, .table > tfoot > tr > td { padding: 8px; line-height: 1.428571429; vertical-align: top; border-top: 1px solid #ddd; } .table > thead > tr > th { vertical-align: bottom; border-bottom: 2px solid #ddd; } .table > caption + thead > tr:first-child > th, .table > colgroup + thead > tr:first-child > th, .table > thead:first-child > tr:first-child > th, .table > caption + thead > tr:first-child > td, .table > colgroup + thead > tr:first-child > td, .table > thead:first-child > tr:first-child > td { border-top: 0; } .table > tbody + tbody { border-top: 2px solid #ddd; } .table .table { background-color: #fff; } .table-condensed > thead > tr > th, .table-condensed > tbody > tr > th, .table-condensed > tfoot > tr > th, .table-condensed > 
thead > tr > td, .table-condensed > tbody > tr > td, .table-condensed > tfoot > tr > td { padding: 5px; } .table-bordered { border: 1px solid #ddd; } .table-bordered > thead > tr > th, .table-bordered > tbody > tr > th, .table-bordered > tfoot > tr > th, .table-bordered > thead > tr > td, .table-bordered > tbody > tr > td, .table-bordered > tfoot > tr > td { border: 1px solid #ddd; } .table-bordered > thead > tr > th, .table-bordered > thead > tr > td { border-bottom-width: 2px; } .table-striped > tbody > tr:nth-child(odd) > td, .table-striped > tbody > tr:nth-child(odd) > th { background-color: #f9f9f9; } .table-hover > tbody > tr:hover > td, .table-hover > tbody > tr:hover > th { background-color: #f5f5f5; } table col[class*="col-"] { position: static; display: table-column; float: none; } table td[class*="col-"], table th[class*="col-"] { position: static; display: table-cell; float: none; } .table > thead > tr > td.active, .table > tbody > tr > td.active, .table > tfoot > tr > td.active, .table > thead > tr > th.active, .table > tbody > tr > th.active, .table > tfoot > tr > th.active, .table > thead > tr.active > td, .table > tbody > tr.active > td, .table > tfoot > tr.active > td, .table > thead > tr.active > th, .table > tbody > tr.active > th, .table > tfoot > tr.active > th { background-color: #f5f5f5; } .table-hover > tbody > tr > td.active:hover, .table-hover > tbody > tr > th.active:hover, .table-hover > tbody > tr.active:hover > td, .table-hover > tbody > tr.active:hover > th { background-color: #e8e8e8; } .table > thead > tr > td.success, .table > tbody > tr > td.success, .table > tfoot > tr > td.success, .table > thead > tr > th.success, .table > tbody > tr > th.success, .table > tfoot > tr > th.success, .table > thead > tr.success > td, .table > tbody > tr.success > td, .table > tfoot > tr.success > td, .table > thead > tr.success > th, .table > tbody > tr.success > th, .table > tfoot > tr.success > th { background-color: #dff0d8; } .table-hover > tbody > tr > td.success:hover, .table-hover > tbody > tr > th.success:hover, .table-hover > tbody > tr.success:hover > td, .table-hover > tbody > tr.success:hover > th { background-color: #d0e9c6; } .table > thead > tr > td.info, .table > tbody > tr > td.info, .table > tfoot > tr > td.info, .table > thead > tr > th.info, .table > tbody > tr > th.info, .table > tfoot > tr > th.info, .table > thead > tr.info > td, .table > tbody > tr.info > td, .table > tfoot > tr.info > td, .table > thead > tr.info > th, .table > tbody > tr.info > th, .table > tfoot > tr.info > th { background-color: #d9edf7; } .table-hover > tbody > tr > td.info:hover, .table-hover > tbody > tr > th.info:hover, .table-hover > tbody > tr.info:hover > td, .table-hover > tbody > tr.info:hover > th { background-color: #c4e3f3; } .table > thead > tr > td.warning, .table > tbody > tr > td.warning, .table > tfoot > tr > td.warning, .table > thead > tr > th.warning, .table > tbody > tr > th.warning, .table > tfoot > tr > th.warning, .table > thead > tr.warning > td, .table > tbody > tr.warning > td, .table > tfoot > tr.warning > td, .table > thead > tr.warning > th, .table > tbody > tr.warning > th, .table > tfoot > tr.warning > th { background-color: #fcf8e3; } .table-hover > tbody > tr > td.warning:hover, .table-hover > tbody > tr > th.warning:hover, .table-hover > tbody > tr.warning:hover > td, .table-hover > tbody > tr.warning:hover > th { background-color: #faf2cc; } .table > thead > tr > td.danger, .table > tbody > tr > td.danger, .table > tfoot > tr > td.danger, 
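/* [editor's annotation, not part of upstream Bootstrap 3.1.0: inserted
   between two selectors of one rule, which CSS permits.]
   The .active/.success/.info/.warning/.danger table variants tint a single
   cell or a whole row (the class may sit on the <td>/<th> or the <tr>), and
   .table-hover swaps in a slightly darker shade on hover. Usage sketch:
   <table class="table table-hover"><tr class="success">...</tr></table> */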
.table > thead > tr > th.danger, .table > tbody > tr > th.danger, .table > tfoot > tr > th.danger, .table > thead > tr.danger > td, .table > tbody > tr.danger > td, .table > tfoot > tr.danger > td, .table > thead > tr.danger > th, .table > tbody > tr.danger > th, .table > tfoot > tr.danger > th { background-color: #f2dede; } .table-hover > tbody > tr > td.danger:hover, .table-hover > tbody > tr > th.danger:hover, .table-hover > tbody > tr.danger:hover > td, .table-hover > tbody > tr.danger:hover > th { background-color: #ebcccc; } @media (max-width: 767px) { .table-responsive { width: 100%; margin-bottom: 15px; overflow-x: scroll; overflow-y: hidden; -webkit-overflow-scrolling: touch; -ms-overflow-style: -ms-autohiding-scrollbar; border: 1px solid #ddd; } .table-responsive > .table { margin-bottom: 0; } .table-responsive > .table > thead > tr > th, .table-responsive > .table > tbody > tr > th, .table-responsive > .table > tfoot > tr > th, .table-responsive > .table > thead > tr > td, .table-responsive > .table > tbody > tr > td, .table-responsive > .table > tfoot > tr > td { white-space: nowrap; } .table-responsive > .table-bordered { border: 0; } .table-responsive > .table-bordered > thead > tr > th:first-child, .table-responsive > .table-bordered > tbody > tr > th:first-child, .table-responsive > .table-bordered > tfoot > tr > th:first-child, .table-responsive > .table-bordered > thead > tr > td:first-child, .table-responsive > .table-bordered > tbody > tr > td:first-child, .table-responsive > .table-bordered > tfoot > tr > td:first-child { border-left: 0; } .table-responsive > .table-bordered > thead > tr > th:last-child, .table-responsive > .table-bordered > tbody > tr > th:last-child, .table-responsive > .table-bordered > tfoot > tr > th:last-child, .table-responsive > .table-bordered > thead > tr > td:last-child, .table-responsive > .table-bordered > tbody > tr > td:last-child, .table-responsive > .table-bordered > tfoot > tr > td:last-child { border-right: 0; } .table-responsive > .table-bordered > tbody > tr:last-child > th, .table-responsive > .table-bordered > tfoot > tr:last-child > th, .table-responsive > .table-bordered > tbody > tr:last-child > td, .table-responsive > .table-bordered > tfoot > tr:last-child > td { border-bottom: 0; } } fieldset { min-width: 0; padding: 0; margin: 0; border: 0; } legend { display: block; width: 100%; padding: 0; margin-bottom: 20px; font-size: 21px; line-height: inherit; color: #333; border: 0; border-bottom: 1px solid #e5e5e5; } label { display: inline-block; margin-bottom: 5px; font-weight: bold; } input[type="search"] { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } input[type="radio"], input[type="checkbox"] { margin: 4px 0 0; margin-top: 1px \9; /* IE8-9 */ line-height: normal; } input[type="file"] { display: block; } input[type="range"] { display: block; width: 100%; } select[multiple], select[size] { height: auto; } input[type="file"]:focus, input[type="radio"]:focus, input[type="checkbox"]:focus { outline: thin dotted; outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } output { display: block; padding-top: 7px; font-size: 14px; line-height: 1.428571429; color: #555; } .form-control { display: block; width: 100%; height: 34px; padding: 6px 12px; font-size: 14px; line-height: 1.428571429; color: #555; background-color: #fff; background-image: none; border: 1px solid #ccc; border-radius: 4px; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px 
rgba(0, 0, 0, .075); -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; } .form-control:focus { border-color: #66afe9; outline: 0; -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6); } .form-control:-moz-placeholder { color: #999; } .form-control::-moz-placeholder { color: #999; opacity: 1; } .form-control:-ms-input-placeholder { color: #999; } .form-control::-webkit-input-placeholder { color: #999; } .form-control[disabled], .form-control[readonly], fieldset[disabled] .form-control { cursor: not-allowed; background-color: #eee; opacity: 1; } textarea.form-control { height: auto; } input[type="date"] { line-height: 34px; } .form-group { margin-bottom: 15px; } .radio, .checkbox { display: block; min-height: 20px; padding-left: 20px; margin-top: 10px; margin-bottom: 10px; } .radio label, .checkbox label { display: inline; font-weight: normal; cursor: pointer; } .radio input[type="radio"], .radio-inline input[type="radio"], .checkbox input[type="checkbox"], .checkbox-inline input[type="checkbox"] { float: left; margin-left: -20px; } .radio + .radio, .checkbox + .checkbox { margin-top: -5px; } .radio-inline, .checkbox-inline { display: inline-block; padding-left: 20px; margin-bottom: 0; font-weight: normal; vertical-align: middle; cursor: pointer; } .radio-inline + .radio-inline, .checkbox-inline + .checkbox-inline { margin-top: 0; margin-left: 10px; } input[type="radio"][disabled], input[type="checkbox"][disabled], .radio[disabled], .radio-inline[disabled], .checkbox[disabled], .checkbox-inline[disabled], fieldset[disabled] input[type="radio"], fieldset[disabled] input[type="checkbox"], fieldset[disabled] .radio, fieldset[disabled] .radio-inline, fieldset[disabled] .checkbox, fieldset[disabled] .checkbox-inline { cursor: not-allowed; } .input-sm { height: 30px; padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } select.input-sm { height: 30px; line-height: 30px; } textarea.input-sm, select[multiple].input-sm { height: auto; } .input-lg { height: 46px; padding: 10px 16px; font-size: 18px; line-height: 1.33; border-radius: 6px; } select.input-lg { height: 46px; line-height: 46px; } textarea.input-lg, select[multiple].input-lg { height: auto; } .has-feedback { position: relative; } .has-feedback .form-control { padding-right: 42.5px; } .has-feedback .form-control-feedback { position: absolute; top: 25px; right: 0; display: block; width: 34px; height: 34px; line-height: 34px; text-align: center; } .has-success .help-block, .has-success .control-label, .has-success .radio, .has-success .checkbox, .has-success .radio-inline, .has-success .checkbox-inline { color: #3c763d; } .has-success .form-control { border-color: #3c763d; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); } .has-success .form-control:focus { border-color: #2b542c; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168; box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168; } .has-success .input-group-addon { color: #3c763d; background-color: #dff0d8; border-color: #3c763d; } .has-success .form-control-feedback { color: #3c763d; } .has-warning .help-block, .has-warning .control-label, .has-warning .radio, .has-warning .checkbox, .has-warning .radio-inline, .has-warning .checkbox-inline { color: #8a6d3b; } 
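/* [editor's annotation, not part of upstream Bootstrap 3.1.0]
   .has-success / .has-warning / .has-error go on a wrapping .form-group and
   recolor the label, help text, control border, focus glow, input-group
   addon, and .form-control-feedback icon in green, amber, or red. Usage
   sketch:
   <div class="form-group has-error">
     <label class="control-label" for="email">Email</label>
     <input type="email" class="form-control" id="email">
   </div> */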
.has-warning .form-control { border-color: #8a6d3b; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); } .has-warning .form-control:focus { border-color: #66512c; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b; } .has-warning .input-group-addon { color: #8a6d3b; background-color: #fcf8e3; border-color: #8a6d3b; } .has-warning .form-control-feedback { color: #8a6d3b; } .has-error .help-block, .has-error .control-label, .has-error .radio, .has-error .checkbox, .has-error .radio-inline, .has-error .checkbox-inline { color: #a94442; } .has-error .form-control { border-color: #a94442; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075); } .has-error .form-control:focus { border-color: #843534; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483; } .has-error .input-group-addon { color: #a94442; background-color: #f2dede; border-color: #a94442; } .has-error .form-control-feedback { color: #a94442; } .form-control-static { margin-bottom: 0; } .help-block { display: block; margin-top: 5px; margin-bottom: 10px; color: #737373; } @media (min-width: 768px) { .form-inline .form-group { display: inline-block; margin-bottom: 0; vertical-align: middle; } .form-inline .form-control { display: inline-block; width: auto; vertical-align: middle; } .form-inline .control-label { margin-bottom: 0; vertical-align: middle; } .form-inline .radio, .form-inline .checkbox { display: inline-block; padding-left: 0; margin-top: 0; margin-bottom: 0; vertical-align: middle; } .form-inline .radio input[type="radio"], .form-inline .checkbox input[type="checkbox"] { float: none; margin-left: 0; } .form-inline .has-feedback .form-control-feedback { top: 0; } } .form-horizontal .control-label, .form-horizontal .radio, .form-horizontal .checkbox, .form-horizontal .radio-inline, .form-horizontal .checkbox-inline { padding-top: 7px; margin-top: 0; margin-bottom: 0; } .form-horizontal .radio, .form-horizontal .checkbox { min-height: 27px; } .form-horizontal .form-group { margin-right: -15px; margin-left: -15px; } .form-horizontal .form-control-static { padding-top: 7px; } @media (min-width: 768px) { .form-horizontal .control-label { text-align: right; } } .form-horizontal .has-feedback .form-control-feedback { top: 0; right: 15px; } .btn { display: inline-block; padding: 6px 12px; margin-bottom: 0; font-size: 14px; font-weight: normal; line-height: 1.428571429; text-align: center; white-space: nowrap; vertical-align: middle; cursor: pointer; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; -o-user-select: none; user-select: none; background-image: none; border: 1px solid transparent; border-radius: 4px; } .btn:focus { outline: thin dotted; outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } .btn:hover, .btn:focus { color: #333; text-decoration: none; } .btn:active, .btn.active { background-image: none; outline: 0; -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); } .btn.disabled, .btn[disabled], fieldset[disabled] .btn { pointer-events: none; cursor: not-allowed; filter: alpha(opacity=65); -webkit-box-shadow: none; box-shadow: none; opacity: .65; } .btn-default { color: #333; background-color: #fff; border-color: #ccc; } .btn-default:hover, 
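/* [editor's annotation, not part of upstream Bootstrap 3.1.0: inserted
   between two selectors of one rule, which CSS permits.]
   Every .btn-{default,primary,success,info,warning,danger} variant below
   follows one recipe: a base color; a darker shade on :hover, :focus,
   :active, and while its dropdown is open; background-image stripped when
   active; and the base color restored (with pointer-events disabled) when
   disabled. Usage sketch:
   <button type="button" class="btn btn-primary">Save</button> */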
.btn-default:focus, .btn-default:active, .btn-default.active, .open .dropdown-toggle.btn-default { color: #333; background-color: #ebebeb; border-color: #adadad; } .btn-default:active, .btn-default.active, .open .dropdown-toggle.btn-default { background-image: none; } .btn-default.disabled, .btn-default[disabled], fieldset[disabled] .btn-default, .btn-default.disabled:hover, .btn-default[disabled]:hover, fieldset[disabled] .btn-default:hover, .btn-default.disabled:focus, .btn-default[disabled]:focus, fieldset[disabled] .btn-default:focus, .btn-default.disabled:active, .btn-default[disabled]:active, fieldset[disabled] .btn-default:active, .btn-default.disabled.active, .btn-default[disabled].active, fieldset[disabled] .btn-default.active { background-color: #fff; border-color: #ccc; } .btn-default .badge { color: #fff; background-color: #333; } .btn-primary { color: #fff; background-color: #428bca; border-color: #357ebd; } .btn-primary:hover, .btn-primary:focus, .btn-primary:active, .btn-primary.active, .open .dropdown-toggle.btn-primary { color: #fff; background-color: #3276b1; border-color: #285e8e; } .btn-primary:active, .btn-primary.active, .open .dropdown-toggle.btn-primary { background-image: none; } .btn-primary.disabled, .btn-primary[disabled], fieldset[disabled] .btn-primary, .btn-primary.disabled:hover, .btn-primary[disabled]:hover, fieldset[disabled] .btn-primary:hover, .btn-primary.disabled:focus, .btn-primary[disabled]:focus, fieldset[disabled] .btn-primary:focus, .btn-primary.disabled:active, .btn-primary[disabled]:active, fieldset[disabled] .btn-primary:active, .btn-primary.disabled.active, .btn-primary[disabled].active, fieldset[disabled] .btn-primary.active { background-color: #428bca; border-color: #357ebd; } .btn-primary .badge { color: #428bca; background-color: #fff; } .btn-success { color: #fff; background-color: #5cb85c; border-color: #4cae4c; } .btn-success:hover, .btn-success:focus, .btn-success:active, .btn-success.active, .open .dropdown-toggle.btn-success { color: #fff; background-color: #47a447; border-color: #398439; } .btn-success:active, .btn-success.active, .open .dropdown-toggle.btn-success { background-image: none; } .btn-success.disabled, .btn-success[disabled], fieldset[disabled] .btn-success, .btn-success.disabled:hover, .btn-success[disabled]:hover, fieldset[disabled] .btn-success:hover, .btn-success.disabled:focus, .btn-success[disabled]:focus, fieldset[disabled] .btn-success:focus, .btn-success.disabled:active, .btn-success[disabled]:active, fieldset[disabled] .btn-success:active, .btn-success.disabled.active, .btn-success[disabled].active, fieldset[disabled] .btn-success.active { background-color: #5cb85c; border-color: #4cae4c; } .btn-success .badge { color: #5cb85c; background-color: #fff; } .btn-info { color: #fff; background-color: #5bc0de; border-color: #46b8da; } .btn-info:hover, .btn-info:focus, .btn-info:active, .btn-info.active, .open .dropdown-toggle.btn-info { color: #fff; background-color: #39b3d7; border-color: #269abc; } .btn-info:active, .btn-info.active, .open .dropdown-toggle.btn-info { background-image: none; } .btn-info.disabled, .btn-info[disabled], fieldset[disabled] .btn-info, .btn-info.disabled:hover, .btn-info[disabled]:hover, fieldset[disabled] .btn-info:hover, .btn-info.disabled:focus, .btn-info[disabled]:focus, fieldset[disabled] .btn-info:focus, .btn-info.disabled:active, .btn-info[disabled]:active, fieldset[disabled] .btn-info:active, .btn-info.disabled.active, .btn-info[disabled].active, fieldset[disabled] 
.btn-info.active { background-color: #5bc0de; border-color: #46b8da; } .btn-info .badge { color: #5bc0de; background-color: #fff; } .btn-warning { color: #fff; background-color: #f0ad4e; border-color: #eea236; } .btn-warning:hover, .btn-warning:focus, .btn-warning:active, .btn-warning.active, .open .dropdown-toggle.btn-warning { color: #fff; background-color: #ed9c28; border-color: #d58512; } .btn-warning:active, .btn-warning.active, .open .dropdown-toggle.btn-warning { background-image: none; } .btn-warning.disabled, .btn-warning[disabled], fieldset[disabled] .btn-warning, .btn-warning.disabled:hover, .btn-warning[disabled]:hover, fieldset[disabled] .btn-warning:hover, .btn-warning.disabled:focus, .btn-warning[disabled]:focus, fieldset[disabled] .btn-warning:focus, .btn-warning.disabled:active, .btn-warning[disabled]:active, fieldset[disabled] .btn-warning:active, .btn-warning.disabled.active, .btn-warning[disabled].active, fieldset[disabled] .btn-warning.active { background-color: #f0ad4e; border-color: #eea236; } .btn-warning .badge { color: #f0ad4e; background-color: #fff; } .btn-danger { color: #fff; background-color: #d9534f; border-color: #d43f3a; } .btn-danger:hover, .btn-danger:focus, .btn-danger:active, .btn-danger.active, .open .dropdown-toggle.btn-danger { color: #fff; background-color: #d2322d; border-color: #ac2925; } .btn-danger:active, .btn-danger.active, .open .dropdown-toggle.btn-danger { background-image: none; } .btn-danger.disabled, .btn-danger[disabled], fieldset[disabled] .btn-danger, .btn-danger.disabled:hover, .btn-danger[disabled]:hover, fieldset[disabled] .btn-danger:hover, .btn-danger.disabled:focus, .btn-danger[disabled]:focus, fieldset[disabled] .btn-danger:focus, .btn-danger.disabled:active, .btn-danger[disabled]:active, fieldset[disabled] .btn-danger:active, .btn-danger.disabled.active, .btn-danger[disabled].active, fieldset[disabled] .btn-danger.active { background-color: #d9534f; border-color: #d43f3a; } .btn-danger .badge { color: #d9534f; background-color: #fff; } .btn-link { font-weight: normal; color: #428bca; cursor: pointer; border-radius: 0; } .btn-link, .btn-link:active, .btn-link[disabled], fieldset[disabled] .btn-link { background-color: transparent; -webkit-box-shadow: none; box-shadow: none; } .btn-link, .btn-link:hover, .btn-link:focus, .btn-link:active { border-color: transparent; } .btn-link:hover, .btn-link:focus { color: #2a6496; text-decoration: underline; background-color: transparent; } .btn-link[disabled]:hover, fieldset[disabled] .btn-link:hover, .btn-link[disabled]:focus, fieldset[disabled] .btn-link:focus { color: #999; text-decoration: none; } .btn-lg { padding: 10px 16px; font-size: 18px; line-height: 1.33; border-radius: 6px; } .btn-sm { padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } .btn-xs { padding: 1px 5px; font-size: 12px; line-height: 1.5; border-radius: 3px; } .btn-block { display: block; width: 100%; padding-right: 0; padding-left: 0; } .btn-block + .btn-block { margin-top: 5px; } input[type="submit"].btn-block, input[type="reset"].btn-block, input[type="button"].btn-block { width: 100%; } .fade { opacity: 0; -webkit-transition: opacity .15s linear; transition: opacity .15s linear; } .fade.in { opacity: 1; } .collapse { display: none; } .collapse.in { display: block; } .collapsing { position: relative; height: 0; overflow: hidden; -webkit-transition: height .35s ease; transition: height .35s ease; } @font-face { font-family: 'Glyphicons Halflings'; src: 
url('../fonts/glyphicons-halflings-regular.eot'); src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); } .glyphicon { position: relative; top: 1px; display: inline-block; font-family: 'Glyphicons Halflings'; font-style: normal; font-weight: normal; line-height: 1; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } .glyphicon-asterisk:before { content: "\2a"; } .glyphicon-plus:before { content: "\2b"; } .glyphicon-euro:before { content: "\20ac"; } .glyphicon-minus:before { content: "\2212"; } .glyphicon-cloud:before { content: "\2601"; } .glyphicon-envelope:before { content: "\2709"; } .glyphicon-pencil:before { content: "\270f"; } .glyphicon-glass:before { content: "\e001"; } .glyphicon-music:before { content: "\e002"; } .glyphicon-search:before { content: "\e003"; } .glyphicon-heart:before { content: "\e005"; } .glyphicon-star:before { content: "\e006"; } .glyphicon-star-empty:before { content: "\e007"; } .glyphicon-user:before { content: "\e008"; } .glyphicon-film:before { content: "\e009"; } .glyphicon-th-large:before { content: "\e010"; } .glyphicon-th:before { content: "\e011"; } .glyphicon-th-list:before { content: "\e012"; } .glyphicon-ok:before { content: "\e013"; } .glyphicon-remove:before { content: "\e014"; } .glyphicon-zoom-in:before { content: "\e015"; } .glyphicon-zoom-out:before { content: "\e016"; } .glyphicon-off:before { content: "\e017"; } .glyphicon-signal:before { content: "\e018"; } .glyphicon-cog:before { content: "\e019"; } .glyphicon-trash:before { content: "\e020"; } .glyphicon-home:before { content: "\e021"; } .glyphicon-file:before { content: "\e022"; } .glyphicon-time:before { content: "\e023"; } .glyphicon-road:before { content: "\e024"; } .glyphicon-download-alt:before { content: "\e025"; } .glyphicon-download:before { content: "\e026"; } .glyphicon-upload:before { content: "\e027"; } .glyphicon-inbox:before { content: "\e028"; } .glyphicon-play-circle:before { content: "\e029"; } .glyphicon-repeat:before { content: "\e030"; } .glyphicon-refresh:before { content: "\e031"; } .glyphicon-list-alt:before { content: "\e032"; } .glyphicon-lock:before { content: "\e033"; } .glyphicon-flag:before { content: "\e034"; } .glyphicon-headphones:before { content: "\e035"; } .glyphicon-volume-off:before { content: "\e036"; } .glyphicon-volume-down:before { content: "\e037"; } .glyphicon-volume-up:before { content: "\e038"; } .glyphicon-qrcode:before { content: "\e039"; } .glyphicon-barcode:before { content: "\e040"; } .glyphicon-tag:before { content: "\e041"; } .glyphicon-tags:before { content: "\e042"; } .glyphicon-book:before { content: "\e043"; } .glyphicon-bookmark:before { content: "\e044"; } .glyphicon-print:before { content: "\e045"; } .glyphicon-camera:before { content: "\e046"; } .glyphicon-font:before { content: "\e047"; } .glyphicon-bold:before { content: "\e048"; } .glyphicon-italic:before { content: "\e049"; } .glyphicon-text-height:before { content: "\e050"; } .glyphicon-text-width:before { content: "\e051"; } .glyphicon-align-left:before { content: "\e052"; } .glyphicon-align-center:before { content: "\e053"; } .glyphicon-align-right:before { content: "\e054"; } .glyphicon-align-justify:before { content: "\e055"; } .glyphicon-list:before { content: "\e056"; } 
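/* [editor's annotation, not part of upstream Bootstrap 3.1.0]
   Each .glyphicon-*:before rule maps an icon name to a code point in the
   'Glyphicons Halflings' font declared by the @font-face above; the base
   .glyphicon class selects that font. Usage sketch:
   <span class="glyphicon glyphicon-search" aria-hidden="true"></span> */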
.glyphicon-indent-left:before { content: "\e057"; } .glyphicon-indent-right:before { content: "\e058"; } .glyphicon-facetime-video:before { content: "\e059"; } .glyphicon-picture:before { content: "\e060"; } .glyphicon-map-marker:before { content: "\e062"; } .glyphicon-adjust:before { content: "\e063"; } .glyphicon-tint:before { content: "\e064"; } .glyphicon-edit:before { content: "\e065"; } .glyphicon-share:before { content: "\e066"; } .glyphicon-check:before { content: "\e067"; } .glyphicon-move:before { content: "\e068"; } .glyphicon-step-backward:before { content: "\e069"; } .glyphicon-fast-backward:before { content: "\e070"; } .glyphicon-backward:before { content: "\e071"; } .glyphicon-play:before { content: "\e072"; } .glyphicon-pause:before { content: "\e073"; } .glyphicon-stop:before { content: "\e074"; } .glyphicon-forward:before { content: "\e075"; } .glyphicon-fast-forward:before { content: "\e076"; } .glyphicon-step-forward:before { content: "\e077"; } .glyphicon-eject:before { content: "\e078"; } .glyphicon-chevron-left:before { content: "\e079"; } .glyphicon-chevron-right:before { content: "\e080"; } .glyphicon-plus-sign:before { content: "\e081"; } .glyphicon-minus-sign:before { content: "\e082"; } .glyphicon-remove-sign:before { content: "\e083"; } .glyphicon-ok-sign:before { content: "\e084"; } .glyphicon-question-sign:before { content: "\e085"; } .glyphicon-info-sign:before { content: "\e086"; } .glyphicon-screenshot:before { content: "\e087"; } .glyphicon-remove-circle:before { content: "\e088"; } .glyphicon-ok-circle:before { content: "\e089"; } .glyphicon-ban-circle:before { content: "\e090"; } .glyphicon-arrow-left:before { content: "\e091"; } .glyphicon-arrow-right:before { content: "\e092"; } .glyphicon-arrow-up:before { content: "\e093"; } .glyphicon-arrow-down:before { content: "\e094"; } .glyphicon-share-alt:before { content: "\e095"; } .glyphicon-resize-full:before { content: "\e096"; } .glyphicon-resize-small:before { content: "\e097"; } .glyphicon-exclamation-sign:before { content: "\e101"; } .glyphicon-gift:before { content: "\e102"; } .glyphicon-leaf:before { content: "\e103"; } .glyphicon-fire:before { content: "\e104"; } .glyphicon-eye-open:before { content: "\e105"; } .glyphicon-eye-close:before { content: "\e106"; } .glyphicon-warning-sign:before { content: "\e107"; } .glyphicon-plane:before { content: "\e108"; } .glyphicon-calendar:before { content: "\e109"; } .glyphicon-random:before { content: "\e110"; } .glyphicon-comment:before { content: "\e111"; } .glyphicon-magnet:before { content: "\e112"; } .glyphicon-chevron-up:before { content: "\e113"; } .glyphicon-chevron-down:before { content: "\e114"; } .glyphicon-retweet:before { content: "\e115"; } .glyphicon-shopping-cart:before { content: "\e116"; } .glyphicon-folder-close:before { content: "\e117"; } .glyphicon-folder-open:before { content: "\e118"; } .glyphicon-resize-vertical:before { content: "\e119"; } .glyphicon-resize-horizontal:before { content: "\e120"; } .glyphicon-hdd:before { content: "\e121"; } .glyphicon-bullhorn:before { content: "\e122"; } .glyphicon-bell:before { content: "\e123"; } .glyphicon-certificate:before { content: "\e124"; } .glyphicon-thumbs-up:before { content: "\e125"; } .glyphicon-thumbs-down:before { content: "\e126"; } .glyphicon-hand-right:before { content: "\e127"; } .glyphicon-hand-left:before { content: "\e128"; } .glyphicon-hand-up:before { content: "\e129"; } .glyphicon-hand-down:before { content: "\e130"; } .glyphicon-circle-arrow-right:before { content: 
"\e131"; } .glyphicon-circle-arrow-left:before { content: "\e132"; } .glyphicon-circle-arrow-up:before { content: "\e133"; } .glyphicon-circle-arrow-down:before { content: "\e134"; } .glyphicon-globe:before { content: "\e135"; } .glyphicon-wrench:before { content: "\e136"; } .glyphicon-tasks:before { content: "\e137"; } .glyphicon-filter:before { content: "\e138"; } .glyphicon-briefcase:before { content: "\e139"; } .glyphicon-fullscreen:before { content: "\e140"; } .glyphicon-dashboard:before { content: "\e141"; } .glyphicon-paperclip:before { content: "\e142"; } .glyphicon-heart-empty:before { content: "\e143"; } .glyphicon-link:before { content: "\e144"; } .glyphicon-phone:before { content: "\e145"; } .glyphicon-pushpin:before { content: "\e146"; } .glyphicon-usd:before { content: "\e148"; } .glyphicon-gbp:before { content: "\e149"; } .glyphicon-sort:before { content: "\e150"; } .glyphicon-sort-by-alphabet:before { content: "\e151"; } .glyphicon-sort-by-alphabet-alt:before { content: "\e152"; } .glyphicon-sort-by-order:before { content: "\e153"; } .glyphicon-sort-by-order-alt:before { content: "\e154"; } .glyphicon-sort-by-attributes:before { content: "\e155"; } .glyphicon-sort-by-attributes-alt:before { content: "\e156"; } .glyphicon-unchecked:before { content: "\e157"; } .glyphicon-expand:before { content: "\e158"; } .glyphicon-collapse-down:before { content: "\e159"; } .glyphicon-collapse-up:before { content: "\e160"; } .glyphicon-log-in:before { content: "\e161"; } .glyphicon-flash:before { content: "\e162"; } .glyphicon-log-out:before { content: "\e163"; } .glyphicon-new-window:before { content: "\e164"; } .glyphicon-record:before { content: "\e165"; } .glyphicon-save:before { content: "\e166"; } .glyphicon-open:before { content: "\e167"; } .glyphicon-saved:before { content: "\e168"; } .glyphicon-import:before { content: "\e169"; } .glyphicon-export:before { content: "\e170"; } .glyphicon-send:before { content: "\e171"; } .glyphicon-floppy-disk:before { content: "\e172"; } .glyphicon-floppy-saved:before { content: "\e173"; } .glyphicon-floppy-remove:before { content: "\e174"; } .glyphicon-floppy-save:before { content: "\e175"; } .glyphicon-floppy-open:before { content: "\e176"; } .glyphicon-credit-card:before { content: "\e177"; } .glyphicon-transfer:before { content: "\e178"; } .glyphicon-cutlery:before { content: "\e179"; } .glyphicon-header:before { content: "\e180"; } .glyphicon-compressed:before { content: "\e181"; } .glyphicon-earphone:before { content: "\e182"; } .glyphicon-phone-alt:before { content: "\e183"; } .glyphicon-tower:before { content: "\e184"; } .glyphicon-stats:before { content: "\e185"; } .glyphicon-sd-video:before { content: "\e186"; } .glyphicon-hd-video:before { content: "\e187"; } .glyphicon-subtitles:before { content: "\e188"; } .glyphicon-sound-stereo:before { content: "\e189"; } .glyphicon-sound-dolby:before { content: "\e190"; } .glyphicon-sound-5-1:before { content: "\e191"; } .glyphicon-sound-6-1:before { content: "\e192"; } .glyphicon-sound-7-1:before { content: "\e193"; } .glyphicon-copyright-mark:before { content: "\e194"; } .glyphicon-registration-mark:before { content: "\e195"; } .glyphicon-cloud-download:before { content: "\e197"; } .glyphicon-cloud-upload:before { content: "\e198"; } .glyphicon-tree-conifer:before { content: "\e199"; } .glyphicon-tree-deciduous:before { content: "\e200"; } .caret { display: inline-block; width: 0; height: 0; margin-left: 2px; vertical-align: middle; border-top: 4px solid; border-right: 4px solid transparent; 
border-left: 4px solid transparent; } .dropdown { position: relative; } .dropdown-toggle:focus { outline: 0; } .dropdown-menu { position: absolute; top: 100%; left: 0; z-index: 1000; display: none; float: left; min-width: 160px; padding: 5px 0; margin: 2px 0 0; font-size: 14px; list-style: none; background-color: #fff; background-clip: padding-box; border: 1px solid #ccc; border: 1px solid rgba(0, 0, 0, .15); border-radius: 4px; -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175); box-shadow: 0 6px 12px rgba(0, 0, 0, .175); } .dropdown-menu.pull-right { right: 0; left: auto; } .dropdown-menu .divider { height: 1px; margin: 9px 0; overflow: hidden; background-color: #e5e5e5; } .dropdown-menu > li > a { display: block; padding: 3px 20px; clear: both; font-weight: normal; line-height: 1.428571429; color: #333; white-space: nowrap; } .dropdown-menu > li > a:hover, .dropdown-menu > li > a:focus { color: #262626; text-decoration: none; background-color: #f5f5f5; } .dropdown-menu > .active > a, .dropdown-menu > .active > a:hover, .dropdown-menu > .active > a:focus { color: #fff; text-decoration: none; background-color: #428bca; outline: 0; } .dropdown-menu > .disabled > a, .dropdown-menu > .disabled > a:hover, .dropdown-menu > .disabled > a:focus { color: #999; } .dropdown-menu > .disabled > a:hover, .dropdown-menu > .disabled > a:focus { text-decoration: none; cursor: not-allowed; background-color: transparent; background-image: none; filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); } .open > .dropdown-menu { display: block; } .open > a { outline: 0; } .dropdown-menu-right { right: 0; left: auto; } .dropdown-menu-left { right: auto; left: 0; } .dropdown-header { display: block; padding: 3px 20px; font-size: 12px; line-height: 1.428571429; color: #999; } .dropdown-backdrop { position: fixed; top: 0; right: 0; bottom: 0; left: 0; z-index: 990; } .pull-right > .dropdown-menu { right: 0; left: auto; } .dropup .caret, .navbar-fixed-bottom .dropdown .caret { content: ""; border-top: 0; border-bottom: 4px solid; } .dropup .dropdown-menu, .navbar-fixed-bottom .dropdown .dropdown-menu { top: auto; bottom: 100%; margin-bottom: 1px; } @media (min-width: 768px) { .navbar-right .dropdown-menu { right: 0; left: auto; } .navbar-right .dropdown-menu-left { right: auto; left: 0; } } .btn-group, .btn-group-vertical { position: relative; display: inline-block; vertical-align: middle; } .btn-group > .btn, .btn-group-vertical > .btn { position: relative; float: left; } .btn-group > .btn:hover, .btn-group-vertical > .btn:hover, .btn-group > .btn:focus, .btn-group-vertical > .btn:focus, .btn-group > .btn:active, .btn-group-vertical > .btn:active, .btn-group > .btn.active, .btn-group-vertical > .btn.active { z-index: 2; } .btn-group > .btn:focus, .btn-group-vertical > .btn:focus { outline: none; } .btn-group .btn + .btn, .btn-group .btn + .btn-group, .btn-group .btn-group + .btn, .btn-group .btn-group + .btn-group { margin-left: -1px; } .btn-toolbar { margin-left: -5px; } .btn-toolbar .btn-group, .btn-toolbar .input-group { float: left; } .btn-toolbar > .btn, .btn-toolbar > .btn-group, .btn-toolbar > .input-group { margin-left: 5px; } .btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { border-radius: 0; } .btn-group > .btn:first-child { margin-left: 0; } .btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { border-top-right-radius: 0; border-bottom-right-radius: 0; } .btn-group > .btn:last-child:not(:first-child), .btn-group > 
.dropdown-toggle:not(:first-child) { border-top-left-radius: 0; border-bottom-left-radius: 0; } .btn-group > .btn-group { float: left; } .btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { border-radius: 0; } .btn-group > .btn-group:first-child > .btn:last-child, .btn-group > .btn-group:first-child > .dropdown-toggle { border-top-right-radius: 0; border-bottom-right-radius: 0; } .btn-group > .btn-group:last-child > .btn:first-child { border-top-left-radius: 0; border-bottom-left-radius: 0; } .btn-group .dropdown-toggle:active, .btn-group.open .dropdown-toggle { outline: 0; } .btn-group-xs > .btn { padding: 1px 5px; font-size: 12px; line-height: 1.5; border-radius: 3px; } .btn-group-sm > .btn { padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } .btn-group-lg > .btn { padding: 10px 16px; font-size: 18px; line-height: 1.33; border-radius: 6px; } .btn-group > .btn + .dropdown-toggle { padding-right: 8px; padding-left: 8px; } .btn-group > .btn-lg + .dropdown-toggle { padding-right: 12px; padding-left: 12px; } .btn-group.open .dropdown-toggle { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125); } .btn-group.open .dropdown-toggle.btn-link { -webkit-box-shadow: none; box-shadow: none; } .btn .caret { margin-left: 0; } .btn-lg .caret { border-width: 5px 5px 0; border-bottom-width: 0; } .dropup .btn-lg .caret { border-width: 0 5px 5px; } .btn-group-vertical > .btn, .btn-group-vertical > .btn-group, .btn-group-vertical > .btn-group > .btn { display: block; float: none; width: 100%; max-width: 100%; } .btn-group-vertical > .btn-group > .btn { float: none; } .btn-group-vertical > .btn + .btn, .btn-group-vertical > .btn + .btn-group, .btn-group-vertical > .btn-group + .btn, .btn-group-vertical > .btn-group + .btn-group { margin-top: -1px; margin-left: 0; } .btn-group-vertical > .btn:not(:first-child):not(:last-child) { border-radius: 0; } .btn-group-vertical > .btn:first-child:not(:last-child) { border-top-right-radius: 4px; border-bottom-right-radius: 0; border-bottom-left-radius: 0; } .btn-group-vertical > .btn:last-child:not(:first-child) { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-left-radius: 4px; } .btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn { border-radius: 0; } .btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child, .btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle { border-bottom-right-radius: 0; border-bottom-left-radius: 0; } .btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child { border-top-left-radius: 0; border-top-right-radius: 0; } .btn-group-justified { display: table; width: 100%; table-layout: fixed; border-collapse: separate; } .btn-group-justified > .btn, .btn-group-justified > .btn-group { display: table-cell; float: none; width: 1%; } .btn-group-justified > .btn-group .btn { width: 100%; } [data-toggle="buttons"] > .btn > input[type="radio"], [data-toggle="buttons"] > .btn > input[type="checkbox"] { display: none; } .input-group { position: relative; display: table; border-collapse: separate; } .input-group[class*="col-"] { float: none; padding-right: 0; padding-left: 0; } .input-group .form-control { float: left; width: 100%; margin-bottom: 0; } .input-group-lg > .form-control, .input-group-lg > .input-group-addon, .input-group-lg > .input-group-btn > .btn { height: 46px; padding: 10px 16px; font-size: 18px; line-height: 1.33; border-radius: 6px; 
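/* [editor's annotation, not part of upstream Bootstrap 3.1.0: inserted just
   before this rule's closing brace, which CSS permits.]
   Input groups fuse an addon or button with a text input: the .input-group
   wrapper is display: table, the addon and control render as table-cells,
   and the first/last-child radius rules below square off the inner corners.
   Usage sketch:
   <div class="input-group">
     <span class="input-group-addon">@</span>
     <input type="text" class="form-control" placeholder="Username">
   </div> */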
} select.input-group-lg > .form-control, select.input-group-lg > .input-group-addon, select.input-group-lg > .input-group-btn > .btn { height: 46px; line-height: 46px; } textarea.input-group-lg > .form-control, textarea.input-group-lg > .input-group-addon, textarea.input-group-lg > .input-group-btn > .btn, select[multiple].input-group-lg > .form-control, select[multiple].input-group-lg > .input-group-addon, select[multiple].input-group-lg > .input-group-btn > .btn { height: auto; } .input-group-sm > .form-control, .input-group-sm > .input-group-addon, .input-group-sm > .input-group-btn > .btn { height: 30px; padding: 5px 10px; font-size: 12px; line-height: 1.5; border-radius: 3px; } select.input-group-sm > .form-control, select.input-group-sm > .input-group-addon, select.input-group-sm > .input-group-btn > .btn { height: 30px; line-height: 30px; } textarea.input-group-sm > .form-control, textarea.input-group-sm > .input-group-addon, textarea.input-group-sm > .input-group-btn > .btn, select[multiple].input-group-sm > .form-control, select[multiple].input-group-sm > .input-group-addon, select[multiple].input-group-sm > .input-group-btn > .btn { height: auto; } .input-group-addon, .input-group-btn, .input-group .form-control { display: table-cell; } .input-group-addon:not(:first-child):not(:last-child), .input-group-btn:not(:first-child):not(:last-child), .input-group .form-control:not(:first-child):not(:last-child) { border-radius: 0; } .input-group-addon, .input-group-btn { width: 1%; white-space: nowrap; vertical-align: middle; } .input-group-addon { padding: 6px 12px; font-size: 14px; font-weight: normal; line-height: 1; color: #555; text-align: center; background-color: #eee; border: 1px solid #ccc; border-radius: 4px; } .input-group-addon.input-sm { padding: 5px 10px; font-size: 12px; border-radius: 3px; } .input-group-addon.input-lg { padding: 10px 16px; font-size: 18px; border-radius: 6px; } .input-group-addon input[type="radio"], .input-group-addon input[type="checkbox"] { margin-top: 0; } .input-group .form-control:first-child, .input-group-addon:first-child, .input-group-btn:first-child > .btn, .input-group-btn:first-child > .btn-group > .btn, .input-group-btn:first-child > .dropdown-toggle, .input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle), .input-group-btn:last-child > .btn-group:not(:last-child) > .btn { border-top-right-radius: 0; border-bottom-right-radius: 0; } .input-group-addon:first-child { border-right: 0; } .input-group .form-control:last-child, .input-group-addon:last-child, .input-group-btn:last-child > .btn, .input-group-btn:last-child > .btn-group > .btn, .input-group-btn:last-child > .dropdown-toggle, .input-group-btn:first-child > .btn:not(:first-child), .input-group-btn:first-child > .btn-group:not(:first-child) > .btn { border-top-left-radius: 0; border-bottom-left-radius: 0; } .input-group-addon:last-child { border-left: 0; } .input-group-btn { position: relative; font-size: 0; white-space: nowrap; } .input-group-btn > .btn { position: relative; } .input-group-btn > .btn + .btn { margin-left: -1px; } .input-group-btn > .btn:hover, .input-group-btn > .btn:focus, .input-group-btn > .btn:active { z-index: 2; } .input-group-btn:first-child > .btn, .input-group-btn:first-child > .btn-group { margin-right: -1px; } .input-group-btn:last-child > .btn, .input-group-btn:last-child > .btn-group { margin-left: -1px; } .nav { padding-left: 0; margin-bottom: 0; list-style: none; } .nav > li { position: relative; display: block; } .nav > li > a { 
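/* [editor's annotation, not part of upstream Bootstrap 3.1.0: this rule's
   declarations continue after the note.]
   Base .nav strips the list's bullets and padding, makes items block-level,
   and gives each link 10px 15px padding; .nav-tabs and .nav-pills restyle
   the same markup. Usage sketch:
   <ul class="nav nav-tabs"><li class="active"><a href="#">Tab</a></li></ul> */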
position: relative; display: block; padding: 10px 15px; } .nav > li > a:hover, .nav > li > a:focus { text-decoration: none; background-color: #eee; } .nav > li.disabled > a { color: #999; } .nav > li.disabled > a:hover, .nav > li.disabled > a:focus { color: #999; text-decoration: none; cursor: not-allowed; background-color: transparent; } .nav .open > a, .nav .open > a:hover, .nav .open > a:focus { background-color: #eee; border-color: #428bca; } .nav .nav-divider { height: 1px; margin: 9px 0; overflow: hidden; background-color: #e5e5e5; } .nav > li > a > img { max-width: none; } .nav-tabs { border-bottom: 1px solid #ddd; } .nav-tabs > li { float: left; margin-bottom: -1px; } .nav-tabs > li > a { margin-right: 2px; line-height: 1.428571429; border: 1px solid transparent; border-radius: 4px 4px 0 0; } .nav-tabs > li > a:hover { border-color: #eee #eee #ddd; } .nav-tabs > li.active > a, .nav-tabs > li.active > a:hover, .nav-tabs > li.active > a:focus { color: #555; cursor: default; background-color: #fff; border: 1px solid #ddd; border-bottom-color: transparent; } .nav-tabs.nav-justified { width: 100%; border-bottom: 0; } .nav-tabs.nav-justified > li { float: none; } .nav-tabs.nav-justified > li > a { margin-bottom: 5px; text-align: center; } .nav-tabs.nav-justified > .dropdown .dropdown-menu { top: auto; left: auto; } @media (min-width: 768px) { .nav-tabs.nav-justified > li { display: table-cell; width: 1%; } .nav-tabs.nav-justified > li > a { margin-bottom: 0; } } .nav-tabs.nav-justified > li > a { margin-right: 0; border-radius: 4px; } .nav-tabs.nav-justified > .active > a, .nav-tabs.nav-justified > .active > a:hover, .nav-tabs.nav-justified > .active > a:focus { border: 1px solid #ddd; } @media (min-width: 768px) { .nav-tabs.nav-justified > li > a { border-bottom: 1px solid #ddd; border-radius: 4px 4px 0 0; } .nav-tabs.nav-justified > .active > a, .nav-tabs.nav-justified > .active > a:hover, .nav-tabs.nav-justified > .active > a:focus { border-bottom-color: #fff; } } .nav-pills > li { float: left; } .nav-pills > li > a { border-radius: 4px; } .nav-pills > li + li { margin-left: 2px; } .nav-pills > li.active > a, .nav-pills > li.active > a:hover, .nav-pills > li.active > a:focus { color: #fff; background-color: #428bca; } .nav-stacked > li { float: none; } .nav-stacked > li + li { margin-top: 2px; margin-left: 0; } .nav-justified { width: 100%; } .nav-justified > li { float: none; } .nav-justified > li > a { margin-bottom: 5px; text-align: center; } .nav-justified > .dropdown .dropdown-menu { top: auto; left: auto; } @media (min-width: 768px) { .nav-justified > li { display: table-cell; width: 1%; } .nav-justified > li > a { margin-bottom: 0; } } .nav-tabs-justified { border-bottom: 0; } .nav-tabs-justified > li > a { margin-right: 0; border-radius: 4px; } .nav-tabs-justified > .active > a, .nav-tabs-justified > .active > a:hover, .nav-tabs-justified > .active > a:focus { border: 1px solid #ddd; } @media (min-width: 768px) { .nav-tabs-justified > li > a { border-bottom: 1px solid #ddd; border-radius: 4px 4px 0 0; } .nav-tabs-justified > .active > a, .nav-tabs-justified > .active > a:hover, .nav-tabs-justified > .active > a:focus { border-bottom-color: #fff; } } .tab-content > .tab-pane { display: none; } .tab-content > .active { display: block; } .nav-tabs .dropdown-menu { margin-top: -1px; border-top-left-radius: 0; border-top-right-radius: 0; } .navbar { position: relative; min-height: 50px; margin-bottom: 20px; border: 1px solid transparent; } @media (min-width: 768px) { .navbar { 
border-radius: 4px; } } @media (min-width: 768px) { .navbar-header { float: left; } } .navbar-collapse { max-height: 340px; padding-right: 15px; padding-left: 15px; overflow-x: visible; -webkit-overflow-scrolling: touch; border-top: 1px solid transparent; box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1); } .navbar-collapse.in { overflow-y: auto; } @media (min-width: 768px) { .navbar-collapse { width: auto; border-top: 0; box-shadow: none; } .navbar-collapse.collapse { display: block !important; height: auto !important; padding-bottom: 0; overflow: visible !important; } .navbar-collapse.in { overflow-y: visible; } .navbar-fixed-top .navbar-collapse, .navbar-static-top .navbar-collapse, .navbar-fixed-bottom .navbar-collapse { padding-right: 0; padding-left: 0; } } .container > .navbar-header, .container-fluid > .navbar-header, .container > .navbar-collapse, .container-fluid > .navbar-collapse { margin-right: -15px; margin-left: -15px; } @media (min-width: 768px) { .container > .navbar-header, .container-fluid > .navbar-header, .container > .navbar-collapse, .container-fluid > .navbar-collapse { margin-right: 0; margin-left: 0; } } .navbar-static-top { z-index: 1000; border-width: 0 0 1px; } @media (min-width: 768px) { .navbar-static-top { border-radius: 0; } } .navbar-fixed-top, .navbar-fixed-bottom { position: fixed; right: 0; left: 0; z-index: 1030; } @media (min-width: 768px) { .navbar-fixed-top, .navbar-fixed-bottom { border-radius: 0; } } .navbar-fixed-top { top: 0; border-width: 0 0 1px; } .navbar-fixed-bottom { bottom: 0; margin-bottom: 0; border-width: 1px 0 0; } .navbar-brand { float: left; height: 20px; padding: 15px 15px; font-size: 18px; line-height: 20px; } .navbar-brand:hover, .navbar-brand:focus { text-decoration: none; } @media (min-width: 768px) { .navbar > .container .navbar-brand, .navbar > .container-fluid .navbar-brand { margin-left: -15px; } } .navbar-toggle { position: relative; float: right; padding: 9px 10px; margin-top: 8px; margin-right: 15px; margin-bottom: 8px; background-color: transparent; background-image: none; border: 1px solid transparent; border-radius: 4px; } .navbar-toggle:focus { outline: none; } .navbar-toggle .icon-bar { display: block; width: 22px; height: 2px; border-radius: 1px; } .navbar-toggle .icon-bar + .icon-bar { margin-top: 4px; } @media (min-width: 768px) { .navbar-toggle { display: none; } } .navbar-nav { margin: 7.5px -15px; } .navbar-nav > li > a { padding-top: 10px; padding-bottom: 10px; line-height: 20px; } @media (max-width: 767px) { .navbar-nav .open .dropdown-menu { position: static; float: none; width: auto; margin-top: 0; background-color: transparent; border: 0; box-shadow: none; } .navbar-nav .open .dropdown-menu > li > a, .navbar-nav .open .dropdown-menu .dropdown-header { padding: 5px 15px 5px 25px; } .navbar-nav .open .dropdown-menu > li > a { line-height: 20px; } .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-nav .open .dropdown-menu > li > a:focus { background-image: none; } } @media (min-width: 768px) { .navbar-nav { float: left; margin: 0; } .navbar-nav > li { float: left; } .navbar-nav > li > a { padding-top: 15px; padding-bottom: 15px; } .navbar-nav.navbar-right:last-child { margin-right: -15px; } } @media (min-width: 768px) { .navbar-left { float: left !important; } .navbar-right { float: right !important; } } .navbar-form { padding: 10px 15px; margin-top: 8px; margin-right: -15px; margin-bottom: 8px; margin-left: -15px; border-top: 1px solid transparent; border-bottom: 1px solid transparent; 
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1); } @media (min-width: 768px) { .navbar-form .form-group { display: inline-block; margin-bottom: 0; vertical-align: middle; } .navbar-form .form-control { display: inline-block; width: auto; vertical-align: middle; } .navbar-form .control-label { margin-bottom: 0; vertical-align: middle; } .navbar-form .radio, .navbar-form .checkbox { display: inline-block; padding-left: 0; margin-top: 0; margin-bottom: 0; vertical-align: middle; } .navbar-form .radio input[type="radio"], .navbar-form .checkbox input[type="checkbox"] { float: none; margin-left: 0; } .navbar-form .has-feedback .form-control-feedback { top: 0; } } @media (max-width: 767px) { .navbar-form .form-group { margin-bottom: 5px; } } @media (min-width: 768px) { .navbar-form { width: auto; padding-top: 0; padding-bottom: 0; margin-right: 0; margin-left: 0; border: 0; -webkit-box-shadow: none; box-shadow: none; } .navbar-form.navbar-right:last-child { margin-right: -15px; } } .navbar-nav > li > .dropdown-menu { margin-top: 0; border-top-left-radius: 0; border-top-right-radius: 0; } .navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { border-bottom-right-radius: 0; border-bottom-left-radius: 0; } .navbar-btn { margin-top: 8px; margin-bottom: 8px; } .navbar-btn.btn-sm { margin-top: 10px; margin-bottom: 10px; } .navbar-btn.btn-xs { margin-top: 14px; margin-bottom: 14px; } .navbar-text { margin-top: 15px; margin-bottom: 15px; } @media (min-width: 768px) { .navbar-text { float: left; margin-right: 15px; margin-left: 15px; } .navbar-text.navbar-right:last-child { margin-right: 0; } } .navbar-default { background-color: #f8f8f8; border-color: #e7e7e7; } .navbar-default .navbar-brand { color: #777; } .navbar-default .navbar-brand:hover, .navbar-default .navbar-brand:focus { color: #5e5e5e; background-color: transparent; } .navbar-default .navbar-text { color: #777; } .navbar-default .navbar-nav > li > a { color: #777; } .navbar-default .navbar-nav > li > a:hover, .navbar-default .navbar-nav > li > a:focus { color: #333; background-color: transparent; } .navbar-default .navbar-nav > .active > a, .navbar-default .navbar-nav > .active > a:hover, .navbar-default .navbar-nav > .active > a:focus { color: #555; background-color: #e7e7e7; } .navbar-default .navbar-nav > .disabled > a, .navbar-default .navbar-nav > .disabled > a:hover, .navbar-default .navbar-nav > .disabled > a:focus { color: #ccc; background-color: transparent; } .navbar-default .navbar-toggle { border-color: #ddd; } .navbar-default .navbar-toggle:hover, .navbar-default .navbar-toggle:focus { background-color: #ddd; } .navbar-default .navbar-toggle .icon-bar { background-color: #888; } .navbar-default .navbar-collapse, .navbar-default .navbar-form { border-color: #e7e7e7; } .navbar-default .navbar-nav > .open > a, .navbar-default .navbar-nav > .open > a:hover, .navbar-default .navbar-nav > .open > a:focus { color: #555; background-color: #e7e7e7; } @media (max-width: 767px) { .navbar-default .navbar-nav .open .dropdown-menu > li > a { color: #777; } .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { color: #333; background-color: transparent; } .navbar-default .navbar-nav .open .dropdown-menu > .active > a, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > 
.active > a:focus { color: #555; background-color: #e7e7e7; } .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { color: #ccc; background-color: transparent; } } .navbar-default .navbar-link { color: #777; } .navbar-default .navbar-link:hover { color: #333; } .navbar-inverse { background-color: #222; border-color: #080808; } .navbar-inverse .navbar-brand { color: #999; } .navbar-inverse .navbar-brand:hover, .navbar-inverse .navbar-brand:focus { color: #fff; background-color: transparent; } .navbar-inverse .navbar-text { color: #999; } .navbar-inverse .navbar-nav > li > a { color: #999; } .navbar-inverse .navbar-nav > li > a:hover, .navbar-inverse .navbar-nav > li > a:focus { color: #fff; background-color: transparent; } .navbar-inverse .navbar-nav > .active > a, .navbar-inverse .navbar-nav > .active > a:hover, .navbar-inverse .navbar-nav > .active > a:focus { color: #fff; background-color: #080808; } .navbar-inverse .navbar-nav > .disabled > a, .navbar-inverse .navbar-nav > .disabled > a:hover, .navbar-inverse .navbar-nav > .disabled > a:focus { color: #444; background-color: transparent; } .navbar-inverse .navbar-toggle { border-color: #333; } .navbar-inverse .navbar-toggle:hover, .navbar-inverse .navbar-toggle:focus { background-color: #333; } .navbar-inverse .navbar-toggle .icon-bar { background-color: #fff; } .navbar-inverse .navbar-collapse, .navbar-inverse .navbar-form { border-color: #101010; } .navbar-inverse .navbar-nav > .open > a, .navbar-inverse .navbar-nav > .open > a:hover, .navbar-inverse .navbar-nav > .open > a:focus { color: #fff; background-color: #080808; } @media (max-width: 767px) { .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { border-color: #080808; } .navbar-inverse .navbar-nav .open .dropdown-menu .divider { background-color: #080808; } .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { color: #999; } .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { color: #fff; background-color: transparent; } .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { color: #fff; background-color: #080808; } .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a, .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { color: #444; background-color: transparent; } } .navbar-inverse .navbar-link { color: #999; } .navbar-inverse .navbar-link:hover { color: #fff; } .breadcrumb { padding: 8px 15px; margin-bottom: 20px; list-style: none; background-color: #f5f5f5; border-radius: 4px; } .breadcrumb > li { display: inline-block; } .breadcrumb > li + li:before { padding: 0 5px; color: #ccc; content: "/\00a0"; } .breadcrumb > .active { color: #999; } .pagination { display: inline-block; padding-left: 0; margin: 20px 0; border-radius: 4px; } .pagination > li { display: inline; } .pagination > li > a, .pagination > li > span { position: relative; float: left; padding: 6px 12px; margin-left: -1px; line-height: 1.428571429; color: #428bca; text-decoration: none; background-color: #fff; border: 1px solid #ddd; } .pagination > li:first-child > a, .pagination > li:first-child > span { 
margin-left: 0; border-top-left-radius: 4px; border-bottom-left-radius: 4px; } .pagination > li:last-child > a, .pagination > li:last-child > span { border-top-right-radius: 4px; border-bottom-right-radius: 4px; } .pagination > li > a:hover, .pagination > li > span:hover, .pagination > li > a:focus, .pagination > li > span:focus { color: #2a6496; background-color: #eee; border-color: #ddd; } .pagination > .active > a, .pagination > .active > span, .pagination > .active > a:hover, .pagination > .active > span:hover, .pagination > .active > a:focus, .pagination > .active > span:focus { z-index: 2; color: #fff; cursor: default; background-color: #428bca; border-color: #428bca; } .pagination > .disabled > span, .pagination > .disabled > span:hover, .pagination > .disabled > span:focus, .pagination > .disabled > a, .pagination > .disabled > a:hover, .pagination > .disabled > a:focus { color: #999; cursor: not-allowed; background-color: #fff; border-color: #ddd; } .pagination-lg > li > a, .pagination-lg > li > span { padding: 10px 16px; font-size: 18px; } .pagination-lg > li:first-child > a, .pagination-lg > li:first-child > span { border-top-left-radius: 6px; border-bottom-left-radius: 6px; } .pagination-lg > li:last-child > a, .pagination-lg > li:last-child > span { border-top-right-radius: 6px; border-bottom-right-radius: 6px; } .pagination-sm > li > a, .pagination-sm > li > span { padding: 5px 10px; font-size: 12px; } .pagination-sm > li:first-child > a, .pagination-sm > li:first-child > span { border-top-left-radius: 3px; border-bottom-left-radius: 3px; } .pagination-sm > li:last-child > a, .pagination-sm > li:last-child > span { border-top-right-radius: 3px; border-bottom-right-radius: 3px; } .pager { padding-left: 0; margin: 20px 0; text-align: center; list-style: none; } .pager li { display: inline; } .pager li > a, .pager li > span { display: inline-block; padding: 5px 14px; background-color: #fff; border: 1px solid #ddd; border-radius: 15px; } .pager li > a:hover, .pager li > a:focus { text-decoration: none; background-color: #eee; } .pager .next > a, .pager .next > span { float: right; } .pager .previous > a, .pager .previous > span { float: left; } .pager .disabled > a, .pager .disabled > a:hover, .pager .disabled > a:focus, .pager .disabled > span { color: #999; cursor: not-allowed; background-color: #fff; } .label { display: inline; padding: .2em .6em .3em; font-size: 75%; font-weight: bold; line-height: 1; color: #fff; text-align: center; white-space: nowrap; vertical-align: baseline; border-radius: .25em; } .label[href]:hover, .label[href]:focus { color: #fff; text-decoration: none; cursor: pointer; } .label:empty { display: none; } .btn .label { position: relative; top: -1px; } .label-default { background-color: #999; } .label-default[href]:hover, .label-default[href]:focus { background-color: #808080; } .label-primary { background-color: #428bca; } .label-primary[href]:hover, .label-primary[href]:focus { background-color: #3071a9; } .label-success { background-color: #5cb85c; } .label-success[href]:hover, .label-success[href]:focus { background-color: #449d44; } .label-info { background-color: #5bc0de; } .label-info[href]:hover, .label-info[href]:focus { background-color: #31b0d5; } .label-warning { background-color: #f0ad4e; } .label-warning[href]:hover, .label-warning[href]:focus { background-color: #ec971f; } .label-danger { background-color: #d9534f; } .label-danger[href]:hover, .label-danger[href]:focus { background-color: #c9302c; } .badge { display: inline-block; 
min-width: 10px; padding: 3px 7px; font-size: 12px; font-weight: bold; line-height: 1; color: #fff; text-align: center; white-space: nowrap; vertical-align: baseline; background-color: #999; border-radius: 10px; } .badge:empty { display: none; } .btn .badge { position: relative; top: -1px; } .btn-xs .badge { top: 0; padding: 1px 5px; } a.badge:hover, a.badge:focus { color: #fff; text-decoration: none; cursor: pointer; } a.list-group-item.active > .badge, .nav-pills > .active > a > .badge { color: #428bca; background-color: #fff; } .nav-pills > li > a > .badge { margin-left: 3px; } .jumbotron { padding: 30px; margin-bottom: 30px; color: inherit; background-color: #eee; } .jumbotron h1, .jumbotron .h1 { color: inherit; } .jumbotron p { margin-bottom: 15px; font-size: 21px; font-weight: 200; } .container .jumbotron { border-radius: 6px; } .jumbotron .container { max-width: 100%; } @media screen and (min-width: 768px) { .jumbotron { padding-top: 48px; padding-bottom: 48px; } .container .jumbotron { padding-right: 60px; padding-left: 60px; } .jumbotron h1, .jumbotron .h1 { font-size: 63px; } } .thumbnail { display: block; padding: 4px; margin-bottom: 20px; line-height: 1.428571429; background-color: #fff; border: 1px solid #ddd; border-radius: 4px; -webkit-transition: all .2s ease-in-out; transition: all .2s ease-in-out; } .thumbnail > img, .thumbnail a > img { display: block; max-width: 100%; height: auto; margin-right: auto; margin-left: auto; } a.thumbnail:hover, a.thumbnail:focus, a.thumbnail.active { border-color: #428bca; } .thumbnail .caption { padding: 9px; color: #333; } .alert { padding: 15px; margin-bottom: 20px; border: 1px solid transparent; border-radius: 4px; } .alert h4 { margin-top: 0; color: inherit; } .alert .alert-link { font-weight: bold; } .alert > p, .alert > ul { margin-bottom: 0; } .alert > p + p { margin-top: 5px; } .alert-dismissable { padding-right: 35px; } .alert-dismissable .close { position: relative; top: -2px; right: -21px; color: inherit; } .alert-success { color: #3c763d; background-color: #dff0d8; border-color: #d6e9c6; } .alert-success hr { border-top-color: #c9e2b3; } .alert-success .alert-link { color: #2b542c; } .alert-info { color: #31708f; background-color: #edf6fa; border-color: #bce8f1; } .alert-info hr { border-top-color: #a6e1ec; } .alert-info .alert-link { color: #245269; font-weight: bold; } .alert-info a { font-weight: bold; } .alert-warning { color: #8a6d3b; background-color: #fcf8e3; border-color: #faebcc; } .alert-warning hr { border-top-color: #f7e1b5; } .alert-warning .alert-link { color: #66512c; } .alert-danger { color: #a94442; background-color: #f2dede; border-color: #ebccd1; } .alert-danger hr { border-top-color: #e4b9c0; } .alert-danger .alert-link { color: #843534; } @-webkit-keyframes progress-bar-stripes { from { background-position: 40px 0; } to { background-position: 0 0; } } @keyframes progress-bar-stripes { from { background-position: 40px 0; } to { background-position: 0 0; } } .progress { height: 20px; margin-bottom: 20px; overflow: hidden; background-color: #f5f5f5; border-radius: 4px; -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1); } .progress-bar { float: left; width: 0; height: 100%; font-size: 12px; line-height: 20px; color: #fff; text-align: center; background-color: #428bca; -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15); -webkit-transition: width .6s ease; transition: width .6s ease; } .progress-striped 
.progress-bar { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-size: 40px 40px; } .progress.active .progress-bar { -webkit-animation: progress-bar-stripes 2s linear infinite; animation: progress-bar-stripes 2s linear infinite; } .progress-bar-success { background-color: #5cb85c; } .progress-striped .progress-bar-success { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .progress-bar-info { background-color: #5bc0de; } .progress-striped .progress-bar-info { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .progress-bar-warning { background-color: #f0ad4e; } .progress-striped .progress-bar-warning { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .progress-bar-danger { background-color: #d9534f; } .progress-striped .progress-bar-danger { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent); } .media, .media-body { overflow: hidden; zoom: 1; } .media, .media .media { margin-top: 15px; } .media:first-child { margin-top: 0; } .media-object { display: block; } .media-heading { margin: 0 0 5px; } .media > .pull-left { margin-right: 10px; } .media > .pull-right { margin-left: 10px; } .media-list { padding-left: 0; list-style: none; } .list-group { padding-left: 0; margin-bottom: 20px; } .list-group-item { position: relative; display: block; padding: 10px 15px; margin-bottom: -1px; background-color: #fff; border: 1px solid #ddd; } .list-group-item:first-child { border-top-left-radius: 4px; border-top-right-radius: 4px; } .list-group-item:last-child { margin-bottom: 0; border-bottom-right-radius: 4px; border-bottom-left-radius: 4px; } .list-group-item > .badge { float: right; } .list-group-item > .badge + .badge { margin-right: 5px; } a.list-group-item { color: #555; } a.list-group-item .list-group-item-heading { color: #333; } a.list-group-item:hover, 
a.list-group-item:focus { text-decoration: none; background-color: #f5f5f5; } a.list-group-item.active, a.list-group-item.active:hover, a.list-group-item.active:focus { z-index: 2; color: #fff; background-color: #428bca; border-color: #428bca; } a.list-group-item.active .list-group-item-heading, a.list-group-item.active:hover .list-group-item-heading, a.list-group-item.active:focus .list-group-item-heading { color: inherit; } a.list-group-item.active .list-group-item-text, a.list-group-item.active:hover .list-group-item-text, a.list-group-item.active:focus .list-group-item-text { color: #e1edf7; } .list-group-item-success { color: #3c763d; background-color: #dff0d8; } a.list-group-item-success { color: #3c763d; } a.list-group-item-success .list-group-item-heading { color: inherit; } a.list-group-item-success:hover, a.list-group-item-success:focus { color: #3c763d; background-color: #d0e9c6; } a.list-group-item-success.active, a.list-group-item-success.active:hover, a.list-group-item-success.active:focus { color: #fff; background-color: #3c763d; border-color: #3c763d; } .list-group-item-info { color: #31708f; background-color: #d9edf7; } a.list-group-item-info { color: #31708f; } a.list-group-item-info .list-group-item-heading { color: inherit; } a.list-group-item-info:hover, a.list-group-item-info:focus { color: #31708f; background-color: #c4e3f3; } a.list-group-item-info.active, a.list-group-item-info.active:hover, a.list-group-item-info.active:focus { color: #fff; background-color: #31708f; border-color: #31708f; } .list-group-item-warning { color: #8a6d3b; background-color: #fcf8e3; } a.list-group-item-warning { color: #8a6d3b; } a.list-group-item-warning .list-group-item-heading { color: inherit; } a.list-group-item-warning:hover, a.list-group-item-warning:focus { color: #8a6d3b; background-color: #faf2cc; } a.list-group-item-warning.active, a.list-group-item-warning.active:hover, a.list-group-item-warning.active:focus { color: #fff; background-color: #8a6d3b; border-color: #8a6d3b; } .list-group-item-danger { color: #a94442; background-color: #f2dede; } a.list-group-item-danger { color: #a94442; } a.list-group-item-danger .list-group-item-heading { color: inherit; } a.list-group-item-danger:hover, a.list-group-item-danger:focus { color: #a94442; background-color: #ebcccc; } a.list-group-item-danger.active, a.list-group-item-danger.active:hover, a.list-group-item-danger.active:focus { color: #fff; background-color: #a94442; border-color: #a94442; } .list-group-item-heading { margin-top: 0; margin-bottom: 5px; } .list-group-item-text { margin-bottom: 0; line-height: 1.3; } .panel { margin-bottom: 20px; background-color: #fff; border: 1px solid transparent; border-radius: 4px; -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05); box-shadow: 0 1px 1px rgba(0, 0, 0, .05); } .panel-body { padding: 15px; } .panel > .list-group { margin-bottom: 0; } .panel > .list-group .list-group-item { border-width: 1px 0; border-radius: 0; } .panel > .list-group .list-group-item:first-child { border-top: 0; } .panel > .list-group .list-group-item:last-child { border-bottom: 0; } .panel > .list-group:first-child .list-group-item:first-child { border-top-left-radius: 3px; border-top-right-radius: 3px; } .panel > .list-group:last-child .list-group-item:last-child { border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; } .panel-heading + .list-group .list-group-item:first-child { border-top-width: 0; } .panel > .table, .panel > .table-responsive > .table { margin-bottom: 0; } .panel > 
.table:first-child > thead:first-child > tr:first-child td:first-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child, .panel > .table:first-child > tbody:first-child > tr:first-child td:first-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child, .panel > .table:first-child > thead:first-child > tr:first-child th:first-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child, .panel > .table:first-child > tbody:first-child > tr:first-child th:first-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child { border-top-left-radius: 3px; } .panel > .table:first-child > thead:first-child > tr:first-child td:last-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child, .panel > .table:first-child > tbody:first-child > tr:first-child td:last-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child, .panel > .table:first-child > thead:first-child > tr:first-child th:last-child, .panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child, .panel > .table:first-child > tbody:first-child > tr:first-child th:last-child, .panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child { border-top-right-radius: 3px; } .panel > .table:last-child > tbody:last-child > tr:last-child td:first-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child, .panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child, .panel > .table:last-child > tbody:last-child > tr:last-child th:first-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child, .panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child { border-bottom-left-radius: 3px; } .panel > .table:last-child > tbody:last-child > tr:last-child td:last-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child, .panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child, .panel > .table:last-child > tbody:last-child > tr:last-child th:last-child, .panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child, .panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child, .panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child { border-bottom-right-radius: 3px; } .panel > .panel-body + .table, .panel > .panel-body + .table-responsive { border-top: 1px solid #ddd; } .panel > .table > tbody:first-child > tr:first-child th, .panel > .table > tbody:first-child > tr:first-child td { border-top: 0; } .panel > .table-bordered, .panel > .table-responsive > .table-bordered { border: 0; } .panel > .table-bordered > thead > tr > th:first-child, .panel > 
.table-responsive > .table-bordered > thead > tr > th:first-child, .panel > .table-bordered > tbody > tr > th:first-child, .panel > .table-responsive > .table-bordered > tbody > tr > th:first-child, .panel > .table-bordered > tfoot > tr > th:first-child, .panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child, .panel > .table-bordered > thead > tr > td:first-child, .panel > .table-responsive > .table-bordered > thead > tr > td:first-child, .panel > .table-bordered > tbody > tr > td:first-child, .panel > .table-responsive > .table-bordered > tbody > tr > td:first-child, .panel > .table-bordered > tfoot > tr > td:first-child, .panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child { border-left: 0; } .panel > .table-bordered > thead > tr > th:last-child, .panel > .table-responsive > .table-bordered > thead > tr > th:last-child, .panel > .table-bordered > tbody > tr > th:last-child, .panel > .table-responsive > .table-bordered > tbody > tr > th:last-child, .panel > .table-bordered > tfoot > tr > th:last-child, .panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child, .panel > .table-bordered > thead > tr > td:last-child, .panel > .table-responsive > .table-bordered > thead > tr > td:last-child, .panel > .table-bordered > tbody > tr > td:last-child, .panel > .table-responsive > .table-bordered > tbody > tr > td:last-child, .panel > .table-bordered > tfoot > tr > td:last-child, .panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child { border-right: 0; } .panel > .table-bordered > thead > tr:first-child > th, .panel > .table-responsive > .table-bordered > thead > tr:first-child > th, .panel > .table-bordered > tbody > tr:first-child > th, .panel > .table-responsive > .table-bordered > tbody > tr:first-child > th, .panel > .table-bordered > tfoot > tr:first-child > th, .panel > .table-responsive > .table-bordered > tfoot > tr:first-child > th, .panel > .table-bordered > thead > tr:first-child > td, .panel > .table-responsive > .table-bordered > thead > tr:first-child > td, .panel > .table-bordered > tbody > tr:first-child > td, .panel > .table-responsive > .table-bordered > tbody > tr:first-child > td, .panel > .table-bordered > tfoot > tr:first-child > td, .panel > .table-responsive > .table-bordered > tfoot > tr:first-child > td { border-top: 0; } .panel > .table-bordered > thead > tr:last-child > th, .panel > .table-responsive > .table-bordered > thead > tr:last-child > th, .panel > .table-bordered > tbody > tr:last-child > th, .panel > .table-responsive > .table-bordered > tbody > tr:last-child > th, .panel > .table-bordered > tfoot > tr:last-child > th, .panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th, .panel > .table-bordered > thead > tr:last-child > td, .panel > .table-responsive > .table-bordered > thead > tr:last-child > td, .panel > .table-bordered > tbody > tr:last-child > td, .panel > .table-responsive > .table-bordered > tbody > tr:last-child > td, .panel > .table-bordered > tfoot > tr:last-child > td, .panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td { border-bottom: 0; } .panel > .table-responsive { margin-bottom: 0; border: 0; } .panel-heading { padding: 10px 15px; border-bottom: 1px solid transparent; border-top-left-radius: 3px; border-top-right-radius: 3px; } .panel-heading > .dropdown .dropdown-toggle { color: inherit; } .panel-title { margin-top: 0; margin-bottom: 0; font-size: 16px; color: inherit; } .panel-title > a { color: inherit; } .panel-footer { 
padding: 10px 15px; background-color: #f5f5f5; border-top: 1px solid #ddd; border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; } .panel-group { margin-bottom: 20px; } .panel-group .panel { margin-bottom: 0; overflow: hidden; border-radius: 4px; } .panel-group .panel + .panel { margin-top: 5px; } .panel-group .panel-heading { border-bottom: 0; } .panel-group .panel-heading + .panel-collapse .panel-body { border-top: 1px solid #ddd; } .panel-group .panel-footer { border-top: 0; } .panel-group .panel-footer + .panel-collapse .panel-body { border-bottom: 1px solid #ddd; } .panel-default { border-color: #ddd; } .panel-default > .panel-heading { color: #333; background-color: #f5f5f5; border-color: #ddd; } .panel-default > .panel-heading + .panel-collapse .panel-body { border-top-color: #ddd; } .panel-default > .panel-footer + .panel-collapse .panel-body { border-bottom-color: #ddd; } .panel-primary { border-color: #428bca; } .panel-primary > .panel-heading { color: #fff; background-color: #428bca; border-color: #428bca; } .panel-primary > .panel-heading + .panel-collapse .panel-body { border-top-color: #428bca; } .panel-primary > .panel-footer + .panel-collapse .panel-body { border-bottom-color: #428bca; } .panel-success { border-color: #d6e9c6; } .panel-success > .panel-heading { color: #3c763d; background-color: #dff0d8; border-color: #d6e9c6; } .panel-success > .panel-heading + .panel-collapse .panel-body { border-top-color: #d6e9c6; } .panel-success > .panel-footer + .panel-collapse .panel-body { border-bottom-color: #d6e9c6; } .panel-info { border-color: #bce8f1; } .panel-info > .panel-heading { color: #31708f; background-color: #d9edf7; border-color: #bce8f1; } .panel-info > .panel-heading + .panel-collapse .panel-body { border-top-color: #bce8f1; } .panel-info > .panel-footer + .panel-collapse .panel-body { border-bottom-color: #bce8f1; } .panel-warning { border-color: #faebcc; } .panel-warning > .panel-heading { color: #8a6d3b; background-color: #fcf8e3; border-color: #faebcc; } .panel-warning > .panel-heading + .panel-collapse .panel-body { border-top-color: #faebcc; } .panel-warning > .panel-footer + .panel-collapse .panel-body { border-bottom-color: #faebcc; } .panel-danger { border-color: #ebccd1; } .panel-danger > .panel-heading { color: #a94442; background-color: #f2dede; border-color: #ebccd1; } .panel-danger > .panel-heading + .panel-collapse .panel-body { border-top-color: #ebccd1; } .panel-danger > .panel-footer + .panel-collapse .panel-body { border-bottom-color: #ebccd1; } .well { min-height: 20px; padding: 19px; margin-bottom: 20px; background-color: #f5f5f5; border: 1px solid #e3e3e3; border-radius: 4px; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05); } .well blockquote { border-color: #ddd; border-color: rgba(0, 0, 0, .15); } .well-lg { padding: 24px; border-radius: 6px; } .well-sm { padding: 9px; border-radius: 3px; } .close { float: right; font-size: 21px; font-weight: bold; line-height: 1; color: #000; text-shadow: 0 1px 0 #fff; filter: alpha(opacity=20); opacity: .2; } .close:hover, .close:focus { color: #000; text-decoration: none; cursor: pointer; filter: alpha(opacity=50); opacity: .5; } button.close { -webkit-appearance: none; padding: 0; cursor: pointer; background: transparent; border: 0; } .modal-open { overflow: hidden; } .modal { position: fixed; top: 0; right: 0; bottom: 0; left: 0; z-index: 1050; display: none; overflow: auto; overflow-y: scroll; -webkit-overflow-scrolling: touch; outline: 
0; } .modal.fade .modal-dialog { -webkit-transition: -webkit-transform .3s ease-out; -moz-transition: -moz-transform .3s ease-out; -o-transition: -o-transform .3s ease-out; transition: transform .3s ease-out; -webkit-transform: translate(0, -25%); -ms-transform: translate(0, -25%); transform: translate(0, -25%); } .modal.in .modal-dialog { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); transform: translate(0, 0); } .modal-dialog { position: relative; width: auto; margin: 10px; } .modal-content { position: relative; background-color: #fff; background-clip: padding-box; border: 1px solid #999; border: 1px solid rgba(0, 0, 0, .2); border-radius: 6px; outline: none; -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5); box-shadow: 0 3px 9px rgba(0, 0, 0, .5); } .modal-backdrop { position: fixed; top: 0; right: 0; bottom: 0; left: 0; z-index: 1040; background-color: #000; } .modal-backdrop.fade { filter: alpha(opacity=0); opacity: 0; } .modal-backdrop.in { filter: alpha(opacity=50); opacity: .5; } .modal-header { min-height: 16.428571429px; padding: 15px; border-bottom: 1px solid #e5e5e5; } .modal-header .close { margin-top: -2px; } .modal-title { margin: 0; line-height: 1.428571429; } .modal-body { position: relative; padding: 20px; } .modal-footer { padding: 19px 20px 20px; margin-top: 15px; text-align: right; border-top: 1px solid #e5e5e5; } .modal-footer .btn + .btn { margin-bottom: 0; margin-left: 5px; } .modal-footer .btn-group .btn + .btn { margin-left: -1px; } .modal-footer .btn-block + .btn-block { margin-left: 0; } @media (min-width: 768px) { .modal-dialog { width: 600px; margin: 30px auto; } .modal-content { -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5); box-shadow: 0 5px 15px rgba(0, 0, 0, .5); } .modal-sm { width: 300px; } .modal-lg { width: 900px; } } .tooltip { position: absolute; z-index: 1030; display: block; font-size: 12px; line-height: 1.4; visibility: visible; filter: alpha(opacity=0); opacity: 0; } .tooltip.in { filter: alpha(opacity=90); opacity: .9; } .tooltip.top { padding: 5px 0; margin-top: -3px; } .tooltip.right { padding: 0 5px; margin-left: 3px; } .tooltip.bottom { padding: 5px 0; margin-top: 3px; } .tooltip.left { padding: 0 5px; margin-left: -3px; } .tooltip-inner { max-width: 200px; padding: 3px 8px; color: #fff; text-align: center; text-decoration: none; background-color: #000; border-radius: 4px; } .tooltip-arrow { position: absolute; width: 0; height: 0; border-color: transparent; border-style: solid; } .tooltip.top .tooltip-arrow { bottom: 0; left: 50%; margin-left: -5px; border-width: 5px 5px 0; border-top-color: #000; } .tooltip.top-left .tooltip-arrow { bottom: 0; left: 5px; border-width: 5px 5px 0; border-top-color: #000; } .tooltip.top-right .tooltip-arrow { right: 5px; bottom: 0; border-width: 5px 5px 0; border-top-color: #000; } .tooltip.right .tooltip-arrow { top: 50%; left: 0; margin-top: -5px; border-width: 5px 5px 5px 0; border-right-color: #000; } .tooltip.left .tooltip-arrow { top: 50%; right: 0; margin-top: -5px; border-width: 5px 0 5px 5px; border-left-color: #000; } .tooltip.bottom .tooltip-arrow { top: 0; left: 50%; margin-left: -5px; border-width: 0 5px 5px; border-bottom-color: #000; } .tooltip.bottom-left .tooltip-arrow { top: 0; left: 5px; border-width: 0 5px 5px; border-bottom-color: #000; } .tooltip.bottom-right .tooltip-arrow { top: 0; right: 5px; border-width: 0 5px 5px; border-bottom-color: #000; } .popover { position: absolute; top: 0; left: 0; z-index: 1010; display: none; max-width: 276px; padding: 1px; 
text-align: left; white-space: normal; background-color: #fff; background-clip: padding-box; border: 1px solid #ccc; border: 1px solid rgba(0, 0, 0, .2); border-radius: 6px; -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2); box-shadow: 0 5px 10px rgba(0, 0, 0, .2); } .popover.top { margin-top: -10px; } .popover.right { margin-left: 10px; } .popover.bottom { margin-top: 10px; } .popover.left { margin-left: -10px; } .popover-title { padding: 8px 14px; margin: 0; font-size: 14px; font-weight: normal; line-height: 18px; background-color: #f7f7f7; border-bottom: 1px solid #ebebeb; border-radius: 5px 5px 0 0; } .popover-content { padding: 9px 14px; } .popover .arrow, .popover .arrow:after { position: absolute; display: block; width: 0; height: 0; border-color: transparent; border-style: solid; } .popover .arrow { border-width: 11px; } .popover .arrow:after { content: ""; border-width: 10px; } .popover.top .arrow { bottom: -11px; left: 50%; margin-left: -11px; border-top-color: #999; border-top-color: rgba(0, 0, 0, .25); border-bottom-width: 0; } .popover.top .arrow:after { bottom: 1px; margin-left: -10px; content: " "; border-top-color: #fff; border-bottom-width: 0; } .popover.right .arrow { top: 50%; left: -11px; margin-top: -11px; border-right-color: #999; border-right-color: rgba(0, 0, 0, .25); border-left-width: 0; } .popover.right .arrow:after { bottom: -10px; left: 1px; content: " "; border-right-color: #fff; border-left-width: 0; } .popover.bottom .arrow { top: -11px; left: 50%; margin-left: -11px; border-top-width: 0; border-bottom-color: #999; border-bottom-color: rgba(0, 0, 0, .25); } .popover.bottom .arrow:after { top: 1px; margin-left: -10px; content: " "; border-top-width: 0; border-bottom-color: #fff; } .popover.left .arrow { top: 50%; right: -11px; margin-top: -11px; border-right-width: 0; border-left-color: #999; border-left-color: rgba(0, 0, 0, .25); } .popover.left .arrow:after { right: 1px; bottom: -10px; content: " "; border-right-width: 0; border-left-color: #fff; } .carousel { position: relative; } .carousel-inner { position: relative; width: 100%; overflow: hidden; } .carousel-inner > .item { position: relative; display: none; -webkit-transition: .6s ease-in-out left; transition: .6s ease-in-out left; } .carousel-inner > .item > img, .carousel-inner > .item > a > img { display: block; max-width: 100%; height: auto; line-height: 1; } .carousel-inner > .active, .carousel-inner > .next, .carousel-inner > .prev { display: block; } .carousel-inner > .active { left: 0; } .carousel-inner > .next, .carousel-inner > .prev { position: absolute; top: 0; width: 100%; } .carousel-inner > .next { left: 100%; } .carousel-inner > .prev { left: -100%; } .carousel-inner > .next.left, .carousel-inner > .prev.right { left: 0; } .carousel-inner > .active.left { left: -100%; } .carousel-inner > .active.right { left: 100%; } .carousel-control { position: absolute; top: 0; bottom: 0; left: 0; width: 15%; font-size: 20px; color: #fff; text-align: center; text-shadow: 0 1px 2px rgba(0, 0, 0, .6); filter: alpha(opacity=50); opacity: .5; } .carousel-control.left { background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .5) 0%), color-stop(rgba(0, 0, 0, .0001) 100%)); background-image: linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); background-repeat: repeat-x; } .carousel-control.right { right: 0; left: auto; background-image: 
-webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .0001) 0%), color-stop(rgba(0, 0, 0, .5) 100%)); background-image: linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%); filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); background-repeat: repeat-x; } .carousel-control:hover, .carousel-control:focus { color: #fff; text-decoration: none; filter: alpha(opacity=90); outline: none; opacity: .9; } .carousel-control .icon-prev, .carousel-control .icon-next, .carousel-control .glyphicon-chevron-left, .carousel-control .glyphicon-chevron-right { position: absolute; top: 50%; z-index: 5; display: inline-block; } .carousel-control .icon-prev, .carousel-control .glyphicon-chevron-left { left: 50%; } .carousel-control .icon-next, .carousel-control .glyphicon-chevron-right { right: 50%; } .carousel-control .icon-prev, .carousel-control .icon-next { width: 20px; height: 20px; margin-top: -10px; margin-left: -10px; font-family: serif; } .carousel-control .icon-prev:before { content: '\2039'; } .carousel-control .icon-next:before { content: '\203a'; } .carousel-indicators { position: absolute; bottom: 10px; left: 50%; z-index: 15; width: 60%; padding-left: 0; margin-left: -30%; text-align: center; list-style: none; } .carousel-indicators li { display: inline-block; width: 10px; height: 10px; margin: 1px; text-indent: -999px; cursor: pointer; background-color: #000 \9; background-color: rgba(0, 0, 0, 0); border: 1px solid #fff; border-radius: 10px; } .carousel-indicators .active { width: 12px; height: 12px; margin: 0; background-color: #fff; } .carousel-caption { position: absolute; right: 15%; bottom: 20px; left: 15%; z-index: 10; padding-top: 20px; padding-bottom: 20px; color: #fff; text-align: center; text-shadow: 0 1px 2px rgba(0, 0, 0, .6); } .carousel-caption .btn { text-shadow: none; } @media screen and (min-width: 768px) { .carousel-control .glyphicon-chevron-left, .carousel-control .glyphicon-chevron-right, .carousel-control .icon-prev, .carousel-control .icon-next { width: 30px; height: 30px; margin-top: -15px; margin-left: -15px; font-size: 30px; } .carousel-caption { right: 20%; left: 20%; padding-bottom: 30px; } .carousel-indicators { bottom: 20px; } } .clearfix:before, .clearfix:after, .container:before, .container:after, .container-fluid:before, .container-fluid:after, .row:before, .row:after, .form-horizontal .form-group:before, .form-horizontal .form-group:after, .btn-toolbar:before, .btn-toolbar:after, .btn-group-vertical > .btn-group:before, .btn-group-vertical > .btn-group:after, .nav:before, .nav:after, .navbar:before, .navbar:after, .navbar-header:before, .navbar-header:after, .navbar-collapse:before, .navbar-collapse:after, .pager:before, .pager:after, .panel-body:before, .panel-body:after, .modal-footer:before, .modal-footer:after { display: table; content: " "; } .clearfix:after, .container:after, .container-fluid:after, .row:after, .form-horizontal .form-group:after, .btn-toolbar:after, .btn-group-vertical > .btn-group:after, .nav:after, .navbar:after, .navbar-header:after, .navbar-collapse:after, .pager:after, .panel-body:after, .modal-footer:after { clear: both; } .center-block { display: block; margin-right: auto; margin-left: auto; } .pull-right { float: right !important; } .pull-left { float: left !important; } .hide { display: none !important; } .show { display: block !important; } .invisible { visibility: hidden; } .text-hide { font: 0/0 a; color: transparent; text-shadow:
none; background-color: transparent; border: 0; } .hidden { display: none !important; visibility: hidden !important; } .affix { position: fixed; } @-ms-viewport { width: device-width; } .visible-xs, tr.visible-xs, th.visible-xs, td.visible-xs { display: none !important; } @media (max-width: 767px) { .visible-xs { display: block !important; } table.visible-xs { display: table; } tr.visible-xs { display: table-row !important; } th.visible-xs, td.visible-xs { display: table-cell !important; } } .visible-sm, tr.visible-sm, th.visible-sm, td.visible-sm { display: none !important; } @media (min-width: 768px) and (max-width: 991px) { .visible-sm { display: block !important; } table.visible-sm { display: table; } tr.visible-sm { display: table-row !important; } th.visible-sm, td.visible-sm { display: table-cell !important; } } .visible-md, tr.visible-md, th.visible-md, td.visible-md { display: none !important; } @media (min-width: 992px) and (max-width: 1199px) { .visible-md { display: block !important; } table.visible-md { display: table; } tr.visible-md { display: table-row !important; } th.visible-md, td.visible-md { display: table-cell !important; } } .visible-lg, tr.visible-lg, th.visible-lg, td.visible-lg { display: none !important; } @media (min-width: 1200px) { .visible-lg { display: block !important; } table.visible-lg { display: table; } tr.visible-lg { display: table-row !important; } th.visible-lg, td.visible-lg { display: table-cell !important; } } @media (max-width: 767px) { .hidden-xs, tr.hidden-xs, th.hidden-xs, td.hidden-xs { display: none !important; } } @media (min-width: 768px) and (max-width: 991px) { .hidden-sm, tr.hidden-sm, th.hidden-sm, td.hidden-sm { display: none !important; } } @media (min-width: 992px) and (max-width: 1199px) { .hidden-md, tr.hidden-md, th.hidden-md, td.hidden-md { display: none !important; } } @media (min-width: 1200px) { .hidden-lg, tr.hidden-lg, th.hidden-lg, td.hidden-lg { display: none !important; } } .visible-print, tr.visible-print, th.visible-print, td.visible-print { display: none !important; } @media print { .visible-print { display: block !important; } table.visible-print { display: table; } tr.visible-print { display: table-row !important; } th.visible-print, td.visible-print { display: table-cell !important; } } @media print { .hidden-print, tr.hidden-print, th.hidden-print, td.hidden-print { display: none !important; } } /*# sourceMappingURL=bootstrap.css.map */
================================================
FILE: doc/css/button-override.css
================================================
/* Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0 */

.btn:hover,
.btn:focus,
.btn:active,
.btn.active,
.open .dropdown-toggle.btn {
  opacity: 0.4;
}
================================================
FILE: doc/css/carousel-override.css
================================================
/* Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0 */

.carousel-control {
  width: 5%;
}

.carousel-caption {
  position: static;
  background: rgba(0,0,0,0.6);
  color: white;
  padding-bottom: 35px;
  padding-left: 1em;
  padding-right: 1em;
  padding-top: 15px;
}

.carousel {
  overflow: hidden;
  border-radius: 5px;
  max-width: 900px;
  margin: 1em;
}

.carousel-indicators {
  bottom: 0px;
}
================================================
FILE: doc/css/code.css
================================================
/* Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0 */

table.code {
  font-family: Menlo,Monaco,Consolas,"Courier New",monospace;
  display: block;
  padding: 9.5px;
  margin: 0px 0px 10px;
  font-size: 13px;
  line-height: 1.42857;
  color: rgb(51, 51, 51);
  word-break: break-all;
  word-wrap: break-word;
  background-color: rgb(245, 245, 245);
  border: 1px solid rgb(204, 204, 204);
  border-radius: 4px 4px 4px 4px;
}

table.code tr td {
  white-space: pre;
}

table.code tr td:nth-child(2) {
  color: #d14;
  padding-left: .5em;
}

.userinput {
  color: #d14;
}

table.CodeRay {
  margin-left: 3em;
  width: calc(100% - 6em);
}

td.line-numbers {
  width: 2em;
}

.releasenotes h2 {
  margin-top: 1.5em;
  text-decoration: underline;
}
================================================
FILE: doc/css/font-awesome.css
================================================
/*! * Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) */ /* FONT PATH * -------------------------- */ @font-face { font-family: 'FontAwesome'; src: url('../fonts/fontawesome-webfont.eot?v=4.1.0'); src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg'); font-weight: normal; font-style: normal; } .fa { display: inline-block; font-family: 'FontAwesome', sans-serif; font-style: normal; font-weight: normal; line-height: 1; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } /* makes the font 33% larger relative to the icon container */ .fa-lg { font-size: 1.33333333em; line-height: 0.75em; vertical-align: -15%; } .fa-2x { font-size: 2em; } .fa-3x { font-size: 3em; } .fa-4x { font-size: 4em; } .fa-5x { font-size: 5em; } .fa-fw { width: 1.28571429em; text-align: center; } .fa-ul { padding-left: 0; margin-left: 2.14285714em; list-style-type: none; } .fa-ul > li { position: relative; } .fa-li { position: absolute; left: -2.14285714em; width: 2.14285714em; top: 0.14285714em; text-align: center; } .fa-li.fa-lg { left: -1.85714286em; } .fa-border { padding: .2em .25em .15em; border: solid 0.08em #eeeeee; border-radius: .1em; } .pull-right { float: right; } .pull-left { float: left; } .fa.pull-left { margin-right: .3em; } .fa.pull-right { margin-left: .3em; } .fa-spin { -webkit-animation: spin 2s infinite linear; -moz-animation: spin 2s infinite linear; -o-animation: spin 2s infinite linear; animation: spin 2s infinite linear; } @-moz-keyframes spin { 0% { -moz-transform: rotate(0deg); } 100% { -moz-transform: rotate(359deg); } } @-webkit-keyframes spin { 0% { -webkit-transform: rotate(0deg); } 100% { -webkit-transform: rotate(359deg); } } @-o-keyframes spin { 0% { -o-transform: rotate(0deg); } 100% { -o-transform: rotate(359deg); } } @keyframes spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 100% { -webkit-transform: rotate(359deg); transform: rotate(359deg); } } .fa-rotate-90 { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1); -webkit-transform: rotate(90deg); -moz-transform: rotate(90deg); -ms-transform: rotate(90deg); -o-transform: rotate(90deg); transform: rotate(90deg); } .fa-rotate-180 { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2); -webkit-transform: rotate(180deg); -moz-transform: rotate(180deg); -ms-transform: rotate(180deg); -o-transform: rotate(180deg); transform:
rotate(180deg); } .fa-rotate-270 { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3); -webkit-transform: rotate(270deg); -moz-transform: rotate(270deg); -ms-transform: rotate(270deg); -o-transform: rotate(270deg); transform: rotate(270deg); } .fa-flip-horizontal { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1); -webkit-transform: scale(-1, 1); -moz-transform: scale(-1, 1); -ms-transform: scale(-1, 1); -o-transform: scale(-1, 1); transform: scale(-1, 1); } .fa-flip-vertical { filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1); -webkit-transform: scale(1, -1); -moz-transform: scale(1, -1); -ms-transform: scale(1, -1); -o-transform: scale(1, -1); transform: scale(1, -1); } .fa-stack { position: relative; display: inline-block; width: 2em; height: 2em; line-height: 2em; vertical-align: middle; } .fa-stack-1x, .fa-stack-2x { position: absolute; left: 0; width: 100%; text-align: center; } .fa-stack-1x { line-height: inherit; } .fa-stack-2x { font-size: 2em; } .fa-inverse { color: #ffffff; } /* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen readers do not read off random characters that represent icons */ .fa-glass:before { content: "\f000"; } .fa-music:before { content: "\f001"; } .fa-search:before { content: "\f002"; } .fa-envelope-o:before { content: "\f003"; } .fa-heart:before { content: "\f004"; } .fa-star:before { content: "\f005"; } .fa-star-o:before { content: "\f006"; } .fa-user:before { content: "\f007"; } .fa-film:before { content: "\f008"; } .fa-th-large:before { content: "\f009"; } .fa-th:before { content: "\f00a"; } .fa-th-list:before { content: "\f00b"; } .fa-check:before { content: "\f00c"; } .fa-times:before { content: "\f00d"; } .fa-search-plus:before { content: "\f00e"; } .fa-search-minus:before { content: "\f010"; } .fa-power-off:before { content: "\f011"; } .fa-signal:before { content: "\f012"; } .fa-gear:before, .fa-cog:before { content: "\f013"; } .fa-trash-o:before { content: "\f014"; } .fa-home:before { content: "\f015"; } .fa-file-o:before { content: "\f016"; } .fa-clock-o:before { content: "\f017"; } .fa-road:before { content: "\f018"; } .fa-download:before { content: "\f019"; } .fa-arrow-circle-o-down:before { content: "\f01a"; } .fa-arrow-circle-o-up:before { content: "\f01b"; } .fa-inbox:before { content: "\f01c"; } .fa-play-circle-o:before { content: "\f01d"; } .fa-rotate-right:before, .fa-repeat:before { content: "\f01e"; } .fa-refresh:before { content: "\f021"; } .fa-list-alt:before { content: "\f022"; } .fa-lock:before { content: "\f023"; } .fa-flag:before { content: "\f024"; } .fa-headphones:before { content: "\f025"; } .fa-volume-off:before { content: "\f026"; } .fa-volume-down:before { content: "\f027"; } .fa-volume-up:before { content: "\f028"; } .fa-qrcode:before { content: "\f029"; } .fa-barcode:before { content: "\f02a"; } .fa-tag:before { content: "\f02b"; } .fa-tags:before { content: "\f02c"; } .fa-book:before { content: "\f02d"; } .fa-bookmark:before { content: "\f02e"; } .fa-print:before { content: "\f02f"; } .fa-camera:before { content: "\f030"; } .fa-font:before { content: "\f031"; } .fa-bold:before { content: "\f032"; } .fa-italic:before { content: "\f033"; } .fa-text-height:before { content: "\f034"; } .fa-text-width:before { content: "\f035"; } .fa-align-left:before { content: "\f036"; } .fa-align-center:before { content: "\f037"; } .fa-align-right:before { content: "\f038"; } .fa-align-justify:before { content: "\f039"; } .fa-list:before { content: "\f03a"; 
} .fa-dedent:before, .fa-outdent:before { content: "\f03b"; } .fa-indent:before { content: "\f03c"; } .fa-video-camera:before { content: "\f03d"; } .fa-photo:before, .fa-image:before, .fa-picture-o:before { content: "\f03e"; } .fa-pencil:before { content: "\f040"; } .fa-map-marker:before { content: "\f041"; } .fa-adjust:before { content: "\f042"; } .fa-tint:before { content: "\f043"; } .fa-edit:before, .fa-pencil-square-o:before { content: "\f044"; } .fa-share-square-o:before { content: "\f045"; } .fa-check-square-o:before { content: "\f046"; } .fa-arrows:before { content: "\f047"; } .fa-step-backward:before { content: "\f048"; } .fa-fast-backward:before { content: "\f049"; } .fa-backward:before { content: "\f04a"; } .fa-play:before { content: "\f04b"; } .fa-pause:before { content: "\f04c"; } .fa-stop:before { content: "\f04d"; } .fa-forward:before { content: "\f04e"; } .fa-fast-forward:before { content: "\f050"; } .fa-step-forward:before { content: "\f051"; } .fa-eject:before { content: "\f052"; } .fa-chevron-left:before { content: "\f053"; } .fa-chevron-right:before { content: "\f054"; } .fa-plus-circle:before { content: "\f055"; } .fa-minus-circle:before { content: "\f056"; } .fa-times-circle:before { content: "\f057"; } .fa-check-circle:before { content: "\f058"; } .fa-question-circle:before { content: "\f059"; } .fa-info-circle:before { content: "\f05a"; } .fa-crosshairs:before { content: "\f05b"; } .fa-times-circle-o:before { content: "\f05c"; } .fa-check-circle-o:before { content: "\f05d"; } .fa-ban:before { content: "\f05e"; } .fa-arrow-left:before { content: "\f060"; } .fa-arrow-right:before { content: "\f061"; } .fa-arrow-up:before { content: "\f062"; } .fa-arrow-down:before { content: "\f063"; } .fa-mail-forward:before, .fa-share:before { content: "\f064"; } .fa-expand:before { content: "\f065"; } .fa-compress:before { content: "\f066"; } .fa-plus:before { content: "\f067"; } .fa-minus:before { content: "\f068"; } .fa-asterisk:before { content: "\f069"; } .fa-exclamation-circle:before { content: "\f06a"; } .fa-gift:before { content: "\f06b"; } .fa-leaf:before { content: "\f06c"; } .fa-fire:before { content: "\f06d"; } .fa-eye:before { content: "\f06e"; } .fa-eye-slash:before { content: "\f070"; } .fa-warning:before, .fa-exclamation-triangle:before { content: "\f071"; } .fa-plane:before { content: "\f072"; } .fa-calendar:before { content: "\f073"; } .fa-random:before { content: "\f074"; } .fa-comment:before { content: "\f075"; } .fa-magnet:before { content: "\f076"; } .fa-chevron-up:before { content: "\f077"; } .fa-chevron-down:before { content: "\f078"; } .fa-retweet:before { content: "\f079"; } .fa-shopping-cart:before { content: "\f07a"; } .fa-folder:before { content: "\f07b"; } .fa-folder-open:before { content: "\f07c"; } .fa-arrows-v:before { content: "\f07d"; } .fa-arrows-h:before { content: "\f07e"; } .fa-bar-chart-o:before { content: "\f080"; } .fa-twitter-square:before { content: "\f081"; } .fa-facebook-square:before { content: "\f082"; } .fa-camera-retro:before { content: "\f083"; } .fa-key:before { content: "\f084"; } .fa-gears:before, .fa-cogs:before { content: "\f085"; } .fa-comments:before { content: "\f086"; } .fa-thumbs-o-up:before { content: "\f087"; } .fa-thumbs-o-down:before { content: "\f088"; } .fa-star-half:before { content: "\f089"; } .fa-heart-o:before { content: "\f08a"; } .fa-sign-out:before { content: "\f08b"; } .fa-linkedin-square:before { content: "\f08c"; } .fa-thumb-tack:before { content: "\f08d"; } .fa-external-link:before { content: "\f08e"; } 
.fa-sign-in:before { content: "\f090"; } .fa-trophy:before { content: "\f091"; } .fa-github-square:before { content: "\f092"; } .fa-upload:before { content: "\f093"; } .fa-lemon-o:before { content: "\f094"; } .fa-phone:before { content: "\f095"; } .fa-square-o:before { content: "\f096"; } .fa-bookmark-o:before { content: "\f097"; } .fa-phone-square:before { content: "\f098"; } .fa-twitter:before { content: "\f099"; } .fa-facebook:before { content: "\f09a"; } .fa-github:before { content: "\f09b"; } .fa-unlock:before { content: "\f09c"; } .fa-credit-card:before { content: "\f09d"; } .fa-rss:before { content: "\f09e"; } .fa-hdd-o:before { content: "\f0a0"; } .fa-bullhorn:before { content: "\f0a1"; } .fa-bell:before { content: "\f0f3"; } .fa-certificate:before { content: "\f0a3"; } .fa-hand-o-right:before { content: "\f0a4"; } .fa-hand-o-left:before { content: "\f0a5"; } .fa-hand-o-up:before { content: "\f0a6"; } .fa-hand-o-down:before { content: "\f0a7"; } .fa-arrow-circle-left:before { content: "\f0a8"; } .fa-arrow-circle-right:before { content: "\f0a9"; } .fa-arrow-circle-up:before { content: "\f0aa"; } .fa-arrow-circle-down:before { content: "\f0ab"; } .fa-globe:before { content: "\f0ac"; } .fa-wrench:before { content: "\f0ad"; } .fa-tasks:before { content: "\f0ae"; } .fa-filter:before { content: "\f0b0"; } .fa-briefcase:before { content: "\f0b1"; } .fa-arrows-alt:before { content: "\f0b2"; } .fa-group:before, .fa-users:before { content: "\f0c0"; } .fa-chain:before, .fa-link:before { content: "\f0c1"; } .fa-cloud:before { content: "\f0c2"; } .fa-flask:before { content: "\f0c3"; } .fa-cut:before, .fa-scissors:before { content: "\f0c4"; } .fa-copy:before, .fa-files-o:before { content: "\f0c5"; } .fa-paperclip:before { content: "\f0c6"; } .fa-save:before, .fa-floppy-o:before { content: "\f0c7"; } .fa-square:before { content: "\f0c8"; } .fa-navicon:before, .fa-reorder:before, .fa-bars:before { content: "\f0c9"; } .fa-list-ul:before { content: "\f0ca"; } .fa-list-ol:before { content: "\f0cb"; } .fa-strikethrough:before { content: "\f0cc"; } .fa-underline:before { content: "\f0cd"; } .fa-table:before { content: "\f0ce"; } .fa-magic:before { content: "\f0d0"; } .fa-truck:before { content: "\f0d1"; } .fa-pinterest:before { content: "\f0d2"; } .fa-pinterest-square:before { content: "\f0d3"; } .fa-google-plus-square:before { content: "\f0d4"; } .fa-google-plus:before { content: "\f0d5"; } .fa-money:before { content: "\f0d6"; } .fa-caret-down:before { content: "\f0d7"; } .fa-caret-up:before { content: "\f0d8"; } .fa-caret-left:before { content: "\f0d9"; } .fa-caret-right:before { content: "\f0da"; } .fa-columns:before { content: "\f0db"; } .fa-unsorted:before, .fa-sort:before { content: "\f0dc"; } .fa-sort-down:before, .fa-sort-desc:before { content: "\f0dd"; } .fa-sort-up:before, .fa-sort-asc:before { content: "\f0de"; } .fa-envelope:before { content: "\f0e0"; } .fa-linkedin:before { content: "\f0e1"; } .fa-rotate-left:before, .fa-undo:before { content: "\f0e2"; } .fa-legal:before, .fa-gavel:before { content: "\f0e3"; } .fa-dashboard:before, .fa-tachometer:before { content: "\f0e4"; } .fa-comment-o:before { content: "\f0e5"; } .fa-comments-o:before { content: "\f0e6"; } .fa-flash:before, .fa-bolt:before { content: "\f0e7"; } .fa-sitemap:before { content: "\f0e8"; } .fa-umbrella:before { content: "\f0e9"; } .fa-paste:before, .fa-clipboard:before { content: "\f0ea"; } .fa-lightbulb-o:before { content: "\f0eb"; } .fa-exchange:before { content: "\f0ec"; } .fa-cloud-download:before { content: "\f0ed"; } 
.fa-cloud-upload:before { content: "\f0ee"; } .fa-user-md:before { content: "\f0f0"; } .fa-stethoscope:before { content: "\f0f1"; } .fa-suitcase:before { content: "\f0f2"; } .fa-bell-o:before { content: "\f0a2"; } .fa-coffee:before { content: "\f0f4"; } .fa-cutlery:before { content: "\f0f5"; } .fa-file-text-o:before { content: "\f0f6"; } .fa-building-o:before { content: "\f0f7"; } .fa-hospital-o:before { content: "\f0f8"; } .fa-ambulance:before { content: "\f0f9"; } .fa-medkit:before { content: "\f0fa"; } .fa-fighter-jet:before { content: "\f0fb"; } .fa-beer:before { content: "\f0fc"; } .fa-h-square:before { content: "\f0fd"; } .fa-plus-square:before { content: "\f0fe"; } .fa-angle-double-left:before { content: "\f100"; } .fa-angle-double-right:before { content: "\f101"; } .fa-angle-double-up:before { content: "\f102"; } .fa-angle-double-down:before { content: "\f103"; } .fa-angle-left:before { content: "\f104"; } .fa-angle-right:before { content: "\f105"; } .fa-angle-up:before { content: "\f106"; } .fa-angle-down:before { content: "\f107"; } .fa-desktop:before { content: "\f108"; } .fa-laptop:before { content: "\f109"; } .fa-tablet:before { content: "\f10a"; } .fa-mobile-phone:before, .fa-mobile:before { content: "\f10b"; } .fa-circle-o:before { content: "\f10c"; } .fa-quote-left:before { content: "\f10d"; } .fa-quote-right:before { content: "\f10e"; } .fa-spinner:before { content: "\f110"; } .fa-circle:before { content: "\f111"; } .fa-mail-reply:before, .fa-reply:before { content: "\f112"; } .fa-github-alt:before { content: "\f113"; } .fa-folder-o:before { content: "\f114"; } .fa-folder-open-o:before { content: "\f115"; } .fa-smile-o:before { content: "\f118"; } .fa-frown-o:before { content: "\f119"; } .fa-meh-o:before { content: "\f11a"; } .fa-gamepad:before { content: "\f11b"; } .fa-keyboard-o:before { content: "\f11c"; } .fa-flag-o:before { content: "\f11d"; } .fa-flag-checkered:before { content: "\f11e"; } .fa-terminal:before { content: "\f120"; } .fa-code:before { content: "\f121"; } .fa-mail-reply-all:before, .fa-reply-all:before { content: "\f122"; } .fa-star-half-empty:before, .fa-star-half-full:before, .fa-star-half-o:before { content: "\f123"; } .fa-location-arrow:before { content: "\f124"; } .fa-crop:before { content: "\f125"; } .fa-code-fork:before { content: "\f126"; } .fa-unlink:before, .fa-chain-broken:before { content: "\f127"; } .fa-question:before { content: "\f128"; } .fa-info:before { content: "\f129"; } .fa-exclamation:before { content: "\f12a"; } .fa-superscript:before { content: "\f12b"; } .fa-subscript:before { content: "\f12c"; } .fa-eraser:before { content: "\f12d"; } .fa-puzzle-piece:before { content: "\f12e"; } .fa-microphone:before { content: "\f130"; } .fa-microphone-slash:before { content: "\f131"; } .fa-shield:before { content: "\f132"; } .fa-calendar-o:before { content: "\f133"; } .fa-fire-extinguisher:before { content: "\f134"; } .fa-rocket:before { content: "\f135"; } .fa-maxcdn:before { content: "\f136"; } .fa-chevron-circle-left:before { content: "\f137"; } .fa-chevron-circle-right:before { content: "\f138"; } .fa-chevron-circle-up:before { content: "\f139"; } .fa-chevron-circle-down:before { content: "\f13a"; } .fa-html5:before { content: "\f13b"; } .fa-css3:before { content: "\f13c"; } .fa-anchor:before { content: "\f13d"; } .fa-unlock-alt:before { content: "\f13e"; } .fa-bullseye:before { content: "\f140"; } .fa-ellipsis-h:before { content: "\f141"; } .fa-ellipsis-v:before { content: "\f142"; } .fa-rss-square:before { content: "\f143"; } 
.fa-play-circle:before { content: "\f144"; } .fa-ticket:before { content: "\f145"; } .fa-minus-square:before { content: "\f146"; } .fa-minus-square-o:before { content: "\f147"; } .fa-level-up:before { content: "\f148"; } .fa-level-down:before { content: "\f149"; } .fa-check-square:before { content: "\f14a"; } .fa-pencil-square:before { content: "\f14b"; } .fa-external-link-square:before { content: "\f14c"; } .fa-share-square:before { content: "\f14d"; } .fa-compass:before { content: "\f14e"; } .fa-toggle-down:before, .fa-caret-square-o-down:before { content: "\f150"; } .fa-toggle-up:before, .fa-caret-square-o-up:before { content: "\f151"; } .fa-toggle-right:before, .fa-caret-square-o-right:before { content: "\f152"; } .fa-euro:before, .fa-eur:before { content: "\f153"; } .fa-gbp:before { content: "\f154"; } .fa-dollar:before, .fa-usd:before { content: "\f155"; } .fa-rupee:before, .fa-inr:before { content: "\f156"; } .fa-cny:before, .fa-rmb:before, .fa-yen:before, .fa-jpy:before { content: "\f157"; } .fa-ruble:before, .fa-rouble:before, .fa-rub:before { content: "\f158"; } .fa-won:before, .fa-krw:before { content: "\f159"; } .fa-bitcoin:before, .fa-btc:before { content: "\f15a"; } .fa-file:before { content: "\f15b"; } .fa-file-text:before { content: "\f15c"; } .fa-sort-alpha-asc:before { content: "\f15d"; } .fa-sort-alpha-desc:before { content: "\f15e"; } .fa-sort-amount-asc:before { content: "\f160"; } .fa-sort-amount-desc:before { content: "\f161"; } .fa-sort-numeric-asc:before { content: "\f162"; } .fa-sort-numeric-desc:before { content: "\f163"; } .fa-thumbs-up:before { content: "\f164"; } .fa-thumbs-down:before { content: "\f165"; } .fa-youtube-square:before { content: "\f166"; } .fa-youtube:before { content: "\f167"; } .fa-xing:before { content: "\f168"; } .fa-xing-square:before { content: "\f169"; } .fa-youtube-play:before { content: "\f16a"; } .fa-dropbox:before { content: "\f16b"; } .fa-stack-overflow:before { content: "\f16c"; } .fa-instagram:before { content: "\f16d"; } .fa-flickr:before { content: "\f16e"; } .fa-adn:before { content: "\f170"; } .fa-bitbucket:before { content: "\f171"; } .fa-bitbucket-square:before { content: "\f172"; } .fa-tumblr:before { content: "\f173"; } .fa-tumblr-square:before { content: "\f174"; } .fa-long-arrow-down:before { content: "\f175"; } .fa-long-arrow-up:before { content: "\f176"; } .fa-long-arrow-left:before { content: "\f177"; } .fa-long-arrow-right:before { content: "\f178"; } .fa-apple:before { content: "\f179"; } .fa-windows:before { content: "\f17a"; } .fa-android:before { content: "\f17b"; } .fa-linux:before { content: "\f17c"; } .fa-dribbble:before { content: "\f17d"; } .fa-skype:before { content: "\f17e"; } .fa-foursquare:before { content: "\f180"; } .fa-trello:before { content: "\f181"; } .fa-female:before { content: "\f182"; } .fa-male:before { content: "\f183"; } .fa-gittip:before { content: "\f184"; } .fa-sun-o:before { content: "\f185"; } .fa-moon-o:before { content: "\f186"; } .fa-archive:before { content: "\f187"; } .fa-bug:before { content: "\f188"; } .fa-vk:before { content: "\f189"; } .fa-weibo:before { content: "\f18a"; } .fa-renren:before { content: "\f18b"; } .fa-pagelines:before { content: "\f18c"; } .fa-stack-exchange:before { content: "\f18d"; } .fa-arrow-circle-o-right:before { content: "\f18e"; } .fa-arrow-circle-o-left:before { content: "\f190"; } .fa-toggle-left:before, .fa-caret-square-o-left:before { content: "\f191"; } .fa-dot-circle-o:before { content: "\f192"; } .fa-wheelchair:before { content: "\f193"; } 
.fa-vimeo-square:before { content: "\f194"; } .fa-turkish-lira:before, .fa-try:before { content: "\f195"; } .fa-plus-square-o:before { content: "\f196"; } .fa-space-shuttle:before { content: "\f197"; } .fa-slack:before { content: "\f198"; } .fa-envelope-square:before { content: "\f199"; } .fa-wordpress:before { content: "\f19a"; } .fa-openid:before { content: "\f19b"; } .fa-institution:before, .fa-bank:before, .fa-university:before { content: "\f19c"; } .fa-mortar-board:before, .fa-graduation-cap:before { content: "\f19d"; } .fa-yahoo:before { content: "\f19e"; } .fa-google:before { content: "\f1a0"; } .fa-reddit:before { content: "\f1a1"; } .fa-reddit-square:before { content: "\f1a2"; } .fa-stumbleupon-circle:before { content: "\f1a3"; } .fa-stumbleupon:before { content: "\f1a4"; } .fa-delicious:before { content: "\f1a5"; } .fa-digg:before { content: "\f1a6"; } .fa-pied-piper-square:before, .fa-pied-piper:before { content: "\f1a7"; } .fa-pied-piper-alt:before { content: "\f1a8"; } .fa-drupal:before { content: "\f1a9"; } .fa-joomla:before { content: "\f1aa"; } .fa-language:before { content: "\f1ab"; } .fa-fax:before { content: "\f1ac"; } .fa-building:before { content: "\f1ad"; } .fa-child:before { content: "\f1ae"; } .fa-paw:before { content: "\f1b0"; } .fa-spoon:before { content: "\f1b1"; } .fa-cube:before { content: "\f1b2"; } .fa-cubes:before { content: "\f1b3"; } .fa-behance:before { content: "\f1b4"; } .fa-behance-square:before { content: "\f1b5"; } .fa-steam:before { content: "\f1b6"; } .fa-steam-square:before { content: "\f1b7"; } .fa-recycle:before { content: "\f1b8"; } .fa-automobile:before, .fa-car:before { content: "\f1b9"; } .fa-cab:before, .fa-taxi:before { content: "\f1ba"; } .fa-tree:before { content: "\f1bb"; } .fa-spotify:before { content: "\f1bc"; } .fa-deviantart:before { content: "\f1bd"; } .fa-soundcloud:before { content: "\f1be"; } .fa-database:before { content: "\f1c0"; } .fa-file-pdf-o:before { content: "\f1c1"; } .fa-file-word-o:before { content: "\f1c2"; } .fa-file-excel-o:before { content: "\f1c3"; } .fa-file-powerpoint-o:before { content: "\f1c4"; } .fa-file-photo-o:before, .fa-file-picture-o:before, .fa-file-image-o:before { content: "\f1c5"; } .fa-file-zip-o:before, .fa-file-archive-o:before { content: "\f1c6"; } .fa-file-sound-o:before, .fa-file-audio-o:before { content: "\f1c7"; } .fa-file-movie-o:before, .fa-file-video-o:before { content: "\f1c8"; } .fa-file-code-o:before { content: "\f1c9"; } .fa-vine:before { content: "\f1ca"; } .fa-codepen:before { content: "\f1cb"; } .fa-jsfiddle:before { content: "\f1cc"; } .fa-life-bouy:before, .fa-life-saver:before, .fa-support:before, .fa-life-ring:before { content: "\f1cd"; } .fa-circle-o-notch:before { content: "\f1ce"; } .fa-ra:before, .fa-rebel:before { content: "\f1d0"; } .fa-ge:before, .fa-empire:before { content: "\f1d1"; } .fa-git-square:before { content: "\f1d2"; } .fa-git:before { content: "\f1d3"; } .fa-hacker-news:before { content: "\f1d4"; } .fa-tencent-weibo:before { content: "\f1d5"; } .fa-qq:before { content: "\f1d6"; } .fa-wechat:before, .fa-weixin:before { content: "\f1d7"; } .fa-send:before, .fa-paper-plane:before { content: "\f1d8"; } .fa-send-o:before, .fa-paper-plane-o:before { content: "\f1d9"; } .fa-history:before { content: "\f1da"; } .fa-circle-thin:before { content: "\f1db"; } .fa-header:before { content: "\f1dc"; } .fa-paragraph:before { content: "\f1dd"; } .fa-sliders:before { content: "\f1de"; } .fa-share-alt:before { content: "\f1e0"; } .fa-share-alt-square:before { content: "\f1e1"; } 
.fa-bomb:before { content: "\f1e2"; }

================================================
FILE: doc/css/images.css
================================================
/* Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0 */

img.full-width { width: 100% }

img.screenshot { max-width: calc(100% - 2em); border: 3px; border-style: solid; margin-left: 2em; margin-bottom: 2em; }

img.side { float: left; width: 50%; }

================================================
FILE: doc/css/layout.css
================================================
/* Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0 */

html { height:100%; }

body { padding-top: 61px; height: 90%; /* If calc() is not supported */ height: calc(100% - 46px); /* Sets the body full height minus the padding for the menu bar */ }

@media (max-width: 1050px) { body { padding-top: 121px; } div.frontpagehero { margin-left: -20px; margin-right: -20px; padding-left: 20px; } }

.sidebar-nav { padding: 9px 0; }

.section-block { background: #eeeeee; padding: 1em; -webkit-border-radius: 12px; -moz-border-radius: 12px; border-radius: 12px; margin: 0 2em; }

.row-fluid :first-child .section-block { margin-left: 0; }

.row-fluid :last-child .section-block { margin-right: 0; }

.rarr { font-size: 1.5em; }

.darr { font-size: 4em; text-align: center; margin-bottom: 1em; }

:target { padding-top: 61px; margin-top: -61px; }

#annotate-notify { position: fixed; right: 40px; top: 3px; }

figure { margin-bottom: 20px; }

================================================
FILE: doc/css/nav-list.css
================================================
/* Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0 */

/* NAV LIST -------- */

.nav-list { padding-left: 15px; padding-right: 15px; margin-bottom: 0; }

.nav-list > li > a, .nav-list .nav-header { margin-left: -15px; margin-right: -15px; text-shadow: 0 1px 0 rgba(255,255,255,.5); }

.nav-list > li > a { padding: 3px 15px; }

.nav-list > .active > a, .nav-list > .active > a:hover, .nav-list > .active > a:focus { color: white; text-shadow: 0 -1px 0 rgba(0,0,0,.2); background-color: rgb(66, 139, 202); }

.spaced-out li { padding-bottom: 1em; }

.inside-list ul { list-style-position: inside; padding-left: 0; }

================================================
FILE: doc/development/CodingStandards.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Coding Standards

The rules are always up for debate. However, when debate is needed, it should happen outside the source tree. In other words, if the rules are wrong, first debate the rules at sprint retrospective, then fix the rules, then follow the new rules.

## Git commit messages

- Prefix the summary line with the issue number this addresses.
- Describe the delta between the old and new tree. If possible, describe the delta in **behavior** rather than the source code itself.
  - Good: “1234: Support use of spaces in filenames.”
  - Good: “1234: Fix crash when user_id is nil.”
  - Less good: “Add some controller methods.” (What do they do?)
  - Less good: “More progress on UI branch.” (What is different?)
  - Less good: “Incorporate Tom’s suggestions.” (Who cares whose suggestions — what changed?)

If further background or explanation is needed, separate it from the summary with a blank line.
- Example: “Users found it confusing that the boxes had different colors even though they represented the same kinds of things.”

**Every commit** (including merge commits) must have a DCO sign-off. See `CONTRIBUTING.md` for the full terms of what this means.

- Example: `Arvados-DCO-1.1-Signed-off-by: Alex Doe`

Full examples:

    commit 9c6540b9d42adc4a397a28be1ac23f357ba14ab5
    Author: Tom Clegg
    Date:   Mon Aug 7 09:58:04 2017 -0400

        12027: Recognize a new "node failed" error message.

        "srun: error: Cannot communicate with node 0. Aborting job."

        Arvados-DCO-1.1-Signed-off-by: Tom Clegg

    commit 0b4800608e6394d66deec9cecea610c5fbbd75ad
    Merge: 6f2ce94 3a356c4
    Author: Tom Clegg
    Date:   Thu Aug 17 13:16:36 2017 -0400

        Merge branch '12081-crunch-job-retry'

        refs #12080 refs #12081 refs #12108

        Arvados-DCO-1.1-Signed-off-by: Tom Clegg

## Source code formatting

These are general baseline rules, except when a language-specific guide specifies otherwise.

No TAB characters in source files [except Go](https://golang.org/cmd/gofmt/).

- For Emacs, add `(setq-default indent-tabs-mode nil)` to `~/.emacs`.
- For Vim, add `:set expandtab` to `~/.vimrc`.

Avoid long (>100 column) lines.

No whitespace at the end of lines unless technically required (like Markdown line breaks).

## What to include

No commented-out blocks of code that have been replaced or obsoleted.

- It is in the git history if we want it back.
- If its absence would confuse someone reading the new code (despite never having read the old code), explain its absence in an English comment. If the old code is really still needed to support the English explanation, then go ahead — now we know why it’s there.

No commented-out debug statements.

- If the debug statements are likely to be needed in the future, use a logging facility that can be enabled at run time: `logger.debug "foo"`

## Style mismatch

Adopt the indentation style of the surrounding lines or (when starting a new file) the nearest existing source code in this tree/language.

If you fix up existing indentation/formatting, do that in a separate commit.

- If you bundle formatting changes with functional changes, it makes the functional changes hard to find in the diff.

## Go

Follow gofmt, golint, etc.

Use `%w` when wrapping an error with fmt.Errorf(), so errors.As() can access the wrapped error.

```go
if err != nil {
	return fmt.Errorf("could not swap widgets: %w", err)
}
```

Use `(logrus.FieldLogger)WithError()` (instead of `Logf("blah: %s", err)`) when logging an error.

```go
if err != nil {
	logger.WithError(err).Warn("error swapping widgets")
}
```

## Ruby

Follow the community Ruby style guide.

## Python

### Python code

For code, follow [PEP 8](https://peps.python.org/pep-0008/).

When you add functions, methods, or attributes that SDK users should not use, their name should start with a leading underscore. This is a common convention to signal that an interface is not intended to be public. Anything named this way will be excluded from our SDK web documentation by default.

You’re encouraged to add type annotations to functions and methods. As of May 2024 these are purely for documentation: we are not type checking any of our Python. Note that your annotations must be understood by the oldest version of Python we currently support (3.10).

### Python docstrings

Public classes, methods, and functions should all have docstrings. The content of the docstring should follow [PEP 257](https://peps.python.org/pep-0257/).
Format docstrings with Markdown and follow these style rules:

* Document function argument lists after the high-level description, following this format for each argument:

      * name: type --- Description

  Use exactly three minus-hyphens to get an em dash in the web rendering. Provide a helpful type hint whenever practical. The type hint should be written in “modern” style with builtin subscripting and type union syntax, like `list[str | bytes]`. Use fully qualified names for custom types. This way pdoc hyperlinks them.

* When something is deprecated, write a `.. WARNING:: Deprecated` admonition immediately after the first line. Its text should explain that the thing is deprecated, and suggest what to use instead. For example:

      def add(a, b):
          """Add two things.

          .. WARNING:: Deprecated
             This function is deprecated. Use the `+` operator instead.

          …
          """

  You can similarly note private methods with `.. ATTENTION:: Internal`.

* Mark up all identifiers outside the type hint with backticks. When the identifier exists in the current module, use the short name. Otherwise, use the fully-qualified name. Our web documentation will automatically link these identifiers to their corresponding documentation.

* Mark up links using Markdown’s footnote style. For example:

      """Python docstring following [PEP 257][pep257].
      """

  This looks best in plaintext. A descriptive identifier is nice if you can keep it short, but if that’s challenging, plain ordinals are fine too.

* Mark up headers (e.g., in a module docstring) using underline style. For example:

      """Generic utility module

      Filesystem functions
      --------------------
      …

      Regular expressions
      -------------------
      …
      """

  This looks best in plaintext.

The goal of these style rules is to provide a readable, consistent appearance whether people read the documentation in plain text (e.g., using `pydoc`) or in their browser (as rendered by `pdoc`).

## JavaScript

We already have 4-space indents everywhere, so do that. Other than that, follow the [Airbnb JavaScript coding style guide](https://github.com/airbnb/javascript) unless otherwise stated.

## Workbench Design Guidelines

### Font Sizes

- Minimum 12pt (16px)
- Minimum 9pt (12px) for things like copyright notices and footers

Text should be able to be resized up to 200% without loss of content or functionality.

### Color

- Text and images of text must have a color contrast ratio of at least 4.5:1. You can use [this contrast tool](https://snook.ca/technical/colour_contrast/colour.html#fg=1F7EA1,bg=FFFFFF) to check.
- Non-text icons, controls, etc. must have a color contrast ratio of at least 3:1.
- Avoid hard-coding colors. Use theme colors. If a new color is needed, add it to the theme.
- Use defined grays when possible, using the RGB value and changing the alpha value to indicate different meanings (e.g. active icons at 87% opacity, inactive icons at 60%, disabled icons at 38%).
### Icons

#### General

- Interaction target size of at least 44 × 44 pixels
- Label should be on the right, icon on the left, for maximum readability
- Use minimum 3:1 color contrast (see Color above)
- Use appropriate, concise alt text for people using screen readers

#### Menu/Navigation

- No navigation should be supported only via breadcrumbs
- If fewer than 5 menu options, consider visible navigation options
- If more than 5 menu options, consider a combination navigation where some options are visible and some are hidden
- Use the following menus consistently:
  - Hamburger (three bars stacked vertically): indicates a navigation bar/menu that toggles between being collapsed behind the button or displayed on the screen; often used for global/site-wide/whole-application navigation
  - Döner (three bars that narrow vertically): indicates a group filtering menu
  - Bento (3×3 grid of squares): indicates a menu presenting a grid of options (not currently applicable to WB)
  - Kebab (three dots stacked vertically): indicates a smaller inline menu or an overflow/combination menu
  - Meatballs (three dots stacked horizontally): indicates a smaller inline menu; often used to indicate an action on a related item (i.e. the item next to the meatball); good for repeated use in tables or horizontal elements
- If the component is an accordion window, use a caret (‸)

Preferred Icon Repositories:

-
-
-

### Buttons

- Label the button with its action for usability/to reduce ambiguity (avoid generic button labels for actions)
- Buttons vs. links:
  - Buttons should cause a change in the current context
  - Links should navigate to different content or a new resource (e.g. a different page)
- If there is text on the button, the color contrast between button and text should be 4.5:1
- Button color and background color contrast should be 3:1

### Arvados Specific Components

Use chips for displaying tokenized values/arrays.

### Loading Indicators

#### Page Navigation

- Navigation between pages should be indicated using `progressIndicatorActions.START_WORKING` and `progressIndicatorActions.STOP_WORKING` to show the global top-of-page pulser
  - Only the initial load or refresh of the full page (e.g. triggered by the upper right refresh button) should use this indicator. Partial refreshes should use a more local indicator.
- Refreshes of only one section of a page should only show its own loading indicator in that section
- Full page refreshes where the location is unchanged should avoid using the initial full-page spinner in favor of the top-of-page spinner, with updated values substituting in the UI when loaded

#### User Actions

- Form submissions or user actions should be indicated both by `progressIndicatorActions.START_WORKING` and by enabling the spinner on the submit button of the form (if the action takes place through a form AND if the form stays open for the duration of the action in order to show errors). If the form closes immediately, then the page spinner is the only indicator.
- Toasts should not be used to notify the user of an in-progress action, only of completion or error

#### Lazy-loaded fields

- Fields that load or update (e.g. with extra info) after the main view should wait 3-5 seconds before showing a spinner/pulser icon while loading
- If the request for extra data fails, a placeholder icon should show with a hint (text or tooltip) indicating that the data failed to load.
- The delayed indicator should be implemented as a reusable component (TBD)
- Suggested loading indicator for inline fields: https://mhnpd.github.io/react-loader-spinner/docs/components/three-dots

### References

[WCAG 2.1](https://www.w3.org/WAI/WCAG21/Understanding/)

[Sarah’s talk for references](https://docs.google.com/presentation/d/1HNrhvK7zVZ7jgH3ELbX7KB97SdXCZXrvov_I4Oe1l2c/edit?usp=sharing)

================================================
FILE: doc/development/DevelopmentProcess.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Development Process

This document is intended for core engineers who work on the `main` branch of Arvados.

## Two Remotes

This document assumes you have two remotes, where `origin` refers to `git.arvados.org` and `github` refers to `github.com`:

```sh
$ git remote -v
github  git@github.com:arvados/arvados.git (fetch)
github  git@github.com:arvados/arvados.git (push)
origin  git@git.arvados.org:arvados.git (fetch)
origin  git@git.arvados.org:arvados.git (push)
```

## Fetch GitHub Pull Requests as Branches

GitHub tracks pull requests under `refs/pull/`. You can configure Git to map these to your local repository when you fetch from GitHub:

```sh
$ git config set --append remote.github.fetch '+refs/pull/*:refs/remotes/ghpr/*'
```

Now after you fetch, you can refer to pull request #123 as `ghpr/123/head`.

If you prefer to fetch pull requests individually, the command to do that is:

```sh
$ git fetch github "pull/PRNUM/head:BRANCHNAME"
```

## Review a Pull Request

Reviewing a pull request is about verifying that the branch follows all our [coding standards](CodingStandards.md). You should be able to verify that the ready-to-merge checklist is complete and accurate: the branch does what it says, tests pass, it follows our style, etc.

If you notice scale issues, bugs, missing documentation, etc., you can bring that up as part of the review and it should be addressed. However, the *point* of review is *not* to try to find problems. The *point* is to verify that the branch solves a problem and the code is maintainable.

## Merge a Pull Request

When a branch passes review, it should be merged to `main`. Core engineers can (and normally do) merge their own branches. Contributions from others need to be merged by a core engineer. Either way, the process is:

```sh
$ git switch main
$ git pull --ff-only
$ git merge --no-ff BRANCHREF
# Make sure the commit message includes an issue ref and your DCO signoff.
$ git push origin main
```
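For example, merging GitHub pull request #123 after a passing review could look like this (a sketch assuming the `ghpr` fetch mapping configured above; substitute the real PR number and issue ref):

```sh
$ git fetch github                  # update the ghpr/* refs
$ git switch main
$ git pull --ff-only
$ git merge --no-ff ghpr/123/head   # edit the message to add the issue ref and DCO sign-off
$ git push origin main
```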
================================================
FILE: doc/development/DistroVersions.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Distribution dependency versions

For runtime dependencies that we aim to get from the distribution, this page lists the versions included with each distribution we support. For RHEL releases, this table shows the *latest* version available from AppStreams for **both** the earliest point release we support and the latest point release at the time of the last update.

| Distribution              | Release Date  | PostgreSQL | Python  | Ruby  |
|---------------------------|---------------|------------|---------|-------|
| Ubuntu 22.04 “jammy”      | April 2022    | 14.17      | 3.10.12 | 3.0.2 |
| RHEL 8.8 with AppStreams  | May 2023      | 15.6       | 3.11.5  | 3.1.4 |
| RHEL 9.2 with AppStreams  | May 2023      | 15.6       | 3.11.5  | 3.1.4 |
| Debian 12 “bookworm”      | June 2023     | 15.13      | 3.11.2  | 3.1.2 |
| RHEL 8.9 with AppStreams  | November 2023 | 15.6       | 3.11.5  | 3.1.4 |
| Ubuntu 24.04 “noble”      | April 2024    | 16.2       | 3.12.3  | 3.2.3 |
| RHEL 9.5 with AppStreams  | November 2024 | 16.8       | 3.12.5  | 3.3.8 |
| RHEL 10.0 with AppStreams | May 2025      | 16.8       | 3.12.9  | 3.3.8 |
| Debian 13 “trixie”        | August 2025   | 17.5       | 3.13.3  | 3.3.8 |

## For Arvados 3.1.x

| Distribution             | Release Date | PostgreSQL | Python | Ruby  |
|--------------------------|--------------|------------|--------|-------|
| Ubuntu 20.04 “focal”     | April 2020   | 12.22      | 3.8.10 | 2.7.0 |
| RHEL 8.4 with AppStreams | June 2021    | 13.3       | 3.9.2  | 2.7.4 |
| Debian 11 “bullseye”     | August 2021  | 13.16      | 3.9.2  | 2.7.4 |

================================================
FILE: doc/development/Prerequisites.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Hacking prerequisites

This page describes how to install all the software necessary to develop Arvados and run tests.

## Host options

You must have a system running a supported distribution. That system can be installed directly on hardware, run on a cloud instance, or run in a virtual machine.

### Supported distributions

As of March 2026/Arvados 3.2, these instructions and the entire test suite are known to work on Debian 12 “bookworm” and Debian 13 “trixie.” You may try to run these instructions and tests on Ubuntu 22.04 “jammy”/24.04 “noble,” but they have not been tested and you may find some bugs throughout.

These instructions are not suitable for any Red Hat-based distribution. Our Ansible playbook will refuse to run on them.

### Base configuration

On your development system, you should have a user account with full permission to use sudo.

You can run the Ansible playbook to install your development system on a different system. To do this, you must have permission to SSH into your user account from the system running Ansible (the “control node”) to the development system you’re installing (the “target node”).

### Virtual machine requirements

If you run your development system in a virtual machine, it needs some permissions. Many environments will allow these operations by default, but they could be limited by your virtual machine setup.

- It must be able to create and manage FUSE mounts (`/dev/fuse`)
- It must be able to create and run Docker containers
- It must be able to create and run Singularity containers—this requires creating and managing block loopback devices (`/dev/block-loop`)
- It must have the `fs.inotify.max_user_watches` sysctl set to at least 524288. Our Ansible playbook will try to set this on the managed host, but if it is unable to do so, you may need to set it on the parent host instead.

## Install development environment with Ansible

### Clone Arvados source

You will need the Arvados source code to follow this process.

```sh
$ git clone https://github.com/arvados/arvados.git
```

If you want to switch to a specific branch or revision like `3.2-release`, do that here.
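For example, to work from the `3.2-release` branch instead of `main` (a sketch; any branch or tag name works the same way, and `git switch` creates a local tracking branch from `origin/3.2-release` if one does not exist yet):

```sh
$ cd arvados
$ git switch 3.2-release
```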
### Install Ansible

Install Ansible following the instructions in `arvados/tools/ansible/README.md`. This ensures you get the right versions of everything.

### Write an Arvados database configuration

Make a copy of the default test configuration:

```sh
$ cp arvados/tools/ansible/files/default-test-config.yml ~/zzzzz-config.yml
```

You can copy the file to a different location if you like. This page will use `~/zzzzz-config.yml` as the placeholder path throughout.

Edit this file with the database configuration you’d like to use. The cluster ID **must** be `zzzzz`. You can change the `user`, `password`, and `dbname` settings freely. Our Ansible playbook will configure PostgreSQL so your settings here work. The playbook will always install the `postgresql` server package. It will **not** change any PostgreSQL configuration except to add `pg_hba.conf` entries for this user. You should only change `host` and `port` if you need to use a PostgreSQL server that is already installed and running somewhere else.

### Write an Ansible inventory

An inventory file tells Ansible what host(s) to manage, how to connect to them, and what settings they use. Write an inventory file to `~/zzzzz-inventory.yml` like this:

```yaml
arvados_test_all:
  # This is the list of host(s) where we're installing the test environment.
  # This example installs on the same system running Ansible.
  # If you want to manage remote hosts, you can write your own host list:
  #
  hosts:
    localhost:
      ansible_connection: local
  vars:
    # The path to the Arvados cluster configuration you wrote in the previous section.
    arvados_config_file: "{{ lookup('env', 'HOME') }}/zzzzz-config.yml"
    # The primary user doing Arvados development and tests.
    # This user will be added to the `docker` group.
    # It defaults to the name of the user running `ansible-playbook`.
    # If you want to configure a different user, set that here:
    #arvados_dev_user: USERNAME
    # By default, the playbook installs old versions of Python and Ruby from source.
    # This helps you make sure you don't accidentally use too-new features during
    # development. If you're sure you don't need that—for example, you specifically
    # want to test a distribution's packaged version—set this flag:
    #arvados_dev_from_pkgs: true
```

### Run the playbook

The basic command to run the playbook is:

```sh
$ cd arvados/tools/ansible
$ ansible-playbook -K -i ~/zzzzz-inventory.yml install-dev-tools.yml
```

When you are prompted for the `BECOME password:`, enter the password for your user account on the development host that lets you run `sudo` commands.

`ansible-playbook` has many options to control how it runs that you can add if you like. Refer to [the `ansible-playbook` documentation](https://docs.ansible.com/ansible/latest/cli/ansible-playbook.html) for more information.
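If you want to preview what the playbook would change before letting it run for real, you can do a dry run (a sketch; `--check` and `--diff` are standard `ansible-playbook` flags, though tasks that depend on the results of earlier tasks may not fully support check mode):

```sh
$ cd arvados/tools/ansible
$ ansible-playbook -K -i ~/zzzzz-inventory.yml --check --diff install-dev-tools.yml
```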
## Run Arvados tests

After the playbook runs successfully, you should be able to run the Arvados tests from a source checkout on your development host. This document will walk you through setting up and running a single test suite to verify your setup. `cd` to your Arvados checkout and run:

```sh
$ mkdir -p ~/.cache/arvados-test
$ build/run-tests.sh --temp ~/.cache/arvados-test --interactive
```

This will install baseline prerequisites, then list commands and test targets, then prompt you with:

    What next? install deps

Accept that command. It will install the rest of the dependencies that are necessary for running a test cluster, then report:

    All test suites passed.

At this stage, this message simply means that the "install deps" command has succeeded.

Now we can run a test suite. The controller tests are a good first example, because they interact with a test cluster but not much else. At the `What next?` prompt, enter `test lib/controller`, and you'll see the test cluster start:

    What next? test lib/controller
    Starting API, controller, keepproxy, keep-web, ws, and nginx ssl proxy...

You'll see logs from individual services, then, hopefully, the controller tests starting and passing:

    ======= test lib/controller
    ok      git.arvados.org/arvados.git/lib/controller     64.679s coverage: 82.0% of statements
    ======= test lib/controller -- 68s
    Pass: lib/controller tests (68s)
    All test suites passed.

Refer to [Running tests](RunningTests.md) for details about running specific test suites, test selection, and other features.

## Troubleshooting

If the playbook succeeds but you can't get tests running, there might be a disconnect between your shell configuration and what the system expects. This section documents some places you can look.

### Dependencies in `$PATH`

The playbook will install symlinks for Go, Node, Python, Ruby, Singularity, and Yarn under `/usr/local/bin`. The actual tools are installed under `/opt`. When you run Arvados tests or other development tools, you must ensure `/usr/local/bin` appears in your `$PATH` before any directories with other versions, like `/usr/bin`.
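A quick way to confirm which copies win is to ask the shell (a sketch; `command -v` is standard POSIX shell, and the output shown is what you should hope to see once the playbook's symlinks are in place):

```sh
$ command -v go ruby
/usr/local/bin/go
/usr/local/bin/ruby
```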
### Arvados `$CONFIGSRC`

The playbook writes the Arvados test cluster's database configuration at `~/.config/arvados/config.yml`, and sets up a hook `/etc/profile.d/arvados-test.sh` to set your `CONFIGSRC` environment variable to that file's base directory. If most tests fail with a database connection error, check that this variable is set:

```sh
$ echo "${CONFIGSRC:-UNSET}"
/home/you/.config/arvados
```

If that reports `UNSET`, first check whether you're using a stale shell session started before the Ansible playbook run. You may need to log out of that session and start a new one. If that doesn't work, you may add a line to set `CONFIGSRC="$HOME/.config/arvados"` to your shell configuration, or set it manually when you run `run-tests.sh`:

```sh
$ CONFIGSRC="$HOME/.config/arvados" build/run-tests.sh ...
```

================================================
FILE: doc/development/RunningTests.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Running Tests

Arvados includes a script at `build/run-tests.sh` which tests (nearly) all of the components in the source tree. This is the script that [Arvados CI tests](https://ci.arvados.org) use, so running it locally is the most consistent entry point to all Arvados tests. This document assumes you have [installed a development environment](Prerequisites.md) following that guide.

## Running interactively

Most developers want to run tests with `--temp` and `--interactive`:

```sh
$ mkdir -p ~/.cache/arvados-test
$ build/run-tests.sh --temp ~/.cache/arvados-test --interactive
```

This will display help with a list of commands and test targets. When you run with a fresh temp directory, the tool will probably prompt you to `install deps`. You should do this to install dependencies to the temp directory.

### Dealing with state

Before you change `run-tests.sh` itself—including pulling changes from other developers—you should end any interactive test sessions.

If you make changes to a low-level library or SDK and want to see how it affects dependent tests, `install` your changed component, then `test` the dependents.

If you make changes to a cluster component and want to see how they affect tests, `reset` the test cluster, then `test` the components you're interested in.

If you want to clean your `--temp` directory—because you pulled a bad dependency or just want to recover some disk space—it is safe to end any interactive sessions, remove the directory, then `mkdir` it again.

### Running individual test cases

#### Golang

Most Go packages use gocheck. Use gocheck command-line args like `-check.f` to select tests and `-check.v` to show more output:

    What next? test lib/controller/router -check.f=RouterSuite -check.v
    ======= test lib/controller/router
    PASS: request_test.go:135: RouterSuite.TestAttrsInBody  0.000s
    PASS: request_test.go:164: RouterSuite.TestBoolParam    0.000s
    PASS: router_test.go:55: RouterSuite.TestOptions        0.002s
    PASS: request_test.go:209: RouterSuite.TestStringOrArrayParam  0.000s
    OK: 4 passed
    PASS
    ok      git.arvados.org/arvados.git/lib/controller/router       0.012s
    ======= test lib/controller/router -- 1s

#### Python

Tests for Python components run under pytest. If what you really want to do is focus on failing or newly-added tests, consider passing the appropriate switches to do that:

    -x, --exitfirst       Exit instantly on first error or failed test
    --lf, --last-failed   Rerun only the tests that failed at the last run (or all if none failed)
    --ff, --failed-first  Run all tests, but run the last failures first. This may re-order
                          tests and thus lead to repeated fixture setup/teardown.
    --nf, --new-first     Run tests from new files first, then the rest of the tests sorted
                          by file mtime

If you want to manually select tests:

    FILENAME              Run tests from FILENAME, relative to the source root
    FILENAME::CLASSNAME   Run tests from CLASSNAME
    FILENAME::FUNCNAME, FILENAME::CLASSNAME::FUNCNAME
                          Run only the named test function
    -k EXPRESSION         Only run tests which match the given substring expression. An
                          expression is a Python evaluable expression where all names are
                          substring-matched against test names and their parent classes.
                          Example: -k 'test_method or test_other' matches all test functions
                          and classes whose name contains 'test_method' or 'test_other',
                          while -k 'not test_method' matches those that don't contain
                          'test_method' in their names. -k 'not test_method and not
                          test_other' will eliminate the matches. Additionally keywords are
                          matched to classes and functions containing extra names in their
                          'extra_keyword_matches' set, as well as functions which have names
                          assigned directly to them. The matching is case-insensitive.
    -m MARKEXPR           Only run tests matching given mark expression. For example:
                          -m 'mark1 and not mark2'.

For even more options, refer to the [pytest command line reference](https://docs.pytest.org/en/stable/reference/reference.html#command-line-flags).
Example:

    What next? test sdk/python --disable-warnings --tb=no --no-showlocals tests/test_keep_client.py::KeepDiskCacheTestCase
    ======= test sdk/python
    […pip output…]
    ========================================================== test session starts ==========================================================
    platform linux -- Python 3.10.19, pytest-9.0.2, pluggy-1.6.0
    rootdir: /home/brett/Curii/arvados/sdk/python
    configfile: pytest.ini
    collected 9 items

    tests/test_keep_client.py F........                                                                                               [100%]

    ======================================================== short test summary info ========================================================
    FAILED tests/test_keep_client.py::KeepDiskCacheTestCase::test_disk_cache_cap - AssertionError: True is not false
    ====================================================== 1 failed, 8 passed in 0.16s ======================================================
    ======= sdk/python tests -- FAILED
    ======= test sdk/python -- 2s
    Failures (1):
    Fail: sdk/python tests (2s)

    What next? test sdk/python --disable-warnings --tb=no --no-showlocals --lf
    ======= test sdk/python
    […pip output…]
    ========================================================== test session starts ==========================================================
    platform linux -- Python 3.10.19, pytest-9.0.2, pluggy-1.6.0
    rootdir: /home/brett/Curii/arvados/sdk/python
    configfile: pytest.ini
    testpaths: tests
    collected 964 items / 963 deselected / 1 selected
    run-last-failure: rerun previous 1 failure

    tests/test_keep_client.py F                                                                                                       [100%]

    ======================================================== short test summary info ========================================================
    FAILED tests/test_keep_client.py::KeepDiskCacheTestCase::test_disk_cache_cap - AssertionError: True is not false
    ============================================= 1 failed, 963 deselected, 1 warning in 0.43s ==============================================
    ======= sdk/python tests -- FAILED
    ======= test sdk/python -- 2s
    Failures (1):
    Fail: sdk/python tests (2s)

#### RailsAPI

Rails parses `TESTOPTS` and passes them to the test runner:

    What next? test services/api TESTOPTS=--name=/.*signed.locators.*/
    [...]
    # Running:

    ....

    Finished in 1.080084s, 3.7034 runs/s, 461.0751 assertions/s.

##### Controlling Rails test order

Rails tests start off with a line like this:

    Run options: -v -d --seed 57089

The seed value determines the order the tests are run in. To reproduce an order-dependent test failure, specify the same seed as a previous failed run:

    What next? test services/api TESTOPTS="-v -d --seed 57089"

## Environment variables

The following variables affect test setup and execution:

| Variable    | Value |
|-------------|-------|
| `CONFIGSRC` | A directory with an Arvados cluster `config.yml`. Tests will read `Clusters.zzzzz.PostgreSQL.Connection` from that file to determine how to connect to the test database. If not set, the tests will use default connection settings. |
| `WORKSPACE` | A directory with an Arvados Git checkout. Defaults to what Git reports for `run-tests.sh` itself. |

`run-tests.sh` cleans the `ARVADOS_[…]` variables from the environment to help ensure consistent test execution. Sometimes you may want to set these variables nonetheless, but then you must pass them as arguments to `run-tests.sh` rather than export them to the environment directly.

| Variable               | Value |
|------------------------|-------|
| `ARVADOS_DEBUG`        | If 1, lots of components will log more information. |
| `ARVADOS_TEST_PRIVESC` | A literal string. If `sudo`, various tests that need to perform privileged operations will run with `sudo` to get them. Otherwise, those tests are skipped. |

## Scripting run-tests

If you run `run-tests.sh` without `--interactive`, by default it runs all the tests and reports their results. This is how CI runs. Run the script with `--help` to see the options you can use to control this behavior.

Common options include:

| Option           | Behavior |
|------------------|----------|
| `--only`         | Run a single set of tests |
| `--skip`         | Skip a set of tests during a full run |
| `NAME_test=ARGS` | Pass arguments to a set of tests |
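For example, a scripted run of just the Python SDK suite, rerunning only the previous failures, might look like this (a sketch combining the options above; suite names come from the interactive target list):

```sh
$ build/run-tests.sh --temp ~/.cache/arvados-test --only sdk/python sdk/python_test="--lf"
```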
## Running Workbench tests in Docker

If you do not have a full development environment, Workbench tests can be run in Docker. The `services/workbench2` subfolder includes Makefile targets that preinstall the necessary dependencies in a Docker container using Ansible.

With Docker and Ansible installed (see `arvados/tools/ansible/README.md`), run this command from within the `arvados/services/workbench2` directory:

    make workbench-docker-image

You can verify the Docker image was built by looking for `arvados/workbench` in `docker image ls`.

Then, start the interactive tests with this command:

    make interactive-tests-in-docker

Non-interactive (headless) tests can be run with these targets:

    # Both e2e & component tests
    make tests-in-docker
    # Integration (e2e) only
    make integration-tests-in-docker
    # Unit (component) only
    make unit-tests-in-docker

### Troubleshooting

#### Missing X server or `$DISPLAY`

Run:

    xhost +local:root

#### No version of Cypress is installed / other error starting Cypress

Recreate the home volume, which re-installs Cypress and other persisted dependencies, by running:

    make clean-docker-volume
    make workbench-docker-volume

================================================
FILE: doc/development/UpdatingDependencies.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Updating dependencies

## Go

(see also: [real documentation](https://go.dev/doc/modules/managing-dependencies))

Update a single dependency:

    ~/arvados$ go get github.com/docker/docker@latest

Update all dependencies:

    ~/arvados$ go get -u -t ./...

Then sync:

    ~/arvados$ go mod tidy

This is a good time to review “replace” directives in source:go.mod and find better solutions to issues that are currently handled by pinning modules to old versions or unmaintained forks.

================================================
FILE: doc/development/git.conf
================================================
# Suggested Git configuration for Arvados
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

# Arvados standards forbid trailing whitespace.
# Configure Git to highlight it.
[color "diff"]
	whitespace = red reverse
[core]
	whitespace = trailing-space

[merge]
	# Merges to main should never be fast-forward.
	# Easiest to turn it off by default:
	ff = false

[user]
	# The Arvados DCO sign-off requires your real name and email.
	# Refer to CONTRIBUTING.md for full details.
	#name = Your Name
	#email = yourmail@example.local

[trailer.arvados]
	key = Arvados-DCO-1.1-Signed-off-by
	ifexists = doNothing
	# If you uncomment cmd, the prepare-commit-msg hook will prepare commit
	# messages with your DCO sign-off, which attests you have permission to
	# contribute the code. Refer to CONTRIBUTING.md for full terms.
	#cmd = echo \"$(git config user.name) <$(git config user.email)>\"
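One way to adopt this configuration in a checkout is Git's include mechanism (a sketch; `include.path` is standard Git, and a relative path here is resolved from `.git/config`, so this points at `doc/development/git.conf` in the work tree):

```sh
$ git config --local include.path ../doc/development/git.conf
```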
================================================
FILE: doc/development/prepare-commit-msg.sh
================================================
#!/bin/sh
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# This Git hook adds refs to branch merges and adds the Arvados DCO sign-off
# if you have configured it to do so.

set -e
set -u

msgfile="$1"; shift
trailer="$(git interpret-trailers --trailer arvados /dev/null | grep @ || :)"
new_msg="$(mktemp --tmpdir="$(dirname "$msgfile")" commit-XXXXXX.txt)"
trap 'rm -f "$new_msg"' EXIT INT TERM QUIT
gawk -f - -v source="${1:-}" -v trailer="$trailer" -- "$msgfile" >"$new_msg" <<'EOF'
BEGIN {
    $0=trailer;
    trailer_key=$1;
}
function write_trailer() {
    if (trailer) {
        if (last1 != trailer_key) {
            print "";
        }
        print trailer;
        trailer="";
    }
}
END {
    write_trailer();
}
($0 == trailer) {
    trailer="";
}
((last1 == trailer_key && $1 != trailer_key) || $1 == "#" || $1 == "---" || $1 == "diff") {
    write_trailer();
}
(NR == 1 && $1 == "Merge" && $(NF - 1) == "branch") {
    match($NF, /[[:punct:]]([0-9]+)[[:punct:]]/, bmatch);
    sub(/^'.*\//, "'", $NF);
    print "Merge branch", $NF;
    if (RSTART) {
        printf("%sRefs #%s.%s", ORS, bmatch[1], ORS);
    }
    last1=$1;
    next;
}
{
    print;
    last1=$1;
}
EOF
mv -f "$new_msg" "$msgfile"
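To try the hook in your own checkout, one approach is to copy it into the repository's hooks directory (a sketch; Git also supports pointing `core.hooksPath` at a directory of hooks instead):

```sh
$ cp doc/development/prepare-commit-msg.sh .git/hooks/prepare-commit-msg
$ chmod +x .git/hooks/prepare-commit-msg
```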
Step Who What
0 engineering Build new features, refine good code into great code
1 ops Build a new tordo compute image against the latest development packages.
Update the tordo configuration and test it with a couple of representative workflows (at least one bioinformatics workflow and one S3 download workflow).
If everything works well, update version pins based on the versions installed in the new image (a sketch of such a pin follows the list below). Update:
  • tools/ansible/roles/arvados_docker/files/arvados-docker.pref
  • tools/ansible/roles/compute_amd_rocm/defaults/main.yml (update arvados_compute_amd_rocm_version)
  • tools/ansible/roles/compute_nvidia/files/arvados-nvidia.pref
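For reference, the `.pref` files above are apt preferences files, so a pin update there is typically just a version bump. A hypothetical sketch (the package glob and version are invented for illustration; the real files pin specific packages):

```
Package: arvados-*
Pin: version 3.1.2-1
Pin-Priority: 1001
```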
2 engineering Prepare release branch on the arvados and arvados-formula repositories. For major releases, this means branching a new X.Y-staging from main. For minor releases, this means cherry-picking features onto the existing X.Y-staging branch. Ensure that Redmine issues for features or bugfixes that are appearing for the first time in this version are associated with the correct release (for major releases, use `art redmine issues find-and-associate`).
3 engineering Ensure that the release staging branch passes automated tests on Jenkins.
4 engineering Review release branch to make sure all commits that need to be in the release are in the release. If new commits are added, resume checklist from step 3.
5 product mgr Write release notes and publish them on the www-dev site.
6 everyone Review release notes
7 product mgr Create a Redmine release for the next patch release after the current one.
8 release eng Build release candidate packages with version X.Y.Z~rcN-1 using the Jenkins job build-and-publish-rc-packages. Add a comment on the release ticket identifying the Git commit hash used for the build, and link to your Jenkins run.
9 release eng Publish release candidate arvados/jobs Docker image using docker-jobs-image-release
10 ops Test installer formula / provision scripts with RC packages. Run the test-provision Jenkins job where git_hash is your X.Y-staging commit and RELEASE is testing.
11 ops Update pirca to use the RC packages: build a new compute image, update the Arvados version in Salt and deploy.
After Salt updates the cluster, check that your new version deployed successfully by running arvados-server version and then arvados-server check to verify other running services have the same version.
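For example, on a cluster node, the verification from this step looks like this (both commands are named in the step above):

```
$ arvados-server version
$ arvados-server check
```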
12 bfx Run CWL integration tests and fastq-to-gvcf pipeline on pirca (more about running fastq-to-gvcf).
After the workflow succeeds, check the versions reported at the top of the workflow logs to verify it ran your RC for crunch-run, arv-mount, and a-c-r.
13 engineering Perform final manual testing based on risk assessment, the release notes and manual testing plan. This should involve at least a "smell check" to confirm that key features, improvements or bug fixes intended to appear in the release are present and behave as intended.
14 product mgr Approve RC for release
15 release eng Publish Ruby gems using build-publish-packages-python-ruby with only the BUILD_RUBY box checked.
16 release eng On the X.Y-staging branch, update these files to refer to the release version:
  • doc/admin/upgrading.html.textile.liquid: update the "Upgrading Arvados and Release notes" doc page with the version and date of the release.
  • contrib/arvados-bootstrap/pyproject.toml, update project.version and project.dependencies (see the sketch after this list)
  • contrib/R-sdk/DESCRIPTION, update Version:
  • services/api/Gemfile: update it to depend on the newly published Arvados gem, then run bundle install to update Gemfile.lock
  • tools/ansible/roles/arvados_apt/defaults/main.yml, update arvados_pin_version
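As an illustration of the pyproject.toml change, the bump touches `project.version` and the pinned Arvados dependencies. The field values below are hypothetical, not the actual file contents:

```toml
[project]
name = "arvados-bootstrap"
version = "3.1.2"
dependencies = [
    # hypothetical pin; match the release version
    "arvados-python-client == 3.1.2",
]
```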
17 release eng Build final release packages with version X.Y.Z-1 using the Jenkins job build-and-publish-rc-packages. Add a comment on the release ticket identifying the Git commit hash used for the build, and link to your Jenkins run.
18 release eng Publish stable release arvados/jobs Docker image using docker-jobs-image-release
19 release eng Push packages to stable repos using publish-packages-to-stable-repo (more info)
20 release eng Publish Python packages using build-publish-packages-python-ruby with only the BUILD_PYTHON box checked.
21 release eng Publish Java package using build-java-sdk and following Releasing Java SDK packages
22 release eng Publish R package using build-package-r
23 release eng Tag the commits in each repo used to build the release in Git. Create an annotated tag (git tag --annotate) with a message like "Release notes at https://arvados.org/release-notes/X.Y.Z/". That makes the GitHub releases page look good. See GitHub documentation for more details about how to automate releases (a sketch of the commands follows this step).
Create or fast-forward the X.Y-release branch to match X.Y-staging.
Cherry-pick the upgrade notes commit (from step 2) onto main.
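A hedged sketch of the tagging commands from this step, assuming the tag is simply named after the release version:

```
$ git tag --annotate -m "Release notes at https://arvados.org/release-notes/X.Y.Z/" X.Y.Z
$ git push origin X.Y.Z
```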
24 release eng Ensure the new release is published on https://doc.arvados.org/.
Ensure that release notes & any other materials are pointing to the correct version of the docs.
(If anything goes wrong, see https://dev.arvados.org/projects/arvados-private/wiki/Docarvadosorg_deployment)
25 ops Update pirca and jutro to the new stable release: build new compute images, update the Arvados version in Salt and deploy.
26 product mgr Merge release notes (step 6) from the "develop" branch to the "main" branch of the arvados-www Git repository and check that the https://arvados.org front page is updated
27 product mgr Send out the release notes via MailChimp, tweet from the Arvados account, announce on the Discourse forum, Matrix, etc.
28 release eng In Jenkins:
  • For each test from step 3, go to "Job Config History" and record on the release ticket the timestamp of the configuration used to test the release
  • Go to Manage Jenkins > Clouds > gce2 > Configure and record on the release ticket the VM image tagged "tests" used by Jenkins workers to run the tests for the release (should be something like jenkins-image-arvados-tests-YYYYMMDDHHMMSS)
  • Go to packer-build-jenkins-image-arvados-tests history and record on the release ticket the Jenkins job used to build the above VM image.
29 release eng Add the release to doi:10.5281/zenodo.6382942, following "Updating Zenodo Version of Arvados after Release" (https://zenodo.org/record/6382943)
================================================
FILE: doc/development/release/FastqPipeline.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# More about running fastq-to-gVCF

When we do releases, we run a test pipeline that is intended to be representative of a bioinformatics workload.

1. Deploy the version of `arvados-cwl-runner` that you want to test, and make sure that the corresponding `arvados/jobs` image [has been built and uploaded to Docker Hub](https://ci.arvados.org/view/Release%20Pipeline/job/docker-jobs-image-release/), or built using the `arvados/build/build-dev-docker-jobs-image.sh` script and uploaded using `arv-keepdocker`.
2. Clone
3. Create an Arvados project for the test run
4. `cd arvados/tutorial/WGS-processing`
5. Run the following command: `arvados-cwl-runner --no-wait --disable-reuse --project-uuid <project uuid> cwl/wgs-processing-wf.cwl yml/wgs-processing-wf-chr19.yml`
6. Monitor this for success. It usually takes about an hour to run.

If you are running this on `pirca`, then all the data should already be present. If you are running it from somewhere else, you may need to do some additional data copying from `pirca` to the other cluster. The input document `yml/wgs-processing-wf-chr19.yml` has the portable data hashes of the collections.

================================================
FILE: doc/development/release/JavaSDK.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Releasing Java SDK packages

The Java SDK is distributed on the Sonatype Central Repository. Here are the steps to release a new jar file:

1. Build and upload the package using https://ci.arvados.org/view/All/job/build-java-sdk
2. Go to [Sonatype Publishing Settings](https://central.sonatype.com/publishing/deployments) and log in with the appropriate credentials (gopass oss.sonatype.org/curii)
3. Make sure you're on the "Deployments" tab.
4. Find the jar that was just uploaded by Jenkins. Click the "Publish" button and wait for the process to finish. See [documentation about the publishing API we use](https://central.sonatype.org/publish/publish-portal-ossrh-staging-api/).

## Getting the authentication token for Sonatype

[Log into Sonatype](https://central.sonatype.com/usertoken) and under the account menu select "User Tokens" to review and manage tokens. Our current Jenkins token is stored in gopass as `curii-systems/websites/oss.sonatype.org/jenkins`.

## gradle.properties

To upload to Sonatype, you need the token (see above) and a secret key: you must upload a GPG-signed package. All these parameters are set in `gradle.properties`, which we keep as a Jenkins secret. Note that the property values after the equals sign should not be quoted. I'm not certain if spaces are allowed around the equals sign, but currently it works with no extra spaces.

```
ossrhUsername=...
ossrhPassword=...
signing.keyId=...
signing.password=
signing.secretKeyRingFile=...-secret-key.gpg
```

================================================
FILE: doc/development/release/ManualTests.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Manual testing plan

## Manual testing of SDKs that don't have good coverage

## Release candidate builds with ~1 ~2

## Workbench2

Need to go through this whole testing plan with both an admin and non-admin account. Admin-only operations are indicated.

### Login

- Test login using username/password
- Test login using OpenID Connect
- Test login as federated user
- Log in using a remote account on a centralized federation (LoginCluster)
- Log in using a remote account on a peer federation

### Left side navigation

- Click on each top level icon (home projects, favorites/public favorites, shared, all processes, instance types, shell access, groups, trash) and confirm that the appropriate page loads with no errors
- Check that the left side panel can be resized
- Check that the toggle side panel button works as expected
- Check that the +NEW button is disabled unless a project is displayed

### Home projects top panel

- Project name should match the logged in user
- Check for expected toolbar buttons
  - Details
  - User account
  - API Details
- Check the buttons have the expected behavior

### Project view

#### Top panel

Check that it shows the project name.
Check that it shows the first line of the project description. Check that there is an arrow that expands to show the full description.
Check that it shows project properties. Check that there is an arrow that expands to show all the properties if they don't fit on a single line.
Check that it renders the toolbar of project operations (listed below). Check that each operation behaves correctly and operates on the project.
Check that if the window is narrow, the rightmost toolbar icons spill into an overflow menu.
Check that breadcrumbs include the project name and each parent project.

#### Data tab

Should show projects, workflows, and collections (in that order).

Clicking on the name rendered in blue text should navigate to the item.
Clicking anywhere else but the name should toggle between selected and not selected.

- Unless clicking on the checkbox, clicking on the row clears any other selected items
- When a row is selected, the toolbar moves from the top panel to the data table panel
- Clicking on the check box to the left when a different item is selected selects both items
- The toolbar updates to show only the operations that can be applied to both items
- Clicking "View details" should open the right info panel. Check that it shows details for the currently selected item.

Check that the toolbar operations are sorted and grouped consistently across different types of items.
Check that the toolbar operations are appropriate to the type of item selected.

Expected toolbar when project is selected:

- View details
- Open in new tab
- Copy link to clipboard
- Open with 3rd party client
- API details
- ---
- Share
- New project
- Edit project
- Move to
- Move to trash
- ---
- Freeze project
- Add to favorites
- Add to public favorites (admin only)

Expected toolbar when workflow is selected:

- View details
- Open in new tab
- Copy link to clipboard
- API details
- ---
- Run workflow
- Delete workflow

Expected toolbar when collection is selected:

- View details
- Open in new tab
- Copy link to clipboard
- Open with 3rd party client
- API details
- ---
- Share
- Edit collection
- Move to
- Make a copy
- Move to trash
- ---
- Add to favorites
- Add to public favorites (admin only)

Check that all the toolbar operations work as expected.
Check that right-clicking on a row selects the row and then opens the appropriate context menu.

- Check that the operations apply to the item that was clicked on.
- Check that the operations in the right-click context menu match the toolbar.

Check that clicking on each action in the context menu works as expected.
Check that entering text into the search box refreshes the list with search results.
Check that clicking on the three bars in the upper right opens a menu to select columns.
Check that enabling/disabling data columns works.
Check that all columns are filled in appropriately for each item, or blank ("-") where no such data applies.
Check that clicking on the "Name" column sorts by name.

- Check sort by "Date created"
- Check sort by "Last modified"
- Check sort by "Trash at"
- Check sort by "Delete at"

Check that clicking on "Go to the next page" loads the next page of items.
Check that getting the number of items doesn't block loading the table contents.

#### Workflows tab

Check it shows processes (workflow runs) only.
Check that it shows the number of completed, failed, queued and running processes, as well as the total, at the top of the data table.
Check that it shows the name, status, type, runtime and last modified times.
Check that entering text into the search box refreshes the list with search results.

Check that the toolbar and context menu behave as expected:

- View details
- Open in new tab
- Outputs
- API details
- ---
- Edit process
- Copy and re-run process
- Remove
- ---
- Add to favorites
- Add to public favorites (admin only)

Check that selecting more than one item updates the toolbar to "Remove".
Check that clicking on "Go to the next page" loads the next page of items.
Check that getting the number of items doesn't block loading the table contents.
Check that process status is rendered correctly.
Check that filtering by process status shows only rows with the intended status.
Check that runtime is calculated/rendered correctly.

### My favorites

Check that all items marked as "favorite" appear.
Check that clicking on an item shows the appropriate toolbar.
Check that clicking "Remove from favorites" on an item refreshes the favorites list and the item is no longer present.

### Public favorites

Check that all items marked as "public favorite" appear.
Check that clicking on an item shows the appropriate toolbar.
Check that clicking "Remove from public favorites" on an item refreshes the public favorites list and the item is no longer present. (admin only)

### Shared with me

Check that it shows all the things that don't belong to the current user.
Check that selecting an item shows the appropriate toolbar.
Check that clicking on "Go to the next page" loads the next page of items.
Check that getting the number of items doesn't block loading the table contents.

### All processes

Check that it shows all processes visible to the user, regardless of owner project.
Check that selecting an item shows the appropriate toolbar.
Check that clicking on "Go to the next page" loads the next page of items.
Check that getting the number of items doesn't block loading the table contents.

### Instance types

Check that all the available instance types are listed and formatted properly.

### Shell Access

Check that shell nodes are listed.
Check that the ssh command line is valid.
Check that webshell works properly.

## Groups — standalone and peer federation

1. Create group
2. Log in as non-admin user.
3. Log in as a second non-admin user in a private window for testing sharing.
4. Check that users cannot see one another
5. Add user to group
6. Check that users can see one another

## Collections

1. Create a collection & upload a file
2. Add a file
3. Rename a file
4. Remove a file
5. Download one of the files
6. Make a sharing link to the collection & check usage from a private window
7. Mark collection as a favorite, check that it shows up in favorites
8. Rename collection
9. Edit description
10. Add property
11. Search for collection by property
12. Search for collection by name
13. Search for collection by filename
14. Search for collection by keyword in description
15. Trash collection
16. Check that collection can be found in the trash
17. Untrash collection

## Projects

1. Create a project
2. Rename a project
3. Edit description
4. Create a collection inside the project
5. Move a collection into the project
6. Add read-only sharing permission to the project & check access from the other user
7. Add read-write sharing permission to the project & check access from the other user
8. Add manage sharing permission to the project & check access from the other user
9. Mark project as favorite, check that it shows up in favorites
10. Search for project by name
11. Search for project by keyword in description
12. Trash project
13. Check that project can be found in the trash
14. Untrash project

## Workflows

1. Upload workflow with `arvados-cwl-runner --create-workflow`
2. Browse workflow
3. Select workflow to run
4. Choose input file
5. Watch it run
   1. Check logging
   2. Check live updates
   3. Check links to input & output
6. Check that it shows up in All Processes

## Federation

### Peer federation

2 or more clusters are configured with a `RemoteClusters` entry in config.yml.

### Login cluster federation

2 or more clusters are configured with a `RemoteClusters` entry in config.yml. One of the clusters is the "login cluster", which means the **other** clusters have a section like this in their config (clsr1 is the login cluster):

```
Clusters:
  clsr2:
    Login:
      LoginCluster: clsr1
```

#### Groups

1. Login cluster: create group
2. Satellite cluster: Log in as non-admin user.
3. Satellite cluster: Log in as a second non-admin user in a private window for testing sharing.
4. Satellite cluster: check that users cannot see one another
5. Login cluster: add both users to group
6. Satellite cluster: Check that users can see one another
7. Satellite cluster: create group
8. Satellite cluster: add both users to group
9. Satellite cluster: Check that both users can share with the group created on the satellite cluster

## Misc

1. As admin, create a "public favorite" and make sure users see it.
2. As admin, deactivate a user. Make sure that user can't log back in.
3. Add a cluster for multi-site search.
4. Upload ssh key & check view
5. Create git repo & check view
6. As admin, add virtual machine access & check view

================================================
FILE: doc/development/release/Zenodo.md
================================================
[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
[comment]: # ()
[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)

# Updating Zenodo Version of Arvados after Release

1. Download a `.zip` of your new Arvados release from [GitHub Releases](https://github.com/arvados/arvados/tags)
2. Log in to [Zenodo](https://zenodo.org/) using the credentials from `gopass "curii-systems/zenodo.org/sysadmin+zenodo@curii.com"`
3. Go to the [Arvados record](https://zenodo.org/records/15213491) and press the New Version button. (Using new versions lets us use the overarching DOI for our `citations.md` and keep all the versions together on Zenodo.)
4. In the form, update the following:
   1. Upload the `.zip` file for this release you downloaded earlier
   2. Request a new DOI for this version
   3. Update the Publication Date for this release
   4. Add any Creators who have worked on Arvados and aren't listed
   5. Under Additional Description, edit the links for Release Notes and (if you're doing a major release) Documentation
   6. Update the Version number with this release

Once you add a new version, you can't change its DOI, but everything else is editable if you accidentally make a mistake. So don't worry :) just edit the new version to fix it.

================================================
FILE: doc/examples/config/zzzzz.yml
================================================
AutoReloadConfig: true
Clusters:
  zzzzz:
    ManagementToken: e687950a23c3a9bceec28c6223a06c79
    SystemRootToken: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy
    API:
      RequestTimeout: 30s
    TLS:
      Insecure: true
    Collections:
      BlobSigningKey: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
      TrustAllContent: true
      ForwardSlashNameSubstitution: /

================================================
FILE: doc/index.html.liquid
================================================
---
layout: default
no_nav_left: true
navsection: top
title: Arvados | Documentation
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

ARVADOS

A free and open source platform for big data science

[Image: Dax reading a book]

What is Arvados

Arvados is a platform for managing compute and storage for cloud and HPC clusters. It allows you to track your methods and datasets, share them securely, and easily re-run analyses. It also makes it possible to run analyses across multiple clusters (HPC, cloud, or hybrid) with Federated Multi-Cluster Workflows.

Support and Community

Interact with the Arvados community on the Arvados Forum and the arvados/community channel at gitter.im.

Curii Corporation provides managed Arvados installations as well as commercial support for Arvados. Please contact info@curii.com for more information.

Contributing

Please visit the developer documentation. Arvados is 100% free and open source software; check out the code on GitHub.

Arvados is under active development, see the recent developer activity.

License

Most of Arvados is licensed under the GNU AGPL v3. The SDKs are licensed under the Apache License 2.0 and can be incorporated into proprietary code. See Arvados Free Software Licenses for more information.

Sections

User Guide — How to manage data and do analysis with Arvados.

SDK Reference — Details about accessing Arvados from various programming languages.

Arvados Architecture — Details about the Arvados components and architecture.

API Reference — Details about the Arvados REST API.

Admin Guide — Details about administering an Arvados cluster.

Install Guide — How to install Arvados.


The content of the above documentation is licensed under the Creative Commons Attribution-Share Alike 3.0 United States license. Code samples in the above documentation are licensed under the Apache License, Version 2.0.

================================================
FILE: doc/install/arvbox.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Arvados-in-a-box
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2. arvbox is unsupported

The Arvados team does not maintain or support arvbox as of Arvados 3.2.0.

If you were using arvbox in demo mode, consider installing on a Debian-based virtual machine with our "single-node Ansible installer":{{ site.baseurl }}/install/install-single-host.html.

If you were using arvbox for development, we now provide an Ansible playbook to install development dependencies on a Debian-based system. Our "Hacking Prerequisites documentation":https://github.com/arvados/arvados/blob/main/doc/development/Prerequisites.md has instructions for how to use it.

Installing systems with Ansible requires a little more initial setup, but once you've done that, it's easier to keep a system up-to-date: when you want to update a system, you simply re-run the playbook. We think this trade-off lets us provide a better experience to a wider variety of users.

================================================
FILE: doc/install/config.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Configuration files
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2. Arvados /etc/arvados/config.yml

The configuration file is normally found at @/etc/arvados/config.yml@ and will be referred to as just @config.yml@ in this guide. This configuration file must be kept in sync across every service node in the cluster, but not shell and compute nodes (which do not require config.yml).

h3. Syntax

The configuration file is in "YAML":https://yaml.org/ format. This is a block syntax where indentation is significant (similar to Python). By convention we use two-space indents.

The first line of the file is always "Clusters:"; underneath it, at the first indent level, is the Cluster ID. All the actual cluster configuration follows under the Cluster ID. This means all configuration parameters are indented by at least two levels (four spaces). Comments start with @#@.

We recommend a YAML-syntax plugin for your favorite text editor, such as @yaml-mode@ (Emacs) or @yaml-vim@.

Example file:
Clusters:                         # Clusters block, everything else is listed under this
  abcde:                          # Cluster ID, everything under it is configuration for this cluster
    ExampleConfigKey: "fghijk"    # An example configuration key
    ExampleConfigGroup:           # A group of keys
      ExampleDurationConfig: 12s  # Example duration
      ExampleSizeConfig: 99KiB    # Example with a size suffix
Each configuration group may only appear once. When a configuration key is within a config group, it will be written with the group name leading, for example @ExampleConfigGroup.ExampleSizeConfig@.

Duration suffixes are s=seconds, m=minutes or h=hours.

Size suffixes are K=10 ^3^, Ki=2 ^10^, M=10 ^6^, Mi=2 ^20^, G=10 ^9^, Gi=2 ^30^, T=10 ^12^, Ti=2 ^40^, P=10 ^15^, Pi=2 ^50^, E=10 ^18^, Ei=2 ^60^. You can optionally follow with a "B" (e.g. "MB" or "MiB") for readability (it does not affect the units).

h3(#empty). Create empty configuration file

Change @webserver-user@ to the user that runs your web server process. This is @www-data@ on Debian-based systems, and @nginx@ on Red Hat-based systems.
# export ClusterID=xxxxx
# umask 027
# mkdir -p /etc/arvados
# cat > /etc/arvados/config.yml <<EOF
Clusters:
  ${ClusterID}:
EOF
# chgrp webserver-user /etc/arvados /etc/arvados/config.yml
h2. Nginx configuration

This guide will also cover setting up "Nginx":https://www.nginx.com/ as a reverse proxy for Arvados services. Nginx performs two main functions: TLS termination and virtual host routing. The virtual host configuration for each component will go in its own file in @/etc/nginx/conf.d/@.

h2. Synchronizing config file

The Arvados configuration file must be kept in sync across every service node in the cluster. We strongly recommend using a devops configuration management tool such as "Puppet":https://puppet.com/open-source/ to synchronize the config file. Alternatively, something like the following script to securely copy the configuration file to each node may be helpful. Replace the @ssh@ targets with your nodes.
#!/bin/sh
sudo cat /etc/arvados/config.yml | ssh 10.0.0.2 sudo sh -c "'cat > /etc/arvados/config.yml'"
sudo cat /etc/arvados/config.yml | ssh 10.0.0.3 sudo sh -c "'cat > /etc/arvados/config.yml'"
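If you have more than a couple of nodes, the same approach works as a small loop. This is just a sketch; the node addresses are placeholders for your own:

#!/bin/sh
# Hypothetical node list; replace with your cluster's service nodes.
for node in 10.0.0.2 10.0.0.3; do
  sudo cat /etc/arvados/config.yml | ssh "$node" sudo sh -c "'cat > /etc/arvados/config.yml'"
done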
================================================
FILE: doc/install/configure-azure-blob-storage.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Configure Azure Blob storage
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Keepstore can store data in one or more Azure Storage containers.

h2. Set up VMs and Storage Accounts

Before starting the configuration of individual keepstore servers, it is good to have an idea of the keepstore servers' final layout.

One key decision is the number of servers and the type of VM to run. Azure may change the bandwidth capacity of each VM type over time. After conducting some empirical saturation tests, the conclusion was that bandwidth is roughly proportional to the number of cores, with some exceptions. As a rule of thumb, it is better to invest resources in more cores than in memory or IOPS.

Another decision is how many VMs should run keepstore. For example, there could be 8 VMs with one core each, or one machine with 8 cores, or anything in between. Assuming the cloud cost is the same, there is always the benefit of distributing the risk of faulty VMs. The recommendation is to start with 2 VMs, each with a minimum of 2 cores, and expand in pairs. The total number of VMs will be a function of the budget and the pipeline traffic, sized to avoid saturation during periods of high usage. The Standard D v3 family is a balanced choice, making Standard_D2_v3 the 2-core option.

There are many options for storage accounts. You can read details in the Azure documentation: "https://docs.microsoft.com/en-us/azure/storage/common/storage-introduction":https://docs.microsoft.com/en-us/azure/storage/common/storage-introduction. The type of storage and access tier will be a function of the budget and desired responsiveness. A balanced option is a general-purpose Standard storage account using Blob storage with the hot access tier.

Keepstore can be configured to reflect the level of redundancy the underlying storage provides; Azure calls this the data replication option. For example, LRS (Locally Redundant Storage) saves 3 copies of the data. The desired redundancy can be provided at the keepstore layer or at the Storage Account layer. The decision of where redundancy is handled, and which type of Storage Account data replication to use (LRS, ZRS, GRS or RA-GRS), involves trade-offs. Please read more at "https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy":https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy and decide what is best for your needs.

h2. Create a storage container

Using the Azure web portal or command line tool, create or choose a storage account with a suitable redundancy profile and availability region. Use the storage account keys to create a new container.
~$ azure config mode arm
~$ az login
~$ az group create --name exampleGroupName --location eastus2
~$ az storage account create --sku Standard_LRS --kind BlobStorage --encryption-services blob --access-tier Hot --https-only true --location eastus2 --resource-group exampleGroupName --name exampleStorageAccountName
~$ az storage account keys list --resource-group exampleGroupName --account-name exampleStorageAccountName
[
  {
    "keyName": "key1",
    "permissions": "Full",
    "value": "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=="
  },
  {
    "keyName": "key2",
    "permissions": "Full",
    "value": "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=="
  }
]
~$ AZURE_STORAGE_ACCOUNT="exampleStorageAccountName" \
AZURE_STORAGE_ACCESS_KEY="zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==" \
azure storage container create --name exampleContainerName
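To double-check that the container now exists, you can list the containers on the account. This sketch assumes the newer @az@ CLI is installed and reuses the environment variables set above:

~$ az storage container list \
    --account-name "$AZURE_STORAGE_ACCOUNT" \
    --account-key "$AZURE_STORAGE_ACCESS_KEY" \
    --output table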
Note that Keepstore services may be configured to use multiple Azure Storage accounts and multiple containers within a storage account.

h2. Configure keepstore

Volumes are configured in the @Volumes@ section of the cluster configuration file.

{% include 'assign_volume_uuid' %}
    Volumes:
      ClusterID-nyw5e-000000000000000:
        AccessViaHosts:
          # This section determines which keepstore servers access the
          # volume. In this example, keep0 has read/write access, and
          # keep1 has read-only access.
          #
          # If the AccessViaHosts section is empty or omitted, all
          # keepstore servers will have read/write access to the
          # volume.
          "http://keep0.ClusterID.example.com:25107": {}
          "http://keep1.ClusterID.example.com:25107": {ReadOnly: true}

        Driver: Azure
        DriverParameters:
          # Storage account name and secret key, used for
          # authentication.
          StorageAccountName: exampleStorageAccountName
          StorageAccountKey: zzzzzzzzzzzzzzzzzzzzzzzzzz

          # Storage container name.
          ContainerName: exampleContainerName

          # The cloud environment to use,
          # e.g. "core.chinacloudapi.cn". Defaults to
          # "core.windows.net" if blank or omitted.
          StorageBaseURL: ""

          # Time to wait for an upstream response before failing the
          # request.
          RequestTimeout: 10m

          # Time to wait before retrying a failed "list blobs" Azure
          # API call.
          ListBlobsRetryDelay: 10s

          # Maximum attempts at a "list blobs" Azure API call before
          # giving up.
          ListBlobsMaxAttempts: 12

          # If non-zero, use multiple concurrent requests (each
          # requesting MaxGetBytes bytes) when retrieving data. If
          # zero or omitted, get the entire blob with one request.
          #
          # Normally this is zero but if you find that 4 small
          # requests complete faster than a single large request, for
          # example, you might set this to 16777216 (64 MiB ÷ 4).
          MaxGetBytes: 0

          # Time to wait for an unexpectedly empty blob to become
          # non-empty. Azure's create-and-write operation is not
          # atomic. The default value typically allows concurrent GET
          # and PUT requests to succeed despite the race window.
          WriteRaceInterval: 15s

          # Time to wait between GET attempts while waiting for
          # WriteRaceInterval to expire.
          WriteRacePollTime: 1s

        # How much replication is provided by the underlying storage
        # container.  This is used to inform replication decisions at
        # the Keep layer.
        Replication: 3

        # If true, do not accept write or trash operations, even if
        # AccessViaHosts.*.ReadOnly is false.
        #
        # If false or omitted, enable write access (subject to
        # AccessViaHosts.*.ReadOnly, where applicable).
        ReadOnly: false

        # Storage classes to associate with this volume.  See "Storage
        # classes" in the "Admin" section of doc.arvados.org.
        StorageClasses: null
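As a sketch of the "multiple containers" note above, a second volume entry could point at another container on the same account. The volume UUID and container name below are made up for illustration:

    Volumes:
      ClusterID-nyw5e-000000000000001:
        Driver: Azure
        DriverParameters:
          StorageAccountName: exampleStorageAccountName
          StorageAccountKey: zzzzzzzzzzzzzzzzzzzzzzzzzz
          ContainerName: exampleContainerName2
        Replication: 3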
================================================
FILE: doc/install/configure-fs-storage.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Configure filesystem storage
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Keepstore can store data in local and network-attached POSIX filesystems.

h2. Setting up filesystem mounts

Volumes are configured in the @Volumes@ section of the cluster configuration file. You may provide multiple volumes for a single keepstore process to manage multiple disks. Keepstore distributes blocks among volumes in round-robin fashion.

{% include 'assign_volume_uuid' %}

Note that each volume entry has an @AccessViaHosts@ section indicating which keepstore instance(s) will serve that volume. In this example, keep0 and keep1 each have their own data disk. The @/mnt/local-disk@ directory on keep0 is volume @ClusterID-nyw5e-000000000000000@, and the @/mnt/local-disk@ directory on keep1 is volume @ClusterID-nyw5e-000000000000001@.
    Volumes:
      ClusterID-nyw5e-000000000000000:
        AccessViaHosts:
          "http://keep0.ClusterID.example.com:25107": {}
        Driver: Directory
        DriverParameters:
          # The directory that will be used as the backing store.
          Root: /mnt/local-disk

        # How much replication is performed by the underlying
        # filesystem.  (for example, a network filesystem may provide
        # its own replication).  This is used to inform replication
        # decisions at the Keep layer.
        Replication: 1

        # If true, do not accept write or trash operations, only
        # reads.
        ReadOnly: false

        # Storage classes to associate with this volume.
        StorageClasses: null

      ClusterID-nyw5e-000000000000001:
        AccessViaHosts:
          "http://keep1.ClusterID.example.com:25107": {}
        Driver: Directory
        DriverParameters:
          Root: /mnt/local-disk
In the case of a network-attached filesystem, the @AccessViaHosts@ section can have multiple entries. If the filesystem is accessible by all keepstore servers, the AccessViaHosts section can be empty, or omitted entirely. In this example, the underlying storage system performs replication, so specifying @Replication: 2@ means a block is considered to be stored twice for the purposes of data integrity, while only stored on a single volume from the perspective of Keep.
    Volumes:
      ClusterID-nyw5e-000000000000002:
        AccessViaHosts:
          # This section determines which keepstore servers access the
          # volume. In this example, keep0 has read/write access, and
          # keep1 has read-only access.
          #
          # If the AccessViaHosts section is empty or omitted, all
          # keepstore servers will have read/write access to the
          # volume.
          "http://keep0.ClusterID.example.com:25107/": {}
          "http://keep1.ClusterID.example.com:25107/": {ReadOnly: true}
        Driver: Directory
        DriverParameters:
          Root: /mnt/network-attached-filesystem
        Replication: 2
================================================
FILE: doc/install/configure-s3-object-storage.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Configure S3 object storage
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Keepstore can store data in object storage compatible with the S3 API, such as Amazon S3, Google Cloud Storage, Ceph RADOS, NetApp StorageGRID, and others.

Volumes are configured in the @Volumes@ section of the cluster configuration file.

# "Configuration example":#example
# "IAM Policy":#IAM

h2(#example). Configuration example

{% include 'assign_volume_uuid' %}
    Volumes:
      ClusterID-nyw5e-000000000000000:
        AccessViaHosts:
          # This section determines which keepstore servers access the
          # volume. In this example, keep0 has read/write access, and
          # keep1 has read-only access.
          #
          # If the AccessViaHosts section is empty or omitted, all
          # keepstore servers will have read/write access to the
          # volume.
          "http://keep0.ClusterID.example.com:25107": {}
          "http://keep1.ClusterID.example.com:25107": {ReadOnly: true}

        Driver: S3
        DriverParameters:
          # Bucket name.
          Bucket: example-bucket-name

          # Optionally, you can specify S3 access credentials here.
          # If these are left blank, IAM role credentials will be
          # retrieved from instance metadata (IMDSv2).
          AccessKeyID: ""
          SecretAccessKey: ""

          # Storage provider region. If Endpoint is specified, the
          # region determines the request signing method, and defaults
          # to "us-east-1".
          Region: us-east-1

          # Storage provider endpoint. For Amazon S3, use "" or
          # omit. For Google Cloud Storage, use
          # "https://storage.googleapis.com".
          Endpoint: ""

          # Change to true if the region requires a LocationConstraint
          # declaration.
          LocationConstraint: false

          # Use V2 signatures instead of the default V4. Amazon S3
          # supports V4 signatures in all regions, but this option
          # might be needed for other S3-compatible services.
          V2Signature: false

          # Use path-style requests instead of the default
          # virtual-hosted-style requests.  This might be needed for
          # S3-compatible services other than AWS.  If using AWS, see
          # https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access
          # for deprecation information.
          UsePathStyle: false

          # By default keepstore stores data using the MD5 checksum
          # (32 hexadecimal characters) as the object name, e.g.,
          # "0123456abc...". Setting PrefixLength to 3 changes this
          # naming scheme to "012/0123456abc...". This can improve
          # performance, depending on the S3 service being used. For
          # example, PrefixLength 3 is recommended to avoid AWS
          # limitations on the number of read/write operations per
          # second per prefix (see
          # https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/).
          #
          # Note that changing PrefixLength on an existing volume is
          # not currently supported. Once you have started using a
          # bucket as an Arvados volume, you should not change its
          # configured PrefixLength, or configure another volume using
          # the same bucket and a different PrefixLength.
          PrefixLength: 0

          # Requested page size for "list bucket contents" requests.
          IndexPageSize: 1000

          # Maximum time to wait while making the initial connection
          # to the backend before failing the request.
          ConnectTimeout: 1m

          # Maximum time to wait for a complete response from the
          # backend before failing the request.
          ReadTimeout: 2m

          # Maximum eventual consistency latency
          RaceWindow: 24h

        # How much replication is provided by the underlying bucket.
        # This is used to inform replication decisions at the Keep
        # layer.
        Replication: 2

        # If true, do not accept write or trash operations, even if
        # AccessViaHosts.*.ReadOnly is false.
        #
        # If false or omitted, enable write access (subject to
        # AccessViaHosts.*.ReadOnly, where applicable).
        ReadOnly: false

        # Storage classes to associate with this volume.  See "Storage
        # classes" in the "Admin" section of doc.arvados.org.
        StorageClasses: null
h2(#IAM). IAM Policy

On Amazon, VMs that will access the S3 bucket (these include keepstore and compute nodes) will need an IAM policy with "permissions to read, write, list and delete objects in the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html. Here is an example policy:
{
    "Id": "arvados-keepstore policy",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "s3:*"
            ],
            "Resource": [
                "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume",
                "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume/*"
            ]
        }
    ]
}
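If you prefer not to grant @s3:*@, a narrower action list covering the read, write, list and delete operations mentioned above should suffice. This is a sketch; test it against your own bucket before relying on it:

{
    "Id": "arvados-keepstore least-privilege policy (sketch)",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "s3:GetObject",
                "s3:PutObject",
                "s3:DeleteObject",
                "s3:ListBucket"
            ],
            "Resource": [
                "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume",
                "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume/*"
            ]
        }
    ]
}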
================================================
FILE: doc/install/container-shell-access.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Configure container shell access
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados can be configured to permit shell access to running containers. This can be handy for debugging, but it could affect the reproducibility of workflows. This feature can be enabled for admin users, or for all users. By default, it is entirely disabled.

The relevant configuration section is:
    Containers:
      ShellAccess:
        # An admin user can use "arvados-client shell" to start an
        # interactive shell (with any user ID) in any running
        # container.
        Admin: false

        # Any user can use "arvados-client shell" to start an
        # interactive shell (with any user ID) in any running
        # container that they started, provided it isn't also
        # associated with a different user's container request.
        #
        # Interactive sessions make it easy to alter the container's
        # runtime environment in ways that aren't recorded or
        # reproducible. Consider the implications for automatic
        # container reuse before enabling and using this feature. In
        # particular, note that starting an interactive session does
        # not disqualify a container from being reused by a different
        # user/workflow in the future.
        User: false
To enable the feature, a firewall change may also be required. This feature requires opening TCP connections from @arvados-controller@ to the range specified in the @net.ipv4.ip_local_port_range@ sysctl on compute nodes. If that range is unknown or hard to determine, it is sufficient to allow TCP connections from @arvados-controller@ to ports 1024-65535 on compute nodes, while allowing traffic that is part of existing TCP connections.

After changing the configuration, @arvados-controller@ must be restarted for the change to take effect. When enabling, shell access will be enabled for any running containers. When disabling, access is removed immediately for any running containers, as well as any containers started subsequently. Restarting @arvados-controller@ will kill any active connections.

Usage instructions for this feature are available in the "User guide":{{site.baseurl}}/user/debugging/container-shell-access.html.

================================================
FILE: doc/install/crunch2/install-compute-node-docker.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Set up a compute node with Docker
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
{% include 'notebox_end' %}

{% include 'notebox_begin_warning' %}
These instructions apply when Containers.RuntimeEngine is set to @docker@; refer to "Set up a compute node with Singularity":install-compute-node-singularity.html when running @singularity@.
{% include 'notebox_end' %}

# "Introduction":#introduction
# "Set up Docker":#docker
# "Update fuse.conf":#fuse
# "Update docker-cleaner.json":#docker-cleaner
# "Install python-arvados-fuse and crunch-run and arvados-docker-cleaner":#install-packages

h2(#introduction). Introduction

This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.

h2(#docker). Set up Docker

See "Set up Docker":../install-docker.html

{% include 'install_cuda' %}

{% assign arvados_component = 'python-arvados-fuse crunch-run arvados-docker-cleaner' %}

{% include 'install_compute_fuse' %}

{% include 'install_docker_cleaner' %}

{% include 'install_packages' %}

{% assign arvados_component = 'arvados-docker-cleaner' %}

{% include 'start_service' %}

================================================
FILE: doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Set up a compute node with Singularity
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@.
If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
{% include 'notebox_end' %}

{% include 'notebox_begin_warning' %}
These instructions apply when Containers.RuntimeEngine is set to @singularity@; refer to "Set up a compute node with Docker":install-compute-node-docker.html when running @docker@.
{% include 'notebox_end' %}

# "Introduction":#introduction
# "Install python-arvados-fuse and crunch-run and squashfs-tools":#install-packages
# "Set up Singularity":#singularity
# "Singularity mksquashfs configuration":#singularity_mksquashfs_configuration

h2(#introduction). Introduction

Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html documentation in the Architecture section.

This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.

{% assign arvados_component = 'python-arvados-fuse crunch-run squashfs-tools' %}

{% include 'install_packages' %}

{% include 'install_cuda' %}

h2(#singularity). Set up Singularity

Follow the "Singularity installation instructions":https://sylabs.io/guides/latest/user-guide/quick_start.html. Note that while the latest stable version is normally expected to be compatible, Arvados is currently tested with singularity 3.10.4.

Make sure @singularity@ and @mksquashfs@ are working:
$ singularity version
singularity-ce version 3.10.4-dirty
$ mksquashfs -version
mksquashfs version 4.4 (2019/08/29)
[...]
Then update @Containers.RuntimeEngine@ in your cluster configuration:
      # Container runtime: "docker" (default) or "singularity"
      RuntimeEngine: singularity
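For orientation, that key lives under each cluster's @Containers@ section, so in a full config.yml the nesting looks like this sketch (with @ClusterID@ standing in for your cluster's ID):

Clusters:
  ClusterID:
    Containers:
      # Container runtime: "docker" (default) or "singularity"
      RuntimeEngine: singularity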
{% include 'singularity_mksquashfs_configuration' %}

================================================
FILE: doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Build a cloud compute node image
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on-premises cluster that will spool jobs to Slurm or LSF.
{% include 'notebox_end' %}

p(#introduction). This page describes how to build a compute node image that can be used to run containers dispatched by Arvados in the cloud.

# "Prerequisites":#prerequisites
## "Check your distribution":#check-distro
## "Create and configure an SSH keypair":#sshkeypair
## "Get the Arvados source":#git-clone
## "Install Ansible":#install-ansible
## "Install Packer and the Ansible plugin":#install-packer
# "Fully automated build with Packer and Ansible":#building
## "Write Ansible settings for the compute node":#ansible-variables
## "Set up Packer for your cloud":#packer-variables
### "AWS":#aws-variables
### "Azure":#azure-variables
## "Run Packer":#run-packer
# "Partially automated build with Ansible":#ansible-build
## "Write Ansible settings for the compute node":#ansible-variables-standalone
## "Write an Ansible inventory":#ansible-inventory
## "Run Ansible":#run-ansible
# "Manual build":#requirements

h2(#prerequisites). Prerequisites

h3(#sshkeypair). Create and configure an SSH keypair

@arvados-dispatch-cloud@ communicates with the compute nodes via SSH. To do this securely, an SSH keypair is needed. The key type must be RSA or ED25519 to work with Amazon EC2.

Generate an ED25519 keypair with no passphrase:
~$ ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_dispatcher
Generating public/private ed25519 key pair.
Your identification has been saved in /home/user/.ssh/id_dispatcher.
Your public key has been saved in /home/user/.ssh/id_dispatcher.pub.
The key fingerprint is:
[...]
After you do this, the contents of the private key in @~/.ssh/id_dispatcher@ need to be stored in your "cluster configuration file":{{ site.baseurl }}/admin/config.html under @Containers.DispatchPrivateKey@.

The public key at @~/.ssh/id_dispatcher.pub@ will need to be authorized to access instances booted from the image. Keep this file; our Ansible playbook will read it to set this up for you.

h3(#git-clone). Get the Arvados source

Compute node templates are only available in the Arvados source tree. Clone a copy of the Arvados source for the version of Arvados you're using in a directory convenient for you:

{% include 'branchname' %}
~$ git clone --depth=1 --branch={{ branchname }} https://github.com/arvados/arvados ~/arvados
h3(#install-ansible). Install Ansible

{% include 'install_ansible' header_level: 'h4' %}

h3(#install-packer). Install Packer and the Ansible plugin

We provide Packer templates that can automatically create a compute instance, configure it with Ansible, shut it down, and create a cloud image from the result. "Install Packer following their instructions.":https://developer.hashicorp.com/packer/docs/install After you do, install Packer's Ansible provisioner by running:
~$ packer plugins install github.com/hashicorp/ansible
h2(#building). Fully automated build with Packer and Ansible

After you have both tools installed, you can configure both with information about your Arvados cluster and cloud environment and then run a fully automated build.

h3(#ansible-variables). Write Ansible settings for the compute node

In the @tools/compute-images@ directory of your Arvados source checkout, copy @host_config.example.yml@ to @host_config.yml@. Edit @host_config.yml@ with information about how your compute nodes should be set up following the instructions in the comments.

h3(#packer-variables). Set up Packer for your cloud

You need to provide different configuration to Packer depending on which cloud you're deploying Arvados in.

h4(#aws-variables). AWS

Install Packer's AWS builder by running:
~$ packer plugins install github.com/hashicorp/amazon
In the @tools/compute-images@ directory of your Arvados source checkout, copy @aws_config.example.json@ to @aws_config.json@. Fill in values for the configuration settings as follows:

* If you already have AWS credentials configured that Packer can use to create and manage an EC2 instance, set @aws_profile@ to the name of those credentials in your configuration. Otherwise, set @aws_access_key@ and @aws_secret_key@ with information from an API token with those permissions.
* Set @aws_region@, @vpc_id@, and @subnet_id@ with identifiers for the network where Packer should create the EC2 instance.
* Set @aws_source_ami@ to the AMI of the base image that should be booted and used as the base for your compute node image. Set @ssh_user@ to the name of the administrator account that is used on that image.
* Set @aws_volume_gb@ to the size of the image you want to create in GB. The default 20 should be sufficient for most installs. You may increase this if you're using a custom source AMI with more software pre-installed.
* Set @arvados_cluster@ to the same five-alphanumeric identifier used under @Clusters@ in your Arvados cluster configuration.
* If you installed Ansible to a nonstandard location, set @ansible_command@ to the absolute path of @ansible-playbook@. For example, if you installed Ansible in a virtualenv at @~/ansible@, set @ansible_command@ to {% raw %}"{{env `HOME`}}/ansible/bin/ansible-playbook"{% endraw %}.

When you finish writing your configuration, "run Packer":#run-packer.

h4(#azure-variables). Azure

{% comment %} FIXME: Incomplete {% endcomment %}

Install Packer's Azure builder by running:
~$ packer plugins install github.com/hashicorp/azure
In the @tools/compute-images@ directory of your Arvados source checkout, copy @azure_config.example.json@ to @azure_config.json@. Fill in values for the configuration settings as follows:

* The settings load credentials from Azure's standard environment variables. As long as you have these environment variables set in the shell before you run Packer, they will be loaded as normal. Alternatively, you can set them directly in the configuration file. These secrets can be generated from the Azure portal, or with the CLI using a command like:
~$ az ad sp create-for-rbac --name Packer --password ...
* Set @location@ and @resource_group@ with identifiers for where Packer should create the cloud instance.
* Set @image_sku@ to the identifier of the base image that should be booted and used as the base for your compute node image. Set @ssh_user@ to the name of the administrator account you want to use on that image.
* Set @ssh_private_key_file@ to the path with the private key you generated earlier for the dispatcher to use. For example, {% raw %}"{{env `HOME`}}/.ssh/id_dispatcher"{% endraw %}.
* Set @arvados_cluster@ to the same five-character alphanumeric identifier used under @Clusters@ in your Arvados cluster configuration.
* If you installed Ansible to a nonstandard location, set @ansible_command@ to the absolute path of @ansible-playbook@. For example, if you installed Ansible in a virtualenv at @~/ansible@, set @ansible_command@ to {% raw %}"{{env `HOME`}}/ansible/bin/ansible-playbook"{% endraw %}.

When you finish writing your configuration, "run Packer":#run-packer.

h3(#run-packer). Run Packer

In the @tools/compute-images@ directory of your Arvados source checkout, run Packer with your configuration and the template appropriate for your cloud. For example, to build an image on AWS, run:
arvados/tools/compute-images$ packer build -var-file=aws_config.json aws_template.json
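To build an image on Azure, replace both instances of *@aws@* with *@azure@*:

arvados/tools/compute-images$ packer build -var-file=azure_config.json azure_template.json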
{% include 'notebox_begin_warning' %}
If @packer build@ fails early with @ok=0@, @changed=0@, @failed=1@, and a message like this:
TASK [Gathering Facts] *********************************************************
fatal: [default]: FAILED! => {"msg": "failed to transfer file to /home/you/.ansible/tmp/ansible-local-1821271ym6nh1cw/tmp2kyfkhy4 /home/admin/.ansible/tmp/ansible-tmp-1732380360.0917368-1821275-172216075852170/AnsiballZ_setup.py:\n\n"}

PLAY RECAP *********************************************************************
default : ok=0  changed=0  unreachable=0  failed=1  skipped=0  rescued=0  ignored=0
This might mean the version of @scp@ on your computer is trying to use new protocol features that don't work with the older SSH server on the cloud image. You can work around this by running:
$ export ANSIBLE_SCP_EXTRA_ARGS="'-O'"
Then rerun your full @packer build@ command from the same shell.
{% include 'notebox_end' %}

If the build succeeds, it will report the identifier of your image at the end of the process. For example, when you build an AWS image, it will look like this:
==> Builds finished. The artifacts of successful builds are:
--> amazon-ebs: AMIs were created:
us-east-1: ami-012345abcdef56789
That identifier can now be set as @CloudVMs.ImageID@ in your cluster configuration. You do not need to run any other compute node build process on this page; continue to "installing the cloud dispatcher":install-dispatch-cloud.html.

h2(#ansible-build). Partially automated build with Ansible

If Arvados does not include a template for your cloud, or you do not have permission to run Packer, you can run the Ansible playbook by itself. This can set up a base Debian or Ubuntu system with all the software and configuration necessary to do Arvados compute work. After it's done, you can manually snapshot the node and create a cloud image from it.

h3(#ansible-variables-standalone). Write Ansible settings for the compute node

In the @tools/compute-images@ directory of your Arvados source checkout, copy @host_config.example.yml@ to @host_config.yml@. Edit @host_config.yml@ with information about how your compute nodes should be set up, following the instructions in the comments. Note that you *must set* @arvados_cluster_id@ in this file since you are not running Packer.
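For example, a minimal @host_config.yml@ excerpt (@zzzzz@ is a placeholder; use your actual cluster ID):

# host_config.yml (excerpt)
arvados_cluster_id: zzzzz

h3(#ansible-inventory). Write an Ansible inventory

The compute node playbook runs on a host named @default@. In the @tools/compute-images@ directory of your Arvados source checkout, write a file named @inventory.ini@ with information about how to connect to this node via SSH. It should be one line like this: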
# Example inventory.ini for an Arvados compute node
default ansible_host=192.0.2.9 ansible_user=admin
* @ansible_host@ can be the running node's hostname or IP address. You need to be able to reach this host from the system where you're running Ansible.
* @ansible_user@ names the user account that Ansible should use for the SSH connection. It needs to have permission to use @sudo@ on the running node.

You can add other Ansible configuration options like @ansible_port@ to your inventory if needed. Refer to the "Ansible inventory documentation":https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html for details.

h3(#run-ansible). Run Ansible

If you installed Ansible inside a virtualenv, activate that virtualenv now. Then, in the @tools/compute-images@ directory of your Arvados source checkout, run @ansible-playbook@ with your inventory and configuration:
arvados/tools/compute-images$ ansible-playbook --ask-become-pass --inventory=inventory.ini --extra-vars=@host_config.yml ../ansible/build-compute-image.yml
You'll be prompted with @BECOME password:@. Enter the password for the @ansible_user@ you defined in the inventory to use sudo on the running node.

{% include 'notebox_begin_warning' %}
If @ansible-playbook@ fails early with @ok=0@, @changed=0@, @failed=1@, and a message like this:
TASK [Gathering Facts] *********************************************************
fatal: [default]: FAILED! => {"msg": "failed to transfer file to /home/you/.ansible/tmp/ansible-local-1821271ym6nh1cw/tmp2kyfkhy4 /home/admin/.ansible/tmp/ansible-tmp-1732380360.0917368-1821275-172216075852170/AnsiballZ_setup.py:\n\n"}

PLAY RECAP *********************************************************************
default : ok=0  changed=0  unreachable=0  failed=1  skipped=0  rescued=0  ignored=0
This might mean the version of @scp@ on your computer is trying to use new protocol features that don't work with the older SSH server on the cloud image. You can work around this by running:
$ export ANSIBLE_SCP_EXTRA_ARGS="'-O'"
Then rerun your full @ansible-playbook@ command from the same shell.
{% include 'notebox_end' %}

If it succeeds, Ansible should report a "PLAY RECAP" with @failed=0@:
PLAY RECAP *********************************************************************
default : ok=41  changed=37  unreachable=0  failed=0  skipped=5  rescued=0  ignored=0
Your node is now ready to run Arvados compute work. You can snapshot the node, create an image from it, and set that image as @CloudVMs.ImageID@ in your Arvados cluster configuration. The details of that process are cloud-specific and out of scope for this documentation. You do not need to run any other compute node build process on this page; continue to "installing the cloud dispatcher":install-dispatch-cloud.html.

h2(#requirements). Manual build

If you cannot run Ansible, you can create a cloud instance, manually set it up to be a compute node, and then create an image from it. The details of this process depend on which distribution you use on the cloud instance and which cloud you use; all these variations are out of scope for this documentation. These are the requirements:

* Except on Azure, the SSH public key you generated previously must be an authorized key for the user that Crunch is configured to use. For example, if your cluster's @CloudVMs.DriverParameters.AdminUsername@ setting is *@crunch@*, then the dispatcher's public key should be listed in @~crunch/.ssh/authorized_keys@ in the image. This user must also be allowed to use sudo without a password unless the user is @root@. (On Azure, the dispatcher makes additional calls to automatically set up and authorize the user, making these steps unnecessary.)
* SSH needs to be running and reachable by @arvados-dispatch-cloud@ on the port named by @CloudVMs.SSHPort@ in your cluster's configuration file (default 22).
* Install the @python3-arvados-fuse@ package. Enable the @user_allow_other@ option in @/etc/fuse.conf@.
* Install either "Docker":https://docs.docker.com/engine/install/ or "Singularity":https://docs.sylabs.io/guides/3.0/user-guide/installation.html as appropriate, based on the @Containers.RuntimeEngine@ setting in your cluster's configuration file. If you install Docker, you may also want to install and set up the @arvados-docker-cleaner@ package to conserve space on long-running instances, but it's not strictly required.
* All available scratch space should be made available under @/tmp@.

================================================
FILE: doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install the cloud dispatcher
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.
{% include 'notebox_end' %}

# "Introduction":#introduction
# "Create compute node VM image":#create-image
# "Update config.yml":#update-config
# "Install arvados-dispatch-cloud":#install-packages
# "Start the service":#start-service
# "Restart the API server and controller":#restart-api
# "Confirm working installation":#confirm-working

h2(#introduction). Introduction

The cloud dispatch service is for running containers on cloud VMs. It works with Microsoft Azure and Amazon EC2; future versions will also support Google Compute Engine.

The cloud dispatch service can run on any node that can connect to the Arvados API service, the cloud provider's API, and the SSH service on cloud VMs. It is not resource-intensive, so you can run it on the API server node.
More detail about the internal operation of the dispatcher can be found in the "architecture section":{{site.baseurl}}/architecture/dispatchcloud.html.

h2(#update-config). Update config.yml

h3. Configure CloudVMs

Add or update the following portions of your cluster configuration file, @config.yml@. Refer to "config.defaults.yml":{{site.baseurl}}/admin/config.html for information about additional configuration options. The @DispatchPrivateKey@ should be the *private* key generated in "Create an SSH keypair":install-compute-node.html#sshkeypair.
    Services:
      DispatchCloud:
        InternalURLs:
          "http://localhost:9006": {}
    Containers:
      CloudVMs:
        # BootProbeCommand is a shell command that succeeds when an instance is ready for service
        BootProbeCommand: "sudo systemctl status docker"

        # --- driver-specific configuration goes here --- see Amazon and Azure examples below ---

      DispatchPrivateKey: |
        -----BEGIN RSA PRIVATE KEY-----
        MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks
        ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr
        FJ+amZ7oYMDof6QEdwl6KNDfIddL+NfBCLQTVInOAaNss7GRrxLTuTV7HcRaIUUI
        jYg0Ibg8ZZTzQxCvFXXnjseTgmOcTv7CuuGdt91OVdoq8czG/w8TwOhymEb7mQlt
        lXuucwQvYgfoUgcnTgpJr7j+hafp75g2wlPozp8gJ6WQ2yBWcfqL2aw7m7Ll88Nd
        [...]
        oFyAjVoexx0RBcH6BveTfQtJKbktP1qBO4mXo2dP0cacuZEtlAqW9Eb06Pvaw/D9
        foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
        -----END RSA PRIVATE KEY-----
    InstanceTypes:
      x1md:
        ProviderType: x1.medium
        VCPUs: 8
        RAM: 64GiB
        IncludedScratch: 64GB
        Price: 0.62
      x1lg:
        ProviderType: x1.large
        VCPUs: 16
        RAM: 128GiB
        IncludedScratch: 128GB
        Price: 1.23
h3(#GPUsupport). NVIDIA GPU support

To specify instance types with NVIDIA GPUs, the compute image must be built with CUDA support (this means setting @arvados_compute_nvidia: true@ in @host_config.yml@ when "building the compute image":install-compute-node.html). You must include an additional @GPU@ section for each instance type that includes GPUs:
    InstanceTypes:
      g4dn:
        ProviderType: g4dn.xlarge
        VCPUs: 4
        RAM: 16GiB
        IncludedScratch: 125GB
        Price: 0.56
        GPU:
          Stack: "cuda"
          DriverVersion: "11.4"
          HardwareTarget: "7.5"
          DeviceCount: 1
          VRAM: 16GiB
The @DriverVersion@ is the version of the CUDA toolkit installed in your compute image (in "X.Y" format; do not include the patchlevel). The @HardwareTarget@ is the "CUDA compute capability of the GPUs available for this instance type":https://developer.nvidia.com/cuda-gpus in "X.Y" format. The @DeviceCount@ is the number of GPU devices available for this instance type. @VRAM@ is the amount of VRAM available per GPU device.

h3(#ROCmGPUsupport). AMD GPU support

To specify instance types with AMD GPUs, the compute image must be built with ROCm support (currently, installing ROCm automatically is not supported by the Arvados compute image Ansible playbook, but it can be added manually after the fact). You must include an additional @GPU@ section for each instance type that includes GPUs:
    InstanceTypes:
      g4ad:
        ProviderType: g4ad.xlarge
        VCPUs: 4
        RAM: 16GiB
        IncludedScratch: 125GB
        Price: 0.56
        GPU:
          Stack: "rocm"
          DriverVersion: "6.2"
          HardwareTarget: "gfx1100"
          DeviceCount: 1
          VRAM: 16GiB
@DriverVersion@ is the version of the ROCm toolkit installed in your compute image (in "X.Y" format; do not include the patchlevel). @HardwareTarget@ (e.g. gfx1100) corresponds to the GPU architecture of the device. Use @rocminfo@ to determine your hardware target. See also "Accelerator and GPU hardware specifications":https://rocm.docs.amd.com/en/latest/reference/gpu-arch-specs.html (use the "LLVM target name" column) and the "LLVM AMDGPU backend documentation":https://llvm.org/docs/AMDGPUUsage.html. @DeviceCount@ is the number of GPU devices available for this instance type. @VRAM@ is the amount of VRAM available per GPU device.

h3(#aws-ebs-autoscaler). EBS Autoscale configuration

See "Autoscaling compute node scratch space":install-compute-node.html#aws-ebs-autoscaler for details about compute image configuration. The @Containers.InstanceTypes@ list should be modified so that all @AddedScratch@ lines are removed, and the @IncludedScratch@ value should be set to 5 TB. This way, the scratch space requirements will be met by all of the defined instance types. For example:
    InstanceTypes:
      c5large:
        ProviderType: c5.large
        VCPUs: 2
        RAM: 4GiB
        IncludedScratch: 5TB
        Price: 0.085
      m5large:
        ProviderType: m5.large
        VCPUs: 2
        RAM: 8GiB
        IncludedScratch: 5TB
        Price: 0.096
...
You will also need to create an IAM role in AWS with these permissions:
{
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ec2:AttachVolume",
                "ec2:DescribeVolumeStatus",
                "ec2:DescribeVolumes",
                "ec2:DescribeTags",
                "ec2:ModifyInstanceAttribute",
                "ec2:DescribeVolumeAttribute",
                "ec2:CreateVolume",
                "ec2:DeleteVolume",
                "ec2:CreateTags"
            ],
            "Resource": "*"
        }
    ]
}
Then set @Containers.CloudVMs.DriverParameters.IAMInstanceProfile@ to the name of the IAM role. This will make @arvados-dispatch-cloud@ pass an IAM instance profile to the compute nodes when they start up, giving them sufficient permissions to attach and grow EBS volumes.

h3. AWS Credentials for Local Keepstore on Compute node

When @Containers.LocalKeepBlobBuffersPerVCPU@ is non-zero, the compute node will spin up a local Keepstore service for direct storage access. If Keep is backed by S3, the compute node will need to be able to access the S3 bucket.

If the AWS credentials for S3 access are configured in @config.yml@ (i.e. @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@), these credentials will be made available to the local Keepstore on the compute node to access S3 directly, and no further configuration is necessary.

If @config.yml@ does not have @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@ defined, Keepstore uses instance metadata to retrieve IAM role credentials. The @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter must be configured with the name of a profile whose IAM role has permission to access the S3 bucket(s). With this setup, @arvados-dispatch-cloud@ will attach the IAM role to the compute node as it is created. The instance profile name is "often identical to the name of the IAM role":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile.

*If you are also using the EBS Autoscale feature, the role in @IAMInstanceProfile@ must have both EC2 and S3 permissions.*

h3. Minimal configuration example for Amazon EC2

The ImageID value is the compute node image that was built in "the previous section":install-compute-node.html#aws.
    Containers:
      CloudVMs:
        ImageID: ami-01234567890abcdef
        Driver: ec2
        DriverParameters:
          # If you are not using an IAM role for authentication, specify access
          # credentials here. Otherwise, omit or set AccessKeyID and
          # SecretAccessKey to an empty value.
          AccessKeyID: XXXXXXXXXXXXXXXXXXXX
          SecretAccessKey: YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY

          SecurityGroupIDs:
          - sg-0123abcd
          SubnetID: subnet-0123abcd
          Region: us-east-1
          EBSVolumeType: gp2
          AdminUsername: arvados
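If your compute nodes need an IAM instance profile (for the local Keepstore or EBS Autoscale setups described above), it is set in the same @DriverParameters@ section. A minimal sketch; @my-compute-node-role@ is a placeholder for your own profile name:

    Containers:
      CloudVMs:
        DriverParameters:
          # Instance profile to attach to each compute node at boot.
          IAMInstanceProfile: my-compute-node-role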
h3(#IAM). Example IAM policy for cloud dispatcher

Example policy for the IAM role used by the cloud dispatcher:
{
    "Id": "arvados-dispatch-cloud policy",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                  "ec2:CreateTags",
                  "ec2:Describe*",
                  "ec2:CreateImage",
                  "ec2:CreateKeyPair",
                  "ec2:ImportKeyPair",
                  "ec2:DeleteKeyPair",
                  "ec2:RunInstances",
                  "ec2:StopInstances",
                  "ec2:TerminateInstances",
                  "ec2:ModifyInstanceAttribute",
                  "ec2:CreateSecurityGroup",
                  "ec2:DeleteSecurityGroup",
                  "iam:PassRole"
            ],
            "Resource": "*"
        }
    ]
}
h3. Minimal configuration example for Azure

Using managed disks:

The ImageID value is the compute node image that was built in "the previous section":install-compute-node.html#azure.
    Containers:
      CloudVMs:
        ImageID: "zzzzz-compute-v1597349873"
        Driver: azure
        # (azure) managed disks: set MaxConcurrentInstanceCreateOps to 20 to avoid timeouts, cf
        # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image
        MaxConcurrentInstanceCreateOps: 20
        DriverParameters:
          # Credentials.
          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX

          # Data center where VMs will be allocated
          Location: centralus

          # The resource group where the VM and virtual NIC will be
          # created.
          ResourceGroup: zzzzz
          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup
          Network: xxxxx
          Subnet: xxxxx-subnet-private

          # The resource group where the disk image is stored, only needs to
          # be specified if it is different from ResourceGroup
          ImageResourceGroup: aaaaa

Azure recommends using managed images. If you plan to start more than 20 VMs simultaneously, Azure recommends using a shared image gallery instead, to avoid slowdowns and timeouts during the creation of the VMs.

Using an image from a shared image gallery:
    Containers:
      CloudVMs:
        ImageID: "shared_image_gallery_image_definition_name"
        Driver: azure
        DriverParameters:
          # Credentials.
          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX

          # Data center where VMs will be allocated
          Location: centralus

          # The resource group where the VM and virtual NIC will be
          # created.
          ResourceGroup: zzzzz
          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup
          Network: xxxxx
          Subnet: xxxxx-subnet-private

          # The resource group where the disk image is stored, only needs to
          # be specified if it is different from ResourceGroup
          ImageResourceGroup: aaaaa

          # (azure) shared image gallery: the name of the gallery
          SharedImageGalleryName: "shared_image_gallery_1"
          # (azure) shared image gallery: the version of the image definition
          SharedImageGalleryImageVersion: "0.0.1"

Using unmanaged disks (deprecated):

The ImageID value is the compute node image that was built in "the previous section":install-compute-node.html#azure.
    Containers:
      CloudVMs:
        ImageID: "https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd"
        Driver: azure
        DriverParameters:
          # Credentials.
          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX

          # Data center where VMs will be allocated
          Location: centralus

          # The resource group where the VM and virtual NIC will be
          # created.
          ResourceGroup: zzzzz
          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup
          Network: xxxxx
          Subnet: xxxxx-subnet-private

          # Where to store the VM VHD blobs
          StorageAccount: example
          BlobContainer: vhds

Get the @SubscriptionID@ and @TenantID@:
$ az account list
[
  {
    "cloudName": "AzureCloud",
    "id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXX",
    "isDefault": true,
    "name": "Your Subscription",
    "state": "Enabled",
    "tenantId": "YYYYYYYY-YYYY-YYYY-YYYYYYYY",
    "user": {
      "name": "you@example.com",
      "type": "user"
    }
  }
]
You will need to create a "service principal" to use as a delegated authority for API access.
$ az ad app create --display-name "Arvados Dispatch Cloud (ClusterID)" --homepage "https://arvados.org" --identifier-uris "https://ClusterID.example.com" --end-date 2299-12-31 --password Your_Password
$ az ad sp create "appId"
(appId is part of the response of the previous command)
$ az role assignment create --assignee "objectId" --role Owner --scope /subscriptions/{subscriptionId}/
(objectId is part of the response of the previous command)
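Now update your @config.yml@ file: @ClientID@ is the 'appId' value, and @ClientSecret@ is what was provided as Your_Password. A sketch of the relevant section (all values are placeholders):

    Containers:
      CloudVMs:
        DriverParameters:
          # "appId" from the `az ad app create` response
          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
          # The password you supplied to `az ad app create`
          ClientSecret: Your_Password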
h3. Test your configuration

Run the @cloudtest@ tool to verify that your configuration works. This creates a new cloud VM, confirms that it boots correctly and accepts your configured SSH private key, and shuts it down.
~$ arvados-server cloudtest && echo "OK!"
Refer to the "cloudtest tool documentation":../../admin/cloudtest.html for more information. {% assign arvados_component = 'arvados-dispatch-cloud' %} {% include 'install_packages' %} {% include 'start_service' %} {% include 'restart_api' %} h2(#confirm-working). Confirm working installation On the dispatch node, start monitoring the arvados-dispatch-cloud logs:
# journalctl -o cat -fu arvados-dispatch-cloud.service
In another terminal window, use the diagnostics tool to run a simple container.
# arvados-client sudo diagnostics
INFO       5: running health check (same as `arvados-server check`)
INFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest
...
INFO     160: running a container
INFO      ... container request submitted, waiting up to 10m for container to run
After performing a number of other quick tests, this will submit a new container request and wait for it to finish. While the diagnostics tool is waiting, the @arvados-dispatch-cloud@ logs will show details about creating a cloud instance, waiting for it to be ready, and scheduling the new container on it.

You can also use the "arvados-dispatch-cloud API":{{site.baseurl}}/api/dispatch.html to get a list of queued and running jobs and cloud instances. Use your @ManagementToken@ to test the dispatcher's endpoint. For example, when one container is running:
~$ curl -sH "Authorization: Bearer $token" http://localhost:9006/arvados/v1/dispatch/containers
{
  "items": [
    {
      "container": {
        "uuid": "zzzzz-dz642-hdp2vpu9nq14tx0",
        ...
        "state": "Running",
        "scheduling_parameters": {
          "partitions": null,
          "preemptible": false,
          "max_run_time": 0
        },
        "exit_code": 0,
        "runtime_status": null,
        "started_at": null,
        "finished_at": null
      },
      "instance_type": {
        "Name": "Standard_D2s_v3",
        "ProviderType": "Standard_D2s_v3",
        "VCPUs": 2,
        "RAM": 8589934592,
        "Scratch": 16000000000,
        "IncludedScratch": 16000000000,
        "AddedScratch": 0,
        "Price": 0.11,
        "Preemptible": false
      }
    }
  ]
}
A similar request can be made to the @http://localhost:9006/arvados/v1/dispatch/instances@ endpoint, for example:
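~$ curl -sH "Authorization: Bearer $token" http://localhost:9006/arvados/v1/dispatch/instances

After the container finishes, you can get the container record by UUID *from a shell server* to see its results: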
shell:~$ arv get zzzzz-dz642-hdp2vpu9nq14tx0
{
 ...
 "exit_code":0,
 "log":"a01df2f7e5bc1c2ad59c60a837e90dc6+166",
 "output":"d41d8cd98f00b204e9800998ecf8427e+0",
 "state":"Complete",
 ...
}
You can use standard Keep tools to view the container's output and logs from their corresponding fields. For example, to see the logs from the collection referenced in the @log@ field:
~$ arv keep ls a01df2f7e5bc1c2ad59c60a837e90dc6+166
./crunch-run.txt
./stderr.txt
./stdout.txt
~$ arv-get a01df2f7e5bc1c2ad59c60a837e90dc6+166/stdout.txt
2016-08-05T13:53:06.201011Z Hello, Crunch!
If the container does not dispatch successfully, refer to the @arvados-dispatch-cloud@ logs for information about why it failed.

================================================
FILE: doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install the LSF dispatcher
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
@arvados-dispatch-lsf@ is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you use Slurm or if you are installing a cloud cluster.
{% include 'notebox_end' %}

h2(#overview). Overview

Containers can be dispatched to an LSF cluster. The dispatcher sends work to the cluster using LSF's @bsub@ command, so it works in a variety of LSF configurations.

In order to run containers, you must choose a user that has permission to set up FUSE mounts and run Singularity/Docker containers on each compute node. This install guide refers to this user as the @crunch@ user. We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions. However, you can run the dispatcher under any account with sufficient permissions across the cluster.

Set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.

*Current limitations*:

* Arvados container priority is not propagated to LSF job priority. This can cause inefficient use of compute resources, and even deadlock if there are fewer compute nodes than concurrent Arvados workflows.
* Combining LSF with Docker may not work, depending on LSF configuration and user/group IDs (if LSF only sets up the configured user's primary group ID when executing the crunch-run process on a compute node, it may not have permission to connect to the Docker daemon).

h2(#update-config). Update config.yml

Arvados-dispatch-lsf reads the common configuration file at @/etc/arvados/config.yml@. Add a DispatchLSF entry to the Services section, using the hostname where @arvados-dispatch-lsf@ will run, and an available port:
    Services:
      DispatchLSF:
        InternalURLs:
          "http://hostname.zzzzz.arvadosapi.com:9007": {}
Review the following configuration parameters and adjust as needed.

{% include 'hpc_max_gateway_tunnels' %}

h3(#BsubSudoUser). Containers.LSF.BsubSudoUser

arvados-dispatch-lsf uses @sudo@ to execute @bsub@, for example @sudo -E -u crunch bsub [...]@. This means the @crunch@ account must exist on the hosts where LSF jobs run ("execution hosts"), as well as on the host where you are installing the Arvados LSF dispatcher (the "submission host"). To use a user account other than @crunch@, configure @BsubSudoUser@:
    Containers:
      LSF:
        BsubSudoUser: lsfuser
Alternatively, you can arrange for the arvados-dispatch-lsf process to run as an unprivileged user that has a corresponding account on all compute nodes, and disable the use of @sudo@ by specifying an empty string:
    Containers:
      LSF:
        # Don't use sudo
        BsubSudoUser: ""
h3(#BsubArgumentsList). Containers.LSF.BsubArgumentsList

When arvados-dispatch-lsf invokes @bsub@, you can add arguments to the command by specifying @BsubArgumentsList@. You can use this to send the jobs to specific cluster partitions or add resource requests. Set @BsubArgumentsList@ to an array of strings.

Template variables starting with % will be substituted as follows:

table(table table-bordered table-condensed).
|_. Variable|_. Value|
|%U|uuid|
|%C|number of VCPUs|
|%M|memory in MB|
|%T|tmp in MB|
|%G|number of GPU devices (@runtime_constraints.cuda.device_count@)|
|%W|maximum job run time in minutes, suitable for use with @-W@ or @-We@ flags (see "MaxRunTimeOverhead":#MaxRunTimeOverhead and "MaxRunTimeDefault":#MaxRunTimeDefault below)|

Use %% to express a literal %. The %%J in the default will be changed to %J, which is interpreted by @bsub@ itself. For example:
    Containers:
      LSF:
        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]", "-R", "select[mem>=%MMB]", "-R", "select[tmp>=%TMB]", "-R", "select[ncpus>=%C]", "-We", "%W"]
Note that the default value for @BsubArgumentsList@ uses the @-o@ and @-e@ arguments to write stdout/stderr data to files in @/tmp@ on the compute nodes, which is helpful for troubleshooting installation/configuration problems. Ensure you have something in place to delete old files from @/tmp@, or adjust these arguments accordingly.

h3(#BsubCUDAArguments). Containers.LSF.BsubCUDAArguments

If the container requests access to GPUs (@runtime_constraints.cuda.device_count@ of the container request is greater than zero), the command line arguments in @BsubCUDAArguments@ will be added to the command line _after_ @BsubArgumentsList@. This should consist of the additional @bsub@ flags your site requires to schedule the job on a node with GPU support. Set @BsubCUDAArguments@ to an array of strings. For example:
    Containers:
      LSF:
        BsubCUDAArguments: ["-gpu", "num=%G"]
h3(#MaxRunTimeOverhead). Containers.LSF.MaxRunTimeOverhead

Extra time to add to each container's @scheduling_parameters.max_run_time@ value when substituting for @%W@ in @BsubArgumentsList@, to account for time spent setting up the container image, copying output files, etc.

h3(#MaxRunTimeDefault). Containers.LSF.MaxRunTimeDefault

Default @max_run_time@ value to use for containers that do not specify one in @scheduling_parameters.max_run_time@. If this is zero, and @BsubArgumentsList@ contains @"-W", "%W"@ or @"-We", "%W"@, those arguments will be dropped when submitting containers that do not specify @scheduling_parameters.max_run_time@.

h3(#PollInterval). Containers.PollInterval

arvados-dispatch-lsf polls the API server periodically for new containers to run. The @PollInterval@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @s@, @m@, or @h@. For example:
    Containers:
      PollInterval: 10s
h3(#ReserveExtraRAM). Containers.ReserveExtraRAM: Extra RAM for jobs

Extra RAM to reserve (in bytes) on each LSF job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@. If not provided, the default value is zero. Supports suffixes @KB@, @KiB@, @MB@, @MiB@, @GB@, @GiB@, @TB@, @TiB@, @PB@, @PiB@, @EB@, @EiB@ (where @KB@ is 10[^3^], @KiB@ is 2[^10^], @MB@ is 10[^6^], @MiB@ is 2[^20^] and so forth).
    Containers:
      ReserveExtraRAM: 256MiB
h3(#CrunchRunArgumentList). Containers.CrunchRunArgumentList: Using host networking for containers

Older Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups. This is indicated by blocked kernel tasks in "Workqueue: netns cleanup_net". If you are experiencing this problem, as a workaround you can disable use of network namespaces by Docker across the cluster. Be aware this reduces container isolation, which may be a security risk.
    Containers:
      CrunchRunArgumentsList:
        - "-container-enable-networking=always"
        - "-container-network-mode=host"
h3(#InstanceTypes). InstanceTypes: Avoid submitting jobs with unsatisfiable resource constraints

LSF does not provide feedback when a submitted job's RAM, CPU, or disk space constraints cannot be satisfied by any node: the job will wait in the queue indefinitely with "pending" status, reported by Arvados as "queued".

As a workaround, you can configure @InstanceTypes@ with your LSF cluster's compute node sizes. Arvados will use these sizes to determine when a container is impossible to run, and cancel it instead of submitting an LSF job.

Apart from detecting non-runnable containers, the configured instance types will not have any effect on scheduling.
    InstanceTypes:
      most-ram:
        VCPUs: 8
        RAM: 640GiB
        IncludedScratch: 640GB
      most-cpus:
        VCPUs: 32
        RAM: 256GiB
        IncludedScratch: 640GB
      gpu:
        VCPUs: 8
        RAM: 256GiB
        IncludedScratch: 640GB
        CUDA:
          DriverVersion: "11.4"
          HardwareCapability: "7.5"
          DeviceCount: 1
{% assign arvados_component = 'arvados-dispatch-lsf' %}

{% include 'install_packages' %}

{% include 'start_service' %}

{% include 'restart_api' %}

h2(#confirm-working). Confirm working installation

On the dispatch node, start monitoring the arvados-dispatch-lsf logs:
# journalctl -o cat -fu arvados-dispatch-lsf.service
In another terminal window, use the diagnostics tool to run a simple container.
# arvados-client sudo diagnostics
INFO       5: running health check (same as `arvados-server check`)
INFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest
...
INFO     160: running a container
INFO      ... container request submitted, waiting up to 10m for container to run
After performing a number of other quick tests, this will submit a new container request and wait for it to finish. While the diagnostics tool is waiting, the @arvados-dispatch-lsf@ logs will show details about submitting an LSF job to run the container.

================================================
FILE: doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Configure Slurm
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
{% include 'notebox_end' %}

Containers can be dispatched to a Slurm cluster. The dispatcher sends work to the cluster using Slurm's @sbatch@ command, so it works in a variety of Slurm configurations.

In order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node. This install guide refers to this user as the @crunch@ user. We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions. However, you can run the dispatcher under any account with sufficient permissions across the cluster.

We will assume that you have Slurm and munge running.

h3. Sample Slurm configuration file

Here's an example @slurm.conf@ for use with Arvados:

ControlMachine=ClusterID.example.com
SlurmctldPort=6817
SlurmdPort=6818
AuthType=auth/munge
StateSaveLocation=/tmp
SlurmdSpoolDir=/tmp/slurmd
SwitchType=switch/none
MpiDefault=none
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmdPidFile=/var/run/slurmd.pid
ProctrackType=proctrack/pgid
CacheGroups=0
ReturnToService=2
TaskPlugin=task/affinity
#
# TIMERS
SlurmctldTimeout=300
SlurmdTimeout=300
InactiveLimit=0
MinJobAge=300
KillWait=30
Waittime=0
#
# SCHEDULING
SchedulerType=sched/backfill
SchedulerPort=7321
SelectType=select/linear
FastSchedule=0
#
# LOGGING
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
JobCompType=jobcomp/none
#JobCompLoc=
JobAcctGatherType=jobacct_gather/none
#
# COMPUTE NODES
NodeName=DEFAULT
PartitionName=DEFAULT MaxTime=INFINITE State=UP

NodeName=compute[0-255]
PartitionName=compute Nodes=compute[0-255] Default=YES Shared=YES
h3. Slurm configuration essentials

Whenever you change this file, you will need to update the copy _on every compute node_ as well as the controller node, and then run @sudo scontrol reconfigure@.

*@ControlMachine@* should be a DNS name that resolves to the Slurm controller (dispatch/API server). This must resolve correctly on all Slurm worker nodes as well as the controller itself. In general, Slurm is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.

*@SelectType=select/linear@* is needed on cloud-based installations that update node sizes dynamically, but it can only schedule one container at a time on each node. On a static or homogeneous cluster, use @SelectType=select/cons_res@ with @SelectTypeParameters=CR_CPU_Memory@ instead to enable node sharing.

*@NodeName=compute[0-255]@* establishes that the hostnames of the worker nodes will be compute0, compute1, etc. through compute255.
* There are several ways to compress sequences of names, like @compute[0-9,80,100-110]@. See the "hostlist" discussion in the @slurm.conf(5)@ and @scontrol(1)@ man pages for more information.
* It is not necessary for all of the nodes listed here to be alive in order for Slurm to work, although you should make sure the DNS entries exist. It is easiest to define lots of hostnames up front, assigning them to real nodes and updating your DNS records as the nodes appear. This minimizes the frequency of @slurm.conf@ updates and use of @scontrol reconfigure@.

Each hostname in @slurm.conf@ must also resolve correctly on all Slurm worker nodes as well as the controller itself. Furthermore, the hostnames used in the configuration file must match the hostnames reported by @hostname@ or @hostname -s@ on the nodes themselves. This applies to the ControlMachine as well as the worker nodes.

For example:
* In @slurm.conf@ on control and worker nodes: @ControlMachine=ClusterID.example.com@
* In @slurm.conf@ on control and worker nodes: @NodeName=compute[0-255]@
* In @/etc/resolv.conf@ on control and worker nodes: @search ClusterID.example.com@
* On the control node: @hostname@ reports @ClusterID.example.com@
* On worker node 123: @hostname@ reports @compute123.ClusterID.example.com@

h3. Automatic hostname assignment

The API server will choose an unused hostname from the set given in @application.yml@, which defaults to @compute[0-255]@.

If it is not feasible to give your compute nodes hostnames like compute0, compute1, etc., you can accommodate other naming schemes with a bit of extra configuration.

If you want Arvados to assign names to your nodes with a different consecutive numeric series like @{worker1-0000, worker1-0001, worker1-0002}@, add an entry to @application.yml@; see @/var/www/arvados-api/current/config/application.default.yml@ for details. Example:
* In @application.yml@: @assign_node_hostname: worker1-%04d@
* In @slurm.conf@: @NodeName=worker1-[0000-0255]@

If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script send its current hostname, rather than expect Arvados to assign one.
* In @application.yml@: @assign_node_hostname: false@
* In @slurm.conf@: @NodeName=alice,bob,clay,darlene@

If your worker hostnames are already assigned by other means, but the full set of names is _not_ known in advance, you can use the @slurm.conf@ and @application.yml@ settings in the previous example, but you must also update @slurm.conf@ (both on the controller and on all worker nodes) and run @sudo scontrol reconfigure@ whenever a new node comes online.

================================================
FILE: doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install the Slurm dispatcher
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
{% include 'notebox_end' %}

# "Introduction":#introduction
# "Update config.yml":#update-config
# "Install crunch-dispatch-slurm":#install-packages
# "Start the service":#start-service
# "Restart the API server and controller":#restart-api

h2(#introduction). Introduction

This assumes you already have a Slurm cluster, and have set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html. Slurm packages are available on all distributions supported by Arvados. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html.

The Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@). It is not resource-intensive, so you can run it on the API server node.

h2(#update-config). Update config.yml

Crunch-dispatch-slurm reads the common configuration file at @/etc/arvados/config.yml@. Add a DispatchSLURM entry to the Services section, using the hostname where @crunch-dispatch-slurm@ will run, and an available port:
    Services:
      DispatchSLURM:
        InternalURLs:
          "http://hostname.zzzzz.arvadosapi.com:9007": {}
The following configuration parameters are optional.

{% include 'hpc_max_gateway_tunnels' %}

h3(#PollPeriod). Containers.PollInterval

crunch-dispatch-slurm polls the API server periodically for new containers to run. The @PollInterval@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@. For example:
    Containers:
      PollInterval: 3m30s
h3(#ReserveExtraRAM). Containers.ReserveExtraRAM: Extra RAM for jobs

Extra RAM to reserve (in bytes) on each Slurm job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@. If not provided, the default value is zero.

Helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process. In this situation, at least 256MiB is recommended to accommodate each container's @crunch-run@ and @arv-mount@ processes.

Supports suffixes @KB@, @KiB@, @MB@, @MiB@, @GB@, @GiB@, @TB@, @TiB@, @PB@, @PiB@, @EB@, @EiB@ (where @KB@ is 10[^3^], @KiB@ is 2[^10^], @MB@ is 10[^6^], @MiB@ is 2[^20^] and so forth).
    Containers:
      ReserveExtraRAM: 256MiB
h3(#MinRetryPeriod). Containers.MinRetryPeriod: Rate-limit repeated attempts to start containers

If Slurm is unable to run a container, the dispatcher will submit it again after the next PollPeriod. If PollPeriod is very short, this can be excessive. If MinRetryPeriod is set, the dispatcher will avoid submitting the same container to Slurm more than once in the given time span.
    Containers:
      MinRetryPeriod: 30s
h3(#KeepServiceURIs). Containers.Slurm.SbatchEnvironmentVariables

Some Arvados installations run a local keepstore on each compute node to handle all Keep traffic. To override Keep service discovery and access the local keep server instead of the global servers, set ARVADOS_KEEP_SERVICES in SbatchEnvironmentVariables:
    Containers:
      SLURM:
        SbatchEnvironmentVariables:
          ARVADOS_KEEP_SERVICES: "http://127.0.0.1:25107"
h3(#PrioritySpread). Containers.Slurm.PrioritySpread

crunch-dispatch-slurm adjusts the "nice" values of its Slurm jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.

* If non-Arvados jobs run on your Slurm cluster, and your Arvados containers are waiting too long in the Slurm queue because their "nice" values are too high for them to compete with other Slurm jobs, you should use a smaller PrioritySpread value.
* If you have an older Slurm system that limits nice values to 10000, a smaller @PrioritySpread@ can help avoid reaching that limit.
* In other cases, a larger value is beneficial because it reduces the total number of adjustments made by executing @scontrol@.

The smallest usable value is @1@. The default value of @10@ is used if this option is zero or negative. Example:
    Containers:
      SLURM:
        PrioritySpread: 1000
h3(#SbatchArguments). Containers.Slurm.SbatchArgumentsList

When crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArguments@. You can use this to send the jobs to specific cluster partitions or add resource requests. Set @SbatchArguments@ to an array of strings. For example:
    Containers:
      SLURM:
        SbatchArgumentsList:
          - "--partition=PartitionName"
Note: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line. Arguments specified through Arvados are added after the arguments listed in SbatchArguments. This means, for example, an Arvados container that specifies @partitions@ in @scheduling_parameters@ will override an occurrence of @--partition@ in SbatchArguments. As a result, for container parameters that can be specified through Arvados, SbatchArguments can be used to specify defaults but not enforce specific policy.

h3(#CrunchRunCommand-cgroups). Containers.CrunchRunArgumentList: Dispatch to Slurm cgroups

If your Slurm cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside Slurm's cgroups. This provides consistent enforcement of resource constraints. To do this, use a crunch-dispatch-slurm configuration like the following:
    Containers:
      CrunchRunArgumentsList:
        - "-cgroup-parent-subsystem=memory"
When using cgroups v1, the choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in Slurm's @cgroup.conf@. The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by Slurm. Limits for other resource types will also be respected. When doing this, you should also set "ReserveExtraRAM":#ReserveExtraRAM.

{% include 'notebox_begin' %}
Some versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice. This causes an error when specifying a cgroup parent created outside systemd, such as those created by Slurm.

You can work around this issue by disabling the Docker daemon's systemd integration. This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use Slurm's cgroups as container parents. To do this, configure the Docker daemon on all compute nodes to run with the option @--exec-opt native.cgroupdriver=cgroupfs@.
{% include 'notebox_end' %}

h3(#CrunchRunCommand-network). Containers.CrunchRunArgumentList: Using host networking for containers

Older Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups. This is indicated by blocked kernel tasks in "Workqueue: netns cleanup_net". If you are experiencing this problem, as a workaround you can disable use of network namespaces by Docker across the cluster. Be aware this reduces container isolation, which may be a security risk.
    Containers:
      CrunchRunArgumentsList:
        - "-container-enable-networking=always"
        - "-container-network-mode=host"
{% assign arvados_component = 'crunch-dispatch-slurm' %}

{% include 'install_packages' %}

{% include 'start_service' %}

{% include 'restart_api' %}

================================================
FILE: doc/install/crunch2-slurm/install-test.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Test Slurm dispatch
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin_warning' %}
@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
{% include 'notebox_end' %}

h2. Test compute node setup

You should now be able to submit Slurm jobs that run in Docker containers. On the node where you're running the dispatcher, you can test this by running:
~$ sudo -u crunch srun -N1 docker run busybox echo OK
If it works, this command should print @OK@ (it may also show some status messages from Slurm and/or Docker).

If it does not print @OK@, double-check your compute node setup, and that the @crunch@ user can submit Slurm jobs.

h2. Test the dispatcher

Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.

On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
# journalctl -o cat -fu crunch-dispatch-slurm.service
In another terminal window, use the diagnostics tool to run a simple container.
# arvados-client sudo diagnostics
INFO       5: running health check (same as `arvados-server check`)
INFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest
...
INFO     160: running a container
INFO      ... container request submitted, waiting up to 10m for container to run
Once @crunch-dispatch-slurm@ polls the API server for new containers to run, you should see it dispatch the new container. It will log messages like:
2016/08/05 13:52:54 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 started
2016/08/05 13:53:04 About to submit queued container zzzzz-dz642-hdp2vpu9nq14tx0
2016/08/05 13:53:04 sbatch succeeded: Submitted batch job 8102
Before the container finishes, Slurm's @squeue@ command will show the new job in the list of queued and running jobs. For example, you might see:
~$ squeue --long
Fri Aug  5 13:57:50 2016
  JOBID PARTITION     NAME     USER    STATE       TIME TIMELIMIT  NODES NODELIST(REASON)
   8103   compute zzzzz-dz   crunch  RUNNING       1:56 UNLIMITED      1 compute0
The job's name corresponds to the container's UUID. You can get more information about it by running, e.g., @scontrol show job Name=UUID@.

When the container finishes, the dispatcher will log that, with the final result:
2016/08/05 13:53:14 Container zzzzz-dz642-hdp2vpu9nq14tx0 now in state "Complete" with locked_by_uuid ""
2016/08/05 13:53:14 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 finished
After the container finishes, you can get the container record by UUID *from a shell server* to see its results:
shell:~$ arv get zzzzz-dz642-hdp2vpu9nq14tx0
{
 ...
 "exit_code":0,
 "log":"a01df2f7e5bc1c2ad59c60a837e90dc6+166",
 "output":"d41d8cd98f00b204e9800998ecf8427e+0",
 "state":"Complete",
 ...
}
You can use standard Keep tools to view the container's output and logs from their corresponding fields. For example, to see the logs from the collection referenced in the @log@ field:
~$ arv keep ls a01df2f7e5bc1c2ad59c60a837e90dc6+166
./crunch-run.txt
./stderr.txt
./stdout.txt
~$ arv-get a01df2f7e5bc1c2ad59c60a837e90dc6+166/stdout.txt
2016-08-05T13:53:06.201011Z Hello, Crunch!
If the container does not dispatch successfully, refer to the @crunch-dispatch-slurm@ logs for information about why it failed.

================================================
FILE: doc/install/diagnostics.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Cluster diagnostics tool
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

The @diagnostics@ subcommand of @arvados-client@ performs a variety of checks to help confirm that your Arvados installation has been properly configured. It is extremely helpful for validating that your install is successful.

Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.

* If you are running the diagnostics from one of the Arvados machines inside the private network, you want @-internal-client@.
* If you are running the diagnostics from your workstation outside of the private network, you should use @-external-client@.

Here is an example of it in action:
$ ARVADOS_API_HOST=ClusterID.example.com ARVADOS_API_TOKEN=YourSystemTokenHere arvados-client diagnostics -external-client
INFO      10: getting discovery document from https://ClusterID.example.com/discovery/v1/apis/arvados/v1/rest
INFO      20: getting exported config from https://ClusterID.example.com/arvados/v1/config
INFO      30: getting current user record
INFO      40: connecting to service endpoint https://keep.ClusterID.example.com/
INFO      41: connecting to service endpoint https://*.collections.ClusterID.example.com/
INFO      42: connecting to service endpoint https://download.ClusterID.example.com/
INFO      43: connecting to service endpoint wss://ws.ClusterID.example.com/websocket
INFO      44: connecting to service endpoint https://workbench.ClusterID.example.com/
INFO      45: connecting to service endpoint https://workbench2.ClusterID.example.com/
INFO      50: checking CORS headers at https://ClusterID.example.com/
INFO      51: checking CORS headers at https://keep.ClusterID.example.com/d41d8cd98f00b204e9800998ecf8427e+0
INFO      52: checking CORS headers at https://download.ClusterID.example.com/
INFO      60: checking internal/external client detection
INFO      61: reading+writing via keep service at https://keep.ClusterID.example.com:443/
INFO      80: finding/creating "scratch area for diagnostics" project
INFO      90: creating temporary collection
INFO     100: uploading file via webdav
INFO     110: checking WebDAV ExternalURL wildcard (https://*.collections.ClusterID.example.com/)
INFO     120: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.ClusterID.example.com/foo)
INFO     121: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.ClusterID.example.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     122: downloading from webdav (https://download.ClusterID.example.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo)
INFO     123: downloading from webdav (https://download.ClusterID.example.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     124: downloading from webdav (https://a15a27cbc1c7d2d4a0d9e02529aaec7e-128.collections.ClusterID.example.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     125: downloading from webdav (https://download.ClusterID.example.com/c=ce8i5-4zz18-bkfvq2skqqf78xd/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)
INFO     130: getting list of virtual machines
INFO     140: getting workbench1 webshell page
INFO     150: connecting to webshell service
INFO     160: running a container
INFO      ... container request submitted, waiting up to 10m for container to run
INFO    9990: deleting temporary collection
INFO    --- no errors ---
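When running from a machine inside the cluster's private network, the invocation is the same except for the client flag:

$ ARVADOS_API_HOST=ClusterID.example.com ARVADOS_API_TOKEN=YourSystemTokenHere arvados-client diagnostics -internal-client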
================================================
FILE: doc/install/index.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Installation options
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'notebox_begin' %}
This section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, "go to the SDK section.":{{site.baseurl}}/sdk/
{% include 'notebox_end' %}

Arvados components run on supported GNU/Linux distributions.  The Arvados elastic compute management layer supports Amazon Web Services (AWS) and Microsoft Azure cloud platforms as well as on-premises installs using SLURM and IBM Spectrum LSF.  The Arvados storage layer supports filesystem storage (including NFS, such as IBM GPFS), Azure blob storage, Amazon S3, and systems that offer an S3-compatible API such as Ceph Object Gateway and NetApp StorageGRID.

"Arvados is Free Software":{{site.baseurl}}/user/copying/copying.html and self-install installations are not limited in any way.  Commercial support and development are also available from "Curii Corporation.":https://www.curii.com/

Arvados components can be installed and configured in a number of different ways.
table(table table-bordered table-condensed).
||_. Setup difficulty|_. Arvados Evaluation|_. Development|_. Production Data Management|_. Production Workflows|
|"Single-host install":install-single-host.html|Easy|yes|limited|limited|limited|
|"Multi-host install":install-multi-host.html|Moderate|yes|yes|yes|yes|
|"Manual installation":install-manual-prerequisites.html|Difficult|yes|yes|yes|yes|
|"Cluster Operation Subscription supported by Curii":https://curii.com|N/A ^1^|yes|yes|yes|yes|
^1^ No user installation necessary.  Curii engineers will install and configure Arvados in your own infrastructure.

================================================
FILE: doc/install/install-api-server.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Install API server and Controller
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Install dependencies":#dependencies
# "Set up database":#database-setup
# "Update config.yml":#update-config
# "Update nginx configuration":#update-nginx
# "Install arvados-api-server and arvados-controller":#install-packages
# "Confirm working installation":#confirm-working

h2(#introduction). Introduction

The Arvados core API server consists of four services: PostgreSQL, Arvados Rails API, Arvados Controller, and Nginx.  Here is a simplified diagram showing the relationship between the core services.  Client requests arrive at the public-facing Nginx reverse proxy and are forwarded to Arvados Controller.  The controller handles some requests itself; the rest are forwarded to the Arvados Rails API.  The Rails API server implements the majority of the business logic, communicating with the PostgreSQL database to fetch data and make transactional updates.  All services are stateless except the PostgreSQL database.  This guide assumes all of these services will be installed on the same node, but it is possible to install them across multiple nodes.

!(full-width){{site.baseurl}}/images/proxy-chain.svg!

h2(#dependencies). Install dependencies

# "Install PostgreSQL":install-postgresql.html
# "Install nginx":nginx.html

h2(#database-setup). Set up database

{% assign service_role = "arvados" %}
{% assign service_database = "arvados_production" %}
{% assign use_contrib = true %}
{% include 'install_postgres_database' %}

h2(#update-config). Update config.yml

Starting from an "empty config.yml file,":config.html#empty add the following configuration keys.

h3. Tokens
    SystemRootToken: "$system_root_token"
    ManagementToken: "$management_token"
    Collections:
      BlobSigningKey: "$blob_signing_key"
These secret tokens are used to authenticate messages between Arvados components.

* @SystemRootToken@ is used by Arvados system services to authenticate as the system (root) user when communicating with the API server.
* @ManagementToken@ is used to authenticate access to system metrics.
* @Collections.BlobSigningKey@ is used to control access to Keep blocks.

Each token should be a string of at least 50 alphanumeric characters.  You can generate a suitable token with the following command:
~$ tr -dc 0-9a-zA-Z </dev/urandom | head -c50 ; echo
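Any method that produces at least 50 random alphanumeric characters will do.  For example, if you prefer OpenSSL (hex output is a subset of alphanumeric, and 25 random bytes yield 50 hex characters):

~$ openssl rand -hex 25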
h3. PostgreSQL.Connection
    PostgreSQL:
      Connection:
        host: localhost
        user: arvados
        password: $postgres_password
        dbname: arvados_production
Replace the @$postgres_password@ placeholder with the password you generated during "database setup":#database-setup.

h3. Services
    Services:
      Controller:
        ExternalURL: "https://ClusterID.example.com"
        InternalURLs:
          "http://localhost:8003": {}
      RailsAPI:
        # Does not have an ExternalURL
        InternalURLs:
          "http://localhost:8004": {}
      ContainerWebServices:
        # Does not have InternalURLs
        ExternalURL: "https://*.containers.ClusterID.example.com"
Replace @ClusterID.example.com@ with the hostname that you previously selected for the API server.

The @Services@ section of the configuration helps Arvados components contact one another (service discovery).  Each service has one or more @InternalURLs@ and an @ExternalURL@.  The @InternalURLs@ describe where the service runs, and how the Nginx reverse proxy will connect to it.  The @ExternalURL@ is how external clients contact the service.

h2(#update-nginx). Update nginx configuration

Use a text editor to create a new file @/etc/nginx/conf.d/arvados-controller.conf@ with the following configuration.  Options that need attention are marked in red.
proxy_http_version 1.1;

# When Keep clients request a list of Keep services from the API
# server, use the origin IP address to determine if the request came
# from the internal subnet or it is an external client.  This sets the
# $external_client variable which in turn is used to set the
# X-External-Client header.
#
# The API server uses this header to choose whether to respond to a
# "available keep services" request with either a list of internal keep
# servers (0) or with the keepproxy (1).
#
# Following the example here, update the 10.20.30.0/24 netmask
# to match your private subnet.
# Update 1.2.3.4 and add lines as necessary with the public IP
# address of all servers that can also access the private network to
# ensure they are not considered 'external'.

geo $external_client {
  default        1;
  127.0.0.0/24   0;
  10.20.30.0/24  0;
  1.2.3.4/32     0;
}

# This is the port where nginx expects to contact arvados-controller.
upstream controller {
  server     localhost:8003  fail_timeout=10s;
}

server {
  # This configures the public https port that clients will actually connect to,
  # the request is reverse proxied to the upstream 'controller'

  listen       443 ssl;
  server_name  ClusterID.example.com
               *.containers.ClusterID.example.com;

  ## If a wildcard name like *.containers.ClusterID.example.com is not
  ## available, and Services.ContainerWebServices.ExternalPortMin and
  ## ExternalPortMax are configured instead, then the "listen" and
  ## "server_name" directives should be adjusted accordingly.  Example:
  #
  # listen       443 ssl;
  # listen       2000-2999 ssl;
  # server_name  ClusterID.example.com
  #              containers.ClusterID.example.com;
  #
  ## The number of ports in the range (1000 in this example) should be
  ## added to the worker_connections setting in the events section of
  ## your Nginx configuration (default 512).  If the system-supplied
  ## RLIMIT_NOFILE value is low (some systems default to 1024), the
  ## worker_rlimit_nofile setting in the main section should also be
  ## increased by the same amount.
  #
  # events { worker_connections 1512; }
  # worker_rlimit_nofile 2024;

  ssl_certificate     /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key /YOUR/PATH/TO/cert.key;

  # Refer to the comment about this setting in the passenger (arvados
  # api server) section of your Nginx configuration.
  client_max_body_size 128m;

  location / {
    proxy_pass               http://controller;
    proxy_redirect           off;
    proxy_connect_timeout    90s;
    proxy_read_timeout       300s;
    proxy_max_temp_file_size 0;
    proxy_request_buffering  off;
    proxy_buffering          off;
    proxy_http_version       1.1;

    proxy_set_header      Host              $http_host;
    proxy_set_header      Upgrade           $http_upgrade;
    proxy_set_header      Connection        "upgrade";
    proxy_set_header      X-External-Client $external_client;
    proxy_set_header      X-Forwarded-For   $proxy_add_x_forwarded_for;
    proxy_set_header      X-Forwarded-Proto https;
    proxy_set_header      X-Real-IP         $remote_addr;
  }
}
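After saving the file, check the syntax of your Nginx configuration and reload it (standard Nginx administration, not specific to Arvados):

# nginx -t
# systemctl reload nginx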
h2(#install-packages). Install arvados-api-server and arvados-controller

h3. Red Hat, AlmaLinux, and Rocky Linux 8
# dnf install --enablerepo=powertools arvados-api-server arvados-controller
h3. Red Hat, AlmaLinux, and Rocky Linux 9 or 10
# dnf install --enablerepo=devel arvados-api-server arvados-controller
h3. Debian and Ubuntu
# apt install arvados-api-server arvados-controller
h3(#railsapi-config). Configure Rails API server

By default, the Rails API server is configured to listen on @localhost:8004@, matching the example cluster configuration above.  If you need to change this, edit the @arvados-railsapi.service@ definition to redefine the @PASSENGER_ADDRESS@ and @PASSENGER_PORT@ environment variables, like this:
# systemctl edit arvados-railsapi.service
### Editing /etc/systemd/system/arvados-railsapi.service.d/override.conf
### Anything between here and the comment below will become the new contents of the file
[Service]
Environment=PASSENGER_ADDRESS=0.0.0.0
Environment=PASSENGER_PORT=8040
### Lines below this comment will be discarded
[...]
You can similarly define other Passenger settings if desired.  The "Passenger Standalone reference":https://www.phusionpassenger.com/library/config/standalone/reference/ documents all the available settings.

{% assign arvados_component = 'arvados-railsapi arvados-controller' %}
{% include 'start_service' %}

h2(#confirm-working). Confirm working installation

We recommend using the "Cluster diagnostics tool.":diagnostics.html  The first few tests (10, 20, 30) will succeed if you have a working API server and controller.  Of course, tests for services that you have not yet installed and configured will fail.

Here are some other checks you can perform manually.

h3. Confirm working controller
$ curl https://ClusterID.example.com/arvados/v1/config
h3. Confirm working Rails API server
$ curl https://ClusterID.example.com/discovery/v1/apis/arvados/v1/rest
h3. Confirm that you can use the system root token to act as the system root user
$ curl -H "Authorization: Bearer $system_root_token" https://ClusterID.example.com/arvados/v1/users/current
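If the token is accepted, the response is the JSON record of the system root user, whose UUID is always the cluster ID followed by @-tpzed-000000000000000@.  With @jq@ installed (an optional convenience, not required by Arvados), you can check it directly:

$ curl -s -H "Authorization: Bearer $system_root_token" https://ClusterID.example.com/arvados/v1/users/current | jq .uuid
"ClusterID-tpzed-000000000000000"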
h3. Troubleshooting

If you are getting TLS errors, make sure the @ssl_certificate@ directive in your nginx configuration has the "full certificate chain":http://nginx.org/en/docs/http/configuring_https_servers.html#chains.

Logs can be found in @/var/www/arvados-api/current/log/production.log@ and using @journalctl -u arvados-controller@.  See also the admin page on "Logging":{{site.baseurl}}/admin/logging.html.

================================================
FILE: doc/install/install-docker.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Set up Docker
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'install_compute_docker' %}

================================================
FILE: doc/install/install-keep-balance.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Install Keep-balance
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Update config.yml":#update-config
# "Install keep-balance package":#install-packages
# "Start the service":#start-service

h2(#introduction). Introduction

Keep-balance deletes unreferenced and overreplicated blocks from Keep servers, makes additional copies of underreplicated blocks, and moves blocks into optimal locations as needed (e.g., after adding new servers).  See "Balancing Keep servers":{{site.baseurl}}/admin/keep-balance.html for usage details.

Keep-balance can be installed anywhere with network access to Keep services, arvados-controller, and PostgreSQL.  Typically it runs on the same host as keepproxy.

*A cluster should have only one instance of keep-balance running at a time.*

{% include 'notebox_begin' %}
If you are installing keep-balance on an existing system with valuable data, you can run keep-balance in "dry run" mode first and review its logs as a precaution.  To do this, set the @Collections.BalancePullLimit@ and @Collections.BalanceTrashLimit@ configuration entries to zero (see the example after the @InternalURLs@ snippet below).
{% include 'notebox_end' %}

h2(#update-config). Update the cluster config

Edit the cluster config at @config.yml@ and set @Services.Keepbalance.InternalURLs@.  This port is only used to publish metrics.
    Services:
      Keepbalance:
        InternalURLs:
          "http://keep.ClusterID.example.com:9005/": {}
Ensure your cluster configuration has @Collections.BlobTrash: true@ (this is the default).
# arvados-server config-dump | grep BlobTrash:
      BlobTrash: true
If @Collections.BlobTrash@ is false, unneeded blocks will be counted and logged by keep-balance, but they will not be deleted.

{% assign arvados_component = 'keep-balance' %}
{% include 'install_packages' %}
{% include 'start_service' %}

================================================
FILE: doc/install/install-keep-web.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Install Keep-web server
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Configure DNS":#dns
# "Configure anonymous user token":#update-config
# "Update nginx configuration":#update-nginx
# "Install keep-web package":#install-packages
# "Start the service":#start-service
# "Restart the API server and controller":#restart-api
# "Confirm working installation":#confirm-working

h2(#introduction). Introduction

The Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols.  This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems.  It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens.  It can be installed anywhere with access to Keep services, controller, and the PostgreSQL server.  It is typically installed behind a web proxy that provides TLS support.  See the "godoc page":https://pkg.go.dev/git.arvados.org/arvados.git/services/keep-web for more detail.

h2(#dns). Configure DNS

It is important to properly configure the keep-web service so that it does not open up cross-site scripting (XSS) attacks.  An HTML file can be stored in a collection.  If an attacker causes a victim to visit that HTML file through Workbench, it will be rendered by the browser.  If all collections are served at the same domain, the browser will consider the collections as coming from the same origin, and they will therefore have access to the same browsing data (such as the API token), enabling malicious Javascript in the HTML file to access Arvados as the victim.

There are two approaches to mitigate this.

# The service can tell the browser that all files should be downloaded instead of previewed in-browser, except in situations where an attacker is unlikely to be able to gain access to anything they didn't already have access to.
# Each collection served by @keep-web@ is served on its own virtual host.  This allows files with executable content to be displayed in-browser securely.  The virtual host embeds the collection uuid or portable data hash in the hostname.  For example, a collection with uuid @xxxxx-4zz18-tci4vn4fa95w0zx@ could be served as @xxxxx-4zz18-tci4vn4fa95w0zx.collections.ClusterID.example.com@ .  The portable data hash @dd755dbc8d49a67f4fe7dc843e4f10a6+54@ could be served at @dd755dbc8d49a67f4fe7dc843e4f10a6-54.collections.ClusterID.example.com@ .  This requires a "wildcard DNS record":https://en.wikipedia.org/wiki/Wildcard_DNS_record and a "wildcard TLS certificate.":https://en.wikipedia.org/wiki/Wildcard_certificate

h3. Collections download URL

Download links will be served from the URL in @Services.WebDAVDownload.ExternalURL@ .  The collection uuid or portable data hash is put in the URL path.  If blank, links are served from WebDAV with the @disposition=attachment@ query parameter.  Unlike preview links, browsers do not render attachments, so there is no risk of XSS.
If @WebDAVDownload@ is blank and @WebDAV@ has a single origin (not a wildcard; see below), then Workbench will show an error page.
    Services:
      WebDAVDownload:
        ExternalURL: https://download.ClusterID.example.com
h3. Collections preview URL

Collections will be served using the URL pattern in @Services.WebDAV.ExternalURL@ .  If blank, @Services.WebDAVDownload.ExternalURL@ is used instead, and inline preview is disabled.  If both are empty, downloading collections from Workbench will be impossible.  When wildcard domains are configured, credentials are still required to access non-public data.

h4. In their own subdomain

Collections can be served from their own subdomain:
    Services:
      WebDAV:
        ExternalURL: https://*.collections.ClusterID.example.com/
This option is preferred if you plan to access Keep using third-party S3 client software, because it accommodates S3 virtual host-style requests and path-style requests without any special client configuration.

h4. Under the main domain

Alternately, they can go under the main domain by including @--@:
    Services:
      WebDAV:
        ExternalURL: https://*--collections.ClusterID.example.com/
h4. From a single domain

Serve preview links from a single domain, with the collection uuid or portable data hash in the path (similar to downloads).  This configuration only allows previews of public data (data accessible by the anonymous user) and collection-sharing links (where the token is already embedded in the URL); it will ignore authorization headers, so a request for non-public data may return "404 Not Found" even if normally valid credentials were provided.
    Services:
      WebDAV:
        ExternalURL: https://collections.ClusterID.example.com/
Note the trailing slash.

{% include 'notebox_begin' %}
Whether you choose to serve collections from their own subdomain or from a single domain, it's important to keep in mind that they should be served from the same _site_ as Workbench for the inline previews to work.  Please check "keep-web's URL pattern guide":../api/keep-web-urls.html#same-site to learn more.
{% include 'notebox_end' %}

h2. Set InternalURLs
    Services:
      WebDAV:
        InternalURLs:
          http://localhost:9002: {}
h2(#update-config). Configure anonymous user token

If you intend to use Keep-web to serve public data to anonymous clients, configure it with an anonymous token.

Generate a random string (>= 32 characters long) and put it in the @config.yml@ file, in the @AnonymousUserToken@ field.
    Users:
      AnonymousUserToken: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
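You can generate a suitable token the same way as the other system tokens, for example:

~$ tr -dc 0-9a-zA-Z </dev/urandom | head -c32 ; echo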
Set @Users.AnonymousUserToken: ""@ (empty string) or leave it out if you do not want to serve public data.

h3. Update nginx configuration

Put a reverse proxy with SSL support in front of keep-web.  Keep-web itself runs on port 9002 (or whatever is specified in @Services.WebDAV.InternalURLs@) while the reverse proxy runs on port 443 and forwards requests to keep-web.

Use a text editor to create a new file @/etc/nginx/conf.d/keep-web.conf@ with the following configuration.  Options that need attention are marked in red.
upstream keep-web {
  server                127.0.0.1:9002;
}

server {
  listen                443 ssl;
  server_name           download.ClusterID.example.com
                        collections.ClusterID.example.com
                        *.collections.ClusterID.example.com
                        ~.*--collections\.ClusterID\.example\.com;

  proxy_connect_timeout 90s;
  proxy_read_timeout    300s;

  ssl_certificate       /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key   /YOUR/PATH/TO/cert.key;

  location / {
    proxy_pass          http://keep-web;
    proxy_set_header    Host            $host;
    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;

    client_max_body_size    0;
    proxy_http_version      1.1;
    proxy_request_buffering off;
    proxy_max_temp_file_size 0;
  }
}
{% include 'notebox_begin' %}
If you restrict access to your Arvados services based on network topology -- for example, your proxy server is not reachable from the public internet -- additional proxy configuration might be needed to thwart cross-site scripting attacks that would circumvent your restrictions.

Normally, Keep-web accepts requests for multiple collections using the same host name, provided the client's credentials are not being used.  This provides insufficient XSS protection in an installation where the "anonymously accessible" data is not truly public, but merely protected by network topology.

In such cases -- for example, a site which is not reachable from the internet, where some data is world-readable from Arvados's perspective but is intended to be available only to users within the local network -- the downstream proxy should be configured to return 401 for all paths beginning with "/c=".
{% include 'notebox_end' %}

h3. Configure filesystem cache size

Keep-web stores copies of recently accessed data blocks in @/var/cache/arvados/keep@.  The cache size defaults to 10% of the size of the filesystem where that directory is located (typically @/var@) and can be customized with the @DiskCacheSize@ config entry.
  Collections:
    WebDAVCache:
      DiskCacheSize: 20 GiB
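You can check how much space the cache is currently using with standard filesystem tools:

# du -sh /var/cache/arvados/keep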
{% assign arvados_component = 'keep-web' %}
{% include 'install_packages' %}
{% include 'start_service' %}
{% include 'restart_api' %}

h2(#confirm-working). Confirm working installation

We recommend using the "Cluster diagnostics tool.":diagnostics.html

Here are some other checks you can perform manually.
$ curl -H "Authorization: Bearer $system_root_token" https://download.ClusterID.example.com/c=59389a8f9ee9d399be35462a0f92541c-53/_/hello.txt
If wildcard collections domains are configured:
$ curl -H "Authorization: Bearer $system_root_token" https://59389a8f9ee9d399be35462a0f92541c-53.collections.ClusterID.example.com/hello.txt
If using a single collections preview domain:
$ curl https://collections.ClusterID.example.com/c=59389a8f9ee9d399be35462a0f92541c-53/t=$system_root_token/_/hello.txt
================================================
FILE: doc/install/install-keepproxy.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Install Keepproxy server
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Update config.yml":#update-config
# "Update nginx configuration":#update-nginx
# "Install keepproxy package":#install-packages
# "Start the service":#start-service
# "Restart the API server and controller":#restart-api
# "Confirm working installation":#confirm-working

h2(#introduction). Introduction

The Keepproxy server is a gateway into your Keep storage.  Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is suitable for clients located elsewhere on the internet.  Specifically, in contrast to Keepstore:

* A client writing through Keepproxy sends a single copy of a data block, and Keepproxy distributes copies to the appropriate Keepstore servers.
* A client can write through Keepproxy without precomputing content hashes.
* Keepproxy checks API token validity before processing requests.  (Clients that can connect directly to Keepstore can use it as scratch space even without a valid API token.)

By convention, we use the following hostname for the Keepproxy server:
table(table table-bordered table-condensed).
|_. Hostname|
|@keep.ClusterID.example.com@|
This hostname should resolve from anywhere on the internet.

h2(#update-config). Update config.yml

Edit the cluster config at @config.yml@ and set @Services.Keepproxy.ExternalURL@ and @Services.Keepproxy.InternalURLs@.
    Services:
      Keepproxy:
        ExternalURL: https://keep.ClusterID.example.com
        InternalURLs:
          "http://localhost:25107": {}
h2(#update-nginx). Update Nginx configuration

Put a reverse proxy with SSL support in front of Keepproxy.  Keepproxy itself runs on port 25107 (or whatever is specified in @Services.Keepproxy.InternalURLs@) while the reverse proxy runs on port 443 and forwards requests to Keepproxy.

Use a text editor to create a new file @/etc/nginx/conf.d/keepproxy.conf@ with the following configuration.  Options that need attention are marked in red.
upstream keepproxy {
  server                127.0.0.1:25107;
}

server {
  listen                  443 ssl;
  server_name             keep.ClusterID.example.com;

  proxy_connect_timeout   90s;
  proxy_read_timeout      300s;
  proxy_set_header        X-Real-IP $remote_addr;
  proxy_http_version      1.1;
  proxy_request_buffering off;
  proxy_max_temp_file_size 0;

  ssl_certificate     /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key /YOUR/PATH/TO/cert.key;

  # Clients need to be able to upload blocks of data up to 64MiB in size.
  client_max_body_size    64m;

  location / {
    proxy_pass            http://keepproxy;
  }
}
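After reloading Nginx, you can spot-check that "OPTIONS" requests reach Keepproxy and come back with CORS headers (a quick sketch using the empty-block locator, the same URL the diagnostics tool checks):

$ curl -s -i -X OPTIONS https://keep.ClusterID.example.com/d41d8cd98f00b204e9800998ecf8427e+0 | grep -i access-control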
Note: if the Web uploader is failing to upload data and there are no logs from keepproxy, be sure to check the nginx proxy logs.  In addition to "GET" and "PUT", the nginx proxy must pass "OPTIONS" requests to keepproxy, which should respond with appropriate Cross-Origin Resource Sharing (CORS) headers.  If the CORS headers are not present, browser security policy will cause the upload request to silently fail.  The CORS headers are generated by keepproxy and should not be set in nginx.

{% assign arvados_component = 'keepproxy' %}
{% include 'install_packages' %}
{% include 'start_service' %}
{% include 'restart_api' %}

h2(#confirm-working). Confirm working installation

We recommend using the "Cluster diagnostics tool.":diagnostics.html  Because Keepproxy is specifically a gateway used by outside clients, for this test you should run the diagnostics from a client machine outside the Arvados private network, and provide the @-external-client@ parameter.

Here are some other checks you can perform manually.

Log into a host that is on a network external to your private Arvados network.  The host should be able to contact your keepproxy server (e.g., @keep.ClusterID.example.com@), but not your keepstore servers (e.g., @keep[0-9].ClusterID.example.com@).

@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.  @ARVADOS_API_HOST@ should be the hostname of the API server.  @ARVADOS_API_TOKEN@ should be the system root token.

Install the "Command line SDK":{{site.baseurl}}/sdk/cli/install.html

Check that the keepproxy server is in the @keep_service@ "accessible" list:

$ arv keep_service accessible
[...]
If the keepproxy server does not show up in the "accessible" list, check that you have "properly configured the @geo@ block for the API server":install-api-server.html#update-nginx (a misconfigured @geo@ block can cause the API server to treat external clients as internal, returning the keepstore list instead).

Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html

You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  Be sure to execute this from _outside_ the cluster's private network.

{% include 'arv_put_example' %}

================================================
FILE: doc/install/install-keepstore.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Install Keepstore servers
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Update config.yml":#update-config
# "Install keepstore package":#install-packages
# "Restart the API server and controller":#restart-api
# "Confirm working installation":#confirm-working
# "Note on storage management":#note

h2. Introduction

Keepstore provides access to underlying storage for reading and writing content-addressed blocks, with enforcement of Arvados permissions.  Keepstore supports a variety of cloud object storage systems and POSIX filesystems for its backing store.

h3. Plan your storage layout

In the steps below, you will configure a number of backend storage volumes (like local filesystems and S3 buckets) and specify which keepstore servers have read-only and read-write access to which volumes.

It is possible to configure arbitrary server/volume layouts.  However, in order to provide good performance and efficient use of storage resources, we strongly recommend using one of the following layouts:

# Each volume is writable by exactly one server, and optionally readable by one or more other servers.  The total capacity of all writable volumes is the same for each server.
# Each volume is writable by all servers.  Each volume has enough built-in redundancy to satisfy your requirements, i.e., you do not need Arvados to mirror data across multiple volumes.

We recommend starting off with two Keepstore servers.  Exact server specifications will be site- and workload-specific, but in general keepstore will be I/O bound and should be set up to maximize aggregate bandwidth with compute nodes.  To increase capacity (either space or throughput) it is straightforward to add additional servers, or (in cloud environments) to increase the machine size of the existing servers.

By convention, we use the following hostname pattern:
table(table table-bordered table-condensed).
|_. Hostname|
|@keep0.ClusterID.example.com@|
|@keep1.ClusterID.example.com@|
Keepstore servers should not be directly accessible from the Internet (they are accessed via "keepproxy":install-keepproxy.html), so the hostnames only need to resolve on the private network.

h2(#update-config). Update cluster config

h3. Configure storage volumes

Fill in the @Volumes@ section of @config.yml@ for each storage volume.  Available storage volume types include POSIX filesystems and cloud object storage.  It is possible to have different volume types in the same cluster.

* To use a POSIX filesystem, including both local filesystems (ext4, xfs) and network filesystems such as GPFS or Lustre, follow the setup instructions on "Filesystem storage":configure-fs-storage.html
* If you are using S3-compatible object storage (including Amazon S3, Google Cloud Storage, and Ceph RADOS), follow the setup instructions on "S3 Object Storage":configure-s3-object-storage.html
* If you are using Azure Blob Storage, follow the setup instructions on "Azure Blob Storage":configure-azure-blob-storage.html

There are a number of general configuration parameters for Keepstore.  They are described in the "configuration reference":{{site.baseurl}}/admin/config.html.  In particular, you probably want to change @API/MaxKeepBlobBuffers@ to align Keepstore's memory usage with the available memory on the machine that hosts it (see the example after the service list below).

h3. List services

Add each keepstore server to the @Services.Keepstore@ section of @/etc/arvados/config.yml@ .
    Services:
      Keepstore:
        # No ExternalURL because they are only accessed by the internal subnet.
        InternalURLs:
          "http://keep0.ClusterID.example.com:25107": {}
          "http://keep1.ClusterID.example.com:25107": {}
          # and so forth
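As noted above, you will probably also want to set @API.MaxKeepBlobBuffers@.  A sketch (each buffer holds one data block of up to 64 MiB, so 128 buffers can consume roughly 8 GiB of RAM):

    API:
      MaxKeepBlobBuffers: 128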
{% assign arvados_component = 'keepstore' %}
{% include 'install_packages' %}
{% include 'start_service' %}
{% include 'restart_api' %}

h2(#confirm-working). Confirm working installation

We recommend using the "Cluster diagnostics tool.":diagnostics.html

Here are some other checks you can perform manually.

Log into a host that is on your private Arvados network.  The host should be able to contact your keepstore servers (e.g., @keep[0-9].ClusterID.example.com@).

@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.  @ARVADOS_API_HOST@ should be the hostname of the API server.  @ARVADOS_API_TOKEN@ should be the system root token.

Install the "Command line SDK":{{site.baseurl}}/sdk/cli/install.html

Check that the keepstore server is in the @keep_service@ "accessible" list:
$ arv keep_service accessible
[...]
If keepstore does not show up in the "accessible" list, and you are accessing it from within the private network, check that you have "properly configured the @geo@ block for the API server":install-api-server.html#update-nginx .

Next, install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html

You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  Be sure to execute this from _inside_ the cluster's private network.  You will be able to access Keep from _outside_ the private network after setting up "keepproxy":install-keepproxy.html .

{% include 'arv_put_example' %}

h2(#note). Note on storage management

On its own, a keepstore server never deletes data.  Instead, the keep-balance service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash.  Please see "Balancing Keep servers":{{site.baseurl}}/admin/keep-balance.html for more details.

================================================
FILE: doc/install/install-manual-prerequisites.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Planning and prerequisites
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Before attempting installation, you should begin by reviewing supported platforms, choosing backends for identity, storage, and scheduling, and deciding how you will distribute Arvados services onto machines.  You should also choose an Arvados Cluster ID, choose your hostnames, and acquire TLS certificates.  It may be helpful to make notes as you go along using one of these worksheets:  "New cluster checklist for AWS":new_cluster_checklist_AWS.xlsx - "New cluster checklist for Azure":new_cluster_checklist_Azure.xlsx - "New cluster checklist for on premises Slurm":new_cluster_checklist_slurm.xlsx

The installation guide describes how to set up a basic standalone Arvados instance.  Additional configuration for features including "federation,":{{site.baseurl}}/admin/federation.html "collection versioning,":{{site.baseurl}}/admin/collection-versioning.html "managed properties,":{{site.baseurl}}/admin/collection-managed-properties.html and "storage classes":{{site.baseurl}}/admin/collection-managed-properties.html is described in the "Admin guide.":{{site.baseurl}}/admin/

The Arvados storage subsystem is called "keep".  The compute subsystem is called "crunch".

# "Supported GNU/Linux distributions":#supportedlinux
# "Choosing which components to install":#components
# "Identity provider":#identity
# "Storage backend (Keep)":#storage
# "Container compute scheduler (Crunch)":#scheduler
# "Hardware or virtual machines":#machines
# "Arvados Cluster ID":#clusterid
# "DNS and TLS":#dnstls

h2(#supportedlinux). Supported GNU/Linux distributions

{% include 'supportedlinux' %}

h2(#components). Choosing which components to install

Arvados consists of many components, some of which may be omitted (at the cost of reduced functionality).  It may also be helpful to review the "Arvados Architecture":{{site.baseurl}}/architecture/ to understand how these components interact.

table(table table-bordered table-condensed).
|\3=. *Core*|
|"PostgreSQL database":install-postgresql.html |Stores data for the API server.|Required.|
|"API server + Controller":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|
|\3=. *Keep (storage)*|
|"Keepstore":install-keepstore.html |Stores content-addressed blocks in a variety of backends (local filesystem, cloud object storage).|Required.|
|"Keepproxy":install-keepproxy.html |Gateway service to access keep servers from external networks.|Required to be able to use arv-put, arv-get, or arv-mount outside the private Arvados network.|
|"Keep-web":install-keep-web.html |Gateway service providing read/write HTTP and WebDAV support on top of Keep.|Required to access files from Workbench.|
|"Keep-balance":install-keep-balance.html |Storage cluster maintenance daemon responsible for moving blocks to their optimal server location, adjusting block replication levels, and trashing unreferenced blocks.|Required to free deleted data from underlying storage, and to ensure proper replication and block distribution (including support for storage classes).|
|\3=. *User interface*|
|"Workbench2":install-workbench2-app.html |Primary graphical user interface for working with file collections and running containers.|Optional.  Depends on API server, keep-web, websockets server.|
|\3=. *Additional services*|
|"Websockets server":install-ws.html |Event distribution server.|Required to view streaming container logs in Workbench.|
|"Shell server":install-shell-server.html |Grant Arvados users access to Unix shell accounts on dedicated shell nodes.|Optional.|
|\3=. *Crunch (running containers)*|
|"arvados-dispatch-cloud":crunch2-cloud/install-dispatch-cloud.html |Run analysis workflows in the cloud by allocating and freeing cloud VM instances on demand.|Optional.|
|"crunch-dispatch-slurm":crunch2-slurm/install-dispatch.html |Run analysis workflows distributed across a Slurm cluster.|Optional.|
|"crunch-dispatch-lsf":crunch2-lsf/install-dispatch.html |Run analysis workflows distributed across an LSF cluster.|Optional.|

h2(#identity). Identity provider

Choose which backend you will use to authenticate users.

* Google login to authenticate users with a Google account.
* OpenID Connect (OIDC) if you have a Single Sign-On (SSO) service that supports the OpenID Connect standard.
* LDAP login to authenticate users by username/password using the LDAP protocol, supported by many services such as OpenLDAP and Active Directory.
* PAM login to authenticate users by username/password according to the PAM configuration on the controller node.

h2(#postgresql). PostgreSQL

Arvados works well with a standalone PostgreSQL installation.  When deploying on AWS, Aurora RDS also works, but Aurora Serverless is not recommended.

h2(#storage). Storage backend

Choose which backend you will use for storing and retrieving content-addressed Keep blocks.

* Filesystem storage, such as ext4 or xfs, or network filesystems such as GPFS or Lustre
* Amazon S3, or other object storage that supports the S3 API, including Google Cloud Storage and Ceph
* Azure blob storage

You should also determine the desired replication factor for your data.  A replication factor of 1 means only a single copy of a given data block is kept.  With a conventional filesystem backend and a replication factor of 1, a hard drive failure is likely to lose data.  For this reason the default replication factor is 2 (two copies are kept).  A backend may have its own replication factor (such as the durability guarantees of cloud buckets) and Arvados will take this into account when writing a new data block.

h2(#scheduler). Container compute scheduler

Choose which backend you will use to schedule computation.
* On AWS EC2 and Azure, you probably want to use @arvados-dispatch-cloud@ to manage the full lifecycle of cloud compute nodes: starting up nodes sized to the container request, executing containers on those nodes, and shutting nodes down when no longer needed.
* For on-premises HPC clusters using "slurm":https://slurm.schedmd.com/ use @crunch-dispatch-slurm@ to execute containers with slurm job submissions.
* For on-premises HPC clusters using "LSF":https://www.ibm.com/products/hpc-workload-management/ use @crunch-dispatch-lsf@ to execute containers with LSF job submissions.
* For single-node demos, use @crunch-dispatch-local@ to execute containers directly.

h2(#machines). Hardware (or virtual machines)

Choose how to allocate Arvados services to machines.  We recommend that each machine start with a clean installation of a supported GNU/Linux distribution.

For a production installation, this is a reasonable starting point:
table(table table-bordered table-condensed).
|_. Function|_. Number of nodes|_. Recommended specs|
|PostgreSQL database, Arvados API server, Arvados controller, Websockets, Container dispatcher|1|16+ GiB RAM, 4+ cores, fast disk for database|
|Workbench, Keepproxy, Keep-web, Keep-balance|1|8 GiB RAM, 2+ cores|
|Keepstore servers ^1^|2+|4 GiB RAM|
|Compute worker nodes ^1^|0+|Depends on workload; scaled dynamically in the cloud|
|User shell nodes ^2^|0+|Depends on workload|
^1^ Should be scaled up as needed.
^2^ Refers to shell nodes managed by Arvados that provide ssh access for users to interact with Arvados at the command line.  Optional.

{% include 'notebox_begin' %}
For a small demo installation, it is possible to run all the Arvados services on a single node.  Special considerations for single-node installs will be noted in boxes like this.
{% include 'notebox_end' %}

h2(#clusterid). Arvados Cluster ID

Each Arvados installation is identified by a cluster identifier, which is a unique 5-character lowercase alphanumeric string.  There are 36^5^ = 60466176 possible cluster identifiers.

* For automated test purposes, use "z****"
* For experimental/local-only/private clusters that won't ever be visible on the public Internet, use "x****"
* For long-lived clusters, we recommend reserving a cluster id.  Contact "info@curii.com":mailto:info@curii.com for more information.

Here is one way to make a random 5-character string:
~$ tr -dc 0-9a-z </dev/urandom | head -c5; echo
You may also use a different method to pick the cluster identifier.  The cluster identifier will be part of the hostname of the services in your Arvados cluster.  The rest of this documentation will refer to it as your @ClusterID@.  Whenever @ClusterID@ appears in a configuration example, replace it with your five-character cluster identifier.

h2(#dnstls). DNS entries and TLS certificates

The following services are normally public-facing and require DNS entries and corresponding TLS certificates.  Get certificates from your preferred TLS certificate provider.  We recommend using "Let's Encrypt":https://letsencrypt.org/.  You can run several services on the same node, but each distinct DNS name requires a valid, matching TLS certificate.

This guide uses the following DNS name conventions.  A later part of this guide will describe how to set up Nginx virtual hosts.  It is possible to use custom DNS names for the Arvados services.
table(table table-bordered table-condensed).
|_. Function|_. DNS name|
|Arvados API|@ClusterID.example.com@|
|Arvados Webshell|webshell.@ClusterID.example.com@|
|Arvados Websockets endpoint|ws.@ClusterID.example.com@|
|Arvados Workbench|workbench.@ClusterID.example.com@|
|Arvados Workbench 2|workbench2.@ClusterID.example.com@|
|Arvados Keepproxy server|keep.@ClusterID.example.com@|
|Arvados Keep-web server|download.@ClusterID.example.com@ _and_ *.collections.@ClusterID.example.com@ _or_ *--collections.@ClusterID.example.com@ _or_ collections.@ClusterID.example.com@ (see the "keep-web install docs":install-keep-web.html)|
|Container web services|*.containers.@ClusterID.example.com@ _or_ *--containers.@ClusterID.example.com@|
Setting up Arvados is easiest when wildcard TLS and wildcard DNS are available.  It is also possible to set up Arvados without wildcard TLS and DNS, but some functionality will be unavailable:

* A wildcard for @keep-web@ (e.g., *.collections.@ClusterID.example.com@) is needed to allow users to view Arvados-hosted data in their browsers.  More information on this tradeoff caused by the CORS rules applied by modern browsers is available in the "keep-web URL pattern guide":../api/keep-web-urls.html.
* A wildcard for @controller@ (e.g., *.containers.@ClusterID.example.com@) is needed to allow users to connect to Arvados-hosted services in their browsers.

The table below lists the required TLS certificates and DNS names in each scenario.
table(table table-bordered table-condensed).
||_. Wildcard TLS and DNS available|_. Wildcard TLS available|_. Other|
|TLS|@ClusterID.example.com@ *.@ClusterID.example.com@ *.collections.@ClusterID.example.com@ *.containers.@ClusterID.example.com@|*.@ClusterID.example.com@ @ClusterID.example.com@|@ClusterID.example.com@ git.@ClusterID.example.com@ webshell.@ClusterID.example.com@ ws.@ClusterID.example.com@ workbench.@ClusterID.example.com@ workbench2.@ClusterID.example.com@ keep.@ClusterID.example.com@ download.@ClusterID.example.com@ collections.@ClusterID.example.com@|
|DNS|@ClusterID.example.com@ git.@ClusterID.example.com@ webshell.@ClusterID.example.com@ ws.@ClusterID.example.com@ workbench.@ClusterID.example.com@ workbench2.@ClusterID.example.com@ keep.@ClusterID.example.com@ download.@ClusterID.example.com@ *.collections.@ClusterID.example.com@ *.containers.@ClusterID.example.com@|@ClusterID.example.com@ git.@ClusterID.example.com@ webshell.@ClusterID.example.com@ ws.@ClusterID.example.com@ workbench.@ClusterID.example.com@ workbench2.@ClusterID.example.com@ keep.@ClusterID.example.com@ download.@ClusterID.example.com@ collections.@ClusterID.example.com@|@ClusterID.example.com@ git.@ClusterID.example.com@ webshell.@ClusterID.example.com@ ws.@ClusterID.example.com@ workbench.@ClusterID.example.com@ workbench2.@ClusterID.example.com@ keep.@ClusterID.example.com@ download.@ClusterID.example.com@ collections.@ClusterID.example.com@|
{% include 'notebox_begin' %}
It is also possible to create your own certificate authority, issue server certificates, and install a custom root certificate in the browser.  This is out of scope for this guide.
{% include 'notebox_end' %}

================================================
FILE: doc/install/install-multi-host.html.textile.liquid
================================================
---
layout: default
navsection: installguide
title: Multi-Host Arvados
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Prerequisites and planning":#prerequisites
# "Download the installer":#download
# "Initialize the installer":#copy_config
# "Set up your infrastructure":#setup-infra
## "Create AWS infrastructure with Terraform":#terraform
## "Create required infrastructure manually":#inframanual
# "Edit local.params* files":#localparams
# "Configure Keep storage":#keep
# "Choose the SSL configuration":#certificates
## "Using Let's Encrypt certificates":#lets-encrypt
## "Bring your own certificates":#bring-your-own
### "Securing your TLS certificate keys":#secure-tls-keys
# "Create a compute image":#create_a_compute_image
# "Begin installation":#installation
# "Further customization of the installation":#further_customization
# "Confirm the cluster is working":#test-install
## "Debugging issues":#debugging
## "Iterating on config changes":#iterating
## "Common problems and solutions":#common-problems
# "Initial user and login":#initial_user
# "Monitoring and Metrics":#monitoring
# "Load balancing controllers":#load_balancing
# "After the installation":#post_install

h2(#introduction). Introduction

This multi-host installer is the recommended way to set up a production Arvados cluster.  These instructions include specific details for installing on Amazon Web Services (AWS), which are marked as "AWS specific".  However, with additional customization the installer can be used as a template for deployment on other cloud providers or HPC systems.

h2(#prerequisites). Prerequisites and planning

h3. Cluster ID and base domain

Choose a 5-character cluster identifier that will represent the cluster.  Here are "guidelines on choosing a cluster identifier":../architecture/federation.html#cluster_id .  Only lowercase letters and digits 0-9 are allowed.  Examples will use @xarv1@ or @${CLUSTER}@; you should substitute the cluster id you have selected.

Determine the base domain for the cluster.  This will be referred to as @${DOMAIN}@.  For example, if DOMAIN is @xarv1.example.com@, then @controller.${DOMAIN}@ means @controller.xarv1.example.com@.

h3(#DNS). DNS hostnames for each service

You will need a DNS entry for each service.  When using the "Terraform script":#terraform to set up your infrastructure, these domains will be created automatically using AWS Route 53.

In the default configuration these are:

# @controller.${DOMAIN}@
# @ws.${DOMAIN}@
# @keep0.${DOMAIN}@
# @keep1.${DOMAIN}@
# @keep.${DOMAIN}@
# @download.${DOMAIN}@
# @*.collections.${DOMAIN}@ -- a wildcard DNS resolving to the @keepweb@ service
# @*.containers.${DOMAIN}@ -- a wildcard DNS resolving to the @controller@ service
# @workbench.${DOMAIN}@
# @workbench2.${DOMAIN}@
# @webshell.${DOMAIN}@
# @shell.${DOMAIN}@
# @prometheus.${DOMAIN}@
# @grafana.${DOMAIN}@

For more information, see "DNS entries and TLS certificates":install-manual-prerequisites.html#dnstls.
h2(#download). Download the installer

{% assign local_params_src = 'multiple_hosts' %}
{% assign config_examples_src = 'multi_host/aws' %}
{% assign terraform_src = 'terraform/aws' %}
{% include 'download_installer' %}

h2(#setup-infra). Set up your infrastructure

## "Create AWS infrastructure with Terraform":#terraform
## "Create required infrastructure manually":#inframanual

h3(#terraform). Create AWS infrastructure with Terraform (AWS specific)

We provide a set of Terraform code files that you can run to create the necessary infrastructure on Amazon Web Services.  These files are located in the @terraform@ installer directory and are divided into three sections:

# The @terraform/vpc/@ subdirectory controls the network-related infrastructure of your cluster, including firewall rules and split-horizon DNS resolution.
# The @terraform/data-storage/@ subdirectory controls the stateful part of your cluster.  It currently only sets up the S3 bucket for holding the Keep blocks; in the future it will also manage the database service.
# The @terraform/services/@ subdirectory controls the hosts that will run the different services on your cluster, and makes sure that they have the required software for the installer to do its job.

h4. Software requirements & considerations

{% include 'notebox_begin' %}
The Terraform state files (which keep crucial infrastructure information from the cloud) will be saved inside each subdirectory, under the name @terraform.tfstate@.  These will be committed to the git repository used to coordinate deployment.  It is very important to keep this git repository secure; only the sysadmins who will be responsible for maintaining your Arvados cluster should have access to it.
{% include 'notebox_end' %}

h4. Terraform code configuration

Each section described above contains a @terraform.tfvars@ file with some configuration values that you should set before applying each configuration.  You should at least set the AWS region, cluster prefix and domain name in @terraform/vpc/terraform.tfvars@:
{% include 'terraform_vpc_tfvars' %}
If you don't set the main configuration variables in the @vpc/terraform.tfvars@ file, you will be asked to re-enter these parameters every time you run Terraform.

The @data-storage/terraform.tfvars@ and @services/terraform.tfvars@ files let you configure additional details, including the SSH public key for deployment, instance & volume sizes, etc.  All these configurations are provided with sensible defaults:
{% include 'terraform_datastorage_tfvars' %}
{% include 'terraform_services_tfvars' %}
h4. Set credentials

You will need an AWS access key and secret key to create the infrastructure.
export AWS_ACCESS_KEY_ID="anaccesskey"
export AWS_SECRET_ACCESS_KEY="asecretkey"
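If you are using temporary credentials (e.g., from AWS IAM Identity Center / SSO), also export @AWS_SESSION_TOKEN@; this is a standard AWS environment variable honored by Terraform, not an installer-specific setting:

export AWS_SESSION_TOKEN="asessiontoken"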
h4. Create the infrastructure

Build the infrastructure by running @./installer.sh terraform@.  The last stage will output the information needed to set up the cluster's domain and continue with the installer.  For example:
./installer.sh terraform
...
Apply complete! Resources: 16 added, 0 changed, 0 destroyed.

Outputs:

arvados_sg_id = "sg-02f999a99973999d7"
arvados_subnet_id = "subnet-01234567abc"
cluster_int_cidr = "10.1.0.0/16"
cluster_name = "xarv1"
compute_subnet_id = "subnet-abcdef12345"
deploy_user = "admin"
domain_name = "xarv1.example.com"
letsencrypt_iam_access_key_id = "AKAA43MAAAWAKAADAASD"
private_ip = {
  "controller" = "10.1.1.1"
  "keep0" = "10.1.1.3"
  "keep1" = "10.1.1.4"
  "keepproxy" = "10.1.1.2"
  "shell" = "10.1.1.7"
  "workbench" = "10.1.1.5"
}
public_ip = {
  "controller" = "18.235.116.23"
  "keep0" = "34.202.85.86"
  "keep1" = "38.22.123.98"
  "keepproxy" = "34.231.9.201"
  "shell" = "44.208.155.240"
  "workbench" = "52.204.134.136"
}
region_name = "us-east-1"
route53_dns_ns = tolist([
  "ns-1119.awsdns-11.org",
  "ns-1812.awsdns-34.co.uk",
  "ns-437.awsdns-54.com",
  "ns-809.awsdns-37.net",
])
ssl_password_secret_name = "xarv1-arvados-ssl-privkey-password"
vpc_id = "vpc-0999994998399923a"
letsencrypt_iam_secret_access_key = "XXXXXSECRETACCESSKEYXXXX"
database_password = 
h4. Additional DNS configuration

Once Terraform has completed, the infrastructure for your Arvados cluster is up and running.  One last piece of DNS configuration is required.

The domain names for your cluster (e.g.: controller.xarv1.example.com) are managed via "Route 53":https://aws.amazon.com/route53/ and the TLS certificates will be issued using "Let's Encrypt":https://letsencrypt.org/ .

You need to configure the parent domain to delegate to the newly created zone.  For example, you need to configure "example.com" to delegate the subdomain "xarv1.example.com" to the nameservers for the Arvados hostname records created by Terraform.  You do this by creating a @NS@ record on the parent domain that refers to the name servers listed in the Terraform output parameter @route53_dns_ns@.

If your parent domain is also controlled by Route 53, the process will be like this:

# Log in to the AWS Console and navigate to the service page for *Route 53*
# Go to the list of *Hosted zones* and click on the zone for the parent domain
# Click on *Create record*
# For *Record name* put the cluster id
# For *Record type* choose @NS - Name servers for a hosted zone@
# For *Value* add the values from the Terraform output parameter @route53_dns_ns@, one hostname per line, with punctuation (quotes and commas) removed.
# Click *Create records*

If the parent domain is controlled by some other service, follow the guide for the appropriate service.

h4. Other important output parameters

The certificates will be requested from Let's Encrypt when you run the installer.

* @cluster_int_cidr@ will be used to set @CLUSTER_INT_CIDR@
* You'll also need @compute_subnet_id@ and @arvados_sg_id@ to set @COMPUTE_SUBNET@ and @COMPUTE_SG@ in @local.params@ and when you "create a compute image":#create_a_compute_image.

You can now proceed to "edit local.params* files":#localparams.

h3(#inframanual). Create required infrastructure manually

If you will be setting up infrastructure without using the provided Terraform script, here are the recommendations you will need to consider.

h4. Virtual Private Cloud (AWS specific)

We recommend setting Arvados up in its own "Virtual Private Cloud (VPC)":https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html

When you do so, you need to configure a couple of additional things:

# "Create a subnet for the compute nodes":https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html
# You should set up a "security group which allows SSH access (port 22)":https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html
# Make sure to add a "VPC S3 endpoint":https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html

h4(#keep-bucket). S3 Bucket (AWS specific)

We recommend "creating an S3 bucket":https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html for data storage named @${CLUSTER}-nyw5e-000000000000000-volume@.  We recommend creating an IAM role called @${CLUSTER}-keepstore-00-iam-role@ with a "policy that can read, write, list and delete objects in the bucket":configure-s3-object-storage.html#IAM .  With the example cluster id @xarv1@, the bucket would be called @xarv1-nyw5e-000000000000000-volume@ and the role would be called @xarv1-keepstore-00-iam-role@.

These names are recommended because they are the default names used in the configuration template.  If you use different names, you will need to edit the configuration template later.
h4(#hosts). Required hosts

You will need to allocate several hosts (physical or virtual machines) for the fixed infrastructure of the Arvados cluster. These machines should have at least 2 cores and 8 GiB of RAM, running a supported Linux distribution.

{% include 'supportedlinux' %}

Allocate the following hosts as appropriate for your site. On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform. With the exception of "keep0" and "keep1", all of these hosts should have external (public) IP addresses if you intend for them to be accessible outside of the private network or VPC.

The installer will set up the Arvados services on your machines. Here is the default assignment of services to machines:

# API node
## postgresql server
## arvados api server
## arvados controller (recommended hostname @controller.${DOMAIN}@ and @*.containers.${DOMAIN}@)
# KEEPSTORE nodes (at least 1 if using S3 as a Keep backend, else 2)
## arvados keepstore (recommended hostnames @keep0.${DOMAIN}@ and @keep1.${DOMAIN}@)
# WORKBENCH node
## arvados legacy workbench URLs (recommended hostname @workbench.${DOMAIN}@)
## arvados workbench2 (recommended hostname @workbench2.${DOMAIN}@)
## arvados webshell (recommended hostname @webshell.${DOMAIN}@)
## arvados websocket (recommended hostname @ws.${DOMAIN}@)
## arvados cloud dispatcher
## arvados keepbalance
## arvados keepproxy (recommended hostname @keep.${DOMAIN}@)
## arvados keepweb (recommended hostname @download.${DOMAIN}@ and @*.collections.${DOMAIN}@)
# SHELL node (optional)
## arvados shell (recommended hostname @shell.${DOMAIN}@)

When using the database installed by Arvados (and not an "external database":#ext-database), the database is stored under @/var/lib/postgresql@. Arvados logs are also kept in @/var/log@ and @/var/www/arvados-api/shared/log@. Accordingly, you should ensure that the disk partition containing @/var@ has adequate storage for your planned usage. We suggest starting with 50GiB of free space on the database host.

h4. Additional prerequisites when preparing machines to run the installer

# From the account where you are performing the install, passwordless @ssh@ to each machine. This means the client's public key should be added to @~/.ssh/authorized_keys@ on each node (see the @ssh-copy-id@ sketch after this list).
# Passwordless @sudo@ access on the account on each machine you will @ssh@ in to. This usually means adding the account to the @sudo@ group and having a rule like this in @/etc/sudoers.d/arvados_passwordless@ that allows members of group @sudo@ to execute any command without entering a password.
%sudo ALL=(ALL:ALL) NOPASSWD:ALL
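For example, you can copy your public key to each node with @ssh-copy-id@ (a sketch; the key path and the @admin@ account name are illustrative assumptions):

ssh-copy-id -i ~/.ssh/id_ed25519.pub admin@controller.xarv1.example.com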
# @git@ installed on each machine
# Port 443 reachable by clients

(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to manage EC2 instances.":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html#IAM

If your infrastructure differs from the setup proposed above (ie, different hostnames), you can still use the installer, but "additional customization may be necessary":#further_customization .

h2(#localparams). Edit @local.params*@ files

The cluster configuration parameters are included in two files: @local.params@ and @local.params.secrets@. These files can be found wherever you choose to initialize the installation files (e.g., @~/setup-arvados-xarv1@ in these examples).

The @local.params.secrets@ file is intended to store security-sensitive data such as passwords, private keys, tokens, etc. Depending on the security requirements of the cluster deployment, you may wish to store this file in a secrets store like AWS Secrets Manager or Jenkins credentials.

h3. Parameters from @local.params@:

# Set @CLUSTER@ to the 5-character cluster identifier (e.g. "xarv1")
# Set @DOMAIN@ to the base DNS domain of the environment (e.g. "xarv1.example.com")
# Set the @*_INT_IP@ variables with the internal (private) IP addresses of each host. Since services share hosts, some hosts are the same. See "note about /etc/hosts":#etchosts
# Edit @CLUSTER_INT_CIDR@, this should be the CIDR of the private network that Arvados is running on, e.g. the VPC. If you used Terraform, this is emitted as @cluster_int_cidr@.
_CIDR stands for "Classless Inter-Domain Routing" and describes which portion of the IP address refers to the network. For example 192.168.3.0/24 means that the first 24 bits are the network (192.168.3) and the last 8 bits are a specific host on that network._
_AWS Specific: Go to the AWS console and into the VPC service; there is a column in the table view of the VPCs that gives the CIDR for the VPC (IPv4 CIDR)._
# Set @INITIAL_USER_EMAIL@ to your email address, as you will be the first admin user of the system.

h3. Parameters from @local.params.secrets@:

# Set each @KEY@ / @TOKEN@ / @PASSWORD@ to a random string. You can use @installer.sh generate-tokens@
./installer.sh generate-tokens
BLOB_SIGNING_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
MANAGEMENT_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
SYSTEM_ROOT_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
ANONYMOUS_USER_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
DATABASE_PASSWORD=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# Set @DATABASE_PASSWORD@ to a random string (unless you "already have a database":#ext-database then you should set it to that database's password)
Important! If this contains any non-alphanumeric characters, in particular ampersand ('&'), it is necessary to add backslash quoting. For example, if the password is @Lq&MZ...@ then @local.params.secrets@ must quote the special characters like this: @DATABASE_PASSWORD="Lq\&MZ\..."@
# Set @LE_AWS_*@ credentials to allow Let's Encrypt to perform DNS authentication through Route 53
# Set @DISPATCHER_SSH_PRIVKEY@ to an SSH private key that @arvados-dispatch-cloud@ will use to connect to the compute nodes:
DISPATCHER_SSH_PRIVKEY="-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
...
s4VY40kNxs6MsAAAAPbHVjYXNAaW5zdGFsbGVyAQIDBA==
-----END OPENSSH PRIVATE KEY-----"
You can create one by following the steps described on the "building a compute node documentation":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html#sshkeypair page.

h3(#etchosts). Note on @/etc/hosts@

Because Arvados services are typically accessed by external clients, they are likely to have both a public IP address and an internal IP address.

On cloud providers such as AWS, sending internal traffic to a service's public IP address can incur egress costs and throttling. Thus it is very important for internal traffic to stay on the internal network. The installer implements this by updating @/etc/hosts@ on each node to associate each service's hostname with the internal IP address, so that when Arvados services communicate with one another, they always use the internal network address. This is NOT a substitute for DNS, you still need to set up DNS names for all of the services that have public IP addresses (it does, however, avoid a complex "split-horizon" DNS configuration).

It is important to be aware of this because if you mistype the IP address for any of the @*_INT_IP@ variables, hosts may unexpectedly fail to be able to communicate with one another. If this happens, check and edit as necessary the file @/etc/hosts@ on the host that is failing to make an outgoing connection.

h2(#keep). Configure Keep storage

The @multi_host/aws@ template uses S3 for storage. Arvados also supports "filesystem storage":configure-fs-storage.html and "Azure blob storage":configure-azure-blob-storage.html . Keep storage configuration can be found in the @arvados.cluster.Volumes@ section of @local_config_dir/pillars/arvados.sls@.

h3. Object storage in S3 (AWS Specific)

If you "followed the recommended naming scheme":#keep-bucket for both the bucket and role (or used the provided Terraform script), you're done.

If you did not follow the recommended naming scheme for either the bucket or role, you'll need to update these parameters in @local.params@:

# Set @KEEP_AWS_S3_BUCKET@ to the value of the "keepstore bucket you created earlier":#keep-bucket
# Set @KEEP_AWS_IAM_ROLE@ to the "keepstore role you created earlier":#keep-bucket

You can also configure a specific AWS Region for the S3 bucket by setting @KEEP_AWS_REGION@.

{% include 'ssl_config_multi' %}

h2(#authentication). Configure your authentication provider (optional, recommended)

By default, the installer will use the "Test" provider, which is a list of usernames and cleartext passwords stored in the Arvados config file. *This is a low-security configuration and you are strongly advised to configure one of the other "supported authentication methods":setup-login.html* .

h2(#ext-database). Using an external database (optional)

The standard behavior of the installer is to install and configure PostgreSQL for use by Arvados. You can optionally configure it to use a separately managed database instead.

Arvados requires a database that is compatible with PostgreSQL 9.5 or later. For example, Arvados is known to work with Amazon Aurora (note: even idle, Arvados services will periodically poll the database, so we strongly advise using "provisioned" mode).

# In @local.params@, remove 'database' from the list of roles assigned to the controller node:
NODES=(
  [controller.${DOMAIN}]=controller,websocket,dispatcher,keepbalance
  ...
)
# In @local.params@, set @DATABASE_INT_IP@ to an empty string and @DATABASE_EXTERNAL_SERVICE_HOST_OR_IP@ to the database endpoint (this can be a hostname, it does not have to be an IP address).
DATABASE_INT_IP=""
...
DATABASE_EXTERNAL_SERVICE_HOST_OR_IP="arvados.xxxxxxx.eu-east-1.rds.amazonaws.com"
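Optionally, you can confirm that the external database accepts connections before deploying. A sketch, assuming the default @arvados@ database user and @arvados_production@ database name:

psql -h arvados.xxxxxxx.eu-east-1.rds.amazonaws.com -U arvados arvados_production -c 'SELECT 1;'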
# In @local.params.secrets@, set @DATABASE_PASSWORD@ to the correct value. "See the previous section describing correct quoting":#localparams
# In @local.params@ you may need to adjust the database name and user.

h2(#further_customization). Further customization of the installation (optional)

If you are installing on AWS and have followed all of the naming conventions recommended in this guide, you probably don't need to do any further customization.

If you are installing on a different cloud provider or on HPC, other changes may require editing the Saltstack pillars and states files found in @local_config_dir@. In particular, @local_config_dir/pillars/arvados.sls@ contains the template (in the @arvados.cluster@ section) used to produce the Arvados configuration file that is distributed to all the nodes. Consult the "Configuration reference":config.html for a comprehensive list of configuration keys.

Any extra Salt "state" files you add under @local_config_dir/states@ will be added to the Salt run and applied to the hosts.

h2(#create_a_compute_image). Configure compute nodes

{% include 'branchname' %}

If you will use fixed compute nodes with an HPC scheduler such as SLURM or LSF, you will need to "Set up your compute nodes with Docker":{{site.baseurl}}/install/crunch2/install-compute-node-docker.html or "Set up your compute nodes with Singularity":{{site.baseurl}}/install/crunch2/install-compute-node-singularity.html.

On cloud installations, containers are dispatched in Docker daemons running in the _compute instances_, which need some additional setup.

h3. Build the compute image

Follow "the instructions to build a cloud compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html using the compute image builder script found in @arvados/tools/compute-images@ in your Arvados clone from "step 3":#download.

h3. Configure the compute image

Once the image has been created, open @local.params@ and edit as follows (AWS specific settings described here; you will need to make custom changes for other cloud providers):

# Set @COMPUTE_AMI@ to the AMI produced by Packer
# Set @COMPUTE_AWS_REGION@ to the appropriate AWS region
# Set @COMPUTE_USER@ to the admin user account on the image
# Set the @COMPUTE_SG@ list to the VPC security group which you set up to allow SSH connections to these nodes
# Set @COMPUTE_SUBNET@ to the value of SubnetId of your VPC
# Update @arvados.cluster.InstanceTypes@ in @local_config_dir/pillars/arvados.sls@ as necessary. The example instance types are for AWS; other cloud providers will of course have different instance types with different names and specifications. (AWS specific) If m5/c5 node types are not available, replace them with m4/c4. You'll need to double check the values for Price and IncludedScratch/AddedScratch for each type that is changed.

h2(#installation). Begin installation

At this point, you are ready to run the installer script in deploy mode, which will conduct all of the Arvados installation. Run this in the @~/arvados-setup-xarv1@ directory:
./installer.sh deploy
This will install and configure Arvados on all the nodes. It will take a while and produce a lot of logging. If it runs into an error, it will stop.

h2(#test-install). Confirm the cluster is working

When everything has finished, you can run the diagnostics. There are a couple of ways of doing this, listed below.

h3. Running diagnostics from the same system as the installer

Running diagnostics requires @arvados-client@ and @docker@ to be installed. If this is not possible you can run them on your Arvados shell node as explained in the next section.

Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@. If you are running the installer from a host connected to the Arvados private network, use @-internal-client@. Otherwise, use @-external-client@.
./installer.sh diagnostics (-internal-client|-external-client)
h3. Running diagnostics from a cluster node

You can run the diagnostics from the cluster's shell node. This has the advantage that you don't need to manage any software on your local system, but might not be a possibility if your Arvados cluster doesn't include a shell node.
./installer.sh diagnostics-internal
h3(#debugging). Debugging issues

The installer records log files for each deployment.

Most service logs go to @/var/log/syslog@. The logs for the Rails API server can be found in @/var/www/arvados-api/current/log/production.log@ on the appropriate instance(s).

Workbench 2 is a client-side JavaScript application. If you are having trouble loading Workbench 2, check the browser's developer console (this can be found in "Tools → Developer Tools").

h3(#iterating). Iterating on config changes

You can iterate on the config and maintain the cluster by making changes to @local.params@ and @local_config_dir@ and running @installer.sh deploy@ again.

If you are debugging a configuration issue on a specific node, you can speed up the cycle a bit by deploying just one node:
./installer.sh deploy keep0.xarv1.example.com
However, once you have a final configuration, you should run a full deploy to ensure that the configuration has been synchronized on all the nodes.

h3(#common-problems). Common problems and solutions

h4. PG::UndefinedTable: ERROR: relation \"api_clients\" does not exist

The arvados-api-server package sets up the database as a post-install script. If the database host or password wasn't set correctly (or quoted correctly) at the time that package is installed, it won't be able to set up the database.

This will manifest as an error like this:
PG::UndefinedTable: ERROR:  relation "api_clients" does not exist
If this happens, you need to

1. correct the database information
2. run @./installer.sh deploy xarv1.example.com@ to update the configuration on the API/controller node
3. Log in to the API/controller server node, then run this command to re-run the post-install script, which will set up the database:
dpkg-reconfigure arvados-api-server
4. Re-run @./installer.sh deploy@ again to synchronize everything, and so that the install steps that need to contact the API server are run successfully.

h4. Missing ENA support (AWS Specific)

If the AMI wasn't built with ENA (extended networking) support and the instance type requires it, it'll fail to start. You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@. The solution is to build a new AMI with @--aws-ena-support true@

h2(#initial_user). Initial user and login

At this point you should be able to log into the Arvados cluster. The initial URL will be @https://workbench.${DOMAIN}@

If you did *not* "configure a different authentication provider":#authentication you will be using the "Test" provider, and the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster. It uses the values of @INITIAL_USER@ and @INITIAL_USER_PASSWORD@ from the @local.params*@ file.

If you *did* configure a different authentication provider, the first user to log in will automatically be given Arvados admin privileges.

h2(#monitoring). Monitoring and Metrics

You can monitor the health and performance of the system using the admin dashboard: @https://grafana.${DOMAIN}@

To log in, use username "admin" and @${INITIAL_USER_PASSWORD}@ from @local.params.secrets@.

Once logged in, you will want to add the dashboards to the front page.

# On the left icon bar, click on "Browse"
# You should see a folder called "Arvados Cluster", click to open it
## If you don't see anything, make sure the check box next to "Starred" is not selected
# You should see three dashboards: "Arvados cluster overview", "Node exporter" and "Postgres exporter"
# Visit each dashboard; at the top of the page click on the star next to the title to "Mark as favorite"
# They should now be linked on the front page.

h2(#load_balancing). Load balancing controllers (optional)

In order to handle high loads and perform rolling upgrades, the controller service can be scaled to a number of hosts, and the installer makes this a fairly simple task.

First, you should take care of the infrastructure deployment: if you use our Terraform code, you will need to set up the @terraform.tfvars@ in @terraform/vpc/@ so that in addition to the node named @controller@ (the load-balancer), a number of @controllerN@ nodes (backends) are defined as needed, and added to the @internal_service_hosts@ list.

We suggest that the backend nodes hold just the controller service and nothing else, so they can be easily created or destroyed as needed without disrupting other services.

The following is an example @terraform/vpc/terraform.tfvars@ file that describes a cluster with a load-balancer, 2 backend nodes, a separate database node, a shell node, a keepstore node and a workbench node that will also hold other miscellaneous services:
region_name = "us-east-1"
cluster_name = "xarv1"
domain_name = "xarv1.example.com"
# Include controller nodes in this list so instances are assigned to the
# private subnet. Only the balancer node should be connecting to them.
internal_service_hosts = [ "keep0", "shell", "database", "controller1", "controller2" ]

# Assign private IPs for the controller nodes. These will be used to create
# internal DNS resolutions that will get used by the balancer and database nodes.
private_ip = {
  controller = "10.1.1.11"
  workbench = "10.1.1.15"
  database = "10.1.2.12"
  controller1 = "10.1.2.21"
  controller2 = "10.1.2.22"
  shell = "10.1.2.17"
  keep0 = "10.1.2.13"
}
Once the infrastructure is deployed, you'll then need to define which node will take the @balancer@ role and which will be the @controller@ nodes in @local.params@, as shown in this partial example:
NODES=(
  [controller.${DOMAIN}]=balancer
  [controller1.${DOMAIN}]=controller
  [controller2.${DOMAIN}]=controller
  [database.${DOMAIN}]=database
  ...
)
Note that we also set the @database@ role to its own node instead of just leaving it in a shared controller node.

Each time you run @installer.sh deploy@, the system will automatically do rolling upgrades. This means it will make changes to one controller node at a time, after removing it from the balancer so that there's no downtime.

h2(#post_install). After the installation

As part of the operation of @installer.sh@, it automatically creates a @git@ repository with your configuration templates. You should retain this repository but *be aware that it contains sensitive information* (passwords and tokens used by the Arvados services as well as cloud credentials if you used Terraform to create the infrastructure).

As described in "Iterating on config changes":#iterating you may use @installer.sh deploy@ to re-run Salt and deploy configuration changes and upgrades. However, be aware that the configuration templates created for you by @installer.sh@ are a snapshot, and are not automatically kept up to date.

When deploying upgrades, consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls@. To specify the version to upgrade to, set the @VERSION@ parameter in @local.params@.

See also "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.

================================================
FILE: doc/install/install-postgresql.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install PostgreSQL 9.4+
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Arvados requires at least version *9.4* of PostgreSQL. We recommend using version 10 or newer.

* "AWS":#aws
* "Red Hat, AlmaLinux, and Rocky Linux":#rh8
* "Debian or Ubuntu":#debian

h3(#aws). AWS

When deploying on AWS, Arvados can use an Aurora RDS PostgreSQL database. Aurora Serverless is not recommended.

h3(#rh8). Red Hat, AlmaLinux, and Rocky Linux

{% comment %}
The default version on RH8 is PostgreSQL 10. You can install up to PostgreSQL 13.
{% endcomment %}

# Install PostgreSQL
# dnf install postgresql-server postgresql-contrib
# Initialize the database
# postgresql-setup initdb
# Configure the database to accept password connections from localhost
# sed -ri -e 's/^(host +all +all +(127\.0\.0\.1\/32|::1\/128) +)ident$/\1md5/' /var/lib/pgsql/data/pg_hba.conf
# Configure the database to accept password connections from the local network (replace @10.9.8.0/24@ with your private network mask)
# echo 'host all all 10.9.8.0/24 md5' | tee -a /var/lib/pgsql/data/pg_hba.conf
# Configure the database to launch at boot and start now
# systemctl enable --now postgresql
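You can then confirm the server is running and accepting local connections, for example:

# sudo -u postgres psql -c 'SELECT version();'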
h3(#debian). Debian or Ubuntu

All supported versions of Debian and Ubuntu include a version of PostgreSQL you can use with Arvados.

# Install PostgreSQL
# apt --no-install-recommends install postgresql postgresql-contrib
# Configure PostgreSQL to accept password connections from the local network (replace @10.9.8.0/24@ with your private network mask)
# echo 'host all all 10.9.8.0/24 md5' | tee -a /etc/postgresql/*/main/pg_hba.conf
# Configure the database to launch at boot and start now
# systemctl enable --now postgresql
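Depending on which Arvados services you install next, you may also need to create the database and database user they will use. A sketch, assuming the default @arvados@ user and @arvados_production@ database names used elsewhere in this guide:

# sudo -u postgres createuser --encrypted -R -S --pwprompt arvados
# sudo -u postgres createdb arvados_production -T template0 -E UTF8 -O arvados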
================================================
FILE: doc/install/install-shell-server.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Set up a shell node
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Install Dependencies and SDKs":#dependencies
# "Install git and curl":#install-packages
# "Create record for VM":#vm-record
# "Install arvados-login-sync":#arvados-login-sync
# "Confirm working installation":#confirm-working

h2(#introduction). Introduction

Arvados support for shell nodes allows you to use Arvados permissions to grant Linux shell accounts to users.

A shell node runs the @arvados-login-sync@ service to manage user accounts, and typically has Arvados utilities and SDKs pre-installed. Users are allowed to log in and run arbitrary programs. For optimal performance, the Arvados shell server should be on the same LAN as the Arvados cluster.

Because Arvados @config.yml@ _contains secrets_ it should *not* be present on shell nodes. Shell nodes should be separate virtual machines from the VMs running other Arvados services.

You may choose to grant root access to users so that they can customize the node, for example, installing new programs. This has security considerations depending on whether a shell node is single-user or multi-user.

A single-user shell node should be set up so that it only stores Arvados access tokens that belong to that user. In that case, that user can be safely granted root access without compromising other Arvados users.

In the multi-user shell node case, a malicious user with @root@ access could access other users' Arvados tokens. Users should only be given @root@ access on a multi-user shell node if you would trust them to be Arvados administrators.

Be aware that with access to the @docker@ daemon, it is trivial to gain *root* access to any file on the system, so giving users @docker@ access should be considered equivalent to @root@ access.

h2(#dependencies). Install Dependencies and SDKs

# "Install Ruby and Bundler":ruby.html
# "Install the Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
# "Install the FUSE driver":{{site.baseurl}}/sdk/fuse/install.html
# "Install the CLI":{{site.baseurl}}/sdk/cli/install.html
# "Install the R SDK":{{site.baseurl}}/sdk/R/index.html (optional)
# "Install Docker":install-docker.html (optional)

{% assign arvados_component = 'git curl' %}
{% include 'install_packages' %}

h2(#vm-record). Create record for VM

As an admin, create an Arvados virtual_machine object representing this shell server. This will return a uuid.
apiserver:~$ arv --format=uuid virtual_machine create --virtual-machine '{"hostname":"shell.ClusterID.example.com"}'
zzzzz-2x53u-zzzzzzzzzzzzzzz
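You can confirm the new record with a list request, for example (a sketch):

apiserver:~$ arv --format=uuid virtual_machine list
zzzzz-2x53u-zzzzzzzzzzzzzzz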
h2(#arvados-login-sync). Install arvados-login-sync

The @arvados-login-sync@ service makes it possible for Arvados users to log in to the shell server. It sets up login accounts, updates group membership, adds each user's SSH public keys to the @~/.ssh/authorized_keys@ file, and adds an Arvados token to @~/.config/arvados/settings.conf@ .

Install the @arvados-login-sync@ program from RubyGems.
shellserver:# gem install arvados-login-sync
h2(#arvados-login-sync). Run arvados-login-sync periodically

Create a cron job to run the @arvados-login-sync@ program every 2 minutes. This will synchronize user accounts.

If this is a single-user shell node, then @ARVADOS_API_TOKEN@ should be a token for that user. See "Create a token for a user":{{site.baseurl}}/admin/user-management-cli.html#create-token .

If this is a multi-user shell node, then @ARVADOS_API_TOKEN@ should be an administrator token such as the @SystemRootToken@. See discussion in the "introduction":#introduction about security on multi-user shell nodes.

Set @ARVADOS_VIRTUAL_MACHINE_UUID@ to the UUID from "Create record for VM":#vm-record

h3. Standalone cluster
shellserver:# umask 0700; tee /etc/cron.d/arvados-login-sync <<EOF
ARVADOS_API_HOST="ClusterID.example.com"
ARVADOS_API_TOKEN="xxxxxxxxxxxxxxxxx"
ARVADOS_VIRTUAL_MACHINE_UUID="zzzzz-2x53u-zzzzzzzzzzzzzzz"
*/2 * * * * root arvados-login-sync
EOF
h3. Part of a LoginCluster federation

If the cluster is part of a "federation with centralized user management":../admin/federation.html#LoginCluster , the login sync script needs to be given an admin token from the login cluster.
shellserver:# umask 0700; tee /etc/cron.d/arvados-login-sync <<EOF
ARVADOS_API_HOST="ClusterID.example.com"
ARVADOS_API_TOKEN="yyyloginclusteradmintokenyyyy"
ARVADOS_VIRTUAL_MACHINE_UUID="zzzzz-2x53u-zzzzzzzzzzzzzzz"
*/2 * * * * root arvados-login-sync
EOF
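Once the cron job has run, you can watch for synchronization activity in the shell node's logs; cron records each invocation, so a sketch like this should show it:

shellserver:# journalctl | grep arvados-login-sync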
h2(#confirm-working). Confirm working installation

A user should be able to log in to the shell server when the following conditions are satisfied:

# As an admin user, you have given the user permission to log in using the Workbench → Admin menu → "Users" item → "Show" button → "Admin" tab → "Setup account" button.
# The cron job has run.

In order to log in via SSH, the user must also upload an SSH public key. Alternately, if configured, users can log in using "Webshell":install-webshell.html .

See also "how to add a VM login permission link at the command line":../admin/user-management-cli.html

================================================
FILE: doc/install/install-single-host.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Single host Arvados
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Limitations of the single host install":#limitations
# "Prerequisites and planning":#prerequisites
# "Download the installer":#download
# "Install Ansible":#install-ansible
# "Set up cluster configuration":#localparams
# "Set up cluster inventory":#inventory
# "Run the installer playbook":#run-playbook
# "Test the cluster":#test-install
# "Changing your configuration":#further_customization
# "Upgrading your Arvados cluster":#post_install

h2(#limitations). Limitations of the single host install

*NOTE: The single host installation is a good choice for evaluating Arvados, but it is not recommended for production use.*

Using the default configuration, the single host install has scaling limitations compared to a production multi-host install:

* It uses the local @/var@ partition to store all user data and logs.
* It uses the @crunch-dispatch-local@ dispatcher, which has a limit of eight concurrent jobs.
* Because jobs and Arvados services all run on the same machine, they will compete for CPU/RAM resources.

h2(#prerequisites). Prerequisites and planning

h3. Cluster ID

Choose a 5-character cluster identifier that will represent the cluster. Refer to "our guidelines on choosing a cluster identifier":../architecture/federation.html#cluster_id. Only lowercase letters and digits 0-9 are allowed. Our documentation uses @xurid@ throughout. You should replace this each time it appears with your chosen cluster identifier.

h3. Cluster host

You will need a dedicated (virtual) machine for your Arvados server with at least 2 cores and 8 GiB of RAM (4+ cores / 16+ GiB recommended if you are running workflows) running a supported Linux distribution:

{% include 'supportedlinux' %}

The single host install stores all user data and logs under @/var@. You should ensure that this partition has adequate storage for your planned usage. We suggest starting with at least 50GiB of free space.

You must be able to connect to this host via SSH. Your account must have permission to run arbitrary commands with @sudo@.

h2(#download). Download the installer

The Ansible installer is only available in the Arvados source tree. Clone a copy of the Arvados source for the version of Arvados you're using in a directory convenient for you:

{% include 'branchname' %}
~$ git clone --depth=1 --branch={{ branchname }} https://github.com/arvados/arvados ~/arvados
h2(#install-ansible). Install Ansible

{% include 'install_ansible' header_level: 'h3' %}

h2(#localparams). Set up cluster configuration

Copy the example cluster configuration from the Arvados source tree to a location outside it. We recommend you use your chosen cluster ID in the filename to help keep it unique. For example:
$ cp arvados/tools/ansible/examples/simple-cluster-config.yml ~/xurid-config.yml
Open the copy you created in your editor, and make changes following the instructions at the top of the file.

h2(#inventory). Set up cluster inventory

Copy the example cluster inventory from the Arvados source tree to a location outside it. We recommend you use your chosen cluster ID in the filename to help keep it unique. For example:
$ cp arvados/tools/ansible/examples/simple-cluster-inventory.yml ~/xurid-inventory.yml
Open the copy you created in your editor and make these changes noted in comments:

* Under @hosts:@, change @hostname.example@ to the hostname or address of your cluster node.
* Change @arvados_config_file@ to the path of the cluster configuration you created in the previous step.
* Change @arvados_cluster_id@ to your chosen cluster ID.

You may make other changes noted in comments, but the changes listed above are required.

h2(#run-playbook). Run the installer playbook

With your cluster configuration and inventory complete, you can use them to run the installer playbook:
$ cd arvados/tools/ansible
arvados/tools/ansible $ ansible-playbook -Ki ~/xurid-inventory.yml install-arvados-cluster.yml
This will prompt you for a @BECOME password:@. Enter your sudo password on the cluster node. Ansible will use this to perform privileged system configuration. You will see it start to log tasks like:
PLAY [Bootstrap nodes] *********************************************************

TASK [Load Arvados configuration file] *****************************************
ok: [hostname.example -> localhost]

TASK [Load Arvados cluster configuration] **************************************
ok: [hostname.example]

TASK [ansible.builtin.include_role : distro_bootstrap] *************************

TASK [distro_bootstrap : Get distribution IDs] *********************************
changed: [hostname.example]
If all goes well, it will finish with a @PLAY RECAP@ reporting @failed=0@, which indicates all tasks were successful:
PLAY RECAP *********************************************************************
hostname.example : ok=161  changed=34   unreachable=0    failed=0    skipped=23   rescued=0    ignored=0
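If you want more detail about what any task changed, you can re-run the playbook with extra verbosity, for example:

arvados/tools/ansible $ ansible-playbook -v -Ki ~/xurid-inventory.yml install-arvados-cluster.yml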
h3(#playbook-problems). Diagnosing problems with the playbook run

If the @PLAY RECAP@ indicates that a task failed, that will typically be logged with a message like this:
TASK [arvados_controller : Start and enable arvados-controller.service] ********
fatal: [hostname.example]: FAILED! => {"changed": false, "msg": "Unable to restart service arvados-controller.service: Job for arvados-controller.service failed because the control process exited with error code.\nSee \"systemctl status arvados-controller.service\" and \"journalctl -xeu arvados-controller.service\" for details.\n"}
The @TASK@ line gives you some context for what failed. The first part (@arvados_controller@ in this example) describes generally what Arvados service it was configuring. The rest of the line describes the specific step it was taking (starting @arvados-controller.service@ in this example). This context can suggest where you might check your configuration for problems or look on the cluster node for additional information.

This example problem was caused by the Controller service in the cluster configuration trying to use an already-claimed port in one of the @InternalURLs@.

h2(#test-install). Test the cluster

h3. Run diagnostics

The @arvados-client diagnostics@ command can check all services on a cluster to identify problems with inconsistent configuration. *On your cluster node*, install and run it like this:
$ sudo apt install arvados-client
$ sudo arvados-client sudo diagnostics -internal-client
INFO       5: running health check (same as `arvados-server check`)
INFO      10: getting discovery document from https://hostname.example:8443/discovery/v1/apis/arvados/v1/rest
INFO      20: getting exported config from https://hostname.example:8443/arvados/v1/config
[…]
INFO     160: running a container
INFO      ... container request uuid = xurid-xvhdp-12345abcde67890
INFO      ... container request submitted, waiting up to 10m for container to run
INFO    9990: deleting temporary collection
INFO    --- no errors ---
h3. Access Workbench

The default Ansible inventory deploys Arvados with a self-signed certificate. If you deployed this way, you will have the best Workbench experience if you configure your browser to trust that certificate for it and supporting services. Follow the instructions for your specific browser below.

If you configured the inventory with a different certificate that is already trusted by your browser, you can skip these steps. You should be able to open the URL from @Services.Workbench2.ExternalURL@ from your cluster configuration in your browser.

h4. Trusting self-signed certificates in Chrome

{% comment %}
Last updated for Chrome v138
{% endcomment %}

# Find the @arvados_tls.Default@ setting in your Ansible inventory.
# If those options specify @remote: true@, copy the @cert@ path from your cluster host to the host where you're running the browser. Note you _only_ need the @cert@ file, not the @key@ file.
# In the URL bar, enter @chrome://certificate-manager/@ and open that.
# Under the "Custom" header, open "Installed by you."
# Next to "Trusted Certificates," press the "Import" button.
# In the file picker dialog, open your copy of the @arvados_tls.Default.cert@ file.

Now you should be able to open the URL from @Services.Workbench2.ExternalURL@ from your cluster configuration in your browser. You can skip the next section unless you also want to set up Firefox.

h4. Trusting self-signed certificates in Firefox

{% comment %}
Last updated for Firefox 140
{% endcomment %}

# Open the "Edit" menu and select "Settings."
# Find and press the "View Certificates…" button to open the Certificate Manager.
# Open the "Servers" tab.
# Press the "Add Exception…" button.
# Enter the @ExternalURL@ in your cluster configuration for the @Workbench2@ service.
# Press the "Get Certificate" button.
# Press the "Confirm Security Exception" button.
# Repeat the process from step 4 with your configured URLs for the @Controller@, @Keepproxy@, @WebDAV@, and @WebDAVDownload@ services.

Now you should be able to open the URL from @Services.Workbench2.ExternalURL@ from your cluster configuration in your browser.

h2(#further_customization). Changing your configuration

In the future, if you want to make changes to your Arvados cluster or Ansible inventory configuration, simply edit those files and "run the playbook again":#run-playbook. The playbook will deploy your changes to all the component services.

h2(#post_install). Upgrading your Arvados cluster

When a new version of Arvados is released, the general process to upgrade the cluster is:

# In your Arvados checkout directory, @git fetch@ and then @git switch@ to the branch or tag that corresponds to the release you want to use.
# Consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if you need or want to make changes to your cluster configuration file.
# "Run the playbook again":#run-playbook with your cluster inventory.

See also "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.

================================================
FILE: doc/install/install-webshell.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Configure webshell
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Introduction":#introduction
# "Prerequisites":#prerequisites
# "Update config.yml":#configure
# "Update nginx configuration":#update-nginx
# "Install packages":#install-packages
# "Configure shellinabox":#config-shellinabox
# "Configure pam":#config-pam
# "Confirm working installation":#confirm-working

h2(#introduction). Introduction

Arvados supports @webshell@, which allows ssh access to shell nodes via the browser. This functionality is integrated in @Workbench@.

@Webshell@ is provided by the @shellinabox@ package, which runs on each shell node for which webshell is enabled. For authentication, a supported @pam library@ that allows authentication against Arvados is also required. One Nginx (or similar web server) virtualhost is also needed to expose all the @shellinabox@ instances via https.

h2(#prerequisites). Prerequisites

# "Install Workbench 2":{{site.baseurl}}/install/install-workbench2-app.html
# "Set up a shell node":{{site.baseurl}}/install/install-shell-server.html

h2(#configure). Update config.yml

Edit the cluster config at @config.yml@ and set @Services.WebShell.ExternalURL@. Replace @zzzzz@ with your cluster id. Workbench will use this information to activate its support for webshell.
    Services:
      WebShell:
        InternalURLs: {}
        ExternalURL: https://webshell.ClusterID.example.com/
h2(#update-nginx). Update Nginx configuration

The arvados-webshell service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption. This Nginx virtualhost could live on your Workbench server, or any other server that is reachable by your Workbench users and can access the @shell-in-a-box@ service on the shell node(s) on port 4200.

Use a text editor to create a new file @/etc/nginx/conf.d/arvados-webshell.conf@ with the following configuration. Options that need attention are marked in red.
upstream arvados-webshell {
  server                shell.ClusterID.example.com:4200;
}

server {
  listen                443 ssl;
  server_name           webshell.ClusterID.example.com;

  proxy_connect_timeout 90s;
  proxy_read_timeout    300s;

  ssl                   on;
  ssl_certificate       /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key   /YOUR/PATH/TO/cert.key;

  location /shell.ClusterID {
    if ($request_method = 'OPTIONS') {
       add_header 'Access-Control-Allow-Origin' '*';
       add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
       add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
       add_header 'Access-Control-Max-Age' 1728000;
       add_header 'Content-Type' 'text/plain charset=UTF-8';
       add_header 'Content-Length' 0;
       return 204;
    }
    if ($request_method = 'POST') {
       add_header 'Access-Control-Allow-Origin' '*';
       add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
       add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
    }
    if ($request_method = 'GET') {
       add_header 'Access-Control-Allow-Origin' '*';
       add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
       add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
    }

    proxy_ssl_session_reuse off;
    proxy_read_timeout  90;
    proxy_set_header    X-Forwarded-Proto https;
    proxy_set_header    Host $http_host;
    proxy_set_header    X-Real-IP $remote_addr;
    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass          http://arvados-webshell;
  }
}
Note that the location line in the nginx config matches your shell node hostname *without domain*, because that is how the shell node was defined in the "Set up a shell node":{{site.baseurl}}/install/install-shell-server.html#vm-record instructions. It makes for a more user-friendly experience in Workbench.

For additional shell nodes with @shell-in-a-box@, add @location@ and @upstream@ sections as needed.

{% assign arvados_component = 'shellinabox libpam-arvados-go' %}
{% include 'install_packages' %}

h2(#config-shellinabox). Configure shellinabox

h3. Red Hat, AlmaLinux, and Rocky Linux

Edit @/etc/sysconfig/shellinaboxd@:
# TCP port that shellinaboxd's webserver listens on
PORT=4200

# SSL is disabled because it is terminated in Nginx. Adjust as needed.
OPTS="--disable-ssl --no-beep --service=/shell.ClusterID.example.com:AUTH:HOME:SHELL"
# systemctl enable shellinabox
# systemctl start shellinabox
h3. Debian and Ubuntu

Edit @/etc/default/shellinabox@:
# TCP port that shellinaboxd's webserver listens on
SHELLINABOX_PORT=4200

# SSL is disabled because it is terminated in Nginx. Adjust as needed.
SHELLINABOX_ARGS="--disable-ssl --no-beep --service=/shell.ClusterID.example.com:AUTH:HOME:SHELL"
# systemctl enable shellinabox
# systemctl start shellinabox
h2(#config-pam). Configure pam

Use a text editor to create a new file @/etc/pam.d/shellinabox@ with the following configuration. Options that need attention are marked in red.
# This example is a stock debian "login" file with pam_arvados
# replacing pam_unix. It can be installed as /etc/pam.d/shellinabox .

auth       optional   pam_faildelay.so  delay=3000000
auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
auth       requisite  pam_nologin.so
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
session       required   pam_env.so readenv=1
session       required   pam_env.so readenv=1 envfile=/etc/default/locale

# The first argument is the address of the API server.  The second
# argument is this shell node's hostname.  The hostname must match the
# "hostname" field of the virtual_machine record.
auth [success=1 default=ignore] /usr/lib/pam_arvados.so ClusterID.example.com shell.ClusterID.example.com

auth    requisite            pam_deny.so
auth    required            pam_permit.so

auth       optional   pam_group.so
session    required   pam_limits.so
session    optional   pam_lastlog.so
session    optional   pam_motd.so  motd=/run/motd.dynamic
session    optional   pam_motd.so
session    optional   pam_mail.so standard

@include common-account
@include common-session
@include common-password

session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
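Before testing through the browser, you can exercise this PAM stack directly with the @pamtester@ utility, if it is installed (a sketch; replace @myuser@ with a real Arvados username):

# pamtester shellinabox myuser authenticate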
h2(#confirm-working). Confirm working installation

We recommend using the "Cluster diagnostics tool.":diagnostics.html

Here are some other checks you can perform manually. A user should now be able to log in to the shell server, using webshell via workbench. Please refer to "Accessing an Arvados VM with Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html

================================================
FILE: doc/install/install-workbench2-app.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install Workbench 2
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

# "Update config.yml":#update-config
# "Update Nginx configuration":#update-nginx
# "Install arvados-workbench2":#install-packages
# "Restart the API server and controller":#restart-api
# "Confirm working installation":#confirm-working
# "Trusted client setting":#trusted_client

Workbench2 is the web-based user interface for Arvados.

{% include 'notebox_begin' %}
Workbench2 is the replacement for Arvados Workbench. Workbench2 is suitable for day-to-day use, but does not yet implement every feature of the traditional Workbench.
{% include 'notebox_end' %}

h2(#configure). Update config.yml

Edit @config.yml@ to set the keys below. The full set of configuration options is in the "Workbench section of config.yml":{{site.baseurl}}/admin/config.html
    Services:
      Workbench2:
        ExternalURL: "https://workbench2.ClusterID.example.com"
h2(#update-nginx). Update Nginx configuration

Workbench2 does not require its own database. It is a set of HTML, JavaScript and CSS files that are served as static files from Nginx.

Use a text editor to create a new file @/etc/nginx/conf.d/arvados-workbench2.conf@ with the following configuration. Options that need attention are marked in red.
server {
    listen       80;
    server_name  workbench2.ClusterID.example.com;
    return 301   https://workbench2.ClusterID.example.com$request_uri;
}

server {
  listen       443 ssl;
  server_name  workbench2.ClusterID.example.com;

  ssl_certificate     /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key /YOUR/PATH/TO/cert.key;

  index  index.html;

  # Workbench2 uses a call to /config.json to bootstrap itself
  # and find out where to contact the API server.
  location /config.json {
    return 200 '{ "API_HOST": "ClusterID.example.com" }';
  }

  location / {
    root      /var/www/arvados-workbench2/workbench2;
    index     index.html;
    try_files $uri $uri/ /index.html;
    if (-f $document_root/maintenance.html) {
      return 503;
    }
  }
}
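After reloading Nginx, you can verify that the bootstrap endpoint responds as configured:

$ curl https://workbench2.ClusterID.example.com/config.json
{ "API_HOST": "ClusterID.example.com" }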
h2. Vocabulary configuration

Workbench2 will load, if available, a vocabulary definition which lists available metadata properties for groups and collections. To learn how to configure the property vocabulary definition, please visit the "Metadata Vocabulary Format":{{site.baseurl}}/admin/metadata-vocabulary.html page in the Admin section.

{% assign arvados_component = 'arvados-workbench2' %}
{% include 'install_packages' %}

{% include 'restart_api' %}

h2(#confirm-working). Confirm working installation

Visit @https://workbench2.ClusterID.example.com@ in a browser. You should be able to log in using the login method you configured in the previous step. If @Users.AutoAdminFirstUser@ is true, you will be an admin user.

================================================
FILE: doc/install/install-ws.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install the websocket server
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

The arvados-ws server provides event notifications to websocket clients. It can be installed anywhere with access to the PostgreSQL database and the Arvados API server, typically behind a web proxy that provides SSL support. See the "godoc page":http://godoc.org/github.com/arvados/arvados/services/ws for additional information.

# "Update config.yml":#update-config
# "Update nginx configuration":#update-nginx
# "Install arvados-ws package":#install-packages
# "Start the service":#start-service
# "Restart the API server and controller":#restart-api
# "Confirm working installation":#confirm-working

h2(#configure). Update config.yml

Edit the cluster config at @config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id.
    Services:
      Websocket:
        InternalURLs:
	  "http://localhost:8005": {}
        ExternalURL: wss://ws.ClusterID.example.com/websocket
h2(#update-nginx). Update Nginx configuration

The arvados-ws service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption.

Use a text editor to create a new file @/etc/nginx/conf.d/arvados-ws.conf@ with the following configuration. Options that need attention are marked in red.
upstream arvados-ws {
  server                127.0.0.1:8005;
}

server {
  listen                443 ssl;
  server_name           ws.ClusterID.example.com;

  proxy_connect_timeout 90s;
  proxy_read_timeout    300s;

  ssl                   on;
  ssl_certificate       /YOUR/PATH/TO/cert.pem;
  ssl_certificate_key   /YOUR/PATH/TO/cert.key;

  location / {
    proxy_pass          http://arvados-ws;
    proxy_set_header    Upgrade         $http_upgrade;
    proxy_set_header    Connection      "upgrade";
    proxy_set_header    Host            $host;
    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
  }
}
{% assign arvados_component = 'arvados-ws' %}
{% include 'install_packages' %}

{% include 'start_service' %}

{% include 'restart_api' %}

h2(#confirm). Confirm working installation

We recommend using the "Cluster diagnostics tool.":diagnostics.html

Here are some other checks you can perform manually. Confirm the service is listening on its assigned port and responding to requests.
~$ curl https://ws.ClusterID.example.com/websocket
not websocket protocol
================================================
FILE: doc/install/nginx.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install Nginx
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h3. Red Hat, AlmaLinux, and Rocky Linux
# dnf install nginx
h3. Debian and Ubuntu
# apt --no-install-recommends install nginx
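On either distribution family, configure Nginx to launch at boot and start it now, the same way as the PostgreSQL steps earlier in this guide:

# systemctl enable --now nginx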
================================================
FILE: doc/install/packages.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Arvados package repositories
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

On any host where you install Arvados software, you'll need to add the Arvados package repository. Repositories are available for several popular distributions.

* "Red Hat, AlmaLinux, and Rocky Linux":#redhat
* "Debian and Ubuntu":#debian

h3(#redhat). Red Hat, AlmaLinux, and Rocky Linux

Packages are available for the following Red Hat-based distributions:

* AlmaLinux 10 (since 10.0)
* AlmaLinux 9 (since 9.2)
* AlmaLinux 8 (since 8.8)
* RHEL 10 (since 10.0)
* RHEL 9 (since 9.2)
* RHEL 8 (since 8.8)
* Rocky Linux 10 (since 10.0)
* Rocky Linux 9 (since 9.2)
* Rocky Linux 8 (since 8.8)

{% include 'setup_redhat_repo' %}

h3(#debian). Debian and Ubuntu

Packages are available for the following Debian-based distributions:

* Debian 12 ("bookworm")
* Ubuntu 24.04 ("noble")
* Ubuntu 22.04 ("jammy")

{% include 'setup_debian_repo' %}

================================================
FILE: doc/install/ruby.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Install Ruby and Bundler
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

{% include 'install_ruby_and_bundler' %}

================================================
FILE: doc/install/salt-multi-host.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Multi-Host Arvados
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

"This page has moved.":install-multi-host.html

================================================
FILE: doc/install/salt-single-host.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Single host Arvados
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

"This page has moved.":install-single-host.html

================================================
FILE: doc/install/setup-login.html.textile.liquid
================================================

---
layout: default
navsection: installguide
title: Set up web based login
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

Select one of the following login mechanisms for your cluster.

# If all users will authenticate with Google, "configure Google login":#google.
# If all users will authenticate with an OpenID Connect provider (other than Google), "configure OpenID Connect":#oidc.
# If all users will authenticate with an existing LDAP service, "configure LDAP":#ldap.
# If all users will authenticate using PAM as configured on your controller node, "configure PAM":#pam.

h2(#google). Google login

With this configuration, users will sign in with their Google accounts.

Use the Google Developers Console to create a set of client credentials.

# Select or create a project.
# Click *+ Enable APIs and Services*.
#* Search for *Google People API* and click *Enable API*.
#* Navigate back to the main "APIs & Services" page.
# On the sidebar, click *OAuth consent screen*.
#* On consent screen settings, enter your identifying details.
#* Under *Branding* → *Authorized domains* add your domain (@example.com@).
#* Click *Save*.
# On the sidebar, click *Clients*, then click *+ Create client*, arriving at the *OAuth client ID* setup page.
# Under *Application type* select *Web application*.
# Add the JavaScript origin: @https://workbench2.ClusterID.example.com@. This should match the Web origin where you will host Workbench. Note that it can only include the schema, hostname, and port parts; the path, in particular a trailing @/@, is not allowed.
# Add the Redirect URI: @https://ClusterID.example.com/login@. The host part of this URI should match the @ExternalURL@ of the Arvados controller service as specified in the configuration file @/etc/arvados/config.yml@, including the port if specified.
# Copy the values of *Client ID* and *Client secret* to the @Login.Google@ section of @/etc/arvados/config.yml@.

{% codeblock as yaml %}
Login:
  Google:
    Enable: true
    ClientID: "0000000000000-zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.apps.googleusercontent.com"
    ClientSecret: "zzzzzzzzzzzzzzzzzzzzzzzz"
{% endcodeblock %}

h2(#oidc). OpenID Connect

With this configuration, users will sign in with a third-party OpenID Connect provider such as GitHub, Auth0, Okta, or PingFederate.

Similar to the Google login section above, you will need to register your Arvados cluster with the provider as an application (relying party). When asked for a redirect URL or callback URL, use @https://ClusterID.example.com/login@ (the external URL of your controller service, plus @/login@).

The provider will supply an issuer URL, client ID, and client secret. Add these to your Arvados configuration.

{% codeblock as yaml %}
Login:
  OpenIDConnect:
    Enable: true
    Issuer: https://accounts.example.com/
    ClientID: "0123456789abcdef"
    ClientSecret: "zzzzzzzzzzzzzzzzzzzzzzzz"
{% endcodeblock %}

h3. Accepting OpenID bearer tokens as Arvados API tokens

Arvados can also be configured to accept provider-issued access tokens as Arvados API tokens by setting @Login.OpenIDConnect.AcceptAccessToken@ to @true@. This can be useful for integrating third party applications.

{% codeblock as yaml %}
Login:
  OpenIDConnect:
    AcceptAccessToken: true
    AcceptAccessTokenScope: "arvados"
{% endcodeblock %}

# If the provider-issued tokens are JWTs, and @Login.OpenIDConnect.AcceptAccessTokenScope@ is not empty, Arvados will check that the token contains the configured scope, and reject tokens that do not have the configured scope. This can be used to control which users or applications are permitted to access your Arvados instance.
# Tokens are validated by presenting them to the UserInfo endpoint advertised by the OIDC provider.
# Once validated, a token is cached and accepted without re-checking for up to 10 minutes.
# A token that fails validation is cached and will not be re-checked for up to 5 minutes.
# Network errors and HTTP 5xx responses from the provider's UserInfo endpoint are not cached.
# The OIDC token cache size is currently limited to 1000 tokens; if the number of distinct tokens used in a 5 minute period is greater than this, tokens may be checked more frequently.

Check the OpenIDConnect section in the "default config file":{{site.baseurl}}/admin/config.html for more details and configuration options.

h2(#ldap). LDAP

With this configuration, authentication uses an external LDAP service like OpenLDAP or Active Directory.
Check the OpenIDConnect section in the "default config file":{{site.baseurl}}/admin/config.html for more details and configuration options.

h2(#ldap). LDAP

With this configuration, authentication uses an external LDAP service like OpenLDAP or Active Directory.

Enable LDAP authentication and provide your LDAP server's host, port, and credentials (if needed to search the directory) in @config.yml@:

{% codeblock as yaml %}
Login:
  LDAP:
    Enable: true
    URL: ldap://ldap.example.com:389
    SearchBindUser: cn=lookupuser,dc=example,dc=com
    SearchBindPassword: xxxxxxxx
    SearchBase: ou=Users,dc=example,dc=com
{% endcodeblock %}

The email address reported by LDAP will be used as the primary key for Arvados accounts. This means *users must not be able to edit their own email addresses* in the directory.

Additional configuration settings are available; a combined sketch follows this list.

* @StartTLS@ is enabled by default.
* @StripDomain@ and @AppendDomain@ modify the username entered by the user before searching for it in the directory.
* @SearchAttribute@ (default @uid@) is the LDAP attribute used when searching for usernames.
* @SearchFilters@ accepts LDAP filter expressions to control which users can log in.
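For example, several of these options might be combined for an Active Directory-style service as follows; the domain, attribute, and group DN are placeholders for your site's values:

{% codeblock as yaml %}
Login:
  LDAP:
    Enable: true
    URL: ldap://ldap.example.com:389
    # Treat a login of "jdoe" as "jdoe@example.com" before searching:
    AppendDomain: "example.com"
    # Search on this attribute instead of the default "uid":
    SearchAttribute: sAMAccountName
    # Only allow members of a particular group to log in (placeholder DN):
    SearchFilters: (memberOf=cn=arvados-users,ou=Groups,dc=example,dc=com)
{% endcodeblock %}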

Check the LDAP section in the "default config file":{{site.baseurl}}/admin/config.html for more details and configuration options.

h2(#pam). PAM

With this configuration, authentication is done according to the Linux PAM ("Pluggable Authentication Modules") configuration on your controller host.

Enable PAM authentication in @config.yml@:

{% codeblock as yaml %}
Login:
  PAM:
    Enable: true
{% endcodeblock %}

Check the "default config file":{{site.baseurl}}/admin/config.html for more PAM configuration options.

The default PAM configuration on most Linux systems uses the local user/password database in @/etc/passwd@ and @/etc/shadow@ for all logins. In this case, in order to log in to Arvados, users must have a UNIX account and password on the controller host itself. This can be convenient for a single-user or test cluster. Configuring a user account with a shell of @/bin/false@ will allow the user to log in to Arvados without allowing shell logins on the controller host.

PAM can also be configured to use other authentication systems such as NIS or Kerberos. In a production environment, PAM configuration should use the service name ("arvados" by default) and set a separate policy for Arvados login (see the sketch below). In this case, Arvados users should not have shell accounts on the controller node.

For information about configuring PAM, refer to the "PAM System Administrator's Guide":http://www.linux-pam.org/Linux-PAM-html/Linux-PAM_SAG.html.
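For example, a minimal separate policy for the @arvados@ service might delegate to SSSD. This is only a sketch; the right modules depend entirely on your site's authentication stack:

{% codeblock as sh %}
# /etc/pam.d/arvados -- hypothetical policy for Arvados logins only,
# assuming SSSD brokers your directory (LDAP/Kerberos) authentication.
auth     required   pam_sss.so
account  required   pam_sss.so
{% endcodeblock %}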

================================================ FILE: doc/install/workbench.html.textile.liquid ================================================
---
layout: default
navsection: installguide
title: Customizing Workbench
...

{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.

SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}

h2. Site name

Use the @Workbench.SiteName@ configuration option to set the site name rendered at the top of Workbench.

{% codeblock as yaml %}
Workbench:
  SiteName: Arvados Workbench
{% endcodeblock %}

h2. Welcome page

Use the @Workbench.WelcomePageHTML@ configuration option to set the text that is rendered when a user arrives at the front page (and has not yet logged in).

{% codeblock as yaml %}
Workbench:
  WelcomePageHTML: |
    <h2>Please log in.</h2>

    <p>If you have never used Arvados Workbench before, logging in for the first time will automatically create a new account.</p>

    <i>Arvados Workbench uses your information only for identification, and does not retrieve any other personal information.</i>
{% endcodeblock %}

h2. Inactive user page

Use the @Workbench.InactivePageHTML@ configuration option to set the text that is rendered when a user logs in but is inactive.

{% codeblock as yaml %}
Workbench:
  InactivePageHTML: |
    <h3>Hi! You're logged in, but...</h3>

    <p>Your account is inactive.</p>

    <p>An administrator must activate your account before you can get any further.</p>
{% endcodeblock %}

h2(#banner). Message banner on login and custom tooltips

Set the @Workbench.BannerUUID@ configuration option to the UUID of a collection. *This collection should be shared with all users.* (One way to create it is sketched below.)

{% codeblock as yaml %}
Workbench:
  BannerUUID: zzzzz-4zz18-0123456789abcde
{% endcodeblock %}

h3. Banner

You can have a box pop up when users load Workbench to give information such as links to site-specific documentation or notification of anticipated downtime. The banner appears when a user loads Workbench and has not yet viewed the current banner text. Users can also view the banner after dismissing it by selecting the *Restore Banner* option from the *Notifications* menu.

The banner text (HTML formatted) is loaded from the file @banner.html@ in the collection specified by @BannerUUID@. The banner does _not_ need to be wrapped in *html* or *body* tags (if present, they will be removed).

{% include 'html_tags' %}

h3. Tooltips

You can provide a custom tooltip overlay to give site-specific guidance for using Workbench. Users can opt out by selecting *Disable Tooltips* from the *Notifications* menu.

The tooltips are loaded from the file @tooltips.json@ in the collection specified by @BannerUUID@. The format of this file is a JSON object where each key is a "CSS selector":https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors and the value is the text of the tooltip. Here is an example:

{% codeblock as yaml %}
{
  "[data-cy=side-panel-button]": "Click here to create a new project!",
  "[data-cy=project-panel] tbody tr:nth-child(1)": "First element in the project list"
}
{% endcodeblock %}

The first entry adds a tooltip displaying "Click here to create a new project!" to the HTML node with the attribute @data-cy="side-panel-button"@. The second entry adds a tooltip displaying "First element in the project list" by finding the project panel element, finding the table body element within it, then matching the first table row.

Use the web developer tools offered by your browser to determine what identifiers are available, and construct selectors that will anchor your tooltips to the desired Workbench components.
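For example, assuming @banner.html@ and @tooltips.json@ exist in your current directory, you could create the collection with @arv-put@, then copy the UUID it prints into @Workbench.BannerUUID@ and share the collection with all users:

{% codeblock as sh %}
# Upload both files as a new collection; arv-put prints the new
# collection's UUID, which goes into Workbench.BannerUUID.
arv-put --name "Workbench banner and tooltips" banner.html tooltips.json
{% endcodeblock %}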

================================================ FILE: doc/js/bootstrap.js ================================================
/*!
 * Bootstrap v3.1.0 (http://getbootstrap.com)
 * Copyright 2011-2014 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 */

if (typeof jQuery === 'undefined') { throw new Error('Bootstrap requires jQuery') }

/* ========================================================================
 * Bootstrap: transition.js v3.1.0
 * http://getbootstrap.com/javascript/#transitions
 * ========================================================================
 * Copyright 2011-2014 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * ======================================================================== */

+function ($) {
  'use strict';

  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
  // ============================================================

  function transitionEnd() {
    var el = document.createElement('bootstrap')

    var transEndEventNames = {
      'WebkitTransition' : 'webkitTransitionEnd',
      'MozTransition'    : 'transitionend',
      'OTransition'      : 'oTransitionEnd otransitionend',
      'transition'       : 'transitionend'
    }

    for (var name in transEndEventNames) {
      if (el.style[name] !== undefined) {
        return { end: transEndEventNames[name] }
      }
    }

    return false // explicit for ie8 ( ._.)
} // http://blog.alexmaccaw.com/css-transitions $.fn.emulateTransitionEnd = function (duration) { var called = false, $el = this $(this).one($.support.transition.end, function () { called = true }) var callback = function () { if (!called) $($el).trigger($.support.transition.end) } setTimeout(callback, duration) return this } $(function () { $.support.transition = transitionEnd() }) }(jQuery); /* ======================================================================== * Bootstrap: alert.js v3.1.0 * http://getbootstrap.com/javascript/#alerts * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // ALERT CLASS DEFINITION // ====================== var dismiss = '[data-dismiss="alert"]' var Alert = function (el) { $(el).on('click', dismiss, this.close) } Alert.prototype.close = function (e) { var $this = $(this) var selector = $this.attr('data-target') if (!selector) { selector = $this.attr('href') selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7 } var $parent = $(selector) if (e) e.preventDefault() if (!$parent.length) { $parent = $this.hasClass('alert') ? $this : $this.parent() } $parent.trigger(e = $.Event('close.bs.alert')) if (e.isDefaultPrevented()) return $parent.removeClass('in') function removeElement() { $parent.trigger('closed.bs.alert').remove() } $.support.transition && $parent.hasClass('fade') ? $parent .one($.support.transition.end, removeElement) .emulateTransitionEnd(150) : removeElement() } // ALERT PLUGIN DEFINITION // ======================= var old = $.fn.alert $.fn.alert = function (option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.alert') if (!data) $this.data('bs.alert', (data = new Alert(this))) if (typeof option == 'string') data[option].call($this) }) } $.fn.alert.Constructor = Alert // ALERT NO CONFLICT // ================= $.fn.alert.noConflict = function () { $.fn.alert = old return this } // ALERT DATA-API // ============== $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close) }(jQuery); /* ======================================================================== * Bootstrap: button.js v3.1.0 * http://getbootstrap.com/javascript/#buttons * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // BUTTON PUBLIC CLASS DEFINITION // ============================== var Button = function (element, options) { this.$element = $(element) this.options = $.extend({}, Button.DEFAULTS, options) this.isLoading = false } Button.DEFAULTS = { loadingText: 'loading...' } Button.prototype.setState = function (state) { var d = 'disabled' var $el = this.$element var val = $el.is('input') ? 
'val' : 'html' var data = $el.data() state = state + 'Text' if (!data.resetText) $el.data('resetText', $el[val]()) $el[val](data[state] || this.options[state]) // push to event loop to allow forms to submit setTimeout($.proxy(function () { if (state == 'loadingText') { this.isLoading = true $el.addClass(d).attr(d, d) } else if (this.isLoading) { this.isLoading = false $el.removeClass(d).removeAttr(d) } }, this), 0) } Button.prototype.toggle = function () { var changed = true var $parent = this.$element.closest('[data-toggle="buttons"]') if ($parent.length) { var $input = this.$element.find('input') if ($input.prop('type') == 'radio') { if ($input.prop('checked') && this.$element.hasClass('active')) changed = false else $parent.find('.active').removeClass('active') } if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change') } if (changed) this.$element.toggleClass('active') } // BUTTON PLUGIN DEFINITION // ======================== var old = $.fn.button $.fn.button = function (option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.button') var options = typeof option == 'object' && option if (!data) $this.data('bs.button', (data = new Button(this, options))) if (option == 'toggle') data.toggle() else if (option) data.setState(option) }) } $.fn.button.Constructor = Button // BUTTON NO CONFLICT // ================== $.fn.button.noConflict = function () { $.fn.button = old return this } // BUTTON DATA-API // =============== $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) { var $btn = $(e.target) if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn') $btn.button('toggle') e.preventDefault() }) }(jQuery); /* ======================================================================== * Bootstrap: carousel.js v3.1.0 * http://getbootstrap.com/javascript/#carousel * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // CAROUSEL CLASS DEFINITION // ========================= var Carousel = function (element, options) { this.$element = $(element) this.$indicators = this.$element.find('.carousel-indicators') this.options = options this.paused = this.sliding = this.interval = this.$active = this.$items = null this.options.pause == 'hover' && this.$element .on('mouseenter', $.proxy(this.pause, this)) .on('mouseleave', $.proxy(this.cycle, this)) } Carousel.DEFAULTS = { interval: 5000, pause: 'hover', wrap: true } Carousel.prototype.cycle = function (e) { e || (this.paused = false) this.interval && clearInterval(this.interval) this.options.interval && !this.paused && (this.interval = setInterval($.proxy(this.next, this), this.options.interval)) return this } Carousel.prototype.getActiveIndex = function () { this.$active = this.$element.find('.item.active') this.$items = this.$active.parent().children() return this.$items.index(this.$active) } Carousel.prototype.to = function (pos) { var that = this var activeIndex = this.getActiveIndex() if (pos > (this.$items.length - 1) || pos < 0) return if (this.sliding) return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) if (activeIndex == pos) return this.pause().cycle() return this.slide(pos > activeIndex ? 
'next' : 'prev', $(this.$items[pos])) } Carousel.prototype.pause = function (e) { e || (this.paused = true) if (this.$element.find('.next, .prev').length && $.support.transition) { this.$element.trigger($.support.transition.end) this.cycle(true) } this.interval = clearInterval(this.interval) return this } Carousel.prototype.next = function () { if (this.sliding) return return this.slide('next') } Carousel.prototype.prev = function () { if (this.sliding) return return this.slide('prev') } Carousel.prototype.slide = function (type, next) { var $active = this.$element.find('.item.active') var $next = next || $active[type]() var isCycling = this.interval var direction = type == 'next' ? 'left' : 'right' var fallback = type == 'next' ? 'first' : 'last' var that = this if (!$next.length) { if (!this.options.wrap) return $next = this.$element.find('.item')[fallback]() } if ($next.hasClass('active')) return this.sliding = false var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction }) this.$element.trigger(e) if (e.isDefaultPrevented()) return this.sliding = true isCycling && this.pause() if (this.$indicators.length) { this.$indicators.find('.active').removeClass('active') this.$element.one('slid.bs.carousel', function () { var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()]) $nextIndicator && $nextIndicator.addClass('active') }) } if ($.support.transition && this.$element.hasClass('slide')) { $next.addClass(type) $next[0].offsetWidth // force reflow $active.addClass(direction) $next.addClass(direction) $active .one($.support.transition.end, function () { $next.removeClass([type, direction].join(' ')).addClass('active') $active.removeClass(['active', direction].join(' ')) that.sliding = false setTimeout(function () { that.$element.trigger('slid.bs.carousel') }, 0) }) .emulateTransitionEnd($active.css('transition-duration').slice(0, -1) * 1000) } else { $active.removeClass('active') $next.addClass('active') this.sliding = false this.$element.trigger('slid.bs.carousel') } isCycling && this.cycle() return this } // CAROUSEL PLUGIN DEFINITION // ========================== var old = $.fn.carousel $.fn.carousel = function (option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.carousel') var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option) var action = typeof option == 'string' ? 
option : options.slide if (!data) $this.data('bs.carousel', (data = new Carousel(this, options))) if (typeof option == 'number') data.to(option) else if (action) data[action]() else if (options.interval) data.pause().cycle() }) } $.fn.carousel.Constructor = Carousel // CAROUSEL NO CONFLICT // ==================== $.fn.carousel.noConflict = function () { $.fn.carousel = old return this } // CAROUSEL DATA-API // ================= $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) { var $this = $(this), href var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7 var options = $.extend({}, $target.data(), $this.data()) var slideIndex = $this.attr('data-slide-to') if (slideIndex) options.interval = false $target.carousel(options) if (slideIndex = $this.attr('data-slide-to')) { $target.data('bs.carousel').to(slideIndex) } e.preventDefault() }) $(window).on('load', function () { $('[data-ride="carousel"]').each(function () { var $carousel = $(this) $carousel.carousel($carousel.data()) }) }) }(jQuery); /* ======================================================================== * Bootstrap: collapse.js v3.1.0 * http://getbootstrap.com/javascript/#collapse * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // COLLAPSE PUBLIC CLASS DEFINITION // ================================ var Collapse = function (element, options) { this.$element = $(element) this.options = $.extend({}, Collapse.DEFAULTS, options) this.transitioning = null if (this.options.parent) this.$parent = $(this.options.parent) if (this.options.toggle) this.toggle() } Collapse.DEFAULTS = { toggle: true } Collapse.prototype.dimension = function () { var hasWidth = this.$element.hasClass('width') return hasWidth ? 
'width' : 'height' } Collapse.prototype.show = function () { if (this.transitioning || this.$element.hasClass('in')) return var startEvent = $.Event('show.bs.collapse') this.$element.trigger(startEvent) if (startEvent.isDefaultPrevented()) return var actives = this.$parent && this.$parent.find('> .panel > .in') if (actives && actives.length) { var hasData = actives.data('bs.collapse') if (hasData && hasData.transitioning) return actives.collapse('hide') hasData || actives.data('bs.collapse', null) } var dimension = this.dimension() this.$element .removeClass('collapse') .addClass('collapsing') [dimension](0) this.transitioning = 1 var complete = function () { this.$element .removeClass('collapsing') .addClass('collapse in') [dimension]('auto') this.transitioning = 0 this.$element.trigger('shown.bs.collapse') } if (!$.support.transition) return complete.call(this) var scrollSize = $.camelCase(['scroll', dimension].join('-')) this.$element .one($.support.transition.end, $.proxy(complete, this)) .emulateTransitionEnd(350) [dimension](this.$element[0][scrollSize]) } Collapse.prototype.hide = function () { if (this.transitioning || !this.$element.hasClass('in')) return var startEvent = $.Event('hide.bs.collapse') this.$element.trigger(startEvent) if (startEvent.isDefaultPrevented()) return var dimension = this.dimension() this.$element [dimension](this.$element[dimension]()) [0].offsetHeight this.$element .addClass('collapsing') .removeClass('collapse') .removeClass('in') this.transitioning = 1 var complete = function () { this.transitioning = 0 this.$element .trigger('hidden.bs.collapse') .removeClass('collapsing') .addClass('collapse') } if (!$.support.transition) return complete.call(this) this.$element [dimension](0) .one($.support.transition.end, $.proxy(complete, this)) .emulateTransitionEnd(350) } Collapse.prototype.toggle = function () { this[this.$element.hasClass('in') ? 'hide' : 'show']() } // COLLAPSE PLUGIN DEFINITION // ========================== var old = $.fn.collapse $.fn.collapse = function (option) { return this.each(function () { var $this = $(this) var data = $this.data('bs.collapse') var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option) if (!data && options.toggle && option == 'show') option = !option if (!data) $this.data('bs.collapse', (data = new Collapse(this, options))) if (typeof option == 'string') data[option]() }) } $.fn.collapse.Constructor = Collapse // COLLAPSE NO CONFLICT // ==================== $.fn.collapse.noConflict = function () { $.fn.collapse = old return this } // COLLAPSE DATA-API // ================= $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) { var $this = $(this), href var target = $this.attr('data-target') || e.preventDefault() || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7 var $target = $(target) var data = $target.data('bs.collapse') var option = data ? 'toggle' : $this.data() var parent = $this.attr('data-parent') var $parent = parent && $(parent) if (!data || !data.transitioning) { if ($parent) $parent.find('[data-toggle=collapse][data-parent="' + parent + '"]').not($this).addClass('collapsed') $this[$target.hasClass('in') ? 
'addClass' : 'removeClass']('collapsed') } $target.collapse(option) }) }(jQuery); /* ======================================================================== * Bootstrap: dropdown.js v3.1.0 * http://getbootstrap.com/javascript/#dropdowns * ======================================================================== * Copyright 2011-2014 Twitter, Inc. * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * ======================================================================== */ +function ($) { 'use strict'; // DROPDOWN CLASS DEFINITION // ========================= var backdrop = '.dropdown-backdrop' var toggle = '[data-toggle=dropdown]' var Dropdown = function (element) { $(element).on('click.bs.dropdown', this.toggle) } Dropdown.prototype.toggle = function (e) { var $this = $(this) if ($this.is('.disabled, :disabled')) return var $parent = getParent($this) var isActive = $parent.hasClass('open') clearMenus() if (!isActive) { if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) { // if mobile we use a backdrop because click events don't delegate $('