Repository: FleekHQ/space-daemon Branch: master Commit: 646538d0b2db Files: 224 Total size: 1.5 MB Directory structure: gitextract_hiosi7jg/ ├── .github/ │ └── workflows/ │ ├── release.yml │ └── test.yml ├── .gitignore ├── .goreleaser.yml ├── .vscode/ │ └── launch.json ├── LICENSE ├── Makefile ├── README.md ├── app/ │ └── app.go ├── ci/ │ ├── add-osx-cert.sh │ └── gon.hcl ├── cmd/ │ └── space-daemon/ │ └── main.go ├── config/ │ ├── config.go │ ├── json_config.go │ └── map_config.go ├── core/ │ ├── backup/ │ │ └── backup.go │ ├── component.go │ ├── env/ │ │ ├── env.go │ │ └── file_env.go │ ├── events/ │ │ └── events.go │ ├── fsds/ │ │ ├── config.go │ │ ├── data_source.go │ │ ├── dir_entry.go │ │ ├── files_ds.go │ │ ├── read_write_wrapper.go │ │ ├── shared_with_me_ds.go │ │ ├── spacefs.go │ │ └── utils.go │ ├── ipfs/ │ │ ├── dag.go │ │ ├── ipfs.go │ │ ├── node/ │ │ │ └── node.go │ │ ├── utils.go │ │ └── utils_test.go │ ├── keychain/ │ │ ├── app_token.go │ │ ├── keychain.go │ │ ├── keyring/ │ │ │ └── keyring.go │ │ ├── mnemonic.go │ │ └── test/ │ │ └── keychain_test.go │ ├── libfuse/ │ │ ├── block_size.go │ │ ├── directory.go │ │ ├── files.go │ │ └── vfs.go │ ├── permissions/ │ │ ├── app_token.go │ │ └── app_token_test.go │ ├── search/ │ │ ├── bleve/ │ │ │ ├── analyzer.go │ │ │ ├── bleve.go │ │ │ ├── bleve_test.go │ │ │ └── options.go │ │ ├── engines.go │ │ ├── model.go │ │ └── sqlite/ │ │ ├── model.go │ │ ├── options.go │ │ ├── sqlite.go │ │ └── sqlite_test.go │ ├── space/ │ │ ├── domain/ │ │ │ └── domain.go │ │ ├── fuse/ │ │ │ ├── controller.go │ │ │ ├── fs.go │ │ │ ├── installer/ │ │ │ │ ├── installer_darwin.go │ │ │ │ ├── installer_darwin_test.go │ │ │ │ ├── installer_linux.go │ │ │ │ ├── installer_windows.go │ │ │ │ └── interface.go │ │ │ ├── mount.go │ │ │ ├── mount_windows.go │ │ │ ├── state.go │ │ │ └── state_test.go │ │ ├── services/ │ │ │ ├── fs_utils.go │ │ │ ├── services.go │ │ │ ├── services_app_token.go │ │ │ ├── services_central_server.go │ │ │ ├── 
services_fs.go │ │ │ ├── services_identity.go │ │ │ ├── services_keypair.go │ │ │ ├── services_notifs.go │ │ │ ├── services_search.go │ │ │ ├── services_sharing.go │ │ │ ├── services_vault.go │ │ │ └── sharing_utils.go │ │ ├── space.go │ │ └── space_test.go │ ├── spacefs/ │ │ ├── fs.go │ │ ├── fs_test.go │ │ └── interfaces.go │ ├── store/ │ │ └── store.go │ ├── sync/ │ │ ├── fs.go │ │ ├── notifier_default.go │ │ ├── sync.go │ │ ├── textile.go │ │ └── textile_test.go │ ├── textile/ │ │ ├── README.md │ │ ├── account.go │ │ ├── buckd.go │ │ ├── bucket/ │ │ │ ├── bucket.go │ │ │ ├── bucket_dir.go │ │ │ ├── bucket_file.go │ │ │ └── crypto/ │ │ │ ├── crypto.go │ │ │ ├── crypto_test.go │ │ │ ├── decrypter.go │ │ │ └── encrypter.go │ │ ├── bucket_factory.go │ │ ├── client.go │ │ ├── common/ │ │ │ └── common.go │ │ ├── event_handler.go │ │ ├── hub/ │ │ │ ├── hmacTestKey │ │ │ ├── hub_auth.go │ │ │ └── hub_auth_test.go │ │ ├── listener.go │ │ ├── mailbox.go │ │ ├── mailbox_test.go │ │ ├── mirror.go │ │ ├── model/ │ │ │ ├── buckets.go │ │ │ ├── mirror_file.go │ │ │ ├── model.go │ │ │ ├── received_file.go │ │ │ ├── received_file_test.go │ │ │ ├── search.go │ │ │ ├── sent_file.go │ │ │ └── shared_public_key.go │ │ ├── notifier/ │ │ │ └── notifier.go │ │ ├── public.go │ │ ├── search.go │ │ ├── secure_bucket_client.go │ │ ├── sharing.go │ │ ├── sync/ │ │ │ ├── mirror.go │ │ │ ├── pinning.go │ │ │ ├── queue.go │ │ │ ├── restore.go │ │ │ ├── sync.go │ │ │ ├── sync_test.go │ │ │ ├── synchronizer.go │ │ │ ├── task-executors.go │ │ │ ├── task.go │ │ │ └── threads.go │ │ ├── textile.go │ │ └── utils/ │ │ ├── utils.go │ │ └── utils_test.go │ ├── util/ │ │ ├── address/ │ │ │ ├── PROTOCOL.md │ │ │ └── address.go │ │ ├── paths.go │ │ └── rlimit/ │ │ ├── rlimit_unix.go │ │ └── rlimit_windows.go │ ├── vault/ │ │ ├── vault.go │ │ └── vault_test.go │ └── watcher/ │ ├── blacklist.go │ ├── blacklist_windows.go │ ├── handler.go │ ├── options.go │ ├── watcher.go │ └── watcher_test.go ├── coverage/ 
│ └── .gitkeep ├── devtools/ │ └── googleapis/ │ ├── LICENSE │ ├── README.grpc-gateway │ └── google/ │ ├── api/ │ │ ├── annotations.proto │ │ ├── http.proto │ │ └── httpbody.proto │ └── rpc/ │ ├── code.proto │ ├── error_details.proto │ └── status.proto ├── docs/ │ ├── crypto/ │ │ └── vault.md │ └── sharing/ │ └── types-of-sharing.md ├── examples/ │ ├── ipfsLite/ │ │ └── ipfsLite.go │ └── textileBucketsClient/ │ ├── README.md │ ├── bucket-sync/ │ │ └── bucket-sync.go │ ├── buckets.go │ ├── create-thread-with-key/ │ │ └── create-thread-with-key.go │ ├── join-thread/ │ │ └── join-thread.go │ ├── local-buck/ │ │ └── local-buck.go │ ├── open-share-file/ │ │ └── open-share-file.go │ ├── set-envs │ └── sync-test/ │ └── sync-test.go ├── go.mod ├── go.sum ├── grpc/ │ ├── auth/ │ │ ├── app_token_auth/ │ │ │ ├── app_token_auth.go │ │ │ └── auth_from_md.go │ │ └── middleware/ │ │ └── grpc_auth.go │ ├── grpc.go │ ├── handlers.go │ ├── handlers_account.go │ ├── handlers_app_token.go │ ├── handlers_backup.go │ ├── handlers_central_services.go │ ├── handlers_fuse.go │ ├── handlers_key_pair.go │ ├── handlers_notif.go │ ├── handlers_search.go │ ├── handlers_sharing.go │ ├── handlers_textile.go │ ├── handlers_vault.go │ ├── pb/ │ │ ├── space.pb.go │ │ └── space.pb.gw.go │ └── proto/ │ └── space.proto ├── integration_tests/ │ ├── README.md │ ├── fixtures/ │ │ ├── app.go │ │ ├── client.go │ │ ├── configs.go │ │ └── directories.go │ ├── helpers/ │ │ ├── assertions.go │ │ ├── directories.go │ │ └── initialize.go │ ├── integration_tests_suite_test.go │ ├── sharing_test.go │ └── uploads_test.go ├── log/ │ └── logger.go ├── mocks/ │ ├── Bucket.go │ ├── Client.go │ ├── FilesSearchEngine.go │ ├── HubAuth.go │ ├── Keychain.go │ ├── Keyring.go │ ├── Mailbox.go │ ├── Model.go │ ├── Store.go │ ├── Syncer.go │ ├── Vault.go │ ├── fuse/ │ │ ├── FSDataSource.go │ │ └── FuseInstaller.go │ ├── mock.go │ ├── mock_config.go │ ├── mock_env.go │ ├── mock_textile_handler.go │ └── 
mock_textile_users_client.go ├── scripts/ │ └── windows.bat ├── swagger/ │ └── ui/ │ └── space.swagger.json └── tracing/ └── tracing.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/release.yml ================================================ name: Release with goreleaser on: push: tags: - v*.*.* jobs: build: runs-on: macos-latest name: goreleaser steps: - uses: actions/checkout@v2 - name: Unshallow Fetch run: git fetch --prune --unshallow - uses: actions/setup-go@v2 with: go-version: 1.14 - name: Add MacOS certs run: cp ./ci/add-osx-cert.sh /tmp/add-osx-cert.sh && chmod +x /tmp/add-osx-cert.sh && /tmp/add-osx-cert.sh env: CERTIFICATE_OSX_APPLICATION: ${{ secrets.CERTIFICATE_OSX_APPLICATION }} CERTIFICATE_PASSWORD: ${{ secrets.CERTIFICATE_PASSWORD }} - name: Install gon via HomeBrew for code signing and app notarization run: | brew tap mitchellh/gon brew install mitchellh/gon/gon - name: Set stage to prd for all run: | echo "STAGE=PRD" >> $GITHUB_ENV - name: Set stage to dev if tagged develop if: endsWith(github.ref, '-dev') run: | echo "STAGE=DEV" >> $GITHUB_ENV - name: Set secret names id: secretnames run: | echo $STAGE echo "::set-output name=SERVICES_API_URL::SERVICES_API_URL_${STAGE}" echo "::set-output name=VAULT_API_URL::VAULT_API_URL_${STAGE}" echo "::set-output name=VAULT_SALT_SECRET::VAULT_SALT_SECRET_${STAGE}" echo "::set-output name=SERVICES_HUB_AUTH_URL::SERVICES_HUB_AUTH_URL_${STAGE}" echo "::set-output name=TXL_HUB_TARGET::TXL_HUB_TARGET_${STAGE}" echo "::set-output name=TXL_HUB_MA::TXL_HUB_MA_${STAGE}" echo "::set-output name=TXL_THREADS_TARGET::TXL_THREADS_TARGET_${STAGE}" echo "::set-output name=TXL_HUB_GATEWAY_URL::TXL_HUB_GATEWAY_URL_${STAGE}" echo "::set-output name=TXL_USER_KEY::TXL_USER_KEY_${STAGE}" echo "::set-output name=TXL_USER_SECRET::TXL_USER_SECRET_${STAGE}" echo "::set-output 
name=SPACE_STORAGE_SITE_URL::SPACE_STORAGE_SITE_URL_${STAGE}" - name: Release via goreleaser uses: goreleaser/goreleaser-action@master with: args: release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} IPFS_ADDR: ${{ secrets.IPFS_ADDR }} IPFS_NODE_ADDR: ${{ secrets.IPFS_NODE_ADDR }} IPFS_NODE_PATH: ${{ secrets.IPFS_NODE_PATH }} SERVICES_API_URL: ${{ secrets[steps.secretnames.outputs.SERVICES_API_URL] }} VAULT_API_URL: ${{ secrets[steps.secretnames.outputs.VAULT_API_URL] }} VAULT_SALT_SECRET: ${{ secrets[steps.secretnames.outputs.VAULT_SALT_SECRET] }} SERVICES_HUB_AUTH_URL: ${{ secrets[steps.secretnames.outputs.SERVICES_HUB_AUTH_URL] }} TXL_HUB_TARGET: ${{ secrets[steps.secretnames.outputs.TXL_HUB_TARGET] }} TXL_HUB_MA: ${{ secrets[steps.secretnames.outputs.TXL_HUB_MA] }} TXL_THREADS_TARGET: ${{ secrets[steps.secretnames.outputs.TXL_THREADS_TARGET] }} TXL_HUB_GATEWAY_URL: ${{ secrets[steps.secretnames.outputs.TXL_HUB_GATEWAY_URL] }} TXL_USER_KEY: ${{ secrets[steps.secretnames.outputs.TXL_USER_KEY] }} TXL_USER_SECRET: ${{ secrets[steps.secretnames.outputs.TXL_USER_SECRET] }} SPACE_STORAGE_SITE_URL: ${{ secrets[steps.secretnames.outputs.SPACE_STORAGE_SITE_URL] }} ================================================ FILE: .github/workflows/test.yml ================================================ #on: [push, pull_request] on: [pull_request] name: Test jobs: unit-test: strategy: matrix: go-version: [1.14.x] platform: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.platform }} steps: - name: Install Go uses: actions/setup-go@v2 with: go-version: ${{ matrix.go-version }} - name: Checkout code uses: actions/checkout@v2 - name: Unit Test if: ${{ matrix.platform != 'windows-latest' }} run: go test -race -coverprofile=coverage/unitcoverage.out $(go list ./... | grep -v integration_tests) - name: Unit Test (Win) if: ${{ matrix.platform == 'windows-latest' }} # skipping coverage collection on windows run: go test -race $(go list ./... 
| grep -v integration_tests) - name: Coveralls if: ${{ matrix.platform != 'windows-latest' }} uses: shogo82148/actions-goveralls@v1 with: flag-name: unit-test-${{ matrix.platform }} path-to-profile: coverage/unitcoverage.out parallel: true integration-test: strategy: matrix: go-version: [ 1.14.x ] # platform: [ ubuntu-latest, macos-latest, windows-latest ] platform: [ macos-latest ] runs-on: ${{ matrix.platform }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SERVICES_API_URL: ${{ secrets.SERVICES_API_URL_DEV }} VAULT_API_URL: ${{ secrets.VAULT_API_URL_DEV }} VAULT_SALT_SECRET: ${{ secrets.VAULT_SALT_SECRET_DEV }} SERVICES_HUB_AUTH_URL: ${{ secrets.SERVICES_HUB_AUTH_URL_DEV }} TXL_HUB_TARGET: ${{ secrets.TXL_HUB_TARGET_DEV }} TXL_HUB_MA: ${{ secrets.TXL_HUB_MA_DEV }} TXL_THREADS_TARGET: ${{ secrets.TXL_THREADS_TARGET_DEV }} TXL_HUB_GATEWAY_URL: ${{ secrets.TXL_HUB_GATEWAY_URL_DEV }} TXL_USER_KEY: ${{ secrets.TXL_USER_KEY_DEV }} TXL_USER_SECRET: ${{ secrets.TXL_USER_SECRET_DEV }} SPACE_STORAGE_SITE_URL: ${{ secrets.SPACE_STORAGE_SITE_URL_DEV }} steps: - name: Install Go uses: actions/setup-go@v2 with: go-version: ${{ matrix.go-version }} - name: Install gnome-keyring (Ubuntu) if: ${{ matrix.platform == 'ubuntu-latest' }} run: sudo apt-get install pass gnome-keyring dbus-x11 - name: Verify gnome-keyring is installed (Ubuntu) if: ${{ matrix.platform == 'ubuntu-latest' }} run: gnome-keyring-daemon -V - name: Checkout code uses: actions/checkout@v2 - name: Integration Test if: ${{ matrix.platform != 'windows-latest' }} run: go test -v -timeout 60m -coverprofile=coverage/integrationcoverage.out ./integration_tests/... - name: Integration Test (Win) if: ${{ matrix.platform == 'windows-latest' }} # skipping coverage collection on windows run: go test -v -timeout 60m ./integration_tests/... 
- name: Coveralls if: ${{ matrix.platform != 'windows-latest' }} uses: shogo82148/actions-goveralls@v1 with: flag-name: integration-test-${{ matrix.platform }} path-to-profile: coverage/integrationcoverage.out parallel: true submit-coverage: needs: [unit-test, integration-test] runs-on: ubuntu-latest steps: - name: Coveralls Finished uses: shogo82148/actions-goveralls@v1 with: parallel-finished: true ================================================ FILE: .gitignore ================================================ # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib # Test binary, built with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out coverage/* !coverage/.gitkeep # Dependency directories (remove the comment below to include it) # vendor/ .idea/ debug/ .DS_Store .env space.json bin debug/ dist/ main devtools/grpc-ecosystem __debug_bin ================================================ FILE: .goreleaser.yml ================================================ # Make sure to check the documentation at http://goreleaser.com before: hooks: # You may remove this if you don't use go modules. - go mod download # you may remove this if you don't need go generate # - go generate ./... 
project_name: space builds: - id: space # env: # - CGO_ENABLED=0 ldflags: - -s -w -X main.spaceapi={{ .Env.SERVICES_API_URL }} - -X main.vaultapi={{ .Env.VAULT_API_URL }} - -X main.vaultsaltsecret={{ .Env.VAULT_SALT_SECRET }} - -X main.spacehubauth={{ .Env.SERVICES_HUB_AUTH_URL }} - -X main.textilehub={{ .Env.TXL_HUB_TARGET }} - -X main.textilehubma={{ .Env.TXL_HUB_MA }} - -X main.textilethreads={{ .Env.TXL_THREADS_TARGET }} - -X main.textilehubgatewayurl={{ .Env.TXL_HUB_GATEWAY_URL }} - -X main.textileuserkey={{ .Env.TXL_USER_KEY }} - -X main.textileusersecret={{ .Env.TXL_USER_SECRET }} - -X main.spacestoragesiteurl={{ .Env.SPACE_STORAGE_SITE_URL }} - -X main.ipfsaddr={{ .Env.IPFS_ADDR }} - -X main.ipfsnodeaddr={{ .Env.IPFS_NODE_ADDR }} - -X main.ipfsnodepath={{ .Env.IPFS_NODE_PATH }} main: ./cmd/space-daemon/main.go binary: space goos: - linux - id: space-darwin # env: # - CGO_ENABLED=0 ldflags: - -s -w -X main.spaceapi={{ .Env.SERVICES_API_URL }} - -X main.vaultapi={{ .Env.VAULT_API_URL }} - -X main.vaultsaltsecret={{ .Env.VAULT_SALT_SECRET }} - -X main.spacehubauth={{ .Env.SERVICES_HUB_AUTH_URL }} - -X main.textilehub={{ .Env.TXL_HUB_TARGET }} - -X main.textilehubma={{ .Env.TXL_HUB_MA }} - -X main.textilethreads={{ .Env.TXL_THREADS_TARGET }} - -X main.textilehubgatewayurl={{ .Env.TXL_HUB_GATEWAY_URL }} - -X main.textileuserkey={{ .Env.TXL_USER_KEY }} - -X main.textileusersecret={{ .Env.TXL_USER_SECRET }} - -X main.spacestoragesiteurl={{ .Env.SPACE_STORAGE_SITE_URL }} - -X main.ipfsaddr={{ .Env.IPFS_ADDR }} - -X main.ipfsnodeaddr={{ .Env.IPFS_NODE_ADDR }} - -X main.ipfsnodepath={{ .Env.IPFS_NODE_PATH }} main: ./cmd/space-daemon/main.go binary: space goos: - darwin # hooks: # post: gon -log-level debug ci/gon.hcl - id: space-win # env: # - CGO_ENABLED=1 ldflags: - -s -w -X main.spaceapi={{ .Env.SERVICES_API_URL }} - -X main.vaultapi={{ .Env.VAULT_API_URL }} - -X main.vaultsaltsecret={{ .Env.VAULT_SALT_SECRET }} - -X main.spacehubauth={{ .Env.SERVICES_HUB_AUTH_URL 
}} - -X main.textilehub={{ .Env.TXL_HUB_TARGET }} - -X main.textilehubma={{ .Env.TXL_HUB_MA }} - -X main.textilethreads={{ .Env.TXL_THREADS_TARGET }} - -X main.textilehubgatewayurl={{ .Env.TXL_HUB_GATEWAY_URL }} - -X main.textileuserkey={{ .Env.TXL_USER_KEY }} - -X main.textileusersecret={{ .Env.TXL_USER_SECRET }} - -X main.spacestoragesiteurl={{ .Env.SPACE_STORAGE_SITE_URL }} - -X main.ipfsaddr={{ .Env.IPFS_ADDR }} - -X main.ipfsnodeaddr={{ .Env.IPFS_NODE_ADDR }} - -X main.ipfsnodepath={{ .Env.IPFS_NODE_PATH }} main: ./cmd/space-daemon/main.go binary: space goos: - windows # ignore: # - goos: windows # goarch: 386 archives: - name_template: '{{ .Binary }}_{{ .Os }}_{{ .Arch }}' format: binary files: - LICENSE* - README* - CHANGELOG* - dist/space-macos-i386.dmg - dist/space-macos-x86_64.dmg replacements: darwin: Darwin linux: Linux windows: Windows 386: i386 amd64: x86_64 checksum: name_template: 'checksums.txt' snapshot: name_template: "{{ .Tag }}-next" changelog: sort: asc filters: exclude: - '^docs:' - '^test:' ================================================ FILE: .vscode/launch.json ================================================ { // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ { "name": "Launch", "type": "go", "request": "launch", "mode": "auto", "program": "${workspaceFolder}/cmd/space-daemon", "envFile": "${workspaceFolder}/.env", "args": ["-dev=true"] } ] } ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS Copyright 2020 FleekHQ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ build: go build \ -o bin/space \ -ldflags \ "-X 'main.ipfsaddr=${IPFS_ADDR}' \ -X 'main.ipfsnodeaddr=${IPFS_NODE_ADDR}' \ -X 'main.ipfsnodepath=${IPFS_NODE_PATH}' \ -X 'main.spaceapi=${SERVICES_API_URL}' \ -X 'main.spacestoragesiteurl=${SPACE_STORAGE_SITE_URL}' \ -X 'main.vaultapi=${VAULT_API_URL}' \ -X 'main.vaultsaltsecret=${VAULT_SALT_SECRET}' \ -X 'main.spacehubauth=${SERVICES_HUB_AUTH_URL}' \ -X 'main.textilehub=${TXL_HUB_TARGET}' \ -X 'main.textilehubma=${TXL_HUB_MA}' \ -X 'main.textilethreads=${TXL_THREADS_TARGET}' \ -X 'main.textilehubgatewayurl=${TXL_HUB_GATEWAY_URL}' \ -X 'main.textileuserkey=${TXL_USER_KEY}' \ -X 'main.textileusersecret=${TXL_USER_SECRET}'" \ cmd/space-daemon/main.go test: go test $$(go list ./... | grep -v integration_tests) test_coverage: go test -coverprofile=coverage/unitcoverage.out $$(go list ./... | grep -v integration_tests) integration_test: go test -v -p 1 ./integration_tests/... integration_test_coverage: go test -v -p 1 -coverprofile=coverage/integrationcoverage.out ./integration_tests/... 
proto_gen: protoc -I grpc/pb/ -I grpc/proto/ -I./devtools/googleapis grpc/proto/space.proto --go_out=plugins=grpc:grpc/pb gen_rest: protoc -I grpc/pb/ -I grpc/proto/ -I./devtools/googleapis grpc/proto/space.proto --go_out=plugins=grpc:grpc/pb --grpc-gateway_out=logtostderr=true:grpc/pb gen_all: proto_gen gen_rest ## runs jaeger tracing server, should be used when trace is enabled on daemon jaegar: docker run \ --rm \ --name jaeger \ -p 6831:6831/udp \ -p 16686:16686 \ jaegertracing/all-in-one:latest ================================================ FILE: README.md ================================================ # Space Daemon Space Daemon is a wrapper built in Go around awesome IPFS tools so that you can have start coding a decentralized desktop app as fast as possible. It's built on top of Textile Threads and Buckets. Out of the box it includes: - A running local instance of [Textile Threads](https://github.com/textileio/go-threads). - Interfaces to create local private, encrypted buckets. - Interfaces for sharing those buckets and the files within. - Identity service so that sharing can be done through usernames or emails. - FUSE for drive mounting, so that the files can be explored natively in your OS. - Key management. Note: This project is in active development, so it might change its API until it reaches a stable version. ## Installation By default, Space Daemon connects to hosted services provided by Fleek. This should be good if you just want to get it running quickly. However, if you want to connect to your own services, read the [Modules Section](https://github.com/FleekHQ/space-daemon#Modules). ### Downloading the binary Check out the releases [here](https://github.com/FleekHQ/space-daemon/releases). You can download the latest version for your OS and you should be good to go. If you want to run Space Daemon by source, check out [this section](https://github.com/FleekHQ/space-daemon#Running) ## Usage Space Daemon provides a gRPC interface. 
You can read its proto schema [here](https://github.com/FleekHQ/space-daemon/blob/master/grpc/proto/space.proto). It contains methods to: - Create files and directories - List files and directories - Create buckets - Share buckets - Create identities You can also use the JavaScript client here [https://github.com/FleekHQ/space-client](https://github.com/FleekHQ/space-client) This can be useful if, for example, you are building a web app that needs to interact with a user's locally running Space Daemon. ## Modules Space Daemon requires a few modules to run successfully. If you downloaded the binary, you don't have to worry about this since it will be connecting to our services. It's good to understand what's happening behind the scenes though. ### IPFS Node All encrypted files are stored in an IPFS node. For convenience, Space Daemon runs an embedded node within the daemon that can be configured as well as the option to specify an external node to connect to. If you have your own node outside of the daemon, then set the flag `-ipfsnode` to `false`. This will not spin up an embedded node. You can then connect to your external node by providing the `-ipfsaddr` flag (e.g. `-ipfsaddr=/ip4/127.0.0.1/tcp/5001`). In the case you are running the embedded IPFS node, you can further configure the listen address and data directory by setting these flags respectively: `-ipfsnodeaddr` and `-ipfsnodepath`. ### Textile Hub Required for sharing files between users and backing it up. It stores all backed up files encrypted using a set of keys so that only you, and people you share files with, can read the data. We host our own instance of the Textile Hub, and by default, Space Daemon will connect to it. It can be customized by providing the `-textilehub` flag and `-textilethreads` flag. If you want to host your own Textile Hub node, you can [read its documentation here](https://github.com/textileio/textile) ### Space Services We provide hosted alternatives for these services. 
You can deploy your own by following the instructions in its repo: [https://github.com/fleekHQ/space-services](https://github.com/fleekHQ/space-services) #### Identity These are centralized services that are optional, but offer additional convenience. Used mainly for identity. By using these services, you can allow users to claim usernames, so that Space Daemon can know the public key of a given username and in that way share files via username without having to input public keys directly. #### Authentication Our hosted Textile Hub requires authentication via public key for logging in. This service sends a challenge to Space Daemon, which signs the challenge with the private key of the user and in that way our hosted Textile Hub can allow the user to store data. ## Running from source After cloning this repo, you can run it from source by running `go run ./cmd/space-daemon -dev`. Consider that you will need the following environment variables exported in your system: ``` IPFS_ADDR=[Your IPFS node address] SERVICES_API_URL=[The URL where Space Services API is located] VAULT_API_URL=[The URL where Space Vault API is located] VAULT_SALT_SECRET=[A random string used for kdf functions before storing keys to the vault] SERVICES_HUB_AUTH_URL=[The URL where Space Services Textile Hub Authorizer is located] TXL_HUB_TARGET=[The URL of the Textile Hub] TXL_HUB_MA=[The multiaddress for the Textile hub] TXL_THREADS_TARGET=[The URL of the Textile Hub where Threads are hosted, can be the same as TXL_HUB_TARGET] # NOTE: the following are required temporarily and will be removed once hub auth wrapper is setup TXL_USER_KEY=[Space level key for hub access] TXL_USER_SECRET=[Space level secret for hub access] ``` Alternatively, you can run `make` to compile the binary. Make sure you have these environment variables exposed though. You can see some example environment variables in `.env.example`. ## Contributing We are happy to receive issues and review pull requests. 
Please make sure to write tests for the code you are introducing and make sure it doesn't break already passing tests. Read the following sections for an introduction into the code. ### Package Structure Loosely based on these resources: https://github.com/golang-standards/project-layout - `/grpc` Folder structure for gRPC and REST API. - `/cmd` Entry point directory for all binaries this repo handles. E.g cmd/{binary-name}/main.go - `/config` Global Config code - `/core` Directory for the core objects of the package - `/logger` Directory for app logging - `/examples` Directory playground for general examples and drafts ### Main classes - `ipfs`: contains utils for general IPFS operations. - `keychain`: manages user public/private key pair. - `libfuse`: interoperates with FUSE for mounting drives. - `space`: contains the main integration from the services to the final Textile or FS operations. - `store`: contains a wrapper around a local db. - `sync`: keeps track of open files so that the updates get pushed to IPFS - `textile`: wrapper around Textile booting and operations ### Generating Mocks Mocks are generated using https://github.com/vektra/mockery. For Linux it needs to be built from source. `mockery --name InterfaceToMock --dir path/to/go/files` ### Protobuf If you update the gRPC API, you need to regenerate the Protobuf file. 
You will need to install the following binaries in your Go path: - `go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway` Checking the binaries: `ls $GOPATH/bin` Should show the following binaries in your path: protoc-gen-go, protoc-gen-grpc-gateway Run the protobuf generation: `make proto_gen` Run the REST proxy generation: `make gen_rest` ** Ideally you should run `make gen_all` before committing as this would run all the above three code generations and ensure everything is up to date ** NOTE: See here for instructions on Reverse Proxy: https://github.com/grpc-ecosystem/grpc-gateway ### Debugging, Profiling and Tracing The following flags can be run with the binary to output profiling files for debugging. Flags support a full path to a file. `-cpuprofile cpu.prof -memprofile mem.prof` By default, the binary runs in debug mode (this may change after release) and it boots a pprof server in localhost:6060. See docs on how to interact with the pprof server here: https://github.com/google/pprof/blob/master/doc/README.md To disable debug mode add this flag to binary arguments `-debug=false` To enable trace in the daemon, pass `-trace` to the binary arguments. The daemon uses [jaeger](https://www.jaegertracing.io/) for collecting trace information. Run `make jaegar` to quickly start a jaeger agent that collects the daemon's trace information. You can visit `http://localhost:16686/` to explore the web ui for traces collected. ### CI Secrets Secrets are set by adding them in Github and then specifying them in `release.yml`. Secrets can be constant across environment/stages or be stage specific. If specified, the release file will dynamically generate the secret name based on the stage by adding a `_DEV` or `_PRD` suffix to the secret name only for the specified environment variable. It will always use `_PRD` unless the tag ends in `-dev`. So for example tag `v0.0.15` will use PRD values, while `v0.0.15-dev` will use DEV values. 
Stage specific secret names will only be used for secrets in `release.yml` that point to the step output instead of the secret name directly (i.e., `SERVICES_API_URL: ${{ secrets[steps.secretnames.outputs.SERVICES_API_URL] }}` instead of `SERVICES_API_URL: ${{ secrets.SERVICES_API_URL }}`. So to add a new secret: * If it's not stage specific then add the secret in GH with no suffix and in `release.yml`, refer to it based on the secret name. * If it is stage specific, then create the 2 secrets in GH (ending in `_PRD` and `_DEV`), add the entry in step `secretnames`, and make sure the secret name in the next step points to the step output ================================================ FILE: app/app.go ================================================ package app import ( "context" "fmt" "github.com/FleekHQ/space-daemon/core/space/fuse/installer" "github.com/FleekHQ/space-daemon/core/search/bleve" "github.com/pkg/errors" "github.com/FleekHQ/space-daemon/core" "github.com/FleekHQ/space-daemon/grpc" "github.com/FleekHQ/space-daemon/core/space/fuse" "github.com/FleekHQ/space-daemon/core/vault" "github.com/FleekHQ/space-daemon/core/fsds" "github.com/FleekHQ/space-daemon/core/spacefs" textile "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/core/env" "github.com/FleekHQ/space-daemon/core/space" node "github.com/FleekHQ/space-daemon/core/ipfs/node" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/sync" "github.com/FleekHQ/space-daemon/log" "golang.org/x/sync/errgroup" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/store" w "github.com/FleekHQ/space-daemon/core/watcher" "github.com/golang-collections/collections/stack" ) // Shutdown logic follows this example https://gist.github.com/akhenakh/38dbfea70dc36964e23acc19777f3869 type App struct { eg *errgroup.Group components *stack.Stack cfg config.Config env env.SpaceEnv 
IsRunning bool } type componentMap struct { name string component core.Component } func New(cfg config.Config, env env.SpaceEnv) *App { return &App{ components: stack.New(), cfg: cfg, env: env, IsRunning: false, } } // Start is the Entry point for the app. // All module components are initialized and managed here. // When a top level module that need to be shutdown on exit is initialized. It should be // added to the apps list of tracked components using the `Run()` function, but if the component has a blocking // start/run function it should be tracked with the `RunAsync()` function and call the blocking function in the // input function block. func (a *App) Start() error { var ctx context.Context a.eg, ctx = errgroup.WithContext(context.Background()) log.SetLogLevel(a.cfg.GetString(config.LogLevel, "debug")) // init appStore appStore := store.New( store.WithPath(a.cfg.GetString(config.SpaceStorePath, "")), ) if err := appStore.Open(); err != nil { return err } a.Run("Store", appStore) // Init keychain kc := keychain.New(keychain.WithPath(a.cfg.GetString(config.SpaceStorePath, "")), keychain.WithStore(appStore)) // Init Vault v := vault.New(a.cfg.GetString(config.SpaceVaultAPIURL, ""), a.cfg.GetString(config.SpaceVaultSaltSecret, "")) watcher, err := w.New() if err != nil { return err } a.Run("FolderWatcher", watcher) // setup local ipfs node if Ipfsnode is set if a.cfg.GetBool(config.Ipfsnode, true) { // setup local ipfs node node := node.NewIpsNode(a.cfg) err = a.RunAsync("IpfsNode", node, func() error { return node.Start(ctx) }) if err != nil { log.Error("error starting embedded IPFS node", err) return err } } else { log.Info("Skipping embedded IPFS node") } // setup local buckets buckd := textile.NewBuckd(a.cfg) err = a.RunAsync("BucketDaemon", buckd, func() error { return buckd.Start(ctx) }) if err != nil { return err } hubAuth := hub.New(appStore, kc, a.cfg) // setup files search engine searchEngine := 
bleve.NewSearchEngine(bleve.WithDBPath(a.cfg.GetString(config.SpaceStorePath, ""))) a.Run("FilesSearchEngine", searchEngine) // setup textile client uc := textile.CreateUserClient(a.cfg.GetString(config.TextileHubTarget, "")) textileClient := textile.NewClient(appStore, kc, hubAuth, uc, nil, searchEngine) err = a.RunAsync("TextileClient", textileClient, func() error { return textileClient.Start(ctx, a.cfg) }) if err != nil { return err } // watcher is started inside bucket sync bucketSync := sync.New(watcher, textileClient, appStore, nil) // setup the Space Service sv, svErr := space.NewService( appStore, textileClient, bucketSync, a.cfg, kc, v, hubAuth, space.WithEnv(a.env), ) if svErr != nil { return svErr } // setup FUSE FS Handler sfs := spacefs.New(fsds.NewSpaceFSDataSource( sv, fsds.WithFilesDataSources(sv), fsds.WithSharedWithMeDataSources(sv), )) fuseInstaller := installer.NewFuseInstaller() fuseController := fuse.NewController(ctx, a.cfg, appStore, sfs, fuseInstaller) if fuseController.ShouldMount() { log.Info("Mounting FUSE Drive") if err := fuseController.Mount(); err != nil { log.Error("Mounting FUSE drive failed", err) } else { log.Info("Mounting FUSE Drive successful") } } a.Run("FuseController", fuseController) // setup gRPC Server srv := grpc.New( sv, fuseController, kc, grpc.WithPort(a.cfg.GetInt(config.SpaceServerPort, 0)), grpc.WithProxyPort(a.cfg.GetInt(config.SpaceProxyServerPort, 0)), grpc.WithRestProxyPort(a.cfg.GetInt(config.SpaceRestProxyServerPort, 0)), ) textileClient.AttachMailboxNotifier(srv) textileClient.AttachSynchronizerNotifier(srv) // start the gRPC server err = a.RunAsync("gRPCServer", srv, func() error { return srv.Start(ctx) }) if err != nil { return err } err = a.RunAsync("BucketSync", bucketSync, func() error { bucketSync.RegisterNotifier(srv) return bucketSync.Start(ctx) }) if err != nil { return err } log.Info("Daemon ready") a.IsRunning = true return nil } // Run registers this component to be cleaned up on Shutdown func 
(a *App) Run(name string, component core.Component) { log.Debug("Starting Component", "name:"+name) a.components.Push(&componentMap{ name: name, component: component, }) } // RunAsync performs the same function as Run() but also accepts an function to be run // async to initialize the component. func (a *App) RunAsync(name string, component core.AsyncComponent, fn func() error) error { log.Debug("Starting Async Component", "name:"+name) if a.eg == nil { log.Warn("App.RunAsync() should be called after App.Start()") return nil } errc := make(chan error) a.eg.Go(func() error { err := fn() if err != nil { errc <- err } return err }) select { case err := <-errc: return err case <-component.WaitForReady(): a.components.Push(&componentMap{ name: name, component: component, }) } return nil } // Shutdown would perform a graceful shutdown of all components added through the // Run() or RunAsync() functions func (a *App) Shutdown() error { log.Info("Daemon shutdown started") if !a.IsRunning { return errors.New("app is not running") } for a.components.Len() > 0 { m, ok := a.components.Pop().(*componentMap) if ok { log.Debug("Shutting down Component", fmt.Sprintf("name:%s", m.name)) if err := m.component.Shutdown(); err != nil { log.Error(fmt.Sprintf("error shutting down %s", m.name), err) } } } err := a.eg.Wait() log.Info("Shutdown complete") a.IsRunning = false return err } ================================================ FILE: ci/add-osx-cert.sh ================================================ #!/usr/bin/env sh KEY_CHAIN=build.keychain CERTIFICATE_P12=certificate.p12 # Recreate the certificate from the secure environment variable echo $CERTIFICATE_OSX_APPLICATION | base64 --decode > $CERTIFICATE_P12 #create a keychain security create-keychain -p actions $KEY_CHAIN # Make the keychain the default so identities are found security default-keychain -s $KEY_CHAIN # Unlock the keychain security unlock-keychain -p actions $KEY_CHAIN security import $CERTIFICATE_P12 -k $KEY_CHAIN -P 
$CERTIFICATE_PASSWORD -T /usr/bin/codesign; security set-key-partition-list -S apple-tool:,apple: -s -k actions $KEY_CHAIN # remove certs rm -fr *.p12 ================================================ FILE: ci/gon.hcl ================================================ # The path follows a pattern # ./dist/BUILD-ID_TARGET/BINARY-NAME source = ["./dist/space-darwin_darwin_amd64/space","./dist/space-darwin_darwin_386/space"] bundle_id = "co.fleek.space" apple_id { username = "daniel@fleek.co" password = "@env:APPLE_DEVELOPER_DANIEL_PASSWORD" } sign { application_identity = "Mac Developer: Daniel Merrill (8257VLCFL7)" } ================================================ FILE: cmd/space-daemon/main.go ================================================ package main import ( "flag" "fmt" "net/http" _ "net/http/pprof" "os" "os/signal" "runtime" "runtime/pprof" "syscall" "github.com/FleekHQ/space-daemon/tracing" "github.com/opentracing/opentracing-go" "github.com/FleekHQ/space-daemon/log" "github.com/FleekHQ/space-daemon/app" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/env" "github.com/FleekHQ/space-daemon/core/util/rlimit" ) var ( cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") memprofile = flag.String("memprofile", "", "write memory profile to `file`") debugMode = flag.Bool("debug", true, "run daemon with debug mode for profiling") enableTracing = flag.Bool("trace", false, "run tracing on daemon rpc") devMode = flag.Bool("dev", false, "run daemon in dev mode to use .env file") ipfsnode = flag.Bool("ipfsnode", true, "run IPFS embedded into the daemon (defaults to true)") ipfsaddr string ipfsnodeaddr string ipfsnodepath string spaceapi string spacestoragesiteurl string vaultapi string vaultsaltsecret string spacehubauth string textilehub string textilehubma string textilethreads string textilehubgatewayurl string textileuserkey string textileusersecret string ) func main() { // this defer code here ensures all profile 
defer call work properly returnCode := 0 defer func() { os.Exit(returnCode) }() // flags flag.Parse() log.Debug("Running mode", fmt.Sprintf("DevMode:%v", *devMode)) cf := &config.Flags{ Ipfsaddr: ipfsaddr, Ipfsnode: *ipfsnode == true, Ipfsnodeaddr: ipfsnodeaddr, Ipfsnodepath: ipfsnodepath, ServicesAPIURL: spaceapi, SpaceStorageSiteUrl: spacestoragesiteurl, VaultAPIURL: vaultapi, VaultSaltSecret: vaultsaltsecret, ServicesHubAuthURL: spacehubauth, DevMode: *devMode == true, TextileHubTarget: textilehub, TextileHubMa: textilehubma, TextileThreadsTarget: textilethreads, TextileHubGatewayUrl: textilehubgatewayurl, TextileUserKey: textileuserkey, TextileUserSecret: textileusersecret, } // CPU profiling if *debugMode == true { log.Debug("Running daemon with profiler. Visit http://localhost:6060/debug/pprof") go func() { fmt.Println(http.ListenAndServe("localhost:6060", nil)) }() } // initialize tracing if *enableTracing { log.Debug("Enabling Tracing on the Daemon") tracer, closer := tracing.MustInit("space-daemon") defer closer.Close() opentracing.SetGlobalTracer(tracer) } if *cpuprofile != "" { cleanupCpuProfile := runCpuProfiler(*cpuprofile) defer cleanupCpuProfile() } // env env := env.New() // load configs cfg := config.NewMap(cf) rlimit.SetRLimit() spaceApp := app.New(cfg, env) err := spaceApp.Start() if err != nil { log.Error("Application startup failed", err) returnCode = 1 } // setup to detect interruption interrupt := make(chan os.Signal, 1) signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM) defer signal.Stop(interrupt) <-interrupt // wait for interrupt and then shutdown app err = spaceApp.Shutdown() if *memprofile != "" { cleanupMemProfile := runMemProfiler(*memprofile) defer cleanupMemProfile() } if err != nil { log.Error("Application shutdown failed", err) returnCode = 1 } } func runCpuProfiler(outputFilePath string) func() { f, err := os.Create(outputFilePath) if err != nil { log.Error("Could not create CPU profile", err) return func() {} } if err := 
pprof.StartCPUProfile(f); err != nil { log.Error("Could not start CPU profile", err) } // return cleanup function return func() { pprof.StopCPUProfile() if f != nil { _ = f.Close() // error is ignored } } } func runMemProfiler(outputFilePath string) func() { f, err := os.Create(outputFilePath) if err != nil { log.Error("could not create memory profile", err) return func() {} } runtime.GC() // get up-to-date statistics if err := pprof.WriteHeapProfile(f); err != nil { log.Error("could not write memory profile", err) } // return cleanup function return func() { if f != nil { _ = f.Close() } } } ================================================ FILE: config/config.go ================================================ package config import ( "errors" ) const ( JsonConfigFileName = "space.json" SpaceServerPort = "space/rpcPort" SpaceProxyServerPort = "space/rpcProxyPort" SpaceRestProxyServerPort = "space/restProxyPort" SpaceStorageSiteUrl = "space/storageSiteUrl" SpaceStorePath = "space/storePath" TextileHubTarget = "space/textileHubTarget" TextileHubMa = "space/textileHubMa" TextileThreadsTarget = "space/textileThreadsTarget" TextileHubGatewayUrl = "space/TextileHubGatewayUrl" TextileUserKey = "space/textileUserKey" TextileUserSecret = "space/textileUserSecret" MountFuseDrive = "space/mountFuseDrive" FuseMountPath = "space/fuseMountPath" FuseDriveName = "space/fuseDriveName" SpaceServicesAPIURL = "space/servicesApiUrl" SpaceVaultAPIURL = "space/vaultApiUrl" SpaceVaultSaltSecret = "space/vaultSaltSecret" SpaceServicesHubAuthURL = "space/servicesHubAuthUrl" Ipfsaddr = "space/ipfsAddr" Ipfsnode = "space/ipfsNode" Ipfsnodeaddr = "space/ipfsNodeAddr" Ipfsnodepath = "space/ipfsNodePath" MinThreadsConnection = "space/minThreadsConn" MaxThreadsConnection = "space/maxThreadsConn" BuckdPath = "space/BuckdPath" BuckdApiMaAddr = "space/BuckdApiMaAddr" BuckdApiProxyMaAddr = "space/BuckdApiProxyMaAddr" BuckdThreadsHostMaAddr = "Space/BuckdThreadsHostMaAddr" BuckdGatewayPort = 
"Space/BuckdGatewayPort" LogLevel = "Space/LogLevel" ) var ( ErrConfigNotLoaded = errors.New("config file was not loaded correctly or it does not exist") ) type Flags struct { Ipfsaddr string Ipfsnode bool Ipfsnodeaddr string Ipfsnodepath string DevMode bool ServicesAPIURL string SpaceStorageSiteUrl string VaultAPIURL string VaultSaltSecret string ServicesHubAuthURL string TextileHubTarget string TextileHubMa string TextileThreadsTarget string TextileHubGatewayUrl string TextileUserKey string TextileUserSecret string SpaceStorePath string RpcServerPort int RpcProxyServerPort int RestProxyServerPort int BuckdPath string BuckdApiMaAddr string BuckdApiProxyMaAddr string BuckdThreadsHostMaAddr string BuckdGatewayPort int LogLevel string } // Config used to fetch config information type Config interface { GetString(key string, defaultValue interface{}) string GetInt(key string, defaultValue interface{}) int GetBool(key string, defaultValue interface{}) bool } ================================================ FILE: config/json_config.go ================================================ package config import ( "encoding/json" "fmt" "io/ioutil" "os" "strings" "github.com/FleekHQ/space-daemon/core/env" "github.com/FleekHQ/space-daemon/log" "github.com/creamdog/gonfig" ) // standardConfig implements Config // It loads its config information from the space.json file type jsonConfig struct { cfg gonfig.Gonfig } type defaultSpaceJson struct { TextileHubTarget string `json:"textileHubTarget"` TextileThreadsTarget string `json:"textileThreadsTarget"` RPCPort int `json:"rpcPort"` StorePath string `json:"storePath"` } type defaultJson struct { Space defaultSpaceJson `json:"space"` } // Deprecated for the default values config func NewJson(env env.SpaceEnv) Config { wd := env.WorkingFolder() f, err := os.Open(wd + "/" + JsonConfigFileName) if err != nil { // TODO: this may turn into a fatal panic error log.Info("could not find space.json file in " + wd + ", using defaults") } defer 
f.Close() config, err := gonfig.FromJson(f) if err != nil { log.Info("could not read space.json file, using defaults") } c := jsonConfig{ cfg: config, } return c } // Gets the configuration value given a path in the json config file // defaults to empty value if non is found and just logs errors func (c jsonConfig) GetString(key string, defaultValue interface{}) string { if c.cfg == nil { return "" } v, err := c.cfg.GetString(key, defaultValue) if err != nil { log.Error(fmt.Sprintf("error getting key %s from config", key), err) return "" } log.Debug("Getting conf " + key + ": " + v) return v } // Gets the configuration value given a path in the json config file // defaults to empty value if non is found and just logs errors func (c jsonConfig) GetInt(key string, defaultValue interface{}) int { if c.cfg == nil { return 0 } v, err := c.cfg.GetInt(key, defaultValue) if err != nil { log.Error(fmt.Sprintf("error getting key %s from config", key), err) return 0 } return v } // Gets the configuration value given a path in the json config file // defaults to empty value if non is found and just logs errors func (c jsonConfig) GetBool(key string, defaultValue interface{}) bool { if c.cfg == nil { return false } v, err := c.cfg.GetBool(key, defaultValue) if err != nil { log.Error(fmt.Sprintf("error getting key %s from config", key), err) return false } return v } func CreateConfigJson() error { fmt.Println("Generating default config file") spaceJson := defaultSpaceJson{ TextileHubTarget: "textile-hub-dev.fleek.co:3006", TextileThreadsTarget: "textile-hub-dev.fleek.co:3006", RPCPort: 9999, StorePath: "~/.fleek-space", } finalJson := defaultJson{ Space: spaceJson, } currExecutablePath, err := os.Executable() if err != nil { return err } pathSegments := strings.Split(currExecutablePath, "/") wd := strings.Join(pathSegments[:len(pathSegments)-1], "/") jsonPath := wd + "/" + JsonConfigFileName marshalled, err := json.MarshalIndent(finalJson, "", " ") if err != nil { return err } 
err = ioutil.WriteFile(jsonPath, marshalled, 0644) if err != nil { return err } fmt.Println("Default config file generated") return nil } ================================================ FILE: config/map_config.go ================================================ package config import ( "os" "os/user" "path/filepath" "github.com/FleekHQ/space-daemon/core/env" ) type mapConfig struct { configStr map[string]string configInt map[string]int configBool map[string]bool } func NewMap(flags *Flags) Config { configStr := make(map[string]string) configInt := make(map[string]int) configBool := make(map[string]bool) usr, _ := user.Current() // default values configStr[LogLevel] = flags.LogLevel configStr[SpaceStorePath] = filepath.Join(usr.HomeDir, ".fleek-space") configStr[MountFuseDrive] = "false" configStr[FuseDriveName] = "Space" configInt[SpaceServerPort] = 9999 configInt[SpaceProxyServerPort] = 9998 configInt[SpaceRestProxyServerPort] = 9997 if flags.DevMode { configStr[Ipfsaddr] = os.Getenv(env.IpfsAddr) configStr[Ipfsnodeaddr] = os.Getenv(env.IpfsNodeAddr) configStr[Ipfsnodepath] = os.Getenv(env.IpfsNodePath) configStr[SpaceServicesAPIURL] = os.Getenv(env.ServicesAPIURL) configStr[SpaceVaultAPIURL] = os.Getenv(env.VaultAPIURL) configStr[SpaceVaultSaltSecret] = os.Getenv(env.VaultSaltSecret) configStr[SpaceServicesHubAuthURL] = os.Getenv(env.ServicesHubAuthURL) configStr[SpaceStorageSiteUrl] = os.Getenv(env.SpaceStorageSiteUrl) configStr[TextileHubTarget] = os.Getenv(env.TextileHubTarget) configStr[TextileHubMa] = os.Getenv(env.TextileHubMa) configStr[TextileThreadsTarget] = os.Getenv(env.TextileThreadsTarget) configStr[TextileHubGatewayUrl] = os.Getenv(env.TextileHubGatewayUrl) configStr[TextileUserKey] = os.Getenv(env.TextileUserKey) configStr[TextileUserSecret] = os.Getenv(env.TextileUserSecret) if os.Getenv(env.IpfsNode) != "false" { configBool[Ipfsnode] = true } } else { configStr[Ipfsaddr] = flags.Ipfsaddr configStr[Ipfsnodeaddr] = flags.Ipfsnodeaddr 
configStr[Ipfsnodepath] = flags.Ipfsnodepath configStr[SpaceServicesAPIURL] = flags.ServicesAPIURL configStr[SpaceVaultAPIURL] = flags.VaultAPIURL configStr[SpaceVaultSaltSecret] = flags.VaultSaltSecret configStr[SpaceServicesHubAuthURL] = flags.ServicesHubAuthURL if flags.SpaceStorageSiteUrl != "" { configStr[SpaceStorageSiteUrl] = flags.SpaceStorageSiteUrl } configStr[TextileHubTarget] = flags.TextileHubTarget configStr[TextileHubMa] = flags.TextileHubMa configStr[TextileThreadsTarget] = flags.TextileThreadsTarget configStr[TextileHubGatewayUrl] = flags.TextileHubGatewayUrl configStr[TextileUserKey] = flags.TextileUserKey configStr[TextileUserSecret] = flags.TextileUserSecret configBool[Ipfsnode] = flags.Ipfsnode if flags.SpaceStorePath != "" { configStr[SpaceStorePath] = flags.SpaceStorePath } if flags.RpcServerPort != 0 { configInt[SpaceServerPort] = flags.RpcServerPort } if flags.RpcProxyServerPort != 0 { configInt[SpaceProxyServerPort] = flags.RpcProxyServerPort } if flags.RestProxyServerPort != 0 { configInt[SpaceRestProxyServerPort] = flags.RestProxyServerPort } if flags.BuckdPath != "" { configStr[BuckdPath] = flags.BuckdPath } if flags.BuckdApiMaAddr != "" { configStr[BuckdApiMaAddr] = flags.BuckdApiMaAddr } if flags.BuckdApiProxyMaAddr != "" { configStr[BuckdApiProxyMaAddr] = flags.BuckdApiProxyMaAddr } if flags.BuckdThreadsHostMaAddr != "" { configStr[BuckdThreadsHostMaAddr] = flags.BuckdThreadsHostMaAddr } if flags.BuckdGatewayPort != 0 { configInt[BuckdGatewayPort] = flags.BuckdGatewayPort } } // Temp fix until we move to viper if configStr[Ipfsaddr] == "" { configStr[Ipfsaddr] = "/ip4/127.0.0.1/tcp/5001" } c := mapConfig{ configStr: configStr, configInt: configInt, configBool: configBool, } return c } func (m mapConfig) GetString(key string, defaultValue interface{}) string { if val, exists := m.configStr[key]; exists { return val } if stringValue, ok := defaultValue.(string); ok { return stringValue } return "" } func (m mapConfig) GetInt(key 
string, defaultValue interface{}) int { if val, exists := m.configInt[key]; exists { return val } if intVal, ok := defaultValue.(int); ok { return intVal } return 0 } func (m mapConfig) GetBool(key string, defaultValue interface{}) bool { if val, exists := m.configBool[key]; exists { return val } if boolVal, ok := defaultValue.(bool); ok { return boolVal } return false } ================================================ FILE: core/backup/backup.go ================================================ package backup import ( "crypto/aes" "crypto/cipher" "crypto/rand" "encoding/json" "errors" "io" "io/ioutil" ) type Backup struct { PrivateKey string `json:"privateKey"` } // Note: Using static key since the goal of this is to obfuscate the file, not to encrypt it var key = []byte{0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC} func obfuscate(data []byte) ([]byte, error) { block, err := aes.NewCipher(key[:]) if err != nil { return nil, err } gcm, err := cipher.NewGCM(block) if err != nil { return nil, err } nonce := make([]byte, gcm.NonceSize()) _, err = io.ReadFull(rand.Reader, nonce) if err != nil { return nil, err } return gcm.Seal(nonce, nonce, data, nil), nil } func deobfuscate(ciphertext []byte) ([]byte, error) { block, err := aes.NewCipher(key[:]) if err != nil { return nil, err } gcm, err := cipher.NewGCM(block) if err != nil { return nil, err } if len(ciphertext) < gcm.NonceSize() { return nil, errors.New("malformed ciphertext") } return gcm.Open(nil, ciphertext[:gcm.NonceSize()], ciphertext[gcm.NonceSize():], nil, ) } // Creates a backup file in the given path func MarshalBackup(path string, b *Backup) error { jsonData, err := json.Marshal(b) if err != nil { return err } obfuscatedBackup, err := obfuscate(jsonData) if err != nil { return err } err = ioutil.WriteFile(path, obfuscatedBackup, 0644) return err } // Reads a file in the given path and returns a Backup object func UnmarshalBackup(path string) (*Backup, 
error) { obfuscatedBackup, err := ioutil.ReadFile(path) if err != nil { return nil, err } jsonData, err := deobfuscate(obfuscatedBackup) if err != nil { return nil, err } var result Backup err = json.Unmarshal(jsonData, &result) if err != nil { return nil, err } return &result, nil } ================================================ FILE: core/component.go ================================================ package core // Component represents core application components. Modules should implement this interface to allow for proper // dependency checks and shutdown type Component interface { Shutdown() error } // AsyncComponent represents components that have some async initialization // and therefore must provide a ready channel to listen to type AsyncComponent interface { Component WaitForReady() chan bool } ================================================ FILE: core/env/env.go ================================================ package env import ( syslog "log" "os" "strings" ) const ( SpaceWorkingDir = "SPACE_APP_DIR" LogLevel = "LOG_LEVEL" IpfsAddr = "IPFS_ADDR" IpfsNode = "IPFS_NODE" IpfsNodeAddr = "IPFS_NODE_ADDR" IpfsNodePath = "IPFS_NODE_PATH" ServicesAPIURL = "SERVICES_API_URL" VaultAPIURL = "VAULT_API_URL" VaultSaltSecret = "VAULT_SALT_SECRET" ServicesHubAuthURL = "SERVICES_HUB_AUTH_URL" SpaceStorageSiteUrl = "SPACE_STORAGE_SITE_URL" TextileHubTarget = "TXL_HUB_TARGET" TextileHubMa = "TXL_HUB_MA" TextileThreadsTarget = "TXL_THREADS_TARGET" TextileHubGatewayUrl = "TXL_HUB_GATEWAY_URL" TextileUserKey = "TXL_USER_KEY" TextileUserSecret = "TXL_USER_SECRET" ) type SpaceEnv interface { CurrentFolder() (string, error) WorkingFolder() string LogLevel() string } type defaultEnv struct { } func (d defaultEnv) CurrentFolder() (string, error) { path, err := os.Executable() if err != nil { return "", err } pathSegments := strings.Split(path, "/") wd := strings.Join(pathSegments[:len(pathSegments)-1], "/") return wd, nil } func (d defaultEnv) WorkingFolder() string { cf, err 
:= d.CurrentFolder() if err != nil { syslog.Fatal("unable to get working folder", err) panic(err) } return cf } func (d defaultEnv) LogLevel() string { return "Info" } // TODO: use this one after figuring textile keys func NewDefault() SpaceEnv { return defaultEnv{} } ================================================ FILE: core/env/file_env.go ================================================ package env import ( syslog "log" "os" "strings" "github.com/joho/godotenv" ) type spaceEnv struct { } // Loads environment from .env file for dev mode func New() SpaceEnv { err := godotenv.Load() if err != nil { syslog.Println("Error loading .env file. Using defaults") } return spaceEnv{} } func (s spaceEnv) CurrentFolder() (string, error) { path, err := os.Executable() if err != nil { return "", err } pathSegments := strings.Split(path, "/") wd := strings.Join(pathSegments[:len(pathSegments)-1], "/") return wd, nil } func (s spaceEnv) WorkingFolder() string { var wd = os.Getenv(SpaceWorkingDir) // use default if wd == "" { cf, err := s.CurrentFolder() if err != nil { syslog.Fatal("unable to get working folder", err) panic(err) } wd = cf } return wd } func (s spaceEnv) LogLevel() string { var ll = os.Getenv(LogLevel) if ll == "" { return "Info" } return ll } ================================================ FILE: core/events/events.go ================================================ package events import "github.com/FleekHQ/space-daemon/core/space/domain" // These file defines events that daemon can propagate through all layers type FileEventType string const ( FileAdded FileEventType = "FileAdded" FileDeleted FileEventType = "FileDeleted" FileUpdated FileEventType = "FileUpdated" FileBackupInProgress FileEventType = "FileBackupInProgress" FileBackupReady FileEventType = "FileBackupReady" FileRestored FileEventType = "FileRestored" FileRestoring FileEventType = "FileRestoring" FolderAdded FileEventType = "FolderAdded" FolderDeleted FileEventType = "FolderDeleted" // NOTE: not 
sure if this needs to be specific to rename or copy FolderUpdated FileEventType = "FolderUpdated" ) type FileEvent struct { Info domain.FileInfo Type FileEventType Bucket string DbID string } func NewFileEvent(info domain.FileInfo, eventType FileEventType, bucket, dbID string) FileEvent { return FileEvent{ Info: info, Type: eventType, Bucket: bucket, DbID: dbID, } } type TextileEvent struct { BucketName string } func NewTextileEvent(bucketname string) TextileEvent { return TextileEvent{ BucketName: bucketname, } } type InvitationStatus int const ( Pending InvitationStatus = 0 Accepted Rejected ) type NotificationType int const ( InvitationType NotificationType = 0 ) type Invitation struct { InviterPublicKey string InvitationID string Status InvitationStatus ItemPaths []string } type NotificationEvent struct { Subject string Body string RelatedObject interface{} Type NotificationType CreatedAt int64 ReadAt int64 } ================================================ FILE: core/fsds/config.go ================================================ package fsds import ( "fmt" "os" "github.com/FleekHQ/space-daemon/core/space" ) var DefaultBucketName = "personal" type dataSourceConfig struct { tlfSources []*TLFDataSource } type FSDataSourceConfig func(config *dataSourceConfig) func WithTLFDataSource(source *TLFDataSource) FSDataSourceConfig { return func(config *dataSourceConfig) { config.tlfSources = append(config.tlfSources, source) } } // Configure the default 'Files` data source to be included as a data source func WithFilesDataSources(service space.Service) FSDataSourceConfig { basePath := fmt.Sprintf("%cFiles", os.PathSeparator) return WithTLFDataSource(&TLFDataSource{ name: "Files", basePath: basePath, FSDataSource: &filesDataSource{service: service}, }) } // Configure the default 'Shared With Me` data source to be included as a data source func WithSharedWithMeDataSources(service space.Service) FSDataSourceConfig { basePath := fmt.Sprintf("%cShared With Me", 
os.PathSeparator) return WithTLFDataSource(&TLFDataSource{ name: "Shared With Me", basePath: basePath, FSDataSource: &sharedWithMeDataSource{ service: service, maxDirLimit: 1000, cache: make(map[string]*sharedFileEntry), }, }) } var blackListedDirEntryNames = map[string]bool{ // OSX specific special directories ".Trashes": true, ".localized": true, ".fseventsd": true, ".ql_disablethumbnails": true, ".ql_disablecache": true, // special space empty directory file ".keep": true, } ================================================ FILE: core/fsds/data_source.go ================================================ package fsds import ( "context" "os" "path/filepath" "strings" ) // FileReadWriterCloser implements interfaces to read, copy, seek and close. type FileReadWriterCloser interface { Read(ctx context.Context, data []byte, offset int64) (int, error) Write(ctx context.Context, data []byte, offset int64) (int, error) Close(ctx context.Context) error Stats(ctx context.Context) (*DirEntry, error) Truncate(ctx context.Context, size uint64) error } // FSDataSource is data source of file/directories and their information // It is used as a local/remote cache for looking up information about the directories. 
// It should also ensure that the user in the context has permission to data that is being request type FSDataSource interface { // Get a single node, this can be called on either a file or folder entry // This is typically used by the OS for lookup of the information about the entry at path Get(ctx context.Context, path string) (*DirEntry, error) // GetChildren returns child entries in the directory/folder GetChildren(ctx context.Context, path string) ([]*DirEntry, error) // OpenReader returns a file reader Open(ctx context.Context, path string) (FileReadWriterCloser, error) // CreateEntry should create a directory or file based on the mode at the path CreateEntry(ctx context.Context, path string, mode os.FileMode) (*DirEntry, error) // RenameEntry should rename the directory entry from old to new RenameEntry(ctx context.Context, oldPath, newPath string) error // DeleteEntry should delete the item at the path DeleteEntry(ctx context.Context, path string) error } // TLFDataSource represents a data source handler for a particular top level file. 
type TLFDataSource struct {
	name     string
	basePath string
	FSDataSource
}

// ChildPath returns the path relative to this data source's mount point,
// i.e. the input with the basePath prefix stripped.
func (t *TLFDataSource) ChildPath(path string) string {
	return strings.TrimPrefix(path, t.basePath)
}

// ParentPath returns the path with the datasource base path prefixed.
func (t *TLFDataSource) ParentPath(path string) string {
	return filepath.Join(t.basePath, path)
}

================================================
FILE: core/fsds/dir_entry.go
================================================
package fsds

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/FleekHQ/space-daemon/log"

	"github.com/FleekHQ/space-daemon/core/space/domain"
)

// NOTE(review): the original trailing comments here did not match the code
// (0777 was annotated "-rw-------"); comments corrected to the actual modes.
var StandardFileAccessMode os.FileMode = 0777 // rwxrwxrwx
var StandardDirAccessMode = os.ModeDir | 0777 // drwxrwxrwx
var RestrictedDirAccessMode = os.ModeDir | 0500 // dr-x------ only allow reading and opening directory for user

// DirEntry implements the DirEntryOps
type DirEntry struct {
	entry domain.DirEntry
	mode  os.FileMode
	dbId  string
}

// NewDirEntry wraps a domain.DirEntry with a zero mode, so Mode() falls back
// to the standard file/dir access modes.
func NewDirEntry(entry domain.DirEntry) *DirEntry {
	return NewDirEntryWithMode(entry, 0)
}

// NewDirEntryFromFileInfo builds a DirEntry from an os.FileInfo.
// The file's mod time is used for both Created and Updated, since creation
// time is not available from os.FileInfo.
func NewDirEntryFromFileInfo(info os.FileInfo, path string) *DirEntry {
	return &DirEntry{
		entry: domain.DirEntry{
			Path:          filepath.Dir(path),
			IsDir:         info.IsDir(),
			Name:          filepath.Base(path),
			SizeInBytes:   fmt.Sprintf("%d", info.Size()),
			Created:       info.ModTime().Format(time.RFC3339),
			Updated:       info.ModTime().Format(time.RFC3339),
			FileExtension: filepath.Ext(path),
		},
		mode: StandardFileAccessMode,
		dbId: "",
	}
}

// NewDirEntryWithMode wraps a domain.DirEntry with an explicit file mode.
func NewDirEntryWithMode(entry domain.DirEntry, mode os.FileMode) *DirEntry {
	return &DirEntry{
		entry: entry,
		mode:  mode,
	}
}

// Path returns the entry's path; directory paths always carry exactly one
// trailing OS path separator.
func (d *DirEntry) Path() string {
	if d.IsDir() {
		return fmt.Sprintf(
			"%s%c",
			strings.TrimRight(d.entry.Path, fmt.Sprintf("%c", os.PathSeparator)),
			os.PathSeparator,
		)
	}
	return d.entry.Path
}

// IsDir implements the DirEntryAttribute interface and reports whether the
// entry is a directory.
func (d *DirEntry) IsDir() bool {
	return d.entry.IsDir
}

// Name
implements the DirEntryAttribute Interface func (d *DirEntry) Name() string { return d.entry.Name } // Size implements the DirEntryAttribute Interface and return the size of the item func (d *DirEntry) Size() uint64 { intSize, err := strconv.ParseUint(d.entry.SizeInBytes, 10, 64) if err != nil { log.Error("Error getting direntry size", err) // error, so returning 0 in the meantime return 0 } return intSize } // Mode implements the DirEntryAttribute Interface // Currently if it is a file, returns all access permission 0766 // but ideally should restrict the permission if owner is not the same as file func (d *DirEntry) Mode() os.FileMode { if d.mode != 0 { return d.mode } if d.IsDir() { return StandardDirAccessMode } return StandardFileAccessMode } func (d *DirEntry) Uid() uint32 { // for now return id of currently logged in user return uint32(os.Getuid()) } func (d *DirEntry) Gid() uint32 { return uint32(os.Getgid()) } // Ctime implements the DirEntryAttribute Interface // It returns the time the directory was created func (d *DirEntry) Ctime() time.Time { t, err := time.Parse(time.RFC3339, d.entry.Created) if err != nil { log.Error("Error parsing direntry created time", err) return time.Time{} } return t } // ModTime returns the modification time func (d *DirEntry) ModTime() time.Time { t, err := time.Parse(time.RFC3339, d.entry.Updated) if err != nil { log.Error("Error parsing direntry updated time", err) return time.Time{} } return t } ================================================ FILE: core/fsds/files_ds.go ================================================ package fsds import ( "context" "fmt" "io/ioutil" "os" "path/filepath" "syscall" "time" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/space" "github.com/FleekHQ/space-daemon/log" ) // Provides content for the 'Files' content managed by the space user // Requests for items in this path are dispatched to this datasource from SpaceFSDataSource type filesDataSource 
struct {
	service space.Service
}

// Maybe consider caching at the level of SpaceFSDataSource when results are returned from top level file

// Get resolves the DirEntry at path. Space has no direct stat call, so a
// non-empty ListDir result means the path is a directory (space folders
// always contain at least a .keep file) and an empty result means it is a
// file, whose size is read from a locally opened copy.
func (f *filesDataSource) Get(ctx context.Context, path string) (*DirEntry, error) {
	baseName := filepath.Base(path)
	if isBaseDirectory(path) || path == "" {
		return NewDirEntryWithMode(domain.DirEntry{
			Path:    path,
			IsDir:   true,
			Name:    baseName,
			Created: time.Now().Format(time.RFC3339),
			Updated: time.Now().Format(time.RFC3339),
		}, RestrictedDirAccessMode), nil
	}

	log.Debug("FileDS Get", fmt.Sprintf("path:%s", path))
	itemsInParent, err := f.service.ListDir(ctx, path, DefaultBucketName, true)
	if err != nil {
		// BUG FIX: the condition was inverted (`!isNotExistError`), so
		// unexpected errors were masked as EntryNotFound while genuine
		// not-found errors leaked through verbatim.
		if isNotExistError(err) {
			return nil, EntryNotFound
		}
		return nil, err
	}

	// If space service.ListDir on path is empty, then it is a file
	// if it is not empty, then it is a directory
	if len(itemsInParent) != 0 {
		// is a directory because space directory cannot be empty (must at least contain a .keep file)
		return NewDirEntry(domain.DirEntry{
			Path:    path,
			IsDir:   true,
			Name:    baseName,
			Created: time.Now().Format(time.RFC3339),
			Updated: time.Now().Format(time.RFC3339),
		}), nil
	}

	// OpenFile to get Size information of file
	// TODO: Verify service.OpenFile() logic to ensure that multiple open file doesn't recreate multiple local copies for the same file without cleanup
	r, err := f.service.OpenFile(ctx, path, DefaultBucketName, "")
	if err != nil {
		// BUG FIX: every OpenFile failure used to be reported as
		// EntryNotFound (the distinguishing check was commented out);
		// surface unexpected errors instead.
		if isNotExistError(err) {
			return nil, EntryNotFound
		}
		return nil, err
	}

	fileStat, err := os.Stat(r.Location)
	if err != nil {
		return nil, err
	}

	// is a file, so return file entry
	return NewDirEntry(domain.DirEntry{
		Path:          path,
		IsDir:         false,
		Name:          baseName,
		SizeInBytes:   fmt.Sprintf("%d", fileStat.Size()),
		Created:       time.Now().Format(time.RFC3339),
		Updated:       time.Now().Format(time.RFC3339),
		FileExtension: filepath.Ext(path),
	}), nil
}

// GetChildren lists the entries directly under path in the personal bucket.
func (f *filesDataSource) GetChildren(ctx context.Context, path string) ([]*DirEntry, error) {
	log.Debug("FileDS GetChildren", fmt.Sprintf("path:%s", path))
	domainEntries, err :=
f.service.ListDir(ctx, path, DefaultBucketName, true)
	if err != nil {
		return nil, err
	}

	dirEntries := make([]*DirEntry, len(domainEntries))
	// note: loop variable renamed; the original named it `domainEntries`,
	// shadowing the slice being ranged over.
	for i, item := range domainEntries {
		dirEntries[i] = NewDirEntry(item.DirEntry)
	}
	return dirEntries, nil
}

// Open returns a read/write handler backed by a local copy of the file.
func (f *filesDataSource) Open(ctx context.Context, path string) (FileReadWriterCloser, error) {
	log.Debug("FileDS Open", fmt.Sprintf("path:%s", path))
	openFileInfo, err := f.service.OpenFile(ctx, path, DefaultBucketName, "")
	if err != nil {
		return nil, err
	}

	return OpenSpaceFilesHandler(f.service, openFileInfo.Location, path, DefaultBucketName), nil
}

// Create the entry at the specified path and return a DirEntry representing it.
// The DirEntry would be used to write/copy the items necessary at the point
func (f *filesDataSource) CreateEntry(ctx context.Context, path string, mode os.FileMode) (*DirEntry, error) {
	log.Debug("FileDS CreateEntry", fmt.Sprintf("path:%s", path), fmt.Sprintf("mode:%v", mode))
	entryName := filepath.Base(path)
	parentDir := filepath.Dir(path)

	if mode.IsDir() {
		err := f.service.CreateFolder(ctx, path, DefaultBucketName)
		if err != nil {
			return nil, err
		}

		return NewDirEntry(domain.DirEntry{
			Path:    path,
			IsDir:   true,
			Name:    entryName,
			Created: time.Now().Format(time.RFC3339),
			Updated: time.Now().Format(time.RFC3339),
		}), nil
	}

	// create an empty file to uploaded to the specified path
	newFilePath := filepath.Join(os.TempDir(), path)
	_ = os.MkdirAll(filepath.Dir(newFilePath), os.ModePerm)
	err := ioutil.WriteFile(newFilePath, []byte{}, mode)
	if err != nil {
		log.Error("Error creating empty file", err, "newFilePath:"+newFilePath)
		return nil, err
	}

	waitChan, _, err := f.service.AddItems(
		ctx,
		[]string{
			newFilePath,
		},
		parentDir,
		DefaultBucketName,
	)
	if err != nil {
		return nil, err
	}

	r := <-waitChan
	if r.Error != nil {
		log.Error("FileDS Failed to upload file", r.Error)
		// BUG FIX: previously returned `err`, which is nil at this point,
		// silently swallowing the upload failure.
		return nil, r.Error
	}

	return NewDirEntry(domain.DirEntry{
		Path:    path,
		IsDir:   false,
		Name:    entryName,
		Created:
time.Now().Format(time.RFC3339), Updated: time.Now().Format(time.RFC3339), }), nil } // RenameEntry for now only supports renaming of empty folders // Depending on user request and textile support, non-empty folders and file renames will be supported func (f *filesDataSource) RenameEntry(ctx context.Context, oldPath, newPath string) error { log.Debug("FileDS RenameEntry", "oldPath:"+oldPath, "newPath:"+newPath) entry, err := f.Get(ctx, oldPath) if err != nil { return err } if !entry.IsDir() { log.Warn("FileDS trying to rename an entry that is not a directory") return syscall.ENOTSUP } childEntries, err := f.GetChildren(ctx, oldPath) if err != nil { log.Error("failed to get children of old path", err, "oldPath:"+oldPath) return err } if len(childEntries) != 0 && !areAllEntriesHidden(childEntries) { log.Warn("FileDS renaming directory that is not empty") // folder is not empty, so just error out return syscall.ENOTSUP // in the future, we should do a recursive copy to the newPath and then delete old path } if err = f.service.CreateFolder(ctx, newPath, DefaultBucketName); err != nil { log.Error("failed to new path create folder", err, "newPath:"+newPath) return syscall.ENOTSUP } return f.service.RemoveDirOrFile(ctx, oldPath, DefaultBucketName) } func areAllEntriesHidden(entries []*DirEntry) bool { for _, entry := range entries { baseName := filepath.Base(entry.Name()) if !blackListedDirEntryNames[baseName] { return false } } return true } func (f *filesDataSource) DeleteEntry(ctx context.Context, path string) error { log.Debug("FileDS DeletEntry", "path:"+path) return f.service.RemoveDirOrFile(ctx, path, DefaultBucketName) } ================================================ FILE: core/fsds/read_write_wrapper.go ================================================ package fsds import ( "context" "fmt" "io" "os" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/log" ) // Wrapper around space files read and write logic. 
// On close, it pushes changes to space.Service type SpaceFilesHandler struct { service SyncService localFile *os.File localFilePath string remotePath string bucketName string editted bool } type SyncService interface { AddItemWithReader(ctx context.Context, reader io.Reader, targetPath, bucketName string) (domain.AddItemResult, error) } func OpenSpaceFilesHandler( service SyncService, localFilePath, remoteFilePath, bucketName string, ) *SpaceFilesHandler { return &SpaceFilesHandler{ service: service, localFilePath: localFilePath, localFile: nil, remotePath: remoteFilePath, bucketName: bucketName, editted: false, } } func (s *SpaceFilesHandler) Read(ctx context.Context, b []byte, offset int64) (int, error) { log.Debug( "Reading bytes from file handler", "path:"+s.remotePath, "bucket:"+s.bucketName, fmt.Sprintf("offset:%d", offset), ) s.openLocalFile() _, err := s.localFile.Seek(offset, io.SeekStart) if err != nil { return 0, err } return s.localFile.Read(b) } func (s *SpaceFilesHandler) Write(ctx context.Context, b []byte, offset int64) (int, error) { log.Debug( "Writing bytes to file handler", "path:"+s.remotePath, "bucket:"+s.bucketName, fmt.Sprintf("offset:%d", offset), ) s.openLocalFile() _, err := s.localFile.Seek(offset, io.SeekStart) if err != nil { return 0, err } n, err := s.localFile.Write(b) if err == nil { s.editted = true } return n, err } func (s *SpaceFilesHandler) Close(ctx context.Context) error { log.Debug("Closing access to SpaceFileHandler", "remotePath:"+s.remotePath, "localPath:"+s.localFilePath) defer func() { if s.localFile != nil { // background synchronizer should handle sync on close s.localFile.Close() s.localFile = nil } }() //if s.editted && s.localFile != nil { // _, err := s.localFile.Seek(0, 0) // if err != nil { // log.Error("Error seeking local file to beginning for upload", err) // return err // } // // _, err = s.service.AddItemWithReader( // ctx, // s.localFile, // s.remotePath, // s.bucketName, // ) // if err != nil { // 
return err // } //} return nil } // Stats for now always reads stats from local file func (s *SpaceFilesHandler) Stats(ctx context.Context) (*DirEntry, error) { s.openLocalFile() info, err := os.Stat(s.localFilePath) if err != nil { return nil, err } return NewDirEntryFromFileInfo(info, s.remotePath), nil } func (s *SpaceFilesHandler) Truncate(ctx context.Context, size uint64) error { s.openLocalFile() return s.localFile.Truncate(int64(size)) } func (s *SpaceFilesHandler) openLocalFile() { if s.localFile != nil { return } s.localFile, _ = os.OpenFile(s.localFilePath, os.O_APPEND|os.O_RDWR, os.ModeAppend) } ================================================ FILE: core/fsds/shared_with_me_ds.go ================================================ package fsds import ( "context" "fmt" "os" "path/filepath" "syscall" "time" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/space" "github.com/FleekHQ/space-daemon/log" ) type sharedFileEntry struct { entry *DirEntry dbId string bucket string } // Provides content for the 'Shared With Me' content managed by the space user // Requests for items in this path are dispatched to this datasource from SpaceFSDataSource type sharedWithMeDataSource struct { service space.Service maxDirLimit int cache map[string]*sharedFileEntry } // Maybe consider caching at the level of SpaceFSDataSource when results are returned from top level file func (f *sharedWithMeDataSource) Get(ctx context.Context, path string) (*DirEntry, error) { baseName := filepath.Base(path) if isBaseDirectory(path) || path == "" { // return parent directory info return NewDirEntryWithMode(domain.DirEntry{ Path: path, IsDir: true, Name: baseName, Created: time.Now().Format(time.RFC3339), Updated: time.Now().Format(time.RFC3339), }, RestrictedDirAccessMode), nil } log.Debug("SharedWithMeDS Get", fmt.Sprintf("path:%s", path)) // check cache if Item is already there entry, exists := f.cache[path] if exists { return entry.entry, nil } 
itemsInParent, _, err := f.service.GetSharedWithMeFiles(ctx, "", f.maxDirLimit)
	if err != nil {
		// BUG FIX: the condition was inverted (`!isNotExistError`), so
		// unexpected errors were reported as EntryNotFound while genuine
		// not-found errors were returned raw.
		if isNotExistError(err) {
			return nil, EntryNotFound
		}
		return nil, err
	}

	f.cacheResults(itemsInParent)

	// find item matching path
	for _, item := range itemsInParent {
		if item.Path == path {
			return NewDirEntry(item.DirEntry), nil
		}
	}

	return nil, EntryNotFound
}

// GetChildren should only be called on the parent folder and will always return
func (f *sharedWithMeDataSource) GetChildren(ctx context.Context, path string) ([]*DirEntry, error) {
	log.Debug("SharedWithMeDS GetChildren", fmt.Sprintf("path:%s", path))
	if !isBaseDirectory(path) && path != "" {
		// just return empty directory since shared with me currently only supports files
		return []*DirEntry{}, nil
	}

	entries, _, err := f.service.GetSharedWithMeFiles(ctx, "", f.maxDirLimit)
	if err != nil {
		return nil, err
	}

	// this ensure it always refreshes the cache whenever operating system calls list directory
	f.cacheResults(entries)

	dirEntries := make([]*DirEntry, len(entries))
	for i, entry := range entries {
		dirEntries[i] = NewDirEntry(entry.DirEntry)
	}
	return dirEntries, nil
}

// Open returns a handler for a shared file previously cached by Get or
// GetChildren; files not in the cache are reported as EntryNotFound.
func (f *sharedWithMeDataSource) Open(ctx context.Context, path string) (FileReadWriterCloser, error) {
	log.Debug("SharedWithMeDS Open", fmt.Sprintf("path:%s", path))
	entry, exists := f.cache[path]
	if !exists {
		return nil, EntryNotFound
	}

	openFileInfo, err := f.service.OpenFile(ctx, path, entry.bucket, entry.dbId)
	if err != nil {
		return nil, err
	}

	return OpenSpaceFilesHandler(f.service, openFileInfo.Location, path, entry.bucket), nil
}

// CreateEntry is not supported for shared with me files.
func (f *sharedWithMeDataSource) CreateEntry(ctx context.Context, path string, mode os.FileMode) (*DirEntry, error) { // not allowed so just return error return nil, syscall.ENOTSUP } func (f *sharedWithMeDataSource) RenameEntry(ctx context.Context, oldPath, newPath string) error { // Renaming items in the shared directory is not supported return syscall.ENOTSUP } func (f *sharedWithMeDataSource) DeleteEntry(ctx context.Context, path string) error { // Deleting items in the shared directory is not supported return syscall.ENOTSUP } func (f *sharedWithMeDataSource) cacheResults(items []*domain.SharedDirEntry) { for _, item := range items { f.cache[item.Path] = &sharedFileEntry{ entry: NewDirEntryWithMode(item.DirEntry, StandardFileAccessMode), dbId: item.DbID, bucket: item.Bucket, } } } ================================================ FILE: core/fsds/spacefs.go ================================================ package fsds import ( "context" "os" "path/filepath" "strings" "syscall" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/space" ) // EntryNotFound error when a directory is not found var EntryNotFound = syscall.ENOENT // errors.New("Directory entry not found") var baseDir = NewDirEntryWithMode( domain.DirEntry{ Path: "/", IsDir: true, Name: "", }, RestrictedDirAccessMode, ) // SpaceFSDataSource is an implementation of the FSDataSource // It interacts with the Space Service Layer to provide data type SpaceFSDataSource struct { service space.Service tlfSources []*TLFDataSource // temp cache to speed up node fetching interactions // TODO: handle cache invalidation entryCache map[string]*DirEntry } func NewSpaceFSDataSource(service space.Service, configOptions ...FSDataSourceConfig) *SpaceFSDataSource { config := dataSourceConfig{} for _, configure := range configOptions { configure(&config) } return &SpaceFSDataSource{ service: service, tlfSources: config.tlfSources, entryCache: make(map[string]*DirEntry), } } // Get 
returns the DirEntry information for item at path func (d *SpaceFSDataSource) Get(ctx context.Context, path string) (*DirEntry, error) { //log.Debug("FSDS.Get", "path:"+path) baseName := filepath.Base(path) if blackListedDirEntryNames[baseName] { return nil, EntryNotFound } // handle quick lookup of home directory if isBaseDirectory(path) { return baseDir, nil } // cache get results if entry, exists := d.entryCache[path]; exists { return entry, nil } dataSource := d.findTLFDataSource(path) if dataSource == nil { return nil, EntryNotFound } result, err := dataSource.Get(ctx, dataSource.ChildPath(path)) if err != nil { return nil, err } result.entry.Path = dataSource.ParentPath(result.entry.Path) d.entryCache[path] = result return result, nil } func (d *SpaceFSDataSource) findTLFDataSource(path string) *TLFDataSource { for _, i := range d.tlfSources { if strings.HasPrefix(path, i.basePath) { return i } } return nil } // GetChildren returns list of entries in a path func (d *SpaceFSDataSource) GetChildren(ctx context.Context, path string) ([]*DirEntry, error) { //log.Debug("FSDS.GetChildren", "path:"+path) baseName := filepath.Base(path) if blackListedDirEntryNames[baseName] { return nil, EntryNotFound } if isBaseDirectory(path) { return d.getTopLevelDirectories(), nil } dataSource := d.findTLFDataSource(path) if dataSource == nil { return nil, EntryNotFound } result, err := dataSource.GetChildren(ctx, dataSource.ChildPath(path)) // format results if result != nil { for _, entry := range result { entry.entry.Path = dataSource.ParentPath(entry.entry.Path) d.entryCache[entry.entry.Path] = entry } } return result, err } // Open is invoked to read the content of a file func (d *SpaceFSDataSource) Open(ctx context.Context, path string) (FileReadWriterCloser, error) { //log.Debug("FSDS.Open", "path:"+path) dataSource := d.findTLFDataSource(path) if dataSource == nil { return nil, EntryNotFound } return dataSource.Open(ctx, dataSource.ChildPath(path)) } // CreateEntry 
creates a directory or file based on the mode at the path func (d *SpaceFSDataSource) CreateEntry(ctx context.Context, path string, mode os.FileMode) (*DirEntry, error) { //log.Debug("FSDS.CreateEntry", "path:"+path) dataSource := d.findTLFDataSource(path) if dataSource == nil { return nil, syscall.ENOTSUP } result, err := dataSource.CreateEntry(ctx, dataSource.ChildPath(path), mode) if result != nil { result.entry.Path = dataSource.ParentPath(result.entry.Path) } return result, err } func (d *SpaceFSDataSource) RenameEntry(ctx context.Context, oldPath, newPath string) error { //log.Debug("FSDS.RenameEntry", "oldPath:"+oldPath, "newPath:"+newPath) oldPathDataSource := d.findTLFDataSource(oldPath) newPathDataSource := d.findTLFDataSource(newPath) if oldPathDataSource.name != newPathDataSource.name { // renaming can only happen within the same datasource return syscall.ENOTSUP } return oldPathDataSource.RenameEntry( ctx, oldPathDataSource.ChildPath(oldPath), oldPathDataSource.ChildPath(newPath), ) } func (d *SpaceFSDataSource) DeleteEntry(ctx context.Context, path string) error { //log.Debug("FSDS.DeleteEntry", "path:"+path) dataSource := d.findTLFDataSource(path) return dataSource.DeleteEntry(ctx, dataSource.ChildPath(path)) } // Returns list of top level entry func (d *SpaceFSDataSource) getTopLevelDirectories() []*DirEntry { var directories []*DirEntry for _, ds := range d.tlfSources { directories = append(directories, NewDirEntryWithMode( domain.DirEntry{ Path: ds.basePath, IsDir: true, Name: ds.name, //Created: "", //Updated: "", }, RestrictedDirAccessMode, )) } return directories } ================================================ FILE: core/fsds/utils.go ================================================ package fsds import ( "fmt" "os" "strings" ) func isBaseDirectory(path string) bool { return path == "/" } func isDirPath(path string) bool { return strings.HasSuffix(path, fmt.Sprintf("%c", os.PathSeparator)) } func isNotExistError(err error) bool { if err == 
nil { return false } // Example of current error representing file not found: // error: code = Unknown desc = no link named ".localized" under bafybeievqvkeo2ycggt4lino45pj3olv7yo2e6sybcmyphicejsvq2vimi[] if strings.Contains(err.Error(), "no link named") { return true } if strings.Contains(err.Error(), "could not resolve path") { return true } return false } ================================================ FILE: core/ipfs/dag.go ================================================ package ipfs import ( "context" "errors" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" "sync" ) var ( ErrNotFound = errors.New("not found") ) type mapBasedDag struct { mu sync.Mutex nodes map[string]ipld.Node } func NewDagService() *mapBasedDag { return &mapBasedDag{nodes: make(map[string]ipld.Node)} } func (d *mapBasedDag) Get(ctx context.Context, cid cid.Cid) (ipld.Node, error) { d.mu.Lock() defer d.mu.Unlock() if n, ok := d.nodes[cid.KeyString()]; ok { return n, nil } return nil, ErrNotFound } func (d *mapBasedDag) GetMany(ctx context.Context, cids []cid.Cid) <-chan *ipld.NodeOption { d.mu.Lock() defer d.mu.Unlock() out := make(chan *ipld.NodeOption, len(cids)) for _, c := range cids { if n, ok := d.nodes[c.KeyString()]; ok { out <- &ipld.NodeOption{Node: n} } else { out <- &ipld.NodeOption{Err: ErrNotFound} } } close(out) return out } func (d *mapBasedDag) Add(ctx context.Context, node ipld.Node) error { d.mu.Lock() defer d.mu.Unlock() d.nodes[node.Cid().KeyString()] = node return nil } func (d *mapBasedDag) AddMany(ctx context.Context, nodes []ipld.Node) error { d.mu.Lock() defer d.mu.Unlock() for _, n := range nodes { d.nodes[n.Cid().KeyString()] = n } return nil } func (d *mapBasedDag) Remove(ctx context.Context, c cid.Cid) error { d.mu.Lock() defer d.mu.Unlock() delete(d.nodes, c.KeyString()) return nil } func (d *mapBasedDag) RemoveMany(ctx context.Context, cids []cid.Cid) error { d.mu.Lock() defer d.mu.Unlock() for _, c := range cids { delete(d.nodes, 
c.KeyString()) } return nil } ================================================ FILE: core/ipfs/ipfs.go ================================================ package ipfs import ( "context" "io" "sync" "github.com/ipfs/go-cid" "github.com/pkg/errors" "github.com/ipfs/interface-go-ipfs-core/path" "github.com/FleekHQ/space-daemon/log" ma "github.com/multiformats/go-multiaddr" "github.com/FleekHQ/space-daemon/config" files "github.com/ipfs/go-ipfs-files" httpapi "github.com/ipfs/go-ipfs-http-client" ) type AddItemResult struct { Error error Resolved path.Resolved } type LinkNodesInput struct { // name of link Name string Path path.Path } type LinkNodesResult struct { ParentPath path.Resolved } type Client interface { AddItems(ctx context.Context, items []io.Reader) []AddItemResult AddItem(ctx context.Context, item io.Reader) AddItemResult // Links each of the nodes in the input under the same parent LinkNodes(ctx context.Context, nodes []LinkNodesInput) (*LinkNodesResult, error) PullItem(ctx context.Context, cid cid.Cid) (io.ReadCloser, error) } type SpaceIpfsClient struct { client *httpapi.HttpApi } func NewSpaceIpfsClient(cfg config.Config) (*SpaceIpfsClient, error) { ipfsAddr := cfg.GetString(config.Ipfsaddr, "/ip4/127.0.0.1/tcp/5001") multiaddress, err := ma.NewMultiaddr(ipfsAddr) if err != nil { log.Error("Unable to parse IPFS Multiaddr", err) return nil, err } ic, err := httpapi.NewApi(multiaddress) if err != nil { return nil, err } return &SpaceIpfsClient{ client: ic, }, nil } func (s *SpaceIpfsClient) AddItems(ctx context.Context, items []io.Reader) []AddItemResult { results := make([]AddItemResult, len(items)) wg := sync.WaitGroup{} for i, item := range items { wg.Add(1) go func(i int, item io.Reader) { resolved, err := s.client.Unixfs().Add( ctx, files.NewReaderFile(item), ) results[i] = AddItemResult{ Error: err, Resolved: resolved, } wg.Done() }(i, item) } wg.Wait() return results } func (s *SpaceIpfsClient) AddItem(ctx context.Context, item io.Reader) 
AddItemResult { result := s.AddItems(ctx, []io.Reader{item}) return result[0] } func (s *SpaceIpfsClient) LinkNodes(ctx context.Context, nodes []LinkNodesInput) (*LinkNodesResult, error) { if len(nodes) == 0 { return nil, errors.New("no nodes passed to link nodes") } parentNode, err := s.client.Object().New(ctx) if err != nil { return nil, err } parentPath := path.IpfsPath(parentNode.Cid()) for _, node := range nodes { parentPath, err = s.client.Object().AddLink(ctx, parentPath, node.Name, node.Path) if err != nil { return nil, errors.Wrap(err, "failed to link nodes") } } return &LinkNodesResult{ ParentPath: parentPath, }, nil } func (s *SpaceIpfsClient) PullItem(ctx context.Context, cid cid.Cid) (io.ReadCloser, error) { node, err := s.client.Unixfs().Get(ctx, path.IpfsPath(cid)) if err != nil { return nil, err } var file files.File switch f := node.(type) { case files.File: file = f case files.Directory: return nil, errors.New("unsupported cid provided") default: return nil, errors.New("unsupported cid provided") } return file, nil } ================================================ FILE: core/ipfs/node/node.go ================================================ package ipfs import ( "context" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/log" "os/user" "fmt" "io/ioutil" "path/filepath" "sync" icore "github.com/ipfs/interface-go-ipfs-core" ma "github.com/multiformats/go-multiaddr" ipfsconfig "github.com/ipfs/go-ipfs-config" "github.com/ipfs/go-ipfs/commands" "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core/coreapi" "github.com/ipfs/go-ipfs/core/corehttp" "github.com/ipfs/go-ipfs/core/node/libp2p" "github.com/ipfs/go-ipfs/plugin/loader" "github.com/ipfs/go-ipfs/repo/fsrepo" coreiface "github.com/ipfs/interface-go-ipfs-core" "github.com/libp2p/go-libp2p-core/peer" ) type IpfsNode struct { coreApi coreiface.CoreAPI coreNode *core.IpfsNode cancel context.CancelFunc IsRunning bool Ready chan bool cfg config.Config } func 
NewIpsNode(cfg config.Config) *IpfsNode { return &IpfsNode{ Ready: make(chan bool), cfg: cfg, } } func (node *IpfsNode) Start(ctx context.Context) error { log.Info("Starting the ipfs node") err := node.start() if err != nil { return err } log.Info("Running the ipfs node") node.IsRunning = true node.Ready <- true return nil } func (node *IpfsNode) WaitForReady() chan bool { return node.Ready } func (node *IpfsNode) Stop() error { return node.stop() } func (node *IpfsNode) Shutdown() error { close(node.Ready) return node.Stop() } func (node *IpfsNode) start() error { ctx, cancel := context.WithCancel(context.Background()) node.cancel = cancel pathRoot, err := ipfsconfig.PathRoot() if err != nil { return err } repoPath := node.cfg.GetString(config.Ipfsnodepath, pathRoot) if repoPath == "" { repoPath = pathRoot } else { usr, err := user.Current() if err != nil { repoPath = pathRoot } else { repoPath = filepath.Join(usr.HomeDir, repoPath) } } if err := setupPlugins(repoPath); err != nil { return err } // init the repo repoCfg, err := ipfsconfig.Init(ioutil.Discard, 2048) if err != nil { return err } err = fsrepo.Init(repoPath, repoCfg) if err != nil { return err } // open the repo repo, err := fsrepo.Open(repoPath) if err != nil { return err } // construct the node nodeOptions := &core.BuildCfg{ Online: true, Routing: libp2p.DHTClientOption, Repo: repo, } node.coreNode, err = core.NewNode(ctx, nodeOptions) if err != nil { return err } node.coreApi, err = coreapi.NewCoreAPI(node.coreNode) if err != nil { return err } addr := node.cfg.GetString(config.Ipfsnodeaddr, "/ip4/127.0.0.1/tcp/5001") if addr == "" { addr = "/ip4/127.0.0.1/tcp/5001" } var opts = []corehttp.ServeOption{ corehttp.GatewayOption(true, "/ipfs", "/ipns"), corehttp.WebUIOption, corehttp.CommandsOption(cmdCtx(node.coreNode, repoPath)), } go func() { if err := corehttp.ListenAndServe(node.coreNode, addr, opts...); err != nil { log.Error("Error starting api: ", err) return } }() // TODO: better place? 
bootstrapNodes := []string{ "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", "/ip4/104.131.131.82/udp/4001/quic/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", } go connectToPeers(ctx, node.coreApi, bootstrapNodes) return nil } func (node *IpfsNode) stop() error { node.IsRunning = false err := node.coreNode.Close() if err != nil { return err } node.cancel() return nil } func connectToPeers(ctx context.Context, ipfs icore.CoreAPI, peers []string) error { var wg sync.WaitGroup peerInfos := make(map[peer.ID]*peer.AddrInfo, len(peers)) for _, addrStr := range peers { addr, err := ma.NewMultiaddr(addrStr) if err != nil { return err } pii, err := peer.AddrInfosFromP2pAddrs(addr) if err != nil { return err } pi, ok := peerInfos[pii[0].ID] if !ok { pi = &peer.AddrInfo{ID: pii[0].ID} peerInfos[pi.ID] = pi } pi.Addrs = append(pi.Addrs, pii[0].Addrs...) 
} wg.Add(len(peerInfos)) for _, peerInfo := range peerInfos { go func(peerInfo *peer.AddrInfo) { defer wg.Done() err := ipfs.Swarm().Connect(ctx, *peerInfo) if err != nil { return } }(peerInfo) } wg.Wait() return nil } func setupPlugins(externalPluginsPath string) error { // load any external plugins if available on externalPluginsPath plugins, err := loader.NewPluginLoader(filepath.Join(externalPluginsPath, "plugins")) if err != nil { return fmt.Errorf("error loading plugins: %s", err) } // load preloaded and external plugins if err := plugins.Initialize(); err != nil { return fmt.Errorf("error initializing plugins: %s", err) } if err := plugins.Inject(); err != nil { return fmt.Errorf("error injecting plugins: %s", err) } return nil } func cmdCtx(node *core.IpfsNode, repoPath string) commands.Context { return commands.Context{ ConfigRoot: repoPath, LoadConfig: func(path string) (*ipfsconfig.Config, error) { return node.Repo.Config() }, ConstructNode: func() (*core.IpfsNode, error) { return node, nil }, ReqLog: &commands.ReqLog{}, } } ================================================ FILE: core/ipfs/utils.go ================================================ package ipfs import ( "context" "errors" "fmt" "io" "net/http" "strings" "github.com/ipfs/go-cid" chunker "github.com/ipfs/go-ipfs-chunker" ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-merkledag" "github.com/ipfs/go-unixfs/importer/balanced" "github.com/ipfs/go-unixfs/importer/helpers" "github.com/ipfs/go-unixfs/importer/trickle" mh "github.com/multiformats/go-multihash" ) func GetFileHash(r io.Reader) (string, error) { hashFun := "sha2-256" prefix, err := merkledag.PrefixForCidVersion(1) if err != nil { return "", fmt.Errorf("bad CID Version: %s", err) } hashFunCode, ok := mh.Names[strings.ToLower(hashFun)] if !ok { return "", fmt.Errorf("unrecognized hash function: %s", hashFun) } prefix.MhType = hashFunCode prefix.MhLength = -1 prefix.Codec = cid.DagProtobuf dagServ := NewDagService() dbp := 
helpers.DagBuilderParams{ Dagserv: dagServ, RawLeaves: true, Maxlinks: helpers.DefaultLinksPerBlock, NoCopy: false, CidBuilder: &prefix, } chnk, err := chunker.FromString(r, "") if err != nil { return "", err } dbh, err := dbp.New(chnk) if err != nil { return "", err } layout := "trickle" var n ipld.Node switch layout { case "trickle": n, err = trickle.Layout(dbh) case "balanced", "": n, err = balanced.Layout(dbh) default: return "", errors.New("invalid Layout") } return n.Cid().String(), nil } func DownloadIpfsItemViaGateway(ctx context.Context, gatewayUrl string, cid cid.Cid) (io.ReadCloser, error) { url := fmt.Sprintf("%s/ipfs/%s", gatewayUrl, cid.String()) req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } client := http.Client{} resp, err := client.Do(req.WithContext(ctx)) if err != nil { return nil, err } if resp.StatusCode != 200 { return nil, fmt.Errorf("failed to fetch item %s: status_code %d", cid.String(), resp.StatusCode) } return resp.Body, nil } func DownloadIpfsItem(ctx context.Context, nodeUrl string, cid cid.Cid) (io.ReadCloser, error) { // https://docs.ipfs.io/reference/http/api/#api-v0-cat url := fmt.Sprintf("http://%s/api/v0/cat?arg=%s", nodeUrl, cid.String()) req, err := http.NewRequest(http.MethodPost, url, nil) if err != nil { return nil, err } client := http.Client{} resp, err := client.Do(req.WithContext(ctx)) if err != nil { return nil, err } if resp.StatusCode != 200 { return nil, fmt.Errorf("failed to fetch item %s: status_code %d", cid.String(), resp.StatusCode) } return resp.Body, nil } ================================================ FILE: core/ipfs/utils_test.go ================================================ package ipfs import ( "os" "strings" "testing" "github.com/stretchr/testify/assert" ) // fleek hash: bafybeiemzcxynbrrhtcpmmdtkl42molkiyfqu3j5ewp2o7izdmomptfkgi func TestIpfs_GetFileHash_FromStringReader(t *testing.T) { t.Skip() r := strings.NewReader("IPFS test data for reader") 
	expectedHash := "bafybeie4zu4wu7lexqty2aubpe36dnpd6edgb5mthhtab5hyhuju7jlcgm"

	res, err := GetFileHash(r)

	assert.Nil(t, err)
	assert.NotNil(t, res)
	assert.Equal(t, expectedHash, res)
}

// bafybeic3jetthfk7tjmewz42idwsaeek5a7myw6n46zrrxdmp5nlkc6diy
func TestIpfs_GetFileHash_FromFile(t *testing.T) {
	t.Skip()
	r, _ := os.Open("test1.txt")
	expectedHash := "bafybeie4zu4wu7lexqty2aubpe36dnpd6edgb5mthhtab5hyhuju7jlcgm"

	res, err := GetFileHash(r)

	assert.Nil(t, err)
	assert.NotNil(t, res)
	assert.Equal(t, expectedHash, res)
}

================================================ FILE: core/keychain/app_token.go ================================================

package keychain

import (
	"errors"

	"github.com/99designs/keyring"

	"github.com/FleekHQ/space-daemon/core/permissions"
)

// Keyring key prefixes under which app tokens are stored.
const AppTokenStoreKey = "appToken"
const MasterAppTokenStoreKey = "masterAppToken"

var ErrMasterTokenAlreadyExists = errors.New("master app token already exists")

// StoreAppToken persists an app token in the OS keyring under
// "appToken_<tok.Key>". A master token is additionally indexed in the local
// store and mirrored in the keyring; it may only ever be stored once —
// attempts to store a second master token return ErrMasterTokenAlreadyExists.
func (kc *keychain) StoreAppToken(tok *permissions.AppToken) error {
	ring, err := kc.getKeyRing()
	if err != nil {
		return err
	}

	// Prevent overriding existing master key
	key, _ := kc.st.Get([]byte(getMasterTokenStKey()))
	if key != nil && tok.IsMaster {
		return ErrMasterTokenAlreadyExists
	}

	// Prevents overriding even if user logged out and logged back in (which clears the store)
	_, err = ring.Get(getMasterTokenStKey())
	if err == nil && tok.IsMaster {
		return ErrMasterTokenAlreadyExists
	}

	marshalled, err := permissions.MarshalToken(tok)
	if err != nil {
		return err
	}

	err = ring.Set(keyring.Item{
		Key:   AppTokenStoreKey + "_" + tok.Key,
		Data:  marshalled,
		Label: "Space App - App Token",
	})
	if err != nil {
		return err
	}

	if tok.IsMaster {
		// Master tokens are indexed in the store (by key) and also saved under
		// a fixed keyring entry so they survive store resets.
		if err := kc.st.Set([]byte(getMasterTokenStKey()), []byte(tok.Key)); err != nil {
			return err
		}
		if err := ring.Set(keyring.Item{
			Key:   getMasterTokenStKey(),
			Data:  marshalled,
			Label: "Space App - Master App Token",
		}); err != nil {
			return err
		}
	}

	return nil
}

// GetAppToken looks up a previously stored app token by its key.
func (kc *keychain) GetAppToken(key string) (*permissions.AppToken,
error) {
	ring, err := kc.getKeyRing()
	if err != nil {
		return nil, err
	}

	token, err := ring.Get(AppTokenStoreKey + "_" + key)
	if err != nil {
		return nil, err
	}

	return permissions.UnmarshalToken(token.Data)
}

// getMasterTokenStKey returns the store/keyring key used for the master token.
func getMasterTokenStKey() string {
	return AppTokenStoreKey + "_" + MasterAppTokenStoreKey
}

================================================ FILE: core/keychain/keychain.go ================================================

package keychain

import (
	"crypto/ed25519"
	"crypto/sha512"
	"encoding/hex"
	"os"
	"path"
	"strings"

	"golang.org/x/crypto/pbkdf2"

	"errors"

	"github.com/99designs/keyring"
	ri "github.com/FleekHQ/space-daemon/core/keychain/keyring"
	"github.com/FleekHQ/space-daemon/core/permissions"
	"github.com/FleekHQ/space-daemon/core/store"
	"github.com/FleekHQ/space-daemon/log"
	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/textileio/go-threads/core/thread"
	sym "github.com/textileio/go-threads/crypto/symmetric"
)

const PrivateKeyStoreKey = "key"
const PublicKeyStoreKey = "pub"
// Separator between the hex private key and the mnemonic in the keyring entry.
const privKeyMnemonicSeparator = "___"

var (
	ErrKeyPairNotFound = errors.New("No key pair found in the local db.")
)

// keychain keeps the private key + mnemonic in the OS keyring, the public key
// in the local store (for quick access), and caches the unmarshalled private
// key in memory.
type keychain struct {
	fileDir string
	st      store.Store
	ring    ri.Keyring
	privKey *crypto.PrivKey
}

type Keychain interface {
	GenerateKeyPair() (pub []byte, priv []byte, err error)
	GenerateKeyFromMnemonic(...GenerateKeyFromMnemonicOpts) (mnemonic string, err error)
	GetStoredKeyPairInLibP2PFormat() (crypto.PrivKey, crypto.PubKey, error)
	GetStoredPublicKey() (crypto.PubKey, error)
	GetStoredMnemonic() (string, error)
	GetManagedThreadKey(threadKeyName string) (thread.Key, error)
	GenerateKeyPairWithForce() (pub []byte, priv []byte, err error)
	Sign([]byte) ([]byte, error)
	ImportExistingKeyPair(priv crypto.PrivKey, mnemonic string) error
	DeleteKeypair() error
	StoreAppToken(tok *permissions.AppToken) error
	GetAppToken(key string) (*permissions.AppToken, error)
}

type keychainOptions struct {
	fileDir string
	store   store.Store
	// Don't use kc.ring directly, use getKeyRing() instead
	ring ri.Keyring
}

var defaultKeychainOptions = keychainOptions{
	fileDir: store.DefaultRootDir,
}

// Helper function for setting keychain file path for Windows/Linux
func WithPath(path string) Option {
	return func(o *keychainOptions) {
		if path != "" {
			o.fileDir = path
		}
	}
}

func WithStore(st store.Store) Option {
	return func(o *keychainOptions) {
		if st != nil {
			o.store = st
		}
	}
}

// Used to inject a mock keyring in tests or in case you want to use a custom keyring implementation
func WithKeyring(ring ri.Keyring) Option {
	return func(o *keychainOptions) {
		if ring != nil {
			o.ring = ring
		}
	}
}

type Option func(o *keychainOptions)

// New builds a keychain, falling back to a file-backed store at fileDir when
// no store is injected.
func New(opts ...Option) *keychain {
	o := defaultKeychainOptions
	for _, opt := range opts {
		opt(&o)
	}

	if o.store == nil {
		defaultStore := store.New(store.WithPath(o.fileDir))
		o.store = defaultStore
	}

	return &keychain{
		fileDir: o.fileDir,
		st:      o.store,
		ring:    o.ring,
	}
}

// Generates a public/private key pair using ed25519 algorithm.
// It stores it in the local db and returns the key pair key.
// If there's already a key pair stored, it returns an error.
// Use GenerateKeyPairWithForce if you want to override existing keys
func (kc *keychain) GenerateKeyPair() ([]byte, []byte, error) {
	if val, _ := kc.GetStoredPublicKey(); val != nil {
		newErr := errors.New("Error while executing GenerateKeyPair. Key pair already exists. Use GenerateKeyPairWithForce if you want to override it.")
		return nil, nil, newErr
	}

	return kc.generateAndStoreKeyPair(nil, "")
}

// Returns the stored key pair using the same signature than libp2p's GenerateEd25519Key function
func (kc *keychain) GetStoredKeyPairInLibP2PFormat() (crypto.PrivKey, crypto.PubKey, error) {
	var priv []byte
	var err error

	_, err = kc.GetStoredPublicKey()
	if err != nil {
		return nil, nil, err
	}

	// Serve from the in-memory cache when the key was already unmarshalled.
	if kc.privKey != nil {
		return *kc.privKey, (*kc.privKey).GetPublic(), nil
	}

	if priv, _, err = kc.retrieveKeyPair(); err != nil {
		newErr := ErrKeyPairNotFound
		return nil, nil, newErr
	}

	var unmarshalledPriv crypto.PrivKey
	if unmarshalledPriv, err = crypto.UnmarshalEd25519PrivateKey(priv); err != nil {
		return nil, nil, err
	}
	kc.privKey = &unmarshalledPriv

	unmarshalledPub := unmarshalledPriv.GetPublic()

	return unmarshalledPriv, unmarshalledPub, nil
}

// Generates a public/private key pair using ed25519 algorithm.
// It stores it in the local db and returns the key pair.
// Warning: If there's already a key pair stored, it overrides it.
func (kc *keychain) GenerateKeyPairWithForce() ([]byte, []byte, error) {
	return kc.generateAndStoreKeyPair(nil, "")
}

// Returns the public key currently in use in LibP2P format.
// Returns an error if there's no public key set.
// Unlike GetStoredKeyPairInLibP2PFormat, this method does not access the keychain
// NOTE(review): the code below does call ring.GetMetadata, so the comment above
// may be stale — confirm whether GetMetadata prompts for keychain access.
func (kc *keychain) GetStoredPublicKey() (crypto.PubKey, error) {
	ring, err := kc.getKeyRing()
	if err != nil {
		return nil, err
	}

	// Existence check of the private-key entry; the public key bytes themselves
	// are read from the local store below.
	_, err = ring.GetMetadata(PrivateKeyStoreKey)
	if err == keyring.ErrKeyNotFound {
		return nil, ErrKeyPairNotFound
	}

	pubInBytes, err := kc.st.Get([]byte(PublicKeyStoreKey))
	if err != nil {
		return nil, err
	}
	if pubInBytes == nil {
		return nil, ErrKeyPairNotFound
	}

	pub, err := crypto.UnmarshalEd25519PublicKey(pubInBytes)
	if err != nil {
		return nil, err
	}

	return pub, nil
}

// GetStoredMnemonic returns the mnemonic stored alongside the private key.
func (kc *keychain) GetStoredMnemonic() (string, error) {
	_, mnemonic, err := kc.retrieveKeyPair()
	if err != nil {
		return "", err
	}

	return mnemonic, nil
}

// Stores an existing private key in the keychain
// Warning: If there's already a key pair stored, this will override it.
func (kc *keychain) ImportExistingKeyPair(priv crypto.PrivKey, mnemonic string) error {
	privInBytes, err := priv.Raw()
	if err != nil {
		return err
	}

	pubInBytes, err := priv.GetPublic().Raw()
	if err != nil {
		return err
	}

	// Store the key pair in the db
	if err := kc.storeKeyPair(privInBytes, pubInBytes, mnemonic); err != nil {
		return err
	}
	kc.privKey = &priv

	return nil
}

// DeleteKeypair removes the key pair from the keyring and the local store and
// clears the in-memory cache.
func (kc *keychain) DeleteKeypair() error {
	ring, err := kc.getKeyRing()
	if err != nil {
		return err
	}

	// Note: currently ignoring error on keychain removal because it's failing randomly.
	// Use GenerateKeyPair with override option instead.
err = ring.Remove(PrivateKeyStoreKey) err = kc.st.Remove([]byte(PublicKeyStoreKey)) if err != nil { return err } kc.privKey = nil return nil } func (kc *keychain) generateKeyPair(seed []byte) ([]byte, []byte, error) { if seed != nil { priv := ed25519.NewKeyFromSeed(seed) publicKey := priv.Public() pub, ok := publicKey.(ed25519.PublicKey) if !ok { return nil, nil, errors.New("Error while generating key pair from seed") } return pub, priv, nil } // Compute the key from a random seed pub, priv, err := ed25519.GenerateKey(nil) return pub, priv, err } func (kc *keychain) generateAndStoreKeyPair(seed []byte, mnemonic string) ([]byte, []byte, error) { // Compute the key from a random seed pub, priv, err := kc.generateKeyPair(seed) if err != nil { return nil, nil, err } // Store the key pair in the db if err := kc.storeKeyPair(priv, pub, mnemonic); err != nil { return nil, nil, err } privkey, err := crypto.UnmarshalEd25519PrivateKey(priv) if err != nil { log.Warn("Unable to cache priv key") } kc.privKey = &privkey return pub, priv, nil } // Signs a message using the stored private key. // Returns an error if the private key cannot be found. 
func (kc *keychain) Sign(message []byte) ([]byte, error) {
	if priv, _, err := kc.retrieveKeyPair(); err != nil {
		return nil, err
	} else {
		signedBytes := ed25519.Sign(priv, message)
		return signedBytes, nil
	}
}

// getKeyRing returns the injected keyring if one was provided (e.g. in tests),
// otherwise opens the platform keyring (macOS Keychain, KWallet, Windows
// Credential Manager, Secret Service, pass, or an encrypted file fallback).
func (kc *keychain) getKeyRing() (ri.Keyring, error) {
	if kc.ring != nil {
		return kc.ring, nil
	}

	ucd, err := os.UserConfigDir()
	if err != nil {
		return nil, err
	}

	return keyring.Open(keyring.Config{
		ServiceName: "space",

		// MacOS keychain
		KeychainTrustApplication:       true,
		KeychainAccessibleWhenUnlocked: true,

		// KDE Wallet
		KWalletAppID:  "space",
		KWalletFolder: "space",

		// Windows
		WinCredPrefix: "space",

		// freedesktop.org's Secret Service
		LibSecretCollectionName: "space",

		// Pass (https://www.passwordstore.org/)
		PassPrefix: "space",
		PassDir:    kc.fileDir + "/kcpw",

		// Fallback encrypted file
		FileDir: path.Join(ucd, "space", "keyring"),
	})
}

// storeKeyPair saves the private key (hex) and mnemonic as a single keyring
// entry, and the public key in the local store.
func (kc *keychain) storeKeyPair(privKey []byte, pubKey []byte, mnemonic string) error {
	ring, err := kc.getKeyRing()
	if err != nil {
		return err
	}

	privAsHex := hex.EncodeToString(privKey)
	privWithMnemonic := privAsHex + privKeyMnemonicSeparator + mnemonic

	// Store private key together with mnemonic
	// Priv key is stored as 0x1234...890___some mnemonic
	// The idea behind storing them together is that we avoid asking for keychain access twice
	if err := ring.Set(keyring.Item{
		Key:   PrivateKeyStoreKey,
		Data:  []byte(privWithMnemonic),
		Label: "Space App",
	}); err != nil {
		return err
	}

	// Store pub key outside of the key ring for quick access
	if err := kc.st.Set([]byte(PublicKeyStoreKey), pubKey); err != nil {
		return err
	}

	return nil
}

// retrieveKeyPair reads the combined "privhex___mnemonic" keyring entry and
// returns the raw private key bytes plus the mnemonic.
func (kc *keychain) retrieveKeyPair() (privKey []byte, mnemonic string, err error) {
	ring, err := kc.getKeyRing()
	if err != nil {
		return nil, "", err
	}

	privKeyItem, err := ring.Get(PrivateKeyStoreKey)
	if err != nil {
		return nil, "", err
	}

	// Priv key is stored as 0x1234...890___some mnemonic
	// Here we split it to return priv key and mnemonic separately
	privKeyAsStr := string(privKeyItem.Data)
privKeyParts := strings.Split(privKeyAsStr, privKeyMnemonicSeparator) mnemonic = privKeyParts[1] privKey, err = hex.DecodeString(privKeyParts[0]) if err != nil { return nil, "", err } return privKey, mnemonic, nil } func (kc *keychain) GetManagedThreadKey(threadKeyName string) (thread.Key, error) { // Check if there's a key stored before continuing _, err := kc.GetStoredPublicKey() if err != nil { return thread.Key{}, err } size := 32 priv, _, err := kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return thread.Key{}, err } privBytes, err := priv.Raw() if err != nil { return thread.Key{}, err } num := pbkdf2.Key(privBytes, []byte("threadKey"+threadKeyName), 256, size, sha512.New) if err != nil { return thread.Key{}, err } truncated := num[:sym.KeyBytes*2] managedKey, err := thread.KeyFromBytes(truncated) if err != nil { return thread.Key{}, err } return managedKey, nil } ================================================ FILE: core/keychain/keyring/keyring.go ================================================ package keyring import "github.com/99designs/keyring" type Keyring interface { Set(keyring.Item) error Get(string) (keyring.Item, error) Remove(string) error GetMetadata(string) (keyring.Metadata, error) } ================================================ FILE: core/keychain/mnemonic.go ================================================ package keychain import ( "crypto/sha512" "errors" "github.com/tyler-smith/go-bip39" "golang.org/x/crypto/pbkdf2" ) type generateKeyFromMnemonicOpts struct { override bool mnemonic string password string } var defaultMnemonicOpts = generateKeyFromMnemonicOpts{ override: false, mnemonic: "", password: "", } type GenerateKeyFromMnemonicOpts func(o *generateKeyFromMnemonicOpts) func WithMnemonic(mnemonic string) GenerateKeyFromMnemonicOpts { return func(o *generateKeyFromMnemonicOpts) { if mnemonic != "" { o.mnemonic = mnemonic } } } func WithPassword(password string) GenerateKeyFromMnemonicOpts { return func(o 
*generateKeyFromMnemonicOpts) { if password != "" { o.password = password } } } func WithOverride() GenerateKeyFromMnemonicOpts { return func(o *generateKeyFromMnemonicOpts) { o.override = true } } // Generates a public/private key pair using ed25519 algorithm. // It stores it in the local db and returns the mnemonic. // If Mnemonic is a blank string, it generates a random one. // If there's already a key pair stored, it overrides it if override is set to true. Returns an error otherwise func (kc *keychain) GenerateKeyFromMnemonic(opts ...GenerateKeyFromMnemonicOpts) (string, error) { o := defaultMnemonicOpts for _, opt := range opts { opt(&o) } if val, err := kc.GetStoredPublicKey(); val != nil && o.override == false && err != ErrKeyPairNotFound { newErr := errors.New("Error while executing GenerateKeyFromMnemonic. Key pair already exists.") return "", newErr } mnemonic := o.mnemonic if mnemonic == "" { entropy, err := bip39.NewEntropy(128) if err != nil { return "", err } mnemonic, err = bip39.NewMnemonic(entropy) if err != nil { return "", err } } seed, err := bip39.NewSeedWithErrorChecking(mnemonic, o.password) if err != nil { return "", err } // The seed returned by bip39 is fixed to size = 64 bytes. // However the seed in ed25519 needs to have size 32. // So to fix this we derive a key again based on the previous one, but with the correct size. 
compressedSeed := pbkdf2.Key(seed, []byte("iter2"+o.password), 512, 32, sha512.New)

	_, _, err = kc.generateAndStoreKeyPair(compressedSeed, mnemonic)
	if err != nil {
		return "", err
	}
	return mnemonic, nil
}

================================================ FILE: core/keychain/test/keychain_test.go ================================================

package keychain_test

import (
	"encoding/hex"
	"errors"
	"strings"
	"testing"

	"github.com/99designs/keyring"
	"github.com/FleekHQ/space-daemon/core/keychain"
	"github.com/FleekHQ/space-daemon/core/permissions"
	"github.com/FleekHQ/space-daemon/mocks"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/tyler-smith/go-bip39"
)

var (
	mockStore   *mocks.Store
	mockKeyRing *mocks.Keyring
)

// initTestKeychain builds a keychain wired to fresh store/keyring mocks,
// re-binding the package-level mock variables.
func initTestKeychain(t *testing.T) keychain.Keychain {
	mockStore = new(mocks.Store)
	mockStore.On("IsOpen").Return(true)
	mockKeyRing = new(mocks.Keyring)

	kc := keychain.New(keychain.WithStore(mockStore), keychain.WithKeyring(mockKeyRing))
	return kc
}

func TestKeychain_GenerateAndRestore(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)

	pub, priv, _ := kc.GenerateKeyPairWithForce()

	privKeyItem := keyring.Item{
		Key:   keychain.PrivateKeyStoreKey,
		Data:  []byte(hex.EncodeToString(priv) + "___"),
		Label: "Space App",
	}

	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(pub, nil)
	mockKeyRing.On("Get", keychain.PrivateKeyStoreKey).Return(privKeyItem, nil)
	mockKeyRing.On("GetMetadata", mock.Anything).Return(keyring.Metadata{}, nil)

	libp2pPriv, _, _ := kc.GetStoredKeyPairInLibP2PFormat()

	// Reset mock store for assertions
	kc = initTestKeychain(t)
	mockStore.AssertNotCalled(t, "Set", []byte(keychain.PublicKeyStoreKey), pub)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)

	kc.ImportExistingKeyPair(libp2pPriv, "")

	mockStore.AssertCalled(t, "Set",
[]byte(keychain.PublicKeyStoreKey), pub)
	mockKeyRing.AssertCalled(t, "Set", privKeyItem)
}

func TestKeychain_GenerateMnemonicKey(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(nil, nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	mockKeyRing.On("GetMetadata", mock.Anything).Return(keyring.Metadata{}, nil)

	val, err := kc.GenerateKeyFromMnemonic()
	words := strings.Split(val, " ")

	assert.Nil(t, err)
	assert.NotNil(t, val)
	// A 128-bit entropy mnemonic is 12 words.
	assert.Equal(t, 12, len(words))
	mockStore.AssertCalled(t, "Set", []byte(keychain.PublicKeyStoreKey), mock.Anything)
	mockKeyRing.AssertCalled(t, "Set", mock.Anything)
}

func TestKeychain_RestoreMnemonicKey(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(nil, nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	mockKeyRing.On("GetMetadata", mock.Anything).Return(keyring.Metadata{}, nil)

	mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle"
	pubFromMnemonic, _ := hex.DecodeString("bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907")
	privAsHex := "6f0938b7f2beb6f1715aaad71f578a94c51cc8ebd2cb221063e28c8a2efcabb6bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907"

	val, err := kc.GenerateKeyFromMnemonic(keychain.WithMnemonic(mnemonic))

	assert.Nil(t, err)
	assert.NotNil(t, val)
	assert.Equal(t, mnemonic, val)
	mockStore.AssertCalled(t, "Set", []byte(keychain.PublicKeyStoreKey), pubFromMnemonic)
	mockKeyRing.AssertCalled(t, "Set", keyring.Item{
		Key:   keychain.PrivateKeyStoreKey,
		Data:  []byte(privAsHex + "___" + mnemonic),
		Label: "Space App",
	})
}

func TestKeychain_RestoreMnemonicKeyOnOverrideErr(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	mockKeyRing.On("GetMetadata", mock.Anything).Return(keyring.Metadata{}, nil)

	mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle"
	pubFromMnemonic, _ := hex.DecodeString("a29d5030556f55f32d82b71618e97bfe976ebebc713592122b124881b4da6191")
	// Simulate an already-stored public key so the override check trips.
	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(pubFromMnemonic, nil)

	_, err := kc.GenerateKeyFromMnemonic(keychain.WithMnemonic(mnemonic))

	assert.NotNil(t, err)
	assert.Equal(t, errors.New("Error while executing GenerateKeyFromMnemonic. Key pair already exists."), err)
}

func TestKeychain_RestoreMnemonicKeyExistsButNotInKeyring(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	// Keyring reports the key as missing, so generation should proceed.
	mockKeyRing.On("GetMetadata", mock.Anything).Return(keyring.Metadata{}, keyring.ErrKeyNotFound)
	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(nil, nil)

	mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle"
	pubFromMnemonic, _ := hex.DecodeString("bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907")
	privAsHex := "6f0938b7f2beb6f1715aaad71f578a94c51cc8ebd2cb221063e28c8a2efcabb6bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907"

	val, err := kc.GenerateKeyFromMnemonic(keychain.WithMnemonic(mnemonic))

	assert.Nil(t, err)
	assert.NotNil(t, val)
	assert.Equal(t, mnemonic, val)
	mockStore.AssertCalled(t, "Set", []byte(keychain.PublicKeyStoreKey), pubFromMnemonic)
	mockKeyRing.AssertCalled(t, "Set", keyring.Item{
		Key:   keychain.PrivateKeyStoreKey,
		Data:  []byte(privAsHex + "___" + mnemonic),
		Label: "Space App",
	})
}

func TestKeychain_RestoreMnemonicKeyMnemonicErr(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(nil, nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	mockKeyRing.On("GetMetadata",
mock.Anything).Return(keyring.Metadata{}, nil)

	// 11 words — deliberately invalid (valid BIP-39 mnemonics here have 12).
	mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd"

	_, err := kc.GenerateKeyFromMnemonic(keychain.WithMnemonic(mnemonic))

	assert.NotNil(t, err)
	assert.Equal(t, bip39.ErrInvalidMnemonic, err)
}

func TestKeychain_RestoreMnemonicKeyOnOverrideSuccess(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	mockKeyRing.On("GetMetadata", mock.Anything).Return(keyring.Metadata{}, nil)

	mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle"
	pubFromMnemonic, _ := hex.DecodeString("bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907")
	privAsHex := "6f0938b7f2beb6f1715aaad71f578a94c51cc8ebd2cb221063e28c8a2efcabb6bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907"
	// A key already exists, but WithOverride allows replacing it.
	mockStore.On("Get", []byte(keychain.PublicKeyStoreKey)).Return(pubFromMnemonic, nil)

	val, err := kc.GenerateKeyFromMnemonic(keychain.WithMnemonic(mnemonic), keychain.WithOverride())

	assert.Nil(t, err)
	assert.NotNil(t, val)
	assert.Equal(t, mnemonic, val)
	mockStore.AssertCalled(t, "Set", []byte(keychain.PublicKeyStoreKey), pubFromMnemonic)
	mockKeyRing.AssertCalled(t, "Set", keyring.Item{
		Key:   keychain.PrivateKeyStoreKey,
		Data:  []byte(privAsHex + "___" + mnemonic),
		Label: "Space App",
	})
}

func TestKeychain_GetStoredMnemonic(t *testing.T) {
	kc := initTestKeychain(t)

	mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle"
	privAsHex := "6f0938b7f2beb6f1715aaad71f578a94c51cc8ebd2cb221063e28c8a2efcabb6bbfa792cbf0453dde84947e5733c734b1bc11592190517d579ab589ae8107907"
	mockKeyRing.On("Get", keychain.PrivateKeyStoreKey).Return(keyring.Item{
		Key:   keychain.PrivateKeyStoreKey,
		Data:  []byte(privAsHex + "___" + mnemonic),
		Label: "Space App",
	}, nil)

	mnemonic2, err := kc.GetStoredMnemonic()

	assert.Nil(t, err)
	assert.Equal(t, mnemonic, mnemonic2)
}

func TestKeychain_AppToken_StoreMaster(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Get", []byte(keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey)).Return(nil, nil)
	mockKeyRing.On("Get", keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey).Return(keyring.Item{}, keyring.ErrKeyNotFound)
	mockKeyRing.On("Set", mock.Anything).Return(nil)
	mockStore.On("Set", mock.Anything, mock.Anything).Return(nil)

	tok, err := permissions.GenerateRandomToken(true, []string{})
	assert.NoError(t, err)

	err = kc.StoreAppToken(tok)
	assert.NoError(t, err)

	marshalled, err := permissions.MarshalToken(tok)
	assert.NoError(t, err)

	// Master tokens are stored twice: under their own key and under the
	// fixed master entry, plus indexed in the store.
	mockKeyRing.AssertCalled(t, "Set", keyring.Item{
		Key:   keychain.AppTokenStoreKey + "_" + tok.Key,
		Data:  marshalled,
		Label: "Space App - App Token",
	})
	mockKeyRing.AssertCalled(t, "Set", keyring.Item{
		Key:   keychain.AppTokenStoreKey + "_" + keychain.MasterAppTokenStoreKey,
		Data:  marshalled,
		Label: "Space App - Master App Token",
	})
	mockStore.AssertCalled(t, "Set", []byte(keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey), []byte(tok.Key))
}

func TestKeychain_AppToken_StoreNonMaster(t *testing.T) {
	kc := initTestKeychain(t)
	mockStore.On("Get", []byte(keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey)).Return(nil, nil)
	mockKeyRing.On("Get", keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey).Return(keyring.Item{}, keyring.ErrKeyNotFound)
	mockKeyRing.On("Set", mock.Anything).Once().Return(nil)

	tok, err := permissions.GenerateRandomToken(false, []string{})
	assert.NoError(t, err)

	err = kc.StoreAppToken(tok)
	assert.NoError(t, err)

	marshalled, err := permissions.MarshalToken(tok)
	assert.NoError(t, err)

	mockKeyRing.AssertCalled(t, "Set", keyring.Item{
		Key:   keychain.AppTokenStoreKey + "_" + tok.Key,
		Data:  marshalled,
		Label: "Space App - App Token",
	})
	// Non-master tokens must not touch the master entry.
	mockKeyRing.AssertNotCalled(t, "Set", keyring.Item{
		Key:   keychain.AppTokenStoreKey + "_" + keychain.MasterAppTokenStoreKey,
		Data:  marshalled,
		Label: "Space App - Master App Token",
	})
}

func TestKeychain_AppToken_StoreMasterOverride1(t *testing.T) {
	kc := initTestKeychain(t)

	tok, err := permissions.GenerateRandomToken(true, []string{})
	assert.NoError(t, err)

	// The store already has a master token indexed: storing must fail.
	mockStore.On("Get", []byte(keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey)).Return([]byte(tok.Key), nil)

	err = kc.StoreAppToken(tok)
	assert.Error(t, err)
}

func TestKeychain_AppToken_StoreMasterOverride2(t *testing.T) {
	kc := initTestKeychain(t)

	tok, err := permissions.GenerateRandomToken(true, []string{})
	assert.NoError(t, err)

	// The keyring already has a master entry even though the store does not
	// (e.g. after logout/login): storing must still fail.
	mockStore.On("Get", []byte(keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey)).Return(nil, nil)
	mockKeyRing.On("Get", keychain.AppTokenStoreKey+"_"+keychain.MasterAppTokenStoreKey).Return(keyring.Item{}, nil)

	err = kc.StoreAppToken(tok)
	assert.Error(t, err)
}

func TestKeychain_AppToken_Get(t *testing.T) {
	kc := initTestKeychain(t)

	tok, err := permissions.GenerateRandomToken(false, []string{})
	assert.NoError(t, err)

	marshalled, err := permissions.MarshalToken(tok)
	assert.NoError(t, err)

	mockKeyRing.On("Get", keychain.AppTokenStoreKey+"_"+tok.Key).Return(keyring.Item{
		Key:   keychain.AppTokenStoreKey + "_" + tok.Key,
		Data:  marshalled,
		Label: "Space App - App Token",
	}, nil)

	tok2, err := kc.GetAppToken(tok.Key)
	assert.NoError(t, err)
	assert.Equal(t, tok, tok2)
}

================================================ FILE: core/libfuse/block_size.go ================================================

package libfuse

// fuseBlockSize is the block size used for calculating number of blocks. This
// is to make du/df work, and does not reflect in any way the internal block
// size (which is variable). 512 is chosen because FUSE seems to assume this
// block size all the time, despite BlockSize is provided in Statfs or Attr
// response or not.
// Bazil FUSE's documentation verifies this:
// https://github.com/bazil/fuse/blob/371fbbdaa8987b715bdd21d6adc4c9b20155f748/fuse.go#L1320
const fuseBlockSize = 512

// getNumBlocksFromSize returns the number of fuseBlockSize-byte blocks needed
// to hold size bytes, i.e. ceil(size / fuseBlockSize).
func getNumBlocksFromSize(size uint64) uint64 {
	if size == 0 {
		return 0
	}
	return (size-1)/fuseBlockSize + 1
}

================================================ FILE: core/libfuse/directory.go ================================================

//+build !windows

package libfuse

import (
	"context"
	"errors"
	"fmt"
	"path"
	"strings"
	"syscall"

	"github.com/FleekHQ/space-daemon/core/spacefs"
	"github.com/FleekHQ/space-daemon/log"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// Compile-time checks that VFSDir implements the bazil fuse interfaces it needs.
var (
	_ fs.Node            = (*VFSDir)(nil)
	_ fs.NodeAccesser    = (*VFSDir)(nil)
	_                    = fs.NodeRequestLookuper(&VFSDir{})
	_                    = fs.HandleReadDirAller(&VFSDir{})
	_                    = fs.NodeCreater(&VFSDir{})
	_                    = fs.NodeMkdirer(&VFSDir{})
	_                    = fs.NodeRenamer(&VFSDir{})
	_                    = fs.NodeRemover(&VFSDir{})
)

// VFSDir represents a directory in the Virtual file system
type VFSDir struct {
	vfs    *VFS // pointer to the parent file system
	dirOps spacefs.DirOps
}

func NewVFSDir(vfs *VFS, dirOps spacefs.DirOps) *VFSDir {
	return &VFSDir{
		vfs:    vfs,
		dirOps: dirOps,
	}
}

// Attr returns fuse.Attr for the directory
func (dir *VFSDir) Attr(ctx context.Context, attr *fuse.Attr) error {
	dirAttribute, err := dir.dirOps.Attribute(ctx)
	if err != nil {
		return err
	}

	attr.Mode = dirAttribute.Mode()
	attr.Uid = dirAttribute.Uid()
	attr.Gid = dirAttribute.Gid()
	return nil
}

// ReadDirAll reads all the content of a directory
// In this mirror drive case, we just return items in the mirror path
func (dir *VFSDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	dirList, err := dir.dirOps.ReadDir(ctx)
	if err != nil {
		return nil, err
	}

	var res []fuse.Dirent
	for _, dirEntry := range dirList {
		entryAttribute, err := dirEntry.Attribute(ctx)
		if err != nil {
			return nil, err
		}
		entry := fuse.Dirent{
			Name: entryAttribute.Name(),
		}
		if entryAttribute.IsDir() {
			entry.Type = fuse.DT_Dir
		} else {
			entry.Type = fuse.DT_File
		}
		res =
append(res, entry)
	}
	return res, nil
}

// Lookup finds entry Node within a directory
// Seems to be called when not enough information is gotten from the ReadDirAll
func (dir *VFSDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
	//log.Debug("VFSDir.Lookup", "name:"+req.Name)
	// NOTE(review): assumes dirOps.Path() ends with a separator — confirm, since
	// Create/Mkdir below build paths with an explicit '/' instead.
	path := dir.dirOps.Path() + req.Name
	entry, err := dir.vfs.fsOps.LookupPath(ctx, path)
	if err != nil {
		return nil, err
	}

	entryAttribute, err := entry.Attribute(ctx)
	if err != nil {
		return nil, err
	}

	if entryAttribute.IsDir() {
		dirOps, ok := entry.(spacefs.DirOps)
		if !ok {
			// TODO: Return a better syscall error
			return nil, syscall.ENOENT
		}
		return NewVFSDir(dir.vfs, dirOps), nil
	}

	fileOps, ok := entry.(spacefs.FileOps)
	if !ok {
		// TODO: Return a better syscall error
		return nil, syscall.ENOENT
	}
	return &VFSFile{
		vfs:     dir.vfs,
		fileOps: fileOps,
	}, nil
}

// Create is invoked when a new directory is to be created
// It implements the fs.NodeCreator interface
func (dir *VFSDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
	path := dir.dirOps.Path()
	log.Printf("Creating a file/directory: %+v in path: %s", *req, path)
	dirEntry, err := dir.vfs.fsOps.CreateEntry(ctx, spacefs.CreateDirEntry{
		Path: fmt.Sprintf("%s%c%s", strings.TrimSuffix(path, "/"), '/', req.Name),
		Mode: req.Mode,
	})
	if err != nil {
		return nil, nil, err
	}

	if dirOps, ok := dirEntry.(spacefs.DirOps); ok {
		return NewVFSDir(dir.vfs, dirOps), nil, nil
	}

	if fileOps, ok := dirEntry.(spacefs.FileOps); ok {
		vfsFile := NewVFSFile(dir.vfs, fileOps)
		handler, err := NewVFSFileHandler(ctx, vfsFile)
		if err != nil {
			return nil, nil, err
		}
		return vfsFile, handler, nil
	}

	// Entry is neither a directory nor a file: refuse.
	return nil, nil, syscall.EACCES
}

// Mkdir implements the fs.NodeMkdirer interface
func (dir *VFSDir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
	path := dir.dirOps.Path()
	log.Debug(fmt.Sprintf("Mkdir a file/directory: %+v with name %s, in path: %s", *req, req.Name, path))
	dirEntry, err := dir.vfs.fsOps.CreateEntry(ctx, spacefs.CreateDirEntry{
		Path: fmt.Sprintf("%s%c%s", strings.TrimSuffix(path, "/"), '/', req.Name),
		Mode: req.Mode,
	})
	if err != nil {
		return nil, err
	}

	if dirOps, ok := dirEntry.(spacefs.DirOps); ok {
		return NewVFSDir(dir.vfs, dirOps), nil
	}

	log.Error("should not happen", errors.New("created directory is not a directory"))
	return nil, fuse.ENOTSUP
}

// Rename implements the fs.NodeRenamer
// Rename is only implemented for VFSDir and not VFSFile, because we currently don't support renaming files
// and rename on fsOps should only work empty folders.
func (dir *VFSDir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error {
	parentPath := dir.dirOps.Path()
	log.Debug("Renaming node", "oldName:"+req.OldName, "newName:"+req.NewName, "parentPath:"+parentPath)
	return dir.vfs.fsOps.RenameEntry(ctx, spacefs.RenameDirEntry{
		OldPath: path.Join(parentPath, req.OldName),
		NewPath: path.Join(parentPath, req.NewName),
	})
}

// Remove implements the fs.NodeRemover
func (dir *VFSDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
	parentPath := dir.dirOps.Path()
	return dir.vfs.fsOps.DeleteEntry(ctx, path.Join(parentPath, req.Name))
}

// Access always grants access; permission enforcement is not done here.
func (dir *VFSDir) Access(ctx context.Context, req *fuse.AccessRequest) error {
	return nil
}

================================================ FILE: core/libfuse/files.go ================================================

//+build !windows

package libfuse

import (
	"context"
	"fmt"
	"os"
	"syscall"

	"github.com/FleekHQ/space-daemon/core/spacefs"
	"github.com/FleekHQ/space-daemon/log"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// Compile-time checks that VFSFile and VFSFileHandler implement the bazil
// fuse interfaces they need.
var (
	_ fs.Node          = (*VFSFile)(nil)
	_                  = fs.NodeAccesser(&VFSFile{})
	_                  = fs.NodeOpener(&VFSFile{})
	_                  = fs.NodeSetattrer(&VFSFile{})
	_                  = fs.HandleReader(&VFSFileHandler{})
	_                  = fs.HandleWriter(&VFSFileHandler{})
	_                  = fs.HandleReleaser(&VFSFileHandler{})
)

// VFSFile represents a file in the Virtual file system
type VFSFile struct {
	vfs *VFS // 
pointer to the parent file system fileOps spacefs.FileOps } func NewVFSFile(vfs *VFS, fileOps spacefs.FileOps) *VFSFile { return &VFSFile{ vfs: vfs, fileOps: fileOps, } } // Attr returns fuse.Attr for the directory or file func (vfile *VFSFile) Attr(ctx context.Context, attr *fuse.Attr) error { path := vfile.fileOps.Path() log.Printf("Getting File Attr %s", path) fileAttribute, err := vfile.fileOps.Attribute(ctx) if err != nil { log.Printf("ERROR Getting Open File Attr %s", err.Error()) return err } attr.Size = fileAttribute.Size() attr.Blocks = getNumBlocksFromSize(attr.Size) attr.Mode = fileAttribute.Mode() attr.Mtime = fileAttribute.ModTime() attr.Ctime = fileAttribute.Ctime() attr.Crtime = fileAttribute.Ctime() attr.Uid = fileAttribute.Uid() attr.Gid = fileAttribute.Gid() log.Printf("Successful File Attr %s : %+v", path, attr) return nil } // Access implements the fs.NodeAccesser interface for File. This is necessary // for macOS to correctly identify plaintext files as plaintext. If not // implemented, bazil-fuse returns a nil error for every call, so when macOS // checks for executable bit using Access (instead of Attr!), it gets a // success, which makes it think the file is executable, yielding a "Unix // executable" UTI. func (vfile *VFSFile) Access(ctx context.Context, r *fuse.AccessRequest) (err error) { if int(r.Uid) != os.Getuid() && // Finder likes to use UID 0 for some operations. osxfuse already allows // ACCESS and GETXATTR requests from root to go through. This allows root // in ACCESS handler. int(r.Uid) != 0 { // short path: not accessible by anybody other than root or the current user return syscall.EPERM } if r.Mask&03 == 0 { // Since we only check for w and x bits, we can return nil early here. 
return nil } // check is executable mask enable if r.Mask&01 != 0 { _, err := vfile.fileOps.Attribute(ctx) if err != nil { return err } // for now always return permission error for executable calls // we are not supporting executable at the moment return syscall.EPERM } return nil } // Setattr implements the set attribute of fs.NodeSetattrer func (vfile *VFSFile) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { path := vfile.fileOps.Path() log.Debug("Setattr called", "path:"+path, "req.Valid:"+req.Valid.String(), fmt.Sprintf("req:%v", req)) valid := req.Valid if valid.Size() { err := vfile.fileOps.Truncate(ctx, req.Size) if err != nil { return err } valid ^= fuse.SetattrSize } return nil } // Open create a handle responsible for reading the file and also closing the file after reading func (vfile *VFSFile) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { log.Printf("Opening content of file %s", vfile.fileOps.Path()) return NewVFSFileHandler(ctx, vfile) } // VFSFileHandler manages readings and closing access to a VFSFile type VFSFileHandler struct { path string readWriteOps spacefs.FileHandler } func NewVFSFileHandler(ctx context.Context, vfile *VFSFile) (*VFSFileHandler, error) { readWriteOps, err := vfile.fileOps.Open(ctx, spacefs.ReadMode) if err != nil { return nil, err } return &VFSFileHandler{ path: vfile.fileOps.Path(), readWriteOps: readWriteOps, }, nil } // Read reads the content of the reader // Ideally, decryption of the content of the file should be happening here func (vfh *VFSFileHandler) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { log.Printf("Reading content of file %s, and size: %d", vfh.path, req.Size) buf := make([]byte, req.Size) n, err := vfh.readWriteOps.Read(ctx, buf, req.Offset) if err != nil { log.Printf("Reading error: %s", err.Error()) return err } resp.Data = buf[:n] return nil } // Write writes content from 
request into the underlying file. Keeping track of offset and all // Ideally, encryption of the content of the file should be happening here func (vfh *VFSFileHandler) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { log.Printf("Writing content to file %s", vfh.path) n, err := vfh.readWriteOps.Write(ctx, req.Data, req.Offset) if err != nil { log.Printf("Writing error: %s", err.Error()) return err } resp.Size = n return nil } // Release closes the reader on this file handler func (vfh *VFSFileHandler) Release(ctx context.Context, req *fuse.ReleaseRequest) error { return vfh.readWriteOps.Close(ctx) } ================================================ FILE: core/libfuse/vfs.go ================================================ //+build !windows package libfuse import ( "context" "errors" "github.com/FleekHQ/space-daemon/log" "bazil.org/fuse" "bazil.org/fuse/fs" "github.com/FleekHQ/space-daemon/core/spacefs" ) var _ fs.FS = (*VFS)(nil) var ( errorNotMounted = errors.New("VFS not mounted yet") ) // VFS represent Virtual System type VFS struct { ctx context.Context fsOps spacefs.FSOps mountConnection *fuse.Conn mountPath string } // NewVFileSystem creates a new Virtual FileSystem object func NewVFileSystem(ctx context.Context, fsOps spacefs.FSOps) *VFS { return &VFS{ // storing ctx here to be used in the Root request // as FUSE doesn't provide one there ctx: ctx, fsOps: fsOps, mountConnection: nil, } } // Mount mounts the file system, if it is not already mounted func (vfs *VFS) Mount(mountPath, fsName string) error { c, err := fuse.Mount( mountPath, fuse.FSName(fsName), fuse.VolumeName(fsName), fuse.NoAppleDouble(), //fuse.ExclCreate(), //fuse.NoAppleXattr(), fuse.AsyncRead(), fuse.LocalVolume(), ) if err != nil { return err } vfs.mountPath = mountPath vfs.mountConnection = c return nil } // IsMounted returns true if the vfs still has a valid connection to the mounted path func (vfs *VFS) IsMounted() bool { return vfs.mountConnection != 
nil } // Serve start the FUSE server that handles requests from the mounted connection // This is a blocking operation func (vfs *VFS) Serve() error { if !vfs.IsMounted() { return errorNotMounted } if err := fs.Serve(vfs.mountConnection, vfs); err != nil { return err } // check if the mount process has an error to report <-vfs.mountConnection.Ready if err := vfs.mountConnection.MountError; err != nil { return err } // reset mount connection vfs.mountConnection = nil return nil } // UnMount closes connection func (vfs *VFS) Unmount() error { if !vfs.IsMounted() { return errorNotMounted } err := vfs.mountConnection.Close() if err != nil { return err } err = fuse.Unmount(vfs.mountPath) if err != nil { return err } vfs.mountConnection = nil return err } // Root complies with the Fuse Interface that returns the Root Node of our file system func (vfs *VFS) Root() (fs.Node, error) { rootDirEntry, err := vfs.fsOps.Root(vfs.ctx) if err != nil { return nil, err } rootDir, ok := rootDirEntry.(spacefs.DirOps) if !ok { err = errors.New("root directory is not a spacefs.DirOps") log.Error("VFS.Root() error", err) return nil, err } node := &VFSDir{ vfs: vfs, dirOps: rootDir, } return node, nil } var _ fs.FSStatfser = (*VFS)(nil) // Statfs implements the fs.FSStatfser interface and reports block and storage information stats about VFS func (vfs *VFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error { //log.Debug("Request Statfs") resp.Bsize = fuseBlockSize resp.Namelen = ^uint32(0) resp.Frsize = fuseBlockSize // Simulate a large amount of free space storageSize := uint64(1 << 50) // 2^50 bytes approximately 1PiB totalAvailableBlocks := getNumBlocksFromSize(storageSize) usedBlockSize := getNumBlocksFromSize(0) resp.Blocks = totalAvailableBlocks resp.Bavail = totalAvailableBlocks - usedBlockSize resp.Bfree = totalAvailableBlocks - usedBlockSize return nil } ================================================ FILE: core/permissions/app_token.go 
================================================
package permissions

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"errors"
	"strings"
)

// NOTE(review): declared but not referenced anywhere in this file.
var invalidAppTokenErr = errors.New("app token is invalid")

// Lengths (in random bytes, before base64 encoding) of the token components.
const tokenKeyLength = 20
const tokenSecretLength = 30

// AppToken grants an application access to the daemon, optionally scoped
// to a set of permissions. A master token bypasses permission checks.
type AppToken struct {
	Key         string   `json:"key"`
	Secret      string   `json:"secret"`
	IsMaster    bool     `json:"isMaster"`
	Permissions []string `json:"permissions"`
}

// UnmarshalToken decodes a JSON-marshalled AppToken.
func UnmarshalToken(marshalledToken []byte) (*AppToken, error) {
	var result AppToken
	err := json.Unmarshal(marshalledToken, &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// MarshalToken encodes an AppToken as JSON.
func MarshalToken(tok *AppToken) ([]byte, error) {
	jsonData, err := json.Marshal(tok)
	if err != nil {
		return nil, err
	}
	return jsonData, nil
}

// GenerateRandomToken creates a new AppToken with a cryptographically random
// key and secret, base64url-encoded without padding (so neither contains '.').
func GenerateRandomToken(isMaster bool, permissions []string) (*AppToken, error) {
	k := make([]byte, tokenKeyLength)
	_, err := rand.Read(k)
	if err != nil {
		return nil, err
	}
	s := make([]byte, tokenSecretLength)
	_, err = rand.Read(s)
	if err != nil {
		return nil, err
	}
	return &AppToken{
		Key:         base64.RawURLEncoding.EncodeToString(k),
		Secret:      base64.RawURLEncoding.EncodeToString(s),
		IsMaster:    isMaster,
		Permissions: permissions,
	}, nil
}

// GetAccessToken returns the serialized "key.secret" form of the token.
func (a *AppToken) GetAccessToken() string {
	return a.Key + "."
+ a.Secret } func GetKeyAndSecretFromAccessToken(accessToken string) (key string, secret string, err error) { tp := strings.Split(accessToken, ".") if len(tp) < 2 { return "", "", errors.New("invalid token format") } key = tp[0] secret = tp[1] return } ================================================ FILE: core/permissions/app_token_test.go ================================================ package permissions_test import ( "testing" "github.com/FleekHQ/space-daemon/core/permissions" "github.com/stretchr/testify/assert" ) func TestPermissions_AppToken_Generation(t *testing.T) { tok, err := permissions.GenerateRandomToken(true, []string{}) assert.NoError(t, err) marshalled, err := permissions.MarshalToken(tok) assert.NoError(t, err) unmarshalled, err := permissions.UnmarshalToken(marshalled) assert.NoError(t, err) assert.Equal(t, tok, unmarshalled) } func TestPermissions_AppToken_GenerationWithPerms(t *testing.T) { tok, err := permissions.GenerateRandomToken(false, []string{"OpenFile", "ListDirectories"}) assert.NoError(t, err) marshalled, err := permissions.MarshalToken(tok) assert.NoError(t, err) unmarshalled, err := permissions.UnmarshalToken(marshalled) assert.NoError(t, err) assert.Equal(t, tok, unmarshalled) } ================================================ FILE: core/search/bleve/analyzer.go ================================================ package bleve import ( "regexp" "github.com/blevesearch/bleve/analysis" "github.com/blevesearch/bleve/analysis/analyzer/standard" filterregex "github.com/blevesearch/bleve/analysis/char/regexp" "github.com/blevesearch/bleve/registry" ) const CustomerAnalyzerName = "space_search_analyzer" /// Customer Analyzer extends the standard analyzer by registering a regexp character filter func CustomAnalyzerConstructor(config map[string]interface{}, cache *registry.Cache) (*analysis.Analyzer, error) { rv, err := standard.AnalyzerConstructor(config, cache) if err != nil { return nil, err } // replace . 
with white space - helps to improve results on filenames pattern, err := regexp.Compile("\\.") if err != nil { return nil, err } replacement := []byte(" ") regexpCharFilter := filterregex.New(pattern, replacement) rv.CharFilters = append(rv.CharFilters, regexpCharFilter) return rv, nil } func init() { registry.RegisterAnalyzer(CustomerAnalyzerName, CustomAnalyzerConstructor) } ================================================ FILE: core/search/bleve/bleve.go ================================================ package bleve import ( "context" "crypto/sha256" "fmt" "os/user" "path/filepath" "github.com/blevesearch/bleve/mapping" "github.com/FleekHQ/space-daemon/log" "github.com/FleekHQ/space-daemon/core/util" "github.com/FleekHQ/space-daemon/core/search" "github.com/blevesearch/bleve" ) const DbFileName = "filesIndex.bleve" type bleveSearchOption struct { dbPath string } type Option func(o *bleveSearchOption) // bleveFilesSearchEngine is a files search engine that is backed by bleve type bleveFilesSearchEngine struct { opts bleveSearchOption idx bleve.Index } // Creates a new Bleve backed search engine for files and folders func NewSearchEngine(opts ...Option) *bleveFilesSearchEngine { usr, _ := user.Current() searchOptions := bleveSearchOption{ dbPath: filepath.Join(usr.HomeDir, ".fleek-space"), } for _, opt := range opts { opt(&searchOptions) } return &bleveFilesSearchEngine{ opts: searchOptions, } } func (b *bleveFilesSearchEngine) Start() error { if b.idx != nil { log.Warn("Trying to open already opened search index") return nil } path := filepath.Join(b.opts.dbPath, DbFileName) var ( idx bleve.Index err error ) if util.DirEntryExists(path) { log.Debug("Opening existing search index") idx, err = bleve.Open(path) } else { log.Debug("Creating and opening new search index") indexMapping, err := getSearchIndexMapping() if err != nil { return err } idx, err = bleve.New(path, indexMapping) } if err != nil { return err } b.idx = idx return nil } func getSearchIndexMapping() 
(*mapping.IndexMappingImpl, error) { indexMapping := bleve.NewIndexMapping() indexMapping.DefaultAnalyzer = CustomerAnalyzerName filesMapping := bleve.NewDocumentMapping() // index the following fields nameFm := bleve.NewTextFieldMapping() filesMapping.AddFieldMappingsAt("ItemName", nameFm) extFm := bleve.NewTextFieldMapping() filesMapping.AddFieldMappingsAt("ItemExtension", extFm) pathFm := bleve.NewTextFieldMapping() filesMapping.AddFieldMappingsAt("ItemPath", pathFm) // ignore indexing the following fields of IndexRecord idFm := bleve.NewTextFieldMapping() idFm.Index = false filesMapping.AddFieldMappingsAt("Id", idFm) bucketFm := bleve.NewTextFieldMapping() bucketFm.Index = false filesMapping.AddFieldMappingsAt("BucketSlug", bucketFm) dbIdFm := bleve.NewTextFieldMapping() dbIdFm.Index = false filesMapping.AddFieldMappingsAt("DbId", dbIdFm) itemTypeFm := bleve.NewTextFieldMapping() itemTypeFm.Index = false filesMapping.AddFieldMappingsAt("ItemType", itemTypeFm) indexMapping.AddDocumentMapping("files", filesMapping) indexMapping.DefaultType = "files" return indexMapping, nil } func (b *bleveFilesSearchEngine) InsertFileData( ctx context.Context, data *search.InsertIndexRecord, ) (*search.IndexRecord, error) { indexId := generateIndexId(data.ItemName, data.ItemPath, data.BucketSlug, data.DbId) record := search.IndexRecord{ Id: indexId, ItemName: data.ItemName, ItemExtension: data.ItemExtension, ItemPath: data.ItemPath, ItemType: data.ItemType, BucketSlug: data.BucketSlug, DbId: data.DbId, } if err := b.idx.Index(indexId, record); err != nil { return nil, err } return &record, nil } func (b *bleveFilesSearchEngine) DeleteFileData( ctx context.Context, data *search.DeleteIndexRecord, ) error { indexId := generateIndexId(data.ItemName, data.ItemPath, data.BucketSlug, data.DbId) return b.idx.Delete(indexId) } func (b *bleveFilesSearchEngine) QueryFileData( ctx context.Context, query string, limit int, ) ([]*search.IndexRecord, error) { matchQuery := 
bleve.NewMatchQuery(query) matchQuery.Fuzziness = 2 prefixQuery := bleve.NewPrefixQuery(query) infixRegexQuery := bleve.NewRegexpQuery(fmt.Sprintf(".*%s.*", query)) // TODO: think of escaping invalid regex in query searchQuery := bleve.NewDisjunctionQuery(matchQuery, prefixQuery, infixRegexQuery) searchRequest := bleve.NewSearchRequest(searchQuery) searchRequest.Size = limit searchRequest.Fields = []string{"*"} searchResults, err := b.idx.Search(searchRequest) if err != nil { return nil, err } records := make([]*search.IndexRecord, len(searchResults.Hits)) for i, hit := range searchResults.Hits { records[i] = &search.IndexRecord{ Id: hit.Fields["Id"].(string), ItemName: hit.Fields["ItemName"].(string), ItemExtension: hit.Fields["ItemExtension"].(string), ItemPath: hit.Fields["ItemPath"].(string), ItemType: hit.Fields["ItemType"].(string), BucketSlug: hit.Fields["BucketSlug"].(string), DbId: hit.Fields["DbId"].(string), } } return records, nil } func (b *bleveFilesSearchEngine) Shutdown() error { if b.idx == nil { return nil } err := b.idx.Close() if err != nil { return err } b.idx = nil return nil } func generateIndexId(name, path, bucketSlug, dbId string) string { bytes := sha256.Sum256([]byte(name + path + bucketSlug + dbId)) return fmt.Sprintf("%x", bytes) } ================================================ FILE: core/search/bleve/bleve_test.go ================================================ package bleve import ( "context" "io/ioutil" "os" "testing" "github.com/FleekHQ/space-daemon/core/search" "gotest.tools/assert" ) func setupEngine(t *testing.T) (*bleveFilesSearchEngine, context.Context) { dbPath, err := ioutil.TempDir("", "testDb-*") assert.NilError(t, err, "failed to create db path") engine := NewSearchEngine(WithDBPath(dbPath)) assert.NilError(t, engine.Start(), "database failed to initialize") cleanup := func() { _ = engine.Shutdown() _ = os.RemoveAll(dbPath) } t.Cleanup(cleanup) return engine, context.Background() } func TestEngineStartAndShutdown(t 
*testing.T) {
	dbPath, err := ioutil.TempDir("", "testDb-*")
	assert.NilError(t, err, "failed to create db path")
	engine := NewSearchEngine(WithDBPath(dbPath))
	assert.NilError(t, engine.Start(), "database failed to initialize")
	assert.NilError(t, engine.Shutdown(), "search engine failed to shutdown")
	// try re-opening the same engine once more
	engine = NewSearchEngine(WithDBPath(dbPath))
	assert.NilError(t, engine.Start(), "failed to re-open existing search index")
	assert.NilError(t, engine.Shutdown(), "failed to shutdown existing search index")
}

// Inserted records should be retrievable by extension query.
func TestFilesSearchEngine_Insert_And_Query(t *testing.T) {
	engine, ctx := setupEngine(t)
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "new content.pdf",
		ItemExtension: "pdf",
		ItemPath:      "/new",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "second-content.txt",
		ItemExtension: "txt",
		ItemPath:      "/new",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	queryResult, err := engine.QueryFileData(ctx, "pdf", 20)
	assert.NilError(t, err, "failed to query file data")
	assert.Equal(t, 1, len(queryResult), "not enough results returned from query")
	assert.Equal(t, "new content.pdf", queryResult[0].ItemName, "search query result incorrect")
}

// Re-inserting the same record overwrites the existing document (same derived id).
func TestInserting_DuplicateRecords_Count_As_Single(t *testing.T) {
	engine, ctx := setupEngine(t)
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "new content.pdf",
		ItemExtension: "pdf",
		ItemPath:      "/new",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	// try inserting duplicate records
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "new content.pdf",
		ItemExtension: "pdf",
		ItemPath:      "/new",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	// validate only a single record exists
	queryResult, err := engine.QueryFileData(ctx, "new content.pdf", 20)
	assert.NilError(t, err, "failed to query file data")
	assert.Equal(t, 1, len(queryResult), "only single result should be returned")
	assert.Equal(t, "new content.pdf", queryResult[0].ItemName, "search query result incorrect")
}

// Deleted records should no longer appear in query results.
func TestFilesSearchEngine_Delete_And_Query(t *testing.T) {
	engine, ctx := setupEngine(t)
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "new content.pdf",
		ItemExtension: "pdf",
		ItemPath:      "/new",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "second-content.txt",
		ItemExtension: "txt",
		ItemPath:      "/new",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	err := engine.DeleteFileData(ctx, &search.DeleteIndexRecord{
		ItemName:   "new content.pdf",
		ItemPath:   "/new",
		BucketSlug: "personal",
	})
	assert.NilError(t, err, "deleting file data failed")
	queryResult, err := engine.QueryFileData(ctx, "content", 20)
	assert.NilError(t, err, "failed to query file data")
	assert.Equal(t, 1, len(queryResult), "expected only single result")
	// only second content should exist in search engine
	assert.Equal(t, "second-content.txt", queryResult[0].ItemName, "search query result incorrect")
}

// A leading fragment of a filename should match via the prefix query.
func TestPrefixFileSearchWorks(t *testing.T) {
	engine, ctx := setupEngine(t)
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "hello1.txt",
		ItemExtension: "txt",
		ItemPath:      "/",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "hello2.txt",
		ItemExtension: "txt",
		ItemPath:      "/",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	queryResult, err := engine.QueryFileData(ctx, "he", 20)
	assert.NilError(t, err, "failed to query file data")
	assert.Equal(t, 2, len(queryResult), "query result not expected length")
}

// A mid-word fragment should match via the infix regex query.
func TestInfixFileSearchWorks(t *testing.T) {
	engine, ctx := setupEngine(t)
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "hello1.txt",
		ItemExtension: "txt",
		ItemPath:      "/",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	insertRecord(t, ctx, engine, &search.InsertIndexRecord{
		ItemName:      "hello2.txt",
		ItemExtension: "txt",
		ItemPath:      "/",
		ItemType:      "FILE",
		BucketSlug:    "personal",
		DbId:          "",
	})
	queryResult, err := engine.QueryFileData(ctx, "el", 20)
	assert.NilError(t, err, "failed to query file data")
	assert.Equal(t, 2, len(queryResult), "query result not expected length")
}

// insertRecord is a test helper that inserts and asserts success.
func insertRecord(
	t *testing.T,
	ctx context.Context,
	engine search.FilesSearchEngine,
	record *search.InsertIndexRecord,
) {
	_, err := engine.InsertFileData(ctx, record)
	assert.NilError(t, err, "failed to insert file data")
}

================================================
FILE: core/search/bleve/options.go
================================================
package bleve

// WithDBPath overrides the directory where the bleve index is stored.
func WithDBPath(path string) Option {
	return func(o *bleveSearchOption) {
		o.dbPath = path
	}
}

================================================
FILE: core/search/engines.go
================================================
package search

import (
	"context"
)

// Represents Search Engines for File and Folders
// Can be used for indexing and querying of File/Folders
// NOTE(review): implementations also expose Shutdown(), which is not part of
// this interface — confirm whether it should be added.
type FilesSearchEngine interface {
	Start() error
	InsertFileData(ctx context.Context, data *InsertIndexRecord) (*IndexRecord, error)
	DeleteFileData(ctx context.Context, data *DeleteIndexRecord) error
	QueryFileData(ctx context.Context, query string, limit int) ([]*IndexRecord, error)
}

================================================
FILE: core/search/model.go
================================================
package search

// IndexRecord is a stored/returned search index entry.
type IndexRecord struct {
	Id            string
	ItemName      string
	ItemExtension string
	ItemPath      string
	ItemType      string
	// Metadata here
	BucketSlug string
	DbId       string
}

// InsertIndexRecord is the input for indexing a file or folder.
type InsertIndexRecord struct {
	ItemName      string
	ItemExtension string
	ItemPath      string
	ItemType      string
	BucketSlug    string
	DbId          string
}

// DeleteIndexRecord identifies the entry to remove from the index.
type DeleteIndexRecord struct {
	ItemName   string
	ItemPath   string
	BucketSlug string
	DbId       string // DbId is only required for shared content
}

================================================
FILE: core/search/sqlite/model.go
================================================
package sqlite

import "gorm.io/gorm"

// SearchIndexRecord is the GORM model backing the sqlite search index.
// (ItemName, ItemPath, BucketSlug) form a unique composite index.
type SearchIndexRecord struct {
	gorm.Model
	ItemName      string `gorm:"index:idx_name_path_bucket,unique"`
	ItemExtension string `gorm:"size:10"`
	ItemPath      string `gorm:"index:idx_name_path_bucket,unique"`
	ItemType      string
	BucketSlug    string `gorm:"index:idx_name_path_bucket,unique"`
	DbId          string `gorm:"index"`
}

================================================
FILE: core/search/sqlite/options.go
================================================
package sqlite

import "gorm.io/gorm/logger"

// WithDBPath overrides the directory where the sqlite database file is stored.
func WithDBPath(path string) Option {
	return func(o *sqliteSearchOption) {
		o.dbPath = path
	}
}

// WithLogLevel sets the GORM logger verbosity.
func WithLogLevel(level logger.LogLevel) Option {
	return func(o *sqliteSearchOption) {
		o.logLevel = level
	}
}

================================================
FILE: core/search/sqlite/sqlite.go
================================================
package sqlite

import (
	"context"
	"os/user"
	"path/filepath"
	"strconv"
	"strings"

	"gorm.io/gorm/logger"

	"github.com/FleekHQ/space-daemon/core/search"
	"github.com/pkg/errors"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

const DbFileName = "filesIndex.db"

type sqliteSearchOption struct {
	dbPath   string
	logLevel logger.LogLevel
}

type Option func(o *sqliteSearchOption)

// sqliteFilesSearchEngine is a files search engine that is backed by sqlite
type sqliteFilesSearchEngine struct {
	db   *gorm.DB
	opts sqliteSearchOption
}

// Creates a new SQLite backed search engine for files and folders
func NewSearchEngine(opts ...Option) *sqliteFilesSearchEngine {
	usr, _ := user.Current()
	searchOptions := sqliteSearchOption{
		dbPath: filepath.Join(usr.HomeDir, ".fleek-space"),
	}
	for _, opt := range opts {
		opt(&searchOptions)
	}
	return &sqliteFilesSearchEngine{
		db:   nil,
		opts: searchOptions,
	}
}

// Start opens (or creates) the sqlite database and migrates the schema.
func (s *sqliteFilesSearchEngine) Start() error {
	dsn := filepath.Join(s.opts.dbPath, DbFileName)
	if db, err := gorm.Open(sqlite.Open(dsn), &gorm.Config{
		Logger: logger.Default.LogMode(s.opts.logLevel),
	}); err !=
nil { return errors.Wrap(err, "failed to open database") } else { s.db = db } return s.db.AutoMigrate(&SearchIndexRecord{}) } func (s *sqliteFilesSearchEngine) InsertFileData(ctx context.Context, data *search.InsertIndexRecord) (*search.IndexRecord, error) { record := SearchIndexRecord{ ItemName: data.ItemName, ItemExtension: data.ItemExtension, ItemPath: data.ItemPath, ItemType: data.ItemPath, BucketSlug: data.BucketSlug, DbId: data.DbId, } result := s.db.Create(&record) if result.Error != nil { if strings.Contains(result.Error.Error(), "UNIQUE constraint failed") { return nil, errors.New("a similar file has already been inserted") } return nil, result.Error } return modelToIndexRecord(&record), nil } func (s *sqliteFilesSearchEngine) DeleteFileData(ctx context.Context, data *search.DeleteIndexRecord) error { stmt := s.db.Where( "item_name = ? AND item_path = ? AND bucket_slug = ?", data.ItemName, data.ItemPath, data.BucketSlug, ) if data.DbId != "" { stmt = stmt.Where("dbId = ?", data.DbId) } result := stmt.Delete(&SearchIndexRecord{}) return result.Error } func (s *sqliteFilesSearchEngine) QueryFileData(ctx context.Context, query string, limit int) ([]*search.IndexRecord, error) { var records []*SearchIndexRecord result := s.db.Where( "LOWER(item_name) LIKE ? 
OR LOWER(item_extension) = ?", "%"+strings.ToLower(query)+"%", strings.ToLower(query), ).Limit(limit).Find(&records) if result.Error != nil { return nil, result.Error } searchResults := make([]*search.IndexRecord, len(records)) for i, record := range records { searchResults[i] = modelToIndexRecord(record) } return searchResults, nil } func (s *sqliteFilesSearchEngine) Shutdown() error { db, err := s.db.DB() if err != nil { return err } return db.Close() } func modelToIndexRecord(model *SearchIndexRecord) *search.IndexRecord { return &search.IndexRecord{ Id: strconv.Itoa(int(model.ID)), ItemName: model.ItemName, ItemExtension: model.ItemExtension, ItemPath: model.ItemPath, ItemType: model.ItemType, BucketSlug: model.BucketSlug, DbId: model.DbId, } } ================================================ FILE: core/search/sqlite/sqlite_test.go ================================================ package sqlite import ( "context" "io/ioutil" "os" "testing" "github.com/FleekHQ/space-daemon/core/search" "gotest.tools/assert" ) func setupEngine(t *testing.T) (*sqliteFilesSearchEngine, context.Context) { dbPath, err := ioutil.TempDir("", "testDb-*") assert.NilError(t, err, "failed to create db path") engine := NewSearchEngine(WithDBPath(dbPath)) assert.NilError(t, engine.Start(), "database failed to initialize") cleanup := func() { _ = engine.Shutdown() _ = os.RemoveAll(dbPath) } t.Cleanup(cleanup) return engine, context.Background() } func TestSqliteFilesSearchEngine_Insert_And_Query(t *testing.T) { engine, ctx := setupEngine(t) insertRecord(t, ctx, engine, &search.InsertIndexRecord{ ItemName: "new content.pdf", ItemExtension: "pdf", ItemPath: "/new", ItemType: "FILE", BucketSlug: "personal", DbId: "", }) insertRecord(t, ctx, engine, &search.InsertIndexRecord{ ItemName: "second-content.txt", ItemExtension: "txt", ItemPath: "/new", ItemType: "FILE", BucketSlug: "personal", DbId: "", }) queryResult, err := engine.QueryFileData(ctx, "pdf", 20) assert.NilError(t, err, "failed to query 
file data") assert.Equal(t, 1, len(queryResult), "not enough results returned from query") assert.Equal(t, "new content.pdf", queryResult[0].ItemName, "search query result incorrect") } func TestInserting_DuplicateRecords_Fail(t *testing.T) { engine, ctx := setupEngine(t) insertRecord(t, ctx, engine, &search.InsertIndexRecord{ ItemName: "new content.pdf", ItemExtension: "pdf", ItemPath: "/new", ItemType: "FILE", BucketSlug: "personal", DbId: "", }) // try inserting duplicate records should fail _, err := engine.InsertFileData(ctx, &search.InsertIndexRecord{ ItemName: "new content.pdf", ItemExtension: "pdf", ItemPath: "/new", ItemType: "FILE", BucketSlug: "personal", DbId: "", }) assert.Error(t, err, "a similar file has already been inserted") } func TestSqliteFilesSearchEngine_Delete_And_Query(t *testing.T) { engine, ctx := setupEngine(t) insertRecord(t, ctx, engine, &search.InsertIndexRecord{ ItemName: "new content.pdf", ItemExtension: "pdf", ItemPath: "/new", ItemType: "FILE", BucketSlug: "personal", DbId: "", }) insertRecord(t, ctx, engine, &search.InsertIndexRecord{ ItemName: "second-content.txt", ItemExtension: "txt", ItemPath: "/new", ItemType: "FILE", BucketSlug: "personal", DbId: "", }) err := engine.DeleteFileData(ctx, &search.DeleteIndexRecord{ ItemName: "new content.pdf", ItemPath: "/new", BucketSlug: "personal", }) assert.NilError(t, err, "deleting file data failed") queryResult, err := engine.QueryFileData(ctx, "content", 20) assert.NilError(t, err, "failed to query file data") assert.Equal(t, 1, len(queryResult), "too much result returned") // only second content should exist in search engine assert.Equal(t, "second-content.txt", queryResult[0].ItemName, "search query result incorrect") } func insertRecord( t *testing.T, ctx context.Context, engine search.FilesSearchEngine, record *search.InsertIndexRecord, ) { _, err := engine.InsertFileData(ctx, record) assert.NilError(t, err, "failed to insert file data") } 
================================================ FILE: core/space/domain/domain.go ================================================ package domain import "fmt" type AppConfig struct { Port int AppPath string TextileHubTarget string TextileThreadsTarget string } type DirEntry struct { Path string IsDir bool Name string SizeInBytes string Created string Updated string FileExtension string Members []Member } type ThreadInfo struct { Addresses []string Key string } type FileInfo struct { DirEntry IpfsHash string BackedUp bool LocallyAvailable bool BackupInProgress bool RestoreInProgress bool } type OpenFileInfo struct { Location string } type KeyPair struct { PublicKey string PrivateKey string } type AddItemResult struct { SourcePath string BucketPath string Bytes int64 Error error } type AddItemsResponse struct { TotalFiles int64 TotalBytes int64 Error error } type Member struct { Address string `json:"address"` PublicKey string `json:"publicKey"` } type AddWatchFile struct { DbId string `json:"dbId"` LocalPath string `json:"local_path"` BucketPath string `json:"bucket_path"` BucketKey string `json:"bucket_key"` BucketSlug string `json:"bucket_slug"` IsRemote bool `json:"isRemote"` Cid string `json:"cid"` } type Identity struct { Address string `json:"address"` PublicKey string `json:"publicKey"` Username string `json:"username"` } type APIError struct { Message string `json:"message"` } type FileSharingInfo struct { Bucket string SharedFileCid string SharedFileKey string SpaceDownloadLink string } type NotificationTypes int const ( UNKNOWN NotificationTypes = iota INVITATION USAGEALERT INVITATION_REPLY REVOKED_INVITATION ) type FullPath struct { DbId string `json:"dbId"` BucketKey string `json:"bucketKey"` Bucket string `json:"bucket"` Path string `json:"path"` } type InvitationStatus int const ( PENDING InvitationStatus = 0 ACCEPTED InvitationStatus = 1 REJECTED InvitationStatus = 2 ) type Invitation struct { InviterPublicKey string `json:"inviterPublicKey"` 
InviteePublicKey string `json:"inviteePublicKey"` InvitationID string `json:"invitationID"` Status InvitationStatus `json:"status"` ItemPaths []FullPath `json:"itemPaths"` Keys [][]byte `json:"keys"` } type InvitationReply struct { InvitationID string `json:"invitationID"` } // Represents when an inviter unshared access to previously shared files in ItemPaths type RevokedInvitation struct { InviterPublicKey string `json:"inviterPublicKey"` InviteePublicKey string `json:"inviteePublicKey"` ItemPaths []FullPath `json:"itemPaths"` Keys [][]byte `json:"keys"` } type UsageAlert struct { Used int64 `json:"used"` Limit int64 `json:"limit"` Message string `json:"message"` } type MessageBody struct { Type NotificationTypes `json:"type"` Body []byte `json:"body"` } type Notification struct { ID string `json:"id"` Subject string `json:"subject"` Body string `json:"body"` NotificationType NotificationTypes `json:"notificationType"` CreatedAt int64 `json:"createdAt"` ReadAt int64 `json:"readAt"` // QUESTION: is there a way to enforce that only one of the below is present InvitationValue Invitation `json:"invitationValue"` UsageAlertValue UsageAlert `json:"usageAlertValue"` InvitationAcceptValue InvitationReply `json:"invitationAcceptValue"` RevokedInvitationValue RevokedInvitation `json:"revokedInvitationValue"` RelatedObject interface{} `json:"relatedObject"` } type APISessionTokens struct { HubToken string ServicesToken string } type MirrorFile struct { Path string BucketSlug string Backup bool Shared bool BackupInProgress bool RestoreInProgress bool } type SharedDirEntry struct { DbID string Bucket string IsPublicLink bool FileInfo Members []Member // XXX: it is duplicated from FileInfo SharedBy string } type SearchFileEntry struct { FileInfo Bucket string DbID string } type KeyBackupType int const ( PASSWORD KeyBackupType = 0 GOOGLE KeyBackupType = 1 TWITTER KeyBackupType = 2 EMAIL KeyBackupType = 3 ) func (b KeyBackupType) String() string { switch b { case 0: return 
"password" case 1: return "google" case 2: return "twitter" case 3: return "email" default: return fmt.Sprintf("%d", int(b)) } } // SharedFilesRoleAction represents action to be performed on the role type SharedFilesRoleAction int const ( DeleteRoleAction SharedFilesRoleAction = iota ReadWriteRoleAction ) ================================================ FILE: core/space/fuse/controller.go ================================================ package fuse import ( "context" "fmt" "os" "os/exec" "strings" "sync" "github.com/FleekHQ/space-daemon/core/space/fuse/installer" "github.com/FleekHQ/space-daemon/core/spacefs" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/log" ) // Controller is the space domain controller for managing the VFS. // It is used by the grpc server and app/daemon generally type Controller struct { cfg config.Config vfs VFS store store.Store install installer.FuseInstaller isServed bool mountLock sync.RWMutex mountPath string } var DefaultFuseDriveName = "Space" func NewController( ctx context.Context, cfg config.Config, store store.Store, sfs *spacefs.SpaceFS, install installer.FuseInstaller, ) *Controller { vfs := initVFS(ctx, sfs) return &Controller{ cfg: cfg, store: store, vfs: vfs, install: install, isServed: false, mountLock: sync.RWMutex{}, } } // ShouldMount check the store and config to determine if the VFS drive was previously mounted func (s *Controller) ShouldMount() bool { if s.cfg.GetString(config.MountFuseDrive, "false") == "true" { return true } mountFuseDrive, err := s.store.Get([]byte(config.MountFuseDrive)) if err == nil { log.Debug("Persisted mountFuseDrive", fmt.Sprintf("state=%s", string(mountFuseDrive))) return string(mountFuseDrive) == "true" } else { log.Debug("No persisted mountFuseDrive state found") } return false } // Mount mounts the vfs drive and immediately serves the handler. 
// It starts the Fuse Server in the background func (s *Controller) Mount() error { s.mountLock.Lock() defer s.mountLock.Unlock() if s.vfs.IsMounted() { return nil } mountPath, err := getMountPath(s.cfg) if err != nil { return err } s.mountPath = mountPath err = s.vfs.Mount( mountPath, s.cfg.GetString(config.FuseDriveName, DefaultFuseDriveName), ) if err != nil { if !strings.Contains(err.Error(), "exit status 64") { return err } // a drive mount error, so we try unmounting first and retry mounting _ = s.vfs.Unmount() s.removeMountedPath() err = s.vfs.Mount( mountPath, s.cfg.GetString(config.FuseDriveName, DefaultFuseDriveName), ) if err != nil { return err } } // persist mount state to store to trigger remount on restart if err := s.store.Set([]byte(config.MountFuseDrive), []byte("true")); err != nil { return err } s.serve() return nil } func (s *Controller) GetMountPath() string { if !s.IsMounted() { return "" } path, _ := getMountPath(s.cfg) return path } func (s *Controller) serve() { if s.isServed { return } go func() { s.isServed = true defer func() { s.isServed = false }() // this blocks and unblocks when vfs.Unmount() is called // or some external thing happens like user unmounting the drive err := s.vfs.Serve() if err != nil { log.Error("error ending fuse server", err) } log.Info("FUSE Controller server ended") }() } func (s *Controller) IsMounted() bool { s.mountLock.RLock() defer s.mountLock.RUnlock() return s.vfs.IsMounted() } func (s *Controller) Unmount() error { s.mountLock.Lock() defer s.mountLock.Unlock() if !s.vfs.IsMounted() { return nil } // persist unmount state to store to prevent remount on restart if err := s.store.Set([]byte(config.MountFuseDrive), []byte("false")); err != nil { return err } err := s.vfs.Unmount() return err } func (s *Controller) removeMountedPath() { if s.mountPath != "" { // try unmounting via os err := exec.Command("umount", s.mountPath).Run() log.Error("Failed to run unmount command", err) err = 
os.RemoveAll(s.mountPath) log.Error("Failed to delete mount directory on unmount", err) } } func (s *Controller) Shutdown() error { return s.Unmount() } ================================================ FILE: core/space/fuse/fs.go ================================================ package fuse // VFS represents the handler for virtually mounted drives. // it is implemented using FUSE for linux and macOS // and will use dokany for windows type VFS interface { Mount(mountPath, fsName string) error IsMounted() bool // Serve should be a blocking call and return only on unmount or shutdown Serve() error Unmount() error } ================================================ FILE: core/space/fuse/installer/installer_darwin.go ================================================ package installer import ( "context" "os/exec" "github.com/pkg/errors" "github.com/FleekHQ/space-daemon/log" "github.com/keybase/go-kext" ) type State int64 const ( Default State = iota Downloading Installing Error ) type macFuseInstaller struct { state State } func NewFuseInstaller() *macFuseInstaller { return &macFuseInstaller{ state: Default, } } func (d *macFuseInstaller) IsInstalled(ctx context.Context) (bool, error) { info, err := kext.LoadInfo("com.github.osxfuse.filesystems.osxfuse") if err != nil { log.Error("unable to determine state of extension", err) return false, err } return info != nil, nil } // Install assumes that the Fuse .pkg installer exists in a particular directory func (d *macFuseInstaller) Install(ctx context.Context, args map[string]interface{}) error { // ideally, this should download the fuse pkg and call the installer // first starting with providing a path for it, will change to download as this is a security risk d.state = Installing path, ok := args["path"].(string) if !ok { return errors.New("'path' is missing from install arguments") } installerPath, err := exec.LookPath("installer") if err != nil { return errors.Wrap(err, "pkg installer not present") } cmd := 
exec.Command(installerPath, "-pkg", path, "-target", "/") out, err := cmd.CombinedOutput() log.Debug("Install command output: " + string(out)) if err != nil { return err } // load the kernel extension return d.loadKernel() } func (d *macFuseInstaller) loadKernel() error { log.Debug("Loading OSXFUSE Kernel") cmd := exec.Command("/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse") output, err := cmd.CombinedOutput() log.Debug("Kernel Loading Output: " + string(output)) return err } ================================================ FILE: core/space/fuse/installer/installer_darwin_test.go ================================================ package installer import ( "context" "testing" "github.com/stretchr/testify/assert" ) // NOTE: This is more of an integration test and is commented out by default till functional test-suite is ready func TestMacFuseInstaller(t *testing.T) { ctx := context.Background() installer := NewFuseInstaller() installed, err := installer.IsInstalled(ctx) assert.NoError(t, err) assert.Equal(t, false, installed, "fuse should not be installed by default") } ================================================ FILE: core/space/fuse/installer/installer_linux.go ================================================ package installer import ( "context" "errors" ) type linuxFuseInstaller struct { } func NewFuseInstaller() *linuxFuseInstaller { return &linuxFuseInstaller{} } func (d *linuxFuseInstaller) IsInstalled(ctx context.Context) (bool, error) { return true, nil // assume fuse is installed on recent linux builds } func (d *linuxFuseInstaller) Install(ctx context.Context, args map[string]interface{}) error { return errors.New("not supported") } ================================================ FILE: core/space/fuse/installer/installer_windows.go ================================================ package installer import ( "context" "errors" ) type windowsFuseInstaller struct { } func NewFuseInstaller() *windowsFuseInstaller { return 
&windowsFuseInstaller{} } func (d *windowsFuseInstaller) IsInstalled(ctx context.Context) (bool, error) { return false, nil } func (d *windowsFuseInstaller) Install(ctx context.Context, args map[string]interface{}) error { return errors.New("not supported") } ================================================ FILE: core/space/fuse/installer/interface.go ================================================ package installer import "context" type FuseInstaller interface { IsInstalled(ctx context.Context) (bool, error) Install(ctx context.Context, args map[string]interface{}) error // TODO: UnInstall(ctx context.Context) } ================================================ FILE: core/space/fuse/mount.go ================================================ //+build !windows package fuse import ( "context" "fmt" "os" s "strings" "github.com/FleekHQ/space-daemon/core/libfuse" "github.com/FleekHQ/space-daemon/core/spacefs" "github.com/FleekHQ/space-daemon/config" "github.com/mitchellh/go-homedir" ) func pathExists(path string) bool { _, err := os.Stat(path) return os.IsExist(err) } func getMountPath(cfg config.Config) (string, error) { mountPath := cfg.GetString(config.FuseMountPath, "~/"+DefaultFuseDriveName) if home, err := homedir.Dir(); err == nil { // If the mount directory contains ~, we replace it with the actual home directory mountPath = s.TrimRight( s.Replace(mountPath, "~", home, -1), "/", ) } // checks to ensure we are not mounting on an already existing path if pathExists(mountPath) { // loop through 10 suffixes till we find on that exists for i := 0; i < 10; i++ { newPath := fmt.Sprintf("%s%d", mountPath, i) if !pathExists(newPath) { mountPath = newPath break } } } return mountPath, nil } func initVFS(ctx context.Context, sfs spacefs.FSOps) VFS { return libfuse.NewVFileSystem(ctx, sfs) } ================================================ FILE: core/space/fuse/mount_windows.go ================================================ package fuse import ( "context" "errors" 
"github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/spacefs" ) var errNotImplemented = errors.New("fuse not implemented for windows") func pathExists(path string) bool { return false } func getMountPath(cfg config.Config) (string, error) { return "", errNotImplemented } func initVFS(ctx context.Context, sfs spacefs.FSOps) VFS { return &dummyVFS{} } // dummyVFS acts a placeholder vfs for windows pending the actual implementation type dummyVFS struct{} func (d dummyVFS) Mount(mountPath, fsName string) error { return errNotImplemented } func (d dummyVFS) IsMounted() bool { return false } func (d dummyVFS) Serve() error { return errNotImplemented } func (d dummyVFS) Unmount() error { return errNotImplemented } ================================================ FILE: core/space/fuse/state.go ================================================ package fuse import ( "context" "runtime" "github.com/FleekHQ/space-daemon/log" ) type State string const ( UNSUPPORTED State = "UNSUPPORTED" NOT_INSTALLED State = "NOT_INSTALLED" UNMOUNTED State = "UNMOUNTED" MOUNTED State = "MOUNTED" ERROR State = "ERROR" ) var supportedOs = map[string]bool{ "linux": true, "darwin": true, } func (s *Controller) GetFuseState(ctx context.Context) (State, error) { if !supportedOs[runtime.GOOS] { return UNSUPPORTED, nil } if s.IsMounted() { return MOUNTED, nil } // try and get if it is installed installed, err := s.install.IsInstalled(ctx) if err != nil { log.Error("unable to determine state of extension", err) return ERROR, err } if !installed { return NOT_INSTALLED, err } return UNMOUNTED, nil } ================================================ FILE: core/space/fuse/state_test.go ================================================ // +build linux darwin package fuse import ( "context" "testing" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/assert" "github.com/FleekHQ/space-daemon/core/spacefs" "github.com/FleekHQ/space-daemon/mocks" fusemocks 
"github.com/FleekHQ/space-daemon/mocks/fuse" ) type testCtx struct { cfg *mocks.Config st *mocks.Store fsds *fusemocks.FSDataSource installer *fusemocks.FuseInstaller } func initTestCtx() (context.Context, *testCtx, *Controller) { tctx := &testCtx{ cfg: new(mocks.Config), st: new(mocks.Store), fsds: new(fusemocks.FSDataSource), installer: new(fusemocks.FuseInstaller), } ctx := context.Background() fs := spacefs.New(tctx.fsds) controller := NewController(ctx, tctx.cfg, tctx.st, fs, tctx.installer) return ctx, tctx, controller } func TestController_GetFuseState_ShouldDefaultTo_Not_Installed(t *testing.T) { ctx, test, controller := initTestCtx() test.installer.On("IsInstalled", mock.Anything).Return(false, nil) state, err := controller.GetFuseState(ctx) assert.NoError(t, err, "error on GetFuseState()") assert.Equal(t, NOT_INSTALLED, state, "unexpected state gotten") } // Note: This is more of an integration test than unit test, but should run cleanly across multiple threads func TestController_GetFuseState_ShouldBe_Unmounted_When_Installed(t *testing.T) { ctx, test, controller := initTestCtx() test.installer.On("IsInstalled", mock.Anything).Return(true, nil) state, err := controller.GetFuseState(ctx) assert.NoError(t, err, "error on GetFuseState()") assert.Equal(t, UNMOUNTED, state, "unexpected state gotten") } ================================================ FILE: core/space/services/fs_utils.go ================================================ package services import ( "io" "os" "github.com/FleekHQ/space-daemon/log" ) func PathExists(path string) bool { if _, err := os.Stat(path); err == nil { return true } return false } func IsPathDir(path string) bool { fi, err := os.Stat(path) if err != nil { log.Error("path error check isPathDir", err) return false } mode := fi.Mode() return mode.IsDir() } func RemoveDuplicates(elements []string) []string { // Use map to record duplicates as we find them. 
encountered := map[string]bool{} result := []string{} for v := range elements { if encountered[elements[v]] == true { // Do not add duplicate. } else { // Record this element as an encountered element. encountered[elements[v]] = true // Append to result slice. result = append(result, elements[v]) } } // Return the new slice. return result } // Reader that also counts the amount of Bytes read from the wrappeed reader type CountingReader struct { reader io.Reader BytesRead int64 } func NewCountingReader(reader io.Reader) *CountingReader { return &CountingReader{ reader: reader, BytesRead: 0, } } func (r *CountingReader) Read(b []byte) (int, error) { n, err := r.reader.Read(b) r.BytesRead += int64(n) return n, err } ================================================ FILE: core/space/services/services.go ================================================ package services import ( "context" "errors" "time" "github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/core/vault" "golang.org/x/sync/errgroup" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/env" node "github.com/FleekHQ/space-daemon/core/ipfs/node" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile" ) // Implementation for space.Service type Space struct { store store.Store cfg config.Config env env.SpaceEnv tc textile.Client sync Syncer keychain keychain.Keychain vault vault.Vault hub hub.HubAuth ipfsNode *node.IpfsNode buckd textile.Buckd aeg *errgroup.Group } type Syncer interface { AddFileWatch(addFileInfo domain.AddWatchFile) error GetOpenFilePath(bucketSlug, bucketPath, dbID, cid string) (string, bool) } type AddFileWatchFunc = func(addFileInfo domain.AddWatchFile) error func (s *Space) RegisterSyncer(sync Syncer) { s.sync = sync } func (s *Space) GetConfig(ctx context.Context) domain.AppConfig { return 
domain.AppConfig{ Port: s.cfg.GetInt(config.SpaceServerPort, "-1"), AppPath: s.env.WorkingFolder(), TextileHubTarget: s.cfg.GetString(config.TextileHubTarget, ""), TextileThreadsTarget: s.cfg.GetString(config.TextileThreadsTarget, ""), } } func NewSpace( st store.Store, tc textile.Client, syncer Syncer, cfg config.Config, env env.SpaceEnv, kc keychain.Keychain, v vault.Vault, h hub.HubAuth, ) *Space { return &Space{ store: st, cfg: cfg, env: env, tc: tc, sync: syncer, keychain: kc, vault: v, hub: h, } } var textileClientInitTimeout = time.Second * 60 var textileClientHubTimeout = time.Second * 60 * 3 // Waits for textile client to be initialized before returning. func (s *Space) waitForTextileInit(ctx context.Context) error { if s.tc.IsInitialized() { return nil } select { case <-time.After(textileClientInitTimeout): return errors.New("textile client not initialized in expected time") case <-s.tc.WaitForInitialized(): return nil case <-ctx.Done(): return ctx.Err() } } // Waits for textile client to be healthy (initialized and connected to hub) before returning. // If it exceeds the max amount of retries, it returns an error. 
func (s *Space) waitForTextileHub(ctx context.Context) error { if s.tc.IsHealthy() { return nil } select { case err := <-s.tc.WaitForHealthy(): // This returns error if there were 3 failed attempts to connect if err != nil { return err } return nil case <-time.After(textileClientHubTimeout): return errors.New("textile client not initialized in expected time") case <-ctx.Done(): return ctx.Err() } } ================================================ FILE: core/space/services/services_app_token.go ================================================ package services import ( "context" "github.com/FleekHQ/space-daemon/core/permissions" ) func (s *Space) InitializeMasterAppToken(ctx context.Context) (*permissions.AppToken, error) { newAppToken, err := permissions.GenerateRandomToken(true, []string{}) if err != nil { return nil, err } return newAppToken, s.keychain.StoreAppToken(newAppToken) } ================================================ FILE: core/space/services/services_central_server.go ================================================ package services import ( "context" "github.com/FleekHQ/space-daemon/core/space/domain" ) // Return session token for central services authenticated access func (s *Space) GetAPISessionTokens(ctx context.Context) (*domain.APISessionTokens, error) { tokens, err := s.hub.GetTokensWithCache(ctx) if err != nil { return nil, err } return &domain.APISessionTokens{ HubToken: tokens.HubToken, ServicesToken: tokens.AppToken, }, nil } ================================================ FILE: core/space/services/services_fs.go ================================================ package services import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "regexp" "strconv" "strings" "sync" "time" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/log" ) var bucketNotFoundErr = errors.New("Could not find bucket") 
// Creates a bucket func (s *Space) CreateBucket(ctx context.Context, slug string) (textile.Bucket, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, err } b, err := s.tc.CreateBucket(ctx, slug) if err != nil { return nil, err } return b, nil } // Returns a list of buckets the current user has access to func (s *Space) ListBuckets(ctx context.Context) ([]textile.Bucket, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, err } buckets, err := s.tc.ListBuckets(ctx) if err != nil { return nil, err } return buckets, nil } func (s *Space) ShareBucket(ctx context.Context, slug string) (*domain.ThreadInfo, error) { err := s.waitForTextileHub(ctx) if err != nil { return nil, err } r, err := s.tc.ShareBucket(ctx, slug) if err != nil { return nil, err } addrs := make([]string, 0) for _, addr := range r.Addrs { addrs = append(addrs, addr.String()) } ti := &domain.ThreadInfo{ Addresses: addrs, Key: r.Key.String(), } return ti, nil } func (s *Space) JoinBucket(ctx context.Context, slug string, threadinfo *domain.ThreadInfo) (bool, error) { err := s.waitForTextileHub(ctx) if err != nil { return false, err } r, err := s.tc.JoinBucket(ctx, slug, threadinfo) if err != nil { return false, err } return r, nil } func (s *Space) ToggleBucketBackup(ctx context.Context, bucketSlug string, bucketBackup bool) error { err := s.waitForTextileHub(ctx) if err != nil { return err } _, err = s.tc.ToggleBucketBackup(ctx, bucketSlug, bucketBackup) if err != nil { return err } _, err = s.tc.GetBucket(ctx, bucketSlug, nil) if err != nil { return err } return nil } func (s *Space) BucketBackupRestore(ctx context.Context, bucketSlug string) error { if err := s.waitForTextileHub(ctx); err != nil { return err } if err := s.tc.BucketBackupRestore(ctx, bucketSlug); err != nil { return err } return nil } func (s *Space) getBucketForRemoteFile(ctx context.Context, bucketName, dbID, path string) (textile.Bucket, error) { input := &textile.GetBucketForRemoteFileInput{ 
Bucket: bucketName, DbID: dbID, Path: path, } b, err := s.tc.GetBucket(ctx, bucketName, input) if err != nil { return nil, err } if b == nil { return nil, bucketNotFoundErr } return b, nil } // Returns the bucket given the name, and if the name is "" returns the default bucket func (s *Space) getBucketWithFallback(ctx context.Context, bucketName string) (textile.Bucket, error) { var b textile.Bucket var err error if bucketName == "" { b, err = s.tc.GetDefaultBucket(ctx) } else { b, err = s.tc.GetBucket(ctx, bucketName, nil) } if err != nil { return nil, err } if b == nil { return nil, bucketNotFoundErr } return b, nil } func (s *Space) listDirAtPath( ctx context.Context, b textile.Bucket, path string, listSubfolderContent bool, listMembers bool, ) ([]domain.FileInfo, error) { dir, err := b.ListDirectory(ctx, path) if err != nil { log.Error("Error in ListDir", err) return nil, err } relPathRegex := regexp.MustCompile(`\/ip(f|n)s\/[^\/]*(?P\/.*)`) mirrorfilepaths := make([]string, 0) for _, item := range dir.Item.Items { mirrorfilepaths = append(mirrorfilepaths, item.Path) } mirror_files, err := s.tc.GetModel().FindMirrorFileByPaths(ctx, mirrorfilepaths) if err != nil { log.Error("Error fetching mirror files", err) return nil, err } entries := make([]domain.FileInfo, 0) for _, item := range dir.Item.Items { if utils.IsMetaFileName(item.Name) { continue } paths := relPathRegex.FindStringSubmatch(item.Path) var relPath string if len(paths) > 2 { relPath = relPathRegex.FindStringSubmatch(item.Path)[2] } else { relPath = item.Path } members := []domain.Member{} if listMembers { members, err = s.tc.GetPathAccessRoles(ctx, b, item.Path) if err != nil { return nil, err } } backedup := false backupInProgress := false restoreInProgress := false if mirror_files[item.Path] != nil { backedup = mirror_files[item.Path].Backup backupInProgress = mirror_files[item.Path].BackupInProgress restoreInProgress = mirror_files[item.Path].RestoreInProgress } locallyAvailable := false if 
item.IsDir { locallyAvailable = true } else if e, _ := b.FileExists(ctx, item.Path); e == true { locallyAvailable = true } entry := domain.FileInfo{ DirEntry: domain.DirEntry{ Path: relPath, IsDir: item.IsDir, Name: item.Name, SizeInBytes: strconv.FormatInt(item.Size, 10), FileExtension: strings.Replace(filepath.Ext(item.Name), ".", "", -1), // FIXME: real created at needed Created: time.Unix(0, item.Metadata.UpdatedAt).Format(time.RFC3339), Updated: time.Unix(0, item.Metadata.UpdatedAt).Format(time.RFC3339), Members: members, }, IpfsHash: item.Cid, BackedUp: backedup, LocallyAvailable: locallyAvailable, BackupInProgress: backupInProgress, RestoreInProgress: restoreInProgress, } entries = append(entries, entry) if item.IsDir && listSubfolderContent { newEntries, err := s.listDirAtPath(ctx, b, path+"/"+item.Name, true, listMembers) if err != nil { return nil, err } entries = append(entries, newEntries...) } } return entries, nil } // ListDir returns children entries at path in a bucket func (s *Space) ListDir(ctx context.Context, path string, bucketName string, listMembers bool) ([]domain.FileInfo, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, err } b, err := s.getBucketWithFallback(ctx, bucketName) if err != nil { return nil, err } if b == nil { return nil, errors.New("Could not find buckets") } return s.listDirAtPath(ctx, b, path, false, listMembers) } // ListDirs lists all children entries at path in a bucket // Unlike ListDir, it includes all subfolders children recursively func (s *Space) ListDirs(ctx context.Context, path string, bucketName string, listMembers bool) ([]domain.FileInfo, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, err } b, err := s.getBucketWithFallback(ctx, bucketName) if err != nil { return nil, err } return s.listDirAtPath(ctx, b, path, true, listMembers) } // Copies a file inside a bucket into a temp, unencrypted version of the file in the local file system // Include dbID if opening a shared 
file. Use dbID = "" otherwise. func (s *Space) OpenFile(ctx context.Context, path, bucketName, dbID string) (domain.OpenFileInfo, error) { err := s.waitForTextileInit(ctx) if err != nil { return domain.OpenFileInfo{}, err } isRemote := dbID != "" var filePath string var b textile.Bucket // check if file exists in sync if isRemote { b, err = s.getBucketForRemoteFile(ctx, bucketName, dbID, path) } else { b, err = s.getBucketWithFallback(ctx, bucketName) } if err != nil { return domain.OpenFileInfo{}, err } listdir, err := b.ListDirectory(ctx, path) if err != nil { return domain.OpenFileInfo{}, err } cid := listdir.Item.Cid if filePath, exists := s.sync.GetOpenFilePath(b.Slug(), path, dbID, cid); exists { // sanity check in case file was deleted or moved if PathExists(filePath) { // return file handle return domain.OpenFileInfo{ Location: filePath, }, nil } } // else, open new file on FS filePath, err = s.openFileOnFs(ctx, path, b, isRemote, dbID, cid) if err != nil { return domain.OpenFileInfo{}, err } // return file handle return domain.OpenFileInfo{ Location: filePath, }, nil } // TruncateData removes all data from local machine func (s *Space) TruncateData(ctx context.Context) error { // not doing anything with store because it's // handled in DeleteKeyPair // note: this might not clear storage // so need to verify and update later err := s.tc.DeleteAccount(ctx) if err != nil { return err } return nil } func (s *Space) openFileOnFs(ctx context.Context, path string, b textile.Bucket, isRemote bool, dbID, cid string) (string, error) { // write file copy to temp folder tmpFile, err := s.createTempFileForPath(ctx, path) if err != nil { log.Error("cannot create temp file while executing OpenFile", err) return "", err } defer tmpFile.Close() // look for path in textile err = b.GetFile(ctx, path, tmpFile) if err != nil { log.Error(fmt.Sprintf("error retrieving file from bucket %s in path %s", b.Key(), path), err) return "", err } // register temp file in watcher 
addWatchFile := domain.AddWatchFile{ DbId: dbID, LocalPath: tmpFile.Name(), BucketPath: path, BucketKey: b.Key(), BucketSlug: b.Slug(), IsRemote: isRemote, Cid: cid, } err = s.sync.AddFileWatch(addWatchFile) if err != nil { log.Error(fmt.Sprintf("error adding file to watch path %s from bucket %s in bucketpath %s", tmpFile.Name(), b.Key(), path), err) return "", err } return tmpFile.Name(), nil } // createTempFileForPath creates a temporary file using the path specified relative to the AppPath // configured when running the daemon. If inTempDir is true, then it is created relative // to the operating systems temp dir. func (s *Space) createTempFileForPath(ctx context.Context, path string) (*os.File, error) { _, fileName := filepath.Split(path) // NOTE: the pattern of the file ensures that it retains extension. e.g (rand num) + filename/path return ioutil.TempFile("", "*-"+fileName) } func (s *Space) CreateFolder(ctx context.Context, path string, bucketName string) error { err := s.waitForTextileInit(ctx) if err != nil { return err } b, err := s.getBucketWithFallback(ctx, bucketName) if err != nil { return err } if _, err := s.createFolder(ctx, path, b); err != nil { return err } return nil } func (s *Space) createFolder(ctx context.Context, path string, b textile.Bucket) (string, error) { // NOTE: may need to change signature of createFolder if we need to return this info _, root, err := b.CreateDirectory(ctx, path) if err != nil { log.Error(fmt.Sprintf("error creating folder in bucket %s with path %s", b.Key(), path), err) return "", err } return root.String(), nil } func (s *Space) AddItems(ctx context.Context, sourcePaths []string, targetPath string, bucketName string) (<-chan domain.AddItemResult, domain.AddItemsResponse, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, domain.AddItemsResponse{}, err } // check if all sourcePaths exist, else return err for _, sourcePath := range sourcePaths { if !PathExists(sourcePath) { return nil, 
domain.AddItemsResponse{}, errors.New(fmt.Sprintf("path not found at %s", sourcePath)) } } b, err := s.getBucketWithFallback(ctx, bucketName) if err != nil { return nil, domain.AddItemsResponse{}, err } results := make(chan domain.AddItemResult) totalsRes, err := getTotals(RemoveDuplicates(sourcePaths)) if err != nil { return nil, domain.AddItemsResponse{}, err } go func() { s.addItems(ctx, RemoveDuplicates(sourcePaths), targetPath, b, results) close(results) }() return results, totalsRes, nil } // AddItemWithReader uploads content of the reader to the targetPath on the bucket specified // // Note: the AddItemResult returns an empty SourcePath func (s *Space) AddItemWithReader( ctx context.Context, reader io.Reader, targetPath, bucketName string, ) (domain.AddItemResult, error) { err := s.waitForTextileInit(ctx) if err != nil { return domain.AddItemResult{}, err } b, err := s.getBucketWithFallback(ctx, bucketName) if err != nil { return domain.AddItemResult{}, err } countingReader := NewCountingReader(reader) _, root, err := b.UploadFile(ctx, targetPath, countingReader) if err != nil { return domain.AddItemResult{}, err } return domain.AddItemResult{ BucketPath: root.String(), Bytes: countingReader.BytesRead, }, nil } // get totals for addItems operation func getTotals(sourcePaths []string) (domain.AddItemsResponse, error) { var wg sync.WaitGroup wg.Add(len(sourcePaths)) filesRes := make(chan domain.AddItemsResponse) results := make([]domain.AddItemsResponse, 0) for _, sourcePath := range sourcePaths { go func(pathInFs string) { defer wg.Done() if IsPathDir(pathInFs) { // counting folder as a file in total with 0 bytes filesRes <- domain.AddItemsResponse{ TotalFiles: 1, TotalBytes: 0, } // get recursive var folderSubPaths []string files, err := ioutil.ReadDir(pathInFs) if err != nil { log.Error(fmt.Sprintf("error reading folder path %s ", pathInFs), err) filesRes <- domain.AddItemsResponse{ Error: err, } return } for _, file := range files { subPath := pathInFs + 
"/" + file.Name() if subPath != pathInFs { folderSubPaths = append(folderSubPaths, subPath) } } folderSubPathsRes, err := getTotals(folderSubPaths) if err != nil { filesRes <- domain.AddItemsResponse{ Error: err, } return } filesRes <- folderSubPathsRes } else { // get totals bytes fi, err := os.Stat(pathInFs) if err != nil { log.Error(fmt.Sprintf("error getting file size %s ", pathInFs), err) filesRes <- domain.AddItemsResponse{ Error: err, } return } // get the size filesRes <- domain.AddItemsResponse{ TotalFiles: 1, TotalBytes: fi.Size(), } } }(sourcePath) } resultsDone := make(chan struct{}) var collectErr error totalResult := domain.AddItemsResponse{} go func() { // collect results for chRes := range filesRes { if chRes.Error != nil { collectErr = chRes.Error continue } results = append(results, chRes) } for _, res := range results { totalResult.TotalBytes += res.TotalBytes totalResult.TotalFiles += res.TotalFiles } resultsDone <- struct{}{} }() wg.Wait() // closing channel to close results handling goroutine close(filesRes) // wait for all results to finish <-resultsDone if collectErr != nil { return totalResult, collectErr } return totalResult, nil } func (s *Space) addItems(ctx context.Context, sourcePaths []string, targetPath string, b textile.Bucket, results chan<- domain.AddItemResult) error { // NOTE: sequential upload of files and folders for _, sourcePath := range sourcePaths { if IsPathDir(sourcePath) { s.handleAddItemFolder(ctx, sourcePath, targetPath, b, results) } else { // add files r, err := s.addFile(ctx, sourcePath, targetPath, b) if err != nil { results <- domain.AddItemResult{ SourcePath: sourcePath, Error: err, } // next iteration continue } results <- domain.AddItemResult{ SourcePath: sourcePath, BucketPath: r.BucketPath, Bytes: r.Bytes, } } } return nil } func (s *Space) handleAddItemFolder(ctx context.Context, sourcePath string, targetPath string, b textile.Bucket, results chan<- domain.AddItemResult) { // create folder _, folderName := 
filepath.Split(sourcePath) targetBucketFolder := targetPath + "/" + folderName folderBucketPath, err := s.createFolder(ctx, targetBucketFolder, b) if err != nil { results <- domain.AddItemResult{ SourcePath: sourcePath, Error: err, } return } results <- domain.AddItemResult{ SourcePath: sourcePath, BucketPath: folderBucketPath, } err = s.addFolderRec(sourcePath, targetBucketFolder, ctx, b, results) if err != nil { results <- domain.AddItemResult{ SourcePath: sourcePath, Error: err, } return } } func (s *Space) addFolderRec(sourcePath string, targetPath string, ctx context.Context, b textile.Bucket, results chan<- domain.AddItemResult) error { var folderSubPaths []string // NOTE: only reading each folder one level deep since this function is recursive // if we use Walk we would need to track source paths across recursive calls to avoid duplicates files, err := ioutil.ReadDir(sourcePath) if err != nil { log.Error(fmt.Sprintf("error reading folder path %s ", sourcePath), err) return err } for _, file := range files { if file.Name() != sourcePath { folderSubPaths = append(folderSubPaths, sourcePath+"/"+file.Name()) } } // recursive call to addItems return s.addItems(ctx, folderSubPaths, targetPath, b, results) } // Working with a file func (s *Space) addFile(ctx context.Context, sourcePath string, targetPath string, b textile.Bucket) (domain.AddItemResult, error) { // get sourcePath to io.Reader f, err := os.Open(sourcePath) if err != nil { log.Error(fmt.Sprintf("error opening path %s", sourcePath), err) return domain.AddItemResult{}, err } defer f.Close() _, fileName := filepath.Split(sourcePath) var targetPathBucket string if targetPath == "" || targetPath == "/" { targetPathBucket = fileName } else { targetPathBucket = targetPath + "/" + fileName } // NOTE: could modify addFile to return back more info for processing _, root, err := b.UploadFile(ctx, targetPathBucket, f) if err != nil { log.Error(fmt.Sprintf("error creating targetPath %s in bucket %s", 
targetPathBucket, b.Key()), err) return domain.AddItemResult{}, err } fi, err := f.Stat() var fileSize int64 = 0 if err == nil { fileSize = fi.Size() } return domain.AddItemResult{ SourcePath: sourcePath, BucketPath: root.String(), Bytes: fileSize, }, err } // Removes a file or directory from a bucket // Note: If removing a file a user has been shared, call the RemoveMember method instead, as this works only for local buckets. func (s *Space) RemoveDirOrFile(ctx context.Context, path, bucketName string) error { err := s.waitForTextileInit(ctx) if err != nil { return err } b, err := s.getBucketWithFallback(ctx, bucketName) if err != nil { return err } _, err = b.DeleteDirOrFile(ctx, path) if err != nil { return err } return nil } ================================================ FILE: core/space/services/services_identity.go ================================================ package services import ( "bytes" "context" "encoding/hex" "encoding/json" "errors" "io/ioutil" "net/http" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/space/domain" ) type createIdentityRequest struct { PublicKey string `json:"publicKey"` Username string `json:"username"` } func parseIdentity(resp *http.Response) (*domain.Identity, error) { body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { var returnedErr domain.APIError err = json.Unmarshal(body, &returnedErr) if err != nil { return nil, err } if returnedErr.Message != "" { return nil, errors.New(returnedErr.Message) } return nil, errors.New("Unexpected API error") } var newIdentity domain.Identity err = json.Unmarshal(body, &newIdentity) if err != nil { return nil, err } return &newIdentity, nil } // Creates an identity in Space cloud services. Returns the created identity or an error if any. 
func (s *Space) CreateIdentity(ctx context.Context, username string) (*domain.Identity, error) {
	// The identity is keyed by the daemon's stored public key, hex encoded.
	pub, err := s.keychain.GetStoredPublicKey()
	if err != nil {
		return nil, err
	}
	publicKeyBytes, err := pub.Raw()
	if err != nil {
		return nil, err
	}
	publicKeyHex := hex.EncodeToString(publicKeyBytes)

	identity := &createIdentityRequest{
		PublicKey: publicKeyHex,
		Username:  username,
	}
	identityJSON, err := json.Marshal(identity)
	if err != nil {
		return nil, err
	}

	apiURL := s.cfg.GetString(config.SpaceServicesAPIURL, "")
	resp, err := http.Post(
		apiURL+"/identities",
		"application/json",
		bytes.NewBuffer(identityJSON),
	)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return parseIdentity(resp)
}

// Gets an identity from Space cloud services given a username
func (s *Space) GetIdentityByUsername(ctx context.Context, username string) (*domain.Identity, error) {
	apiURL := s.cfg.GetString(config.SpaceServicesAPIURL, "")
	resp, err := http.Get(apiURL + "/identities/username/" + username)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return parseIdentity(resp)
}


================================================
FILE: core/space/services/services_keypair.go
================================================
package services

import (
	"context"
	"encoding/hex"
	"errors"

	"github.com/FleekHQ/space-daemon/core/keychain"
)

// Generates a key pair and returns a mnemonic for recovering that key later on
func (s *Space) GenerateKeyPair(ctx context.Context, useForce bool) (string, error) {
	var mnemonic string
	var err error
	// useForce overwrites any key pair already stored in the keychain.
	if useForce {
		mnemonic, err = s.keychain.GenerateKeyFromMnemonic(keychain.WithOverride())
	} else {
		mnemonic, err = s.keychain.GenerateKeyFromMnemonic()
	}
	if err != nil {
		return "", err
	}
	return mnemonic, nil
}

// RestoreKeyPairFromMnemonic overwrites the stored key pair with one derived from
// the mnemonic, then restores the thread DB. If the restore fails, the freshly
// imported key pair is rolled back so the keychain is not left pointing at a key
// with no restorable state.
func (s *Space) RestoreKeyPairFromMnemonic(ctx context.Context, mnemonic string) error {
	_, err := s.keychain.GenerateKeyFromMnemonic(keychain.WithMnemonic(mnemonic), keychain.WithOverride())
	if err != nil {
		return err
	}
	if err := s.tc.RestoreDB(ctx); err != nil {
		// NOTE(review): DeleteKeypair's error is intentionally ignored here; the
		// RestoreDB error is the one surfaced to the caller.
		s.keychain.DeleteKeypair()
		return err
	}
	return nil
}

// GetPublicKey returns the stored public key as a hex string.
func (s *Space) GetPublicKey(ctx context.Context) (string, error) {
	pub, err := s.keychain.GetStoredPublicKey()
	if err != nil {
		return "", err
	}
	publicKeyBytes, err := pub.Raw()
	if err != nil {
		return "", err
	}
	publicKeyHex := hex.EncodeToString(publicKeyBytes)
	return publicKeyHex, nil
}

// GetHubAuthToken returns the (possibly cached) hub auth token.
func (s *Space) GetHubAuthToken(ctx context.Context) (string, error) {
	tokens, err := s.hub.GetTokensWithCache(ctx)
	if err != nil {
		return "", err
	}
	return tokens.HubToken, nil
}

// GetMnemonic returns the mnemonic stored in the keychain, erroring when none exists.
func (s *Space) GetMnemonic(ctx context.Context) (string, error) {
	mnemonic, err := s.keychain.GetStoredMnemonic()
	if err != nil {
		return "", err
	}
	if mnemonic == "" {
		return "", errors.New("No mnemonic seed stored in the keychain")
	}
	return mnemonic, nil
}

// DeleteKeypair removes keys from textile, deletes the keychain entry, and wipes
// the local badger store, in that order.
func (s *Space) DeleteKeypair(ctx context.Context) error {
	err := s.waitForTextileInit(ctx)
	if err != nil {
		return err
	}

	// Tell the textile client to stop operations
	if err := s.tc.RemoveKeys(ctx); err != nil {
		return err
	}

	if err := s.keychain.DeleteKeypair(); err != nil {
		return err
	}

	// Clear badger store
	if err := s.store.DropAll(); err != nil {
		return err
	}

	return nil
}


================================================
FILE: core/space/services/services_notifs.go
================================================
package services

import (
	"context"
	"strconv"

	"github.com/FleekHQ/space-daemon/core/space/domain"
)

// Store key under which the "last seen" timestamp is persisted.
const notificationsLastSeenAtStoreKey = "notificationsLastSeenAt"

// GetNotifications pages through hub mail rendered as notifications.
func (s *Space) GetNotifications(ctx context.Context, seek string, limit int) ([]*domain.Notification, error) {
	err := s.waitForTextileHub(ctx)
	if err != nil {
		return nil, err
	}
	r, err := s.tc.GetMailAsNotifications(ctx, seek, limit)
	if err != nil {
		return nil, err
	}
	return r, nil
}

// SetNotificationsLastSeenAt persists the timestamp as a base-10 string in the store.
func (s *Space) SetNotificationsLastSeenAt(timestamp int64) error {
	t := strconv.FormatInt(timestamp, 10)
	err := s.store.Set([]byte(notificationsLastSeenAtStoreKey), []byte(t))
	if err != nil {
		return err
	}
	return nil
}

// GetNotificationsLastSeenAt reads back the persisted timestamp.
func (s *Space) GetNotificationsLastSeenAt() (int64, error) {
	ts, err := s.store.Get([]byte(notificationsLastSeenAtStoreKey))
	if err != nil {
		return 0, err
	}
	i, err := strconv.ParseInt(string(ts), 10, 64)
	if err != nil {
		return 0, err
	}
	return i, nil
}


================================================
FILE: core/space/services/services_search.go
================================================
package services

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/FleekHQ/space-daemon/core/textile/model"

	"github.com/FleekHQ/space-daemon/core/space/domain"
)

// SearchFiles queries the search index and maps each hit to a SearchFileEntry,
// stripping the leading OS path separator from indexed item paths.
func (s *Space) SearchFiles(ctx context.Context, query string) ([]domain.SearchFileEntry, error) {
	searchResult, err := s.tc.GetModel().QuerySearchIndex(ctx, query)
	if err != nil {
		return nil, err
	}

	resultEntries := make([]domain.SearchFileEntry, len(searchResult))
	for i, result := range searchResult {
		resultEntries[i] = domain.SearchFileEntry{
			FileInfo: domain.FileInfo{
				DirEntry: domain.DirEntry{
					Path:          strings.TrimPrefix(result.ItemPath, fmt.Sprintf("%c", os.PathSeparator)),
					IsDir:         result.ItemType == string(model.DirectoryItem),
					Name:          result.ItemName,
					FileExtension: result.ItemExtension,
				},
			},
			Bucket: result.BucketSlug,
			DbID:   result.DbId,
		}
	}

	return resultEntries, nil
}


================================================
FILE: core/space/services/services_sharing.go
================================================
package services

import (
	"archive/zip"
	"context"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/opentracing/opentracing-go"

	"github.com/FleekHQ/space-daemon/config"
	"github.com/FleekHQ/space-daemon/log"
	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/pkg/errors"

	"github.com/FleekHQ/space-daemon/core/space/domain"
	t "github.com/FleekHQ/space-daemon/core/textile"
	"github.com/ipfs/go-cid"
	"github.com/textileio/dcrypto"
)

// GenerateFileSharingLink encrypts a single bucket file with the password and
// publishes it to the user's public share bucket, returning the download link info.
// A non-empty dbID selects a bucket shared from another thread DB.
func (s *Space) GenerateFileSharingLink(
	ctx context.Context,
	encryptionPassword string,
	path string,
	bucketName string,
	dbID string,
) (domain.FileSharingInfo, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "Space.GenerateFileSharingLink")
	defer span.Finish()

	_, fileName := filepath.Split(path)

	var bucket t.Bucket
	var err error
	if dbID != "" {
		bucket, err = s.getBucketForRemoteFile(ctx, bucketName, dbID, path)
	} else {
		bucket, err = s.getBucketWithFallback(ctx, bucketName)
	}
	if err != nil {
		return domain.FileSharingInfo{}, err
	}

	encryptedFile, err := s.encryptBucketFile(ctx, encryptionPassword, path, bucket)
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "file encryption failed")
	}

	// Rewind so the upload reads the encrypted content from the start.
	_, err = encryptedFile.Seek(0, 0)
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "file encryption failed")
	}

	log.Debug("Uploading shared file")
	return s.uploadSharedFileToIpfs(
		ctx,
		encryptionPassword,
		encryptedFile,
		fileName,
		bucketName,
	)
}

// encryptBucketFile downloads bucketPath into a temp file, encrypts it with the
// password, and returns the (still open) encrypted temp file. The plaintext temp
// file is closed and removed before returning; the caller owns the returned file.
func (s *Space) encryptBucketFile(
	ctx context.Context,
	password string,
	bucketPath string,
	bucket t.Bucket,
) (*os.File, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "Space.encryptBucketFile")
	defer span.Finish()

	// tempFile is written from textile before encryption
	tempFile, err := s.createTempFileForPath(ctx, bucketPath)
	if err != nil {
		return nil, err
	}
	defer func() {
		tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()

	// encrypted file is the final encrypted file
	encryptedFile, err := s.createTempFileForPath(ctx, bucketPath)
	if err != nil {
		return nil, err
	}

	err = bucket.GetFile(ctx, bucketPath, tempFile)
	if err != nil {
		return nil, errors.Wrap(err, "file encryption failed")
	}

	// Rewind the plaintext so the encrypter reads it from the start.
	_, err = tempFile.Seek(0, 0)
	if err != nil {
		return nil, errors.Wrap(err, "file encryption failed")
	}

	encryptedReader, err := dcrypto.NewEncrypterWithPassword(tempFile, []byte(password))
	if err != nil {
		return nil, errors.Wrap(err, "file encryption failed")
	}

	log.Debug("Copying encrypted file to disk")
	_, err = io.Copy(encryptedFile, encryptedReader)
	return encryptedFile, err
}

// uploads the shared file to ipfs through users public bucket in hub
// uploadSharedFileToIpfs pushes the already-encrypted content into the user's
// public share bucket under a timestamped name, and builds the download link
// (carrying the file name and cid as query params) for the share site.
func (s *Space) uploadSharedFileToIpfs(
	ctx context.Context,
	password string,
	sharedContent io.Reader,
	fileName string,
	bucketName string,
) (domain.FileSharingInfo, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "Space.uploadSharedFileToIpfs")
	defer span.Finish()

	b, err := s.tc.GetPublicShareBucket(ctx)
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "failed to get public files bucket")
	}

	// Suffix with a nanosecond timestamp so repeated shares of the same file
	// don't collide in the public bucket.
	timestamp := time.Now().UnixNano()
	uploadResult, _, err := b.UploadFile(ctx, fmt.Sprintf("%s-%d", fileName, timestamp), sharedContent)
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "publishing shared file failed")
	}
	encryptedFileHash := uploadResult.Cid().String()

	urlQuery := url.Values{}
	urlQuery.Add("fname", fileName)
	urlQuery.Add("hash", encryptedFileHash)

	return domain.FileSharingInfo{
		Bucket:        bucketName,
		SharedFileCid: encryptedFileHash,
		SharedFileKey: password,
		SpaceDownloadLink: fmt.Sprintf(
			"%s/files/share?%s",
			s.cfg.GetString(config.SpaceStorageSiteUrl, "https://app.space.storage"),
			urlQuery.Encode(),
		),
	}, nil
}

// GenerateFilesSharingLink zips multiple files together
func (s *Space) GenerateFilesSharingLink(
	ctx context.Context,
	encryptionPassword string,
	paths []string,
	bucketName, dbID string,
) (domain.FileSharingInfo, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "Space.GenerateFilesSharingLink")
	defer span.Finish()

	if len(paths) == 0 {
		return EmptyFileSharingInfo, errors.New("no file passed to share link")
	}
	// Single-file shares skip the zip step entirely.
	if len(paths) == 1 {
		return s.GenerateFileSharingLink(ctx, encryptionPassword, paths[0], bucketName, dbID)
	}

	var bucket t.Bucket
	var err error
	if dbID != "" {
		// Safe to use the first path to get the bucket as all shared files should be under the same dbID
		bucket, err = s.getBucketForRemoteFile(ctx, bucketName, dbID, paths[0])
	} else {
		bucket, err = s.getBucketWithFallback(ctx, bucketName)
	}
	if err != nil {
		return domain.FileSharingInfo{}, err
	}

	// create zip file output
	filename := generateFilesSharingZip()

	// tempFile is written from textile before encryption
	tempFile, err := s.createTempFileForPath(ctx, filename)
	if err != nil {
		return domain.FileSharingInfo{}, err
	}
	defer func() {
		tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()

	encryptedFile, err := s.createTempFileForPath(ctx, filename)
	if err != nil {
		return domain.FileSharingInfo{}, err
	}
	defer encryptedFile.Close()

	zipper := zip.NewWriter(tempFile)
	// write each file to zip
	for _, path := range paths {
		// Entries are keyed by base name only, so same-named files from different
		// folders would overwrite each other inside the archive.
		_, fileName := filepath.Split(path)
		writer, err := zipper.Create(fileName)
		if err != nil {
			return EmptyFileSharingInfo, errors.Wrap(err, fmt.Sprintf("failed to compress item: %s", path))
		}

		err = bucket.GetFile(ctx, path, writer)
		if err != nil {
			return EmptyFileSharingInfo, errors.Wrap(err, fmt.Sprintf("failed to compress item: %s", path))
		}
	}
	// Close flushes the zip central directory; must happen before encrypting.
	err = zipper.Close()
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "creating compressed file failed")
	}

	// Rewind the zip so the encrypter reads it from the start.
	_, err = tempFile.Seek(0, 0)
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "file encryption failed")
	}

	encryptedReader, err := dcrypto.NewEncrypterWithPassword(tempFile, []byte(encryptionPassword))
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "file encryption failed")
	}

	_, err = io.Copy(encryptedFile, encryptedReader)
	if err != nil {
		return EmptyFileSharingInfo, err
	}

	// Rewind the ciphertext before handing it to the uploader.
	_, err = encryptedFile.Seek(0, 0)
	if err != nil {
		return EmptyFileSharingInfo, errors.Wrap(err, "encryption failed")
	}

	return s.uploadSharedFileToIpfs(
		ctx,
		encryptionPassword,
		encryptedFile,
		filename,
		bucketName,
	)
}

// OpenSharedFile fetched the ipfs file and decrypts it with the key. Then returns the decrypted
// files location. NOTE: This only opens public link shared files and not those shared via direct invites.
func (s *Space) OpenSharedFile(ctx context.Context, hash, password, filename string) (domain.OpenFileInfo, error) { parsedCid, err := cid.Parse(hash) if err != nil { return domain.OpenFileInfo{}, err } err = s.waitForTextileHub(ctx) if err != nil { return domain.OpenFileInfo{}, err } if password == "" { // try to fetch password from shared files _, password, err = s.tc.GetPublicReceivedFile(ctx, hash, true) if err != nil { return domain.OpenFileInfo{}, errors.Wrap(err, "password is required to open this file") } } encryptedFile, err := s.tc.DownloadPublicItem(ctx, parsedCid) if err != nil { return domain.OpenFileInfo{}, err } defer encryptedFile.Close() decryptedFile, err := s.createTempFileForPath(ctx, filename) if err != nil { return domain.OpenFileInfo{}, err } defer decryptedFile.Close() reader, err := dcrypto.NewDecrypterWithPassword(encryptedFile, []byte(password)) if err != nil { log.Error("initializing decrypter failed", err) return domain.OpenFileInfo{}, errors.New("incorrect password") } decryptedFileSize, err := io.Copy(decryptedFile, reader) if err != nil { return domain.OpenFileInfo{}, errors.Wrap(err, "decryption failed") } // Add accessed file to shared with me list _, err = s.tc.AcceptSharedFileLink(ctx, hash, password, filename, strconv.FormatInt(decryptedFileSize, 10)) if err != nil { return domain.OpenFileInfo{}, errors.Wrap(err, "accepting shared link failed") } return domain.OpenFileInfo{ Location: decryptedFile.Name(), }, nil } func (s *Space) ShareFilesViaPublicKey(ctx context.Context, paths []domain.FullPath, pubkeys []crypto.PubKey) error { err := s.waitForTextileHub(ctx) if err != nil { return err } enhancedPaths, enckeys, err := s.resolveFullPaths(ctx, paths) if err != nil { return err } for i, path := range enhancedPaths { _, err = s.tc.GetModel().CreateSentFileViaInvitation(ctx, path, "", enckeys[i]) if err != nil { return err } } err = s.tc.ManageShareFilesViaPublicKey(ctx, enhancedPaths, pubkeys, enckeys, domain.ReadWriteRoleAction) 
if err != nil { return err } for _, pk := range pubkeys { inviter, err := s.keychain.GetStoredPublicKey() if err != nil { return err } inviterRaw, err := inviter.Raw() if err != nil { return err } pkRaw, err := pk.Raw() if err != nil { return err } d := &domain.Invitation{ InviterPublicKey: hex.EncodeToString(inviterRaw), InviteePublicKey: hex.EncodeToString(pkRaw), ItemPaths: enhancedPaths, Keys: enckeys, } i, err := json.Marshal(d) if err != nil { return err } b := &domain.MessageBody{ Type: domain.INVITATION, Body: i, } j, err := json.Marshal(b) if err != nil { return err } _, err = s.tc.SendMessage(ctx, pk, j) if err != nil { return err } } return nil } func (s *Space) resolveFullPaths(ctx context.Context, paths []domain.FullPath) ([]domain.FullPath, [][]byte, error) { m := s.tc.GetModel() enhancedPaths := make([]domain.FullPath, len(paths)) enckeys := make([][]byte, len(paths)) for i, path := range paths { ep := domain.FullPath{ DbId: path.DbId, Bucket: path.Bucket, Path: path.Path, BucketKey: path.BucketKey, } // this handles personal bucket since for shared-with-me files // the dbid will be preset if ep.DbId == "" { b, err := s.tc.GetDefaultBucket(ctx) if err != nil { return nil, nil, err } bs, err := m.FindBucket(ctx, b.Slug()) if err != nil { return nil, nil, err } ep.DbId = bs.RemoteDbID } if ep.Bucket == "" || ep.Bucket == t.GetDefaultBucketSlug() { b, err := s.tc.GetDefaultBucket(ctx) if err != nil { return nil, nil, err } bs, err := m.FindBucket(ctx, b.GetData().Name) if err != nil { return nil, nil, err } ep.Bucket = t.GetDefaultMirrorBucketSlug() ep.BucketKey = bs.RemoteBucketKey enckeys[i] = bs.EncryptionKey } else { r, err := m.FindReceivedFile(ctx, path.DbId, path.Bucket, path.Path) if err != nil { return nil, nil, err } ep.Bucket = r.Bucket ep.BucketKey = r.BucketKey enckeys[i] = r.EncryptionKey } enhancedPaths[i] = ep } return enhancedPaths, enckeys, nil } func (s *Space) UnshareFilesViaPublicKey(ctx context.Context, paths []domain.FullPath, 
pubkeys []crypto.PubKey) error { err := s.waitForTextileHub(ctx) if err != nil { return err } enhancedPaths, enckeys, err := s.resolveFullPaths(ctx, paths) if err != nil { return err } err = s.tc.ManageShareFilesViaPublicKey(ctx, enhancedPaths, pubkeys, enckeys, domain.DeleteRoleAction) if err != nil { return err } return s.sendPathsRevokedInvitation(ctx, pubkeys, enhancedPaths, enckeys) } func (s *Space) sendPathsRevokedInvitation( ctx context.Context, pubkeys []crypto.PubKey, enhancedPaths []domain.FullPath, keys [][]byte, ) error { for _, pk := range pubkeys { uninviter, err := s.keychain.GetStoredPublicKey() if err != nil { return err } rawUniviter, err := uninviter.Raw() if err != nil { return err } pkRaw, err := pk.Raw() if err != nil { return err } d := &domain.RevokedInvitation{ InviterPublicKey: hex.EncodeToString(rawUniviter), InviteePublicKey: hex.EncodeToString(pkRaw), ItemPaths: enhancedPaths, Keys: keys, } i, err := json.Marshal(d) if err != nil { return err } b := &domain.MessageBody{ Type: domain.REVOKED_INVITATION, Body: i, } j, err := json.Marshal(b) if err != nil { return err } _, err = s.tc.SendMessage(ctx, pk, j) if err != nil { return err } } return nil } var errInvitationNotFound = errors.New("invitation not found") var errFailedToNotifyInviter = errors.New("failed to notify inviter of invitation status") // HandleSharedFilesInvitation accepts or rejects an invitation based on the invitation id func (s *Space) HandleSharedFilesInvitation(ctx context.Context, invitationId string, accept bool) error { err := s.waitForTextileHub(ctx) if err != nil { return err } n, err := s.tc.GetMailAsNotifications(ctx, invitationId, 1) if err != nil { log.Error("failed to get invitation", err) return errInvitationNotFound } if len(n) == 0 { log.Debug("shared file invitation not found", "invitationId:"+invitationId) return errInvitationNotFound } invitation, err := extractInvitation(n[0]) if err != nil { return err } if accept { invitation, err = 
s.tc.AcceptSharedFilesInvitation(ctx, invitation) if err != nil { return err } // notify inviter, it was accepted invitersPk, err := decodePublicKey(err, invitation.InviterPublicKey) if err != nil { log.Error("should not happen, but inviters public key is invalid", err) return errFailedToNotifyInviter } messageBody, err := json.Marshal(&invitation) if err != nil { log.Error("error encoding invitation response body", err) return errFailedToNotifyInviter } message, err := json.Marshal(&domain.MessageBody{ Type: domain.INVITATION_REPLY, Body: messageBody, }) if err != nil { log.Error("error encoding invitation response", err) return errFailedToNotifyInviter } _, err = s.tc.SendMessage(ctx, invitersPk, message) } else { invitation, err = s.tc.RejectSharedFilesInvitation(ctx, invitation) } if err != nil { return err } return err } func (s *Space) AddRecentlySharedPublicKeys(ctx context.Context, pubkeys []crypto.PubKey) error { err := s.waitForTextileInit(ctx) if err != nil { return err } var ps string for _, pk := range pubkeys { b, err := pk.Raw() if err != nil { return err } ps = hex.EncodeToString(b) // TODO: transaction _, err = s.tc.GetModel().CreateSharedPublicKey(ctx, ps) if err != nil { return nil } } return nil } func (s *Space) RecentlySharedPublicKeys(ctx context.Context) ([]crypto.PubKey, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, err } ret := []crypto.PubKey{} keys, err := s.tc.GetModel().ListSharedPublicKeys(ctx) if err != nil { return nil, err } for _, schema := range keys { b, err := hex.DecodeString(schema.PublicKey) if err != nil { return nil, err } p, err := crypto.UnmarshalEd25519PublicKey([]byte(b)) if err != nil { return nil, err } ret = append(ret, p) } return ret, nil } // Returns a list of shared files the user has received and accepted func (s *Space) GetSharedWithMeFiles(ctx context.Context, seek string, limit int) ([]*domain.SharedDirEntry, string, error) { err := s.waitForTextileInit(ctx) if err != nil { return 
nil, "", err } items, offset, err := s.tc.GetReceivedFiles(ctx, true, seek, limit) return items, offset, err } // Returns a list of shared files the user has shared func (s *Space) GetSharedByMeFiles(ctx context.Context, seek string, limit int) ([]*domain.SharedDirEntry, string, error) { err := s.waitForTextileInit(ctx) if err != nil { return nil, "", err } items, offset, err := s.tc.GetSentFiles(ctx, seek, limit) return items, offset, err } ================================================ FILE: core/space/services/services_vault.go ================================================ package services import ( "context" "encoding/hex" "errors" "strings" "github.com/FleekHQ/space-daemon/core/backup" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/vault" "github.com/libp2p/go-libp2p-core/crypto" ) const separator = "___" // Creates an obfuscated local file that contains everything needed to restore the state from this or another device func (s *Space) CreateLocalKeysBackup(ctx context.Context, path string) error { priv, _, err := s.keychain.GetStoredKeyPairInLibP2PFormat() if err != nil { return err } privInBytes, err := priv.Raw() if err != nil { return err } b := &backup.Backup{ PrivateKey: hex.EncodeToString(privInBytes), } if err := backup.MarshalBackup(path, b); err != nil { return err } return nil } // Restores the state by receiving the path to a local backup // Warning: This will delete any local state before restoring the backup func (s *Space) RecoverKeysByLocalBackup(ctx context.Context, path string) error { // Retrieve the backup b, err := backup.UnmarshalBackup(path) if err != nil { return err } privInBytes, err := hex.DecodeString(b.PrivateKey) if err != nil { return err } // Restore keychain priv, err := crypto.UnmarshalEd25519PrivateKey(privInBytes) if err != nil { return err } if err := s.keychain.ImportExistingKeyPair(priv, ""); err != nil { return err } if err := s.tc.RestoreDB(ctx); err != nil { 
s.keychain.DeleteKeypair() return err } return nil } // Uses vault service to fetch and decrypt a keypair set func (s *Space) RecoverKeysByPassphrase(ctx context.Context, uuid string, pass string, backupType domain.KeyBackupType) error { items, err := s.vault.Retrieve(uuid, pass, backupType) if err != nil { return err } if len(items) == 0 { return errors.New("Retrieved vault does not contain keys") } // TODO: Generalize to N keys privAndMnemonic := strings.Split(items[0].Value, separator) privInBytes, err := hex.DecodeString(privAndMnemonic[0]) if err != nil { return err } unmarshalledPriv, err := crypto.UnmarshalEd25519PrivateKey(privInBytes) if err != nil { return err } if err := s.keychain.ImportExistingKeyPair(unmarshalledPriv, privAndMnemonic[1]); err != nil { return err } if err := s.tc.RestoreDB(ctx); err != nil { s.keychain.DeleteKeypair() return err } return nil } // Uses the vault service to securely store the current keypair func (s *Space) BackupKeysByPassphrase(ctx context.Context, uuid string, pass string, backupType domain.KeyBackupType) error { tokens, err := s.GetAPISessionTokens(ctx) if err != nil { return err } priv, _, err := s.keychain.GetStoredKeyPairInLibP2PFormat() if err != nil { return err } privInBytes, err := priv.Raw() if err != nil { return err } mnemonic, err := s.keychain.GetStoredMnemonic() if err != nil { return err } // TODO: Generalize to item array once we support multiple keys item := vault.VaultItem{ ItemType: vault.PrivateKeyWithMnemonic, Value: hex.EncodeToString(privInBytes) + separator + mnemonic, } items := []vault.VaultItem{item} if _, err := s.vault.Store(uuid, pass, backupType, tokens.ServicesToken, items); err != nil { return err } return nil } // Tests a passphrase without storing anything to check if the passphrase is correct func (s *Space) TestPassphrase(ctx context.Context, uuid string, pass string) error { items, err := s.vault.Retrieve(uuid, pass, domain.PASSWORD) if err != nil { return err } if len(items) == 0 
{ return errors.New("Retrieved vault does not contain keys") } return nil } ================================================ FILE: core/space/services/sharing_utils.go ================================================ package services import ( "encoding/hex" "errors" "github.com/FleekHQ/space-daemon/core/space/domain" crypto "github.com/libp2p/go-libp2p-crypto" ) var EmptyFileSharingInfo = domain.FileSharingInfo{} func generateFilesSharingZip() string { //return fmt.Sprintf("space_shared_files-%d.zip", time.Now().UnixNano()) return "space_shared_files.zip" } func extractInvitation(notification *domain.Notification) (domain.Invitation, error) { if notification.NotificationType != domain.INVITATION { return domain.Invitation{}, errInvitationNotFound } notification.InvitationValue.InvitationID = notification.ID return notification.InvitationValue, nil } // NOTE: This assumes that the public key string is ed25519 hex encoded string func decodePublicKey(err error, pkString string) (crypto.PubKey, error) { pkBytes, err := hex.DecodeString(pkString) if err != nil { return nil, errors.New("invalid encoding for public key") } pk, err := crypto.UnmarshalEd25519PublicKey(pkBytes) if err != nil { return nil, errors.New("invalid public key format") } return pk, nil } ================================================ FILE: core/space/space.go ================================================ package space import ( "context" "errors" "io" "github.com/FleekHQ/space-daemon/core/permissions" "github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/core/vault" "github.com/libp2p/go-libp2p-core/crypto" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/env" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/space/services" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile" ) // Service Layer should not 
// depend on gRPC dependencies
//
// Service is the domain-level API of the daemon: file/bucket operations,
// key management, identity, sharing and notifications. Implemented by
// services.Space (see NewService below).
type Service interface {
	RegisterSyncer(sync services.Syncer)
	OpenFile(ctx context.Context, path, bucketName, dbID string) (domain.OpenFileInfo, error)
	GetConfig(ctx context.Context) domain.AppConfig
	// ListDirs lists a directory recursively; ListDir lists a single level.
	ListDirs(ctx context.Context, path string, bucketName string, listMembers bool) ([]domain.FileInfo, error)
	ListDir(ctx context.Context, path string, bucketName string, listMembers bool) ([]domain.FileInfo, error)
	// Key management
	GenerateKeyPair(ctx context.Context, useForce bool) (mnemonic string, err error)
	DeleteKeypair(ctx context.Context) error
	GetMnemonic(ctx context.Context) (mnemonic string, err error)
	RestoreKeyPairFromMnemonic(ctx context.Context, mnemonic string) error
	RecoverKeysByPassphrase(ctx context.Context, uuid string, pass string, backupType domain.KeyBackupType) error
	BackupKeysByPassphrase(ctx context.Context, uuid string, pass string, backupType domain.KeyBackupType) error
	TestPassphrase(ctx context.Context, uuid string, pass string) error
	GetPublicKey(ctx context.Context) (string, error)
	GetHubAuthToken(ctx context.Context) (string, error)
	// Bucket and file operations
	CreateFolder(ctx context.Context, path string, bucketName string) error
	CreateBucket(ctx context.Context, slug string) (textile.Bucket, error)
	ListBuckets(ctx context.Context) ([]textile.Bucket, error)
	AddItems(ctx context.Context, sourcePaths []string, targetPath string, bucketName string) (<-chan domain.AddItemResult, domain.AddItemsResponse, error)
	AddItemWithReader(ctx context.Context, reader io.Reader, targetPath, bucketName string) (domain.AddItemResult, error)
	// Identity
	CreateIdentity(ctx context.Context, username string) (*domain.Identity, error)
	GetIdentityByUsername(ctx context.Context, username string) (*domain.Identity, error)
	// Sharing
	GenerateFileSharingLink(ctx context.Context, encryptionPassword, path, bucketName, dbID string) (domain.FileSharingInfo, error)
	GenerateFilesSharingLink(ctx context.Context, encryptionPassword string, paths []string, bucketName, dbID string) (domain.FileSharingInfo, error)
	OpenSharedFile(ctx context.Context, cid, password, filename string) (domain.OpenFileInfo, error)
	ShareBucket(ctx context.Context, slug string) (*domain.ThreadInfo, error)
	JoinBucket(ctx context.Context, slug string, threadinfo *domain.ThreadInfo) (bool, error)
	CreateLocalKeysBackup(ctx context.Context, pathToKeyBackup string) error
	RecoverKeysByLocalBackup(ctx context.Context, pathToKeyBackup string) error
	GetNotifications(ctx context.Context, seek string, limit int) ([]*domain.Notification, error)
	ToggleBucketBackup(ctx context.Context, bucketSlug string, bucketBackup bool) error
	BucketBackupRestore(ctx context.Context, bucketSlug string) error
	ShareFilesViaPublicKey(ctx context.Context, paths []domain.FullPath, pubkeys []crypto.PubKey) error
	UnshareFilesViaPublicKey(ctx context.Context, paths []domain.FullPath, pks []crypto.PubKey) error
	HandleSharedFilesInvitation(ctx context.Context, invitationId string, accept bool) error
	GetAPISessionTokens(ctx context.Context) (*domain.APISessionTokens, error)
	AddRecentlySharedPublicKeys(ctx context.Context, pubkeys []crypto.PubKey) error
	RecentlySharedPublicKeys(ctx context.Context) ([]crypto.PubKey, error)
	GetSharedWithMeFiles(ctx context.Context, seek string, limit int) ([]*domain.SharedDirEntry, string, error)
	GetSharedByMeFiles(ctx context.Context, seek string, limit int) ([]*domain.SharedDirEntry, string, error)
	// Notifications bookkeeping (no ctx: simple store reads/writes)
	SetNotificationsLastSeenAt(timestamp int64) error
	GetNotificationsLastSeenAt() (int64, error)
	TruncateData(ctx context.Context) error
	SearchFiles(ctx context.Context, query string) ([]domain.SearchFileEntry, error)
	InitializeMasterAppToken(ctx context.Context) (*permissions.AppToken, error)
	RemoveDirOrFile(ctx context.Context, path, bucketName string) error
}

// serviceOptions holds the optional dependencies of the service,
// populated through ServiceOption functions.
type serviceOptions struct {
	cfg config.Config
	env env.SpaceEnv
}

var defaultOptions = serviceOptions{}

// ServiceOption mutates serviceOptions; used with NewService.
type ServiceOption func(o *serviceOptions)

func NewService(
	store store.Store,
	tc textile.Client,
	sync services.Syncer,
	cfg config.Config,
	kc
	keychain.Keychain,
	v vault.Vault,
	h hub.HubAuth,
	opts ...ServiceOption,
) (Service, error) {
	// The service reads from the store immediately, so refuse a closed one.
	if !store.IsOpen() {
		return nil, errors.New("service expects an opened store to work")
	}

	o := defaultOptions
	for _, opt := range opts {
		opt(&o)
	}

	// Fall back to the real OS environment when none was injected.
	if o.env == nil {
		o.env = env.New()
	}

	sv := services.NewSpace(store, tc, sync, cfg, o.env, kc, v, h)

	return sv, nil
}

// WithEnv overrides the SpaceEnv used by the service (nil values are ignored).
func WithEnv(env env.SpaceEnv) ServiceOption {
	return func(o *serviceOptions) {
		if env != nil {
			o.env = env
		}
	}
}

================================================
FILE: core/space/space_test.go
================================================
package space

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/textileio/dcrypto"

	"github.com/FleekHQ/space-daemon/core/space/domain"

	"github.com/libp2p/go-libp2p-core/crypto"

	"github.com/stretchr/testify/mock"

	"github.com/FleekHQ/space-daemon/core/space/services"
	"github.com/FleekHQ/space-daemon/core/textile/bucket"
	"github.com/FleekHQ/space-daemon/core/textile/hub"
	"github.com/FleekHQ/space-daemon/core/textile/model"
	"github.com/FleekHQ/space-daemon/core/textile/utils"
	"github.com/FleekHQ/space-daemon/core/vault"
	"github.com/FleekHQ/space-daemon/mocks"
	"github.com/stretchr/testify/assert"
	buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb"
)

// Shared test fixtures, re-initialized by initTestService before each test.
var (
	cfg           *mocks.Config
	st            *mocks.Store
	textileClient *mocks.Client
	mockPath      *mocks.Path
	mockBucket    *mocks.Bucket
	mockEnv       *mocks.SpaceEnv
	mockSync      *mocks.Syncer
	mockKeychain  *mocks.Keychain
	mockVault     *mocks.Vault
	mockHub       *mocks.HubAuth
	mockModel     *mocks.Model
	mockPubKey    crypto.PubKey
	mockPrivKey   crypto.PrivKey
	mockPubKeyHex string
	mockPrivKeyHex string
)

// TearDown cleans up per-test temp files; GetTestDir exposes them to a test.
type TearDown func()

type GetTestDir func() *testDir

func closeAndDelete(f *os.File) {
	f.Close()
	os.Remove(f.Name())
}

// testDir describes the temp directory (and its files) created for a test.
type testDir struct {
	dir       string
	fileNames []string
}

// initTestService wires a services.Space instance backed entirely by mocks,
// plus a temp dir with two files for upload/open tests.
func initTestService(t *testing.T) (*services.Space, GetTestDir, TearDown) {
	st =
new(mocks.Store) cfg = new(mocks.Config) textileClient = new(mocks.Client) mockPath = new(mocks.Path) mockBucket = new(mocks.Bucket) mockEnv = new(mocks.SpaceEnv) mockSync = new(mocks.Syncer) mockKeychain = new(mocks.Keychain) mockVault = new(mocks.Vault) mockHub = new(mocks.HubAuth) mockModel = new(mocks.Model) var dir string var err error if dir, err = ioutil.TempDir("", "space-test-folders"); err != nil { t.Fatalf("error creating temp dir for tests %s", err.Error()) } log.Println("temp dir", dir) tmpFile1, err := os.Create(dir + "/test1.txt") if err != nil { t.Fatalf("error creating temp file for tests %s", err.Error()) } tmpFile2, err := os.Create(dir + "/test2.pdf") if err != nil { t.Fatalf("error creating temp file for tests %s", err.Error()) } tmpFiles := []string{tmpFile1.Name(), tmpFile2.Name()} getTestDir := func() *testDir { return &testDir{ dir: dir, fileNames: tmpFiles, } } tearDown := func() { closeAndDelete(tmpFile1) closeAndDelete(tmpFile2) os.RemoveAll(dir) } mockPubKeyHex = "67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9" mockPrivKeyHex = "dd55f8921f90fdf31c6ef9ad86bd90605602fd7d32dc8ea66ab72deb6a82821c67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9" pubKeyBytes, _ := hex.DecodeString(mockPubKeyHex) privKeyBytes, _ := hex.DecodeString(mockPrivKeyHex) mockPubKey, _ = crypto.UnmarshalEd25519PublicKey(pubKeyBytes) mockPrivKey, _ = crypto.UnmarshalEd25519PrivateKey(privKeyBytes) // NOTE: if we need to test without the store open we must override on each test st.On("IsOpen").Return(true) sv, err := NewService(st, textileClient, mockSync, cfg, mockKeychain, mockVault, mockHub, WithEnv(mockEnv)) if err != nil { t.Fatal(err) } return sv.(*services.Space), getTestDir, tearDown } func TestNewService(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() assert.NotNil(t, sv) } func TestService_CreateBucket(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() slug := "testbucketslug" key := 
"testkey" path := "testpath" d1 := int64(1593405100) d2 := int64(1593405100) mb := &bucket.BucketData{ Key: key, Name: slug, Path: path, CreatedAt: d1, UpdatedAt: d2, } textileClient.On("CreateBucket", mock.Anything, mock.Anything).Return(mockBucket, nil) textileClient.On("IsInitialized").Return(true) mockBucket.On( "GetData", mock.Anything, ).Return(*mb, nil) res, err := sv.CreateBucket(context.Background(), "slug") assert.Nil(t, err) assert.NotEmpty(t, res) assert.Equal(t, key, res.GetData().Key) assert.Equal(t, slug, res.GetData().Name) assert.Equal(t, path, res.GetData().Path) assert.Equal(t, d1, res.GetData().CreatedAt) assert.Equal(t, d2, res.GetData().UpdatedAt) } func TestService_ListDirs(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() bucketPath := "/ipfs/bafybeian44ntmjjfjbqt4dlkq4fiuhfzcxfunzuuzhbb7xkrnsdjb2sjha" mockDirItems := &bucket.DirEntries{ Item: &buckets_pb.PathItem{ Items: []*buckets_pb.PathItem{ { Path: bucketPath + "/.textileseed", Name: ".textileseed", IsDir: false, Size: 16, Cid: "bafkreia4q63he72sgzrn64kpa2uu5it7utmqkdby6t3xck6umy77x7p2a1", Metadata: &buckets_pb.Metadata{ UpdatedAt: time.Now().Unix(), }, }, { Path: bucketPath + "/somedir", Name: "somedir", IsDir: true, Size: 0, Cid: "", Metadata: &buckets_pb.Metadata{ UpdatedAt: time.Now().Unix(), }, }, { Path: bucketPath + "/example.txt", Name: "example.txt", IsDir: false, Size: 16, Cid: "bafkreia4q63he72sgzrn64kpa2uu5it7utmqkdby6t3xck6umy77x7p2ae", Metadata: &buckets_pb.Metadata{ UpdatedAt: time.Now().Unix(), }, }, }, }, } mockDirItemsSubfolder := &bucket.DirEntries{ Item: &buckets_pb.PathItem{ Items: []*buckets_pb.PathItem{ { Path: bucketPath + "/somedir/example.txt", Name: "example.txt", IsDir: false, Size: 16, Cid: "bafkreia4q63he72sgzrn64kpa2uu5it7utmqkdby6t3xck6umy77x7p2ae", Metadata: &buckets_pb.Metadata{ UpdatedAt: time.Now().Unix(), }, }, }, }, } textileClient.On("GetDefaultBucket", mock.Anything).Return(mockBucket, nil) 
textileClient.On("IsInitialized").Return(true) mockBucket.On( "ListDirectory", mock.Anything, "", ).Return(mockDirItems, nil) mockBucket.On( "FileExists", mock.Anything, mock.Anything, ).Return(true, nil) mockBucket.On( "ListDirectory", mock.Anything, "/somedir", ).Return(mockDirItemsSubfolder, nil) mockBucket.On( "Slug", ).Return( "meow", ) mockMirrorFiles := make(map[string]*model.MirrorFileSchema) mockMirrorFiles[bucketPath+"/.textileseed"] = &model.MirrorFileSchema{ Backup: true, } mockMirrorFiles[bucketPath+"/somedir"] = &model.MirrorFileSchema{ Backup: true, } mockMirrorFiles[bucketPath+"/example.txt"] = &model.MirrorFileSchema{ Backup: true, } mockMirrorFiles[bucketPath+"/somedir/example.txt"] = &model.MirrorFileSchema{ Backup: true, } mockModel.On("FindMirrorFileByPaths", mock.Anything, mock.Anything).Return(mockMirrorFiles, nil) textileClient.On("GetModel").Return(mockModel) textileClient.On( "GetPathAccessRoles", mock.Anything, mock.Anything, mock.Anything, ).Return( []domain.Member{}, nil, ) res, err := sv.ListDirs(context.Background(), "", "", true) assert.Nil(t, err) assert.NotEmpty(t, res) // .textileseed shouldn't be part of the reply assert.Len(t, res, 3) if res[0].IsDir { // check for dir assert.True(t, res[0].IsDir) assert.Equal(t, "", res[0].FileExtension) } assert.False(t, res[1].IsDir) assert.Equal(t, "example.txt", res[1].Name) assert.Equal(t, "txt", res[1].FileExtension) assert.Equal(t, "bafkreia4q63he72sgzrn64kpa2uu5it7utmqkdby6t3xck6umy77x7p2ae", res[1].IpfsHash) assert.Equal(t, "/somedir/example.txt", res[1].Path) assert.False(t, res[2].IsDir) assert.Equal(t, "example.txt", res[2].Name) assert.Equal(t, "txt", res[2].FileExtension) assert.Equal(t, "bafkreia4q63he72sgzrn64kpa2uu5it7utmqkdby6t3xck6umy77x7p2ae", res[2].IpfsHash) assert.Equal(t, "/example.txt", res[2].Path) // assert mocks cfg.AssertExpectations(t) } // NOTE: update this test when it supports multiple buckets func TestService_OpenFile(t *testing.T) { sv, getDir, tearDown := 
initTestService(t) defer tearDown() testKey := "bucketKey" testPath := "/ipfs/bafybeievdakous3kamdgy6yxtmkvmibmro23kgf7xrduvwrxrlryzvu3sm/file.txt" testFileName := "file.txt" // setup mocks mockEnv.On("WorkingFolder").Return( getDir().dir, ) mockSync.On("GetOpenFilePath", testKey, testPath, mock.Anything, mock.Anything).Return( "", false, ) mockSync.On("AddFileWatch", mock.Anything).Return( nil, ) textileClient.On("GetDefaultBucket", mock.Anything).Return(mockBucket, nil) textileClient.On("IsInitialized").Return(true) mockBucket.On( "GetFile", mock.Anything, testPath, mock.Anything, ).Return(nil) mockBucket.On( "Key", ).Return(testKey) mockBucket.On( "Slug", ).Return(testKey) mockBucket.On( "ListDirectory", mock.Anything, mock.Anything, ).Return(&bucket.DirEntries{ Item: &buckets_pb.PathItem{ Cid: "", }, }, nil) testThreadID, err := utils.ParseDbIDFromString("AFKRGLCIX5CQWA2244J3GBH4ERF2MLNPJWVU72BPU2BGB5OOZH5PR7Q=") if err != nil { t.Fatal(err) } mockBucket.On( "GetThreadID", mock.Anything, ).Return( testThreadID, nil, ) res, err := sv.OpenFile(context.Background(), testPath, "", "") assert.Nil(t, err) assert.NotEmpty(t, res) assert.FileExists(t, res.Location) assert.Contains(t, res.Location, os.TempDir()) assert.True(t, strings.HasSuffix(res.Location, testFileName)) // assert mocks cfg.AssertExpectations(t) textileClient.AssertExpectations(t) } func TestService_AddItems_FilesOnly(t *testing.T) { sv, getTempDir, tearDown := initTestService(t) defer tearDown() // setup tests testKey := "bucketKey" bucketPath := "/tests" testSourcePaths := getTempDir().fileNames textileClient.On("GetDefaultBucket", mock.Anything).Return(mockBucket, nil) textileClient.On("IsInitialized").Return(true) mockBucket.On( "Key", ).Return(testKey) mockBucket.On( "Slug", ).Return("personal") mockPath.On("String").Return("hash") for _, f := range testSourcePaths { _, fileName := filepath.Split(f) mockBucket.On( "UploadFile", mock.Anything, bucketPath+"/"+fileName, mock.Anything, ).Return(nil, 
mockPath, nil) } ch, res, err := sv.AddItems(context.Background(), testSourcePaths, bucketPath, "") assert.Nil(t, err) assert.NotNil(t, ch) assert.NotEmpty(t, res) assert.Equal(t, int64(len(getTempDir().fileNames)), res.TotalFiles) count := 0 for res := range ch { count++ assert.NotNil(t, res) assert.Nil(t, res.Error) assert.NotEmpty(t, res.BucketPath) assert.NotEmpty(t, res.SourcePath) } assert.Equal(t, count, len(testSourcePaths)) // assert mocks textileClient.AssertExpectations(t) mockBucket.AssertNumberOfCalls(t, "UploadFile", len(testSourcePaths)) } func TestService_AddItems_Folder(t *testing.T) { sv, getTempDir, tearDown := initTestService(t) defer tearDown() // setup tests testKey := "bucketKey" bucketPath := "/tests" testSourcePaths := []string{getTempDir().dir} _, folderName := filepath.Split(getTempDir().dir) targetBucketPath := bucketPath + "/" + folderName textileClient.On("GetDefaultBucket", mock.Anything).Return(mockBucket, nil) textileClient.On("IsInitialized").Return(true) mockBucket.On( "Key", ).Return(testKey) mockPath.On("String").Return("hash") mockBucket.On( "CreateDirectory", mock.Anything, targetBucketPath, ).Return(nil, mockPath, nil) for _, f := range getTempDir().fileNames { _, fileName := filepath.Split(f) mockBucket.On( "UploadFile", mock.Anything, targetBucketPath+"/"+fileName, mock.Anything, ).Return(nil, mockPath, nil) } mockBucket.On( "Slug", ).Return("personal") ch, res, err := sv.AddItems(context.Background(), testSourcePaths, bucketPath, "") assert.Nil(t, err) assert.NotNil(t, ch) assert.NotEmpty(t, res) assert.Equal(t, int64(len(getTempDir().fileNames)+1), res.TotalFiles) count := 0 for res := range ch { count++ assert.NotNil(t, res) assert.Nil(t, res.Error) assert.NotEmpty(t, res.BucketPath) assert.NotEmpty(t, res.SourcePath) } assert.Equal(t, count, len(testSourcePaths)+len(getTempDir().fileNames)) // assert mocks textileClient.AssertExpectations(t) mockBucket.AssertNumberOfCalls(t, "UploadFile", len(getTempDir().fileNames)) 
mockBucket.AssertNumberOfCalls(t, "CreateDirectory", 1) } func TestService_AddItems_OnError(t *testing.T) { sv, getTempDir, tearDown := initTestService(t) defer tearDown() // setup tests testKey := "bucketKey" bucketPath := "/tests" testSourcePaths := getTempDir().fileNames textileClient.On("GetDefaultBucket", mock.Anything).Return(mockBucket, nil) textileClient.On("IsInitialized").Return(true) mockBucket.On( "Key", ).Return(testKey) mockPath.On("String").Return("hash") bucketError := errors.New("bucket failed") mockBucket.On( "UploadFile", mock.Anything, mock.Anything, mock.Anything, ).Return(nil, nil, bucketError) ch, _, err := sv.AddItems(context.Background(), testSourcePaths, bucketPath, "") assert.Nil(t, err) assert.NotNil(t, ch) count := 0 for res := range ch { count++ assert.NotNil(t, res) assert.NotNil(t, res.Error) assert.NotEmpty(t, res.SourcePath) assert.Empty(t, res.BucketPath) } assert.Equal(t, count, len(testSourcePaths)) // assert mocks textileClient.AssertExpectations(t) mockBucket.AssertNumberOfCalls(t, "UploadFile", len(getTempDir().fileNames)) } func TestService_CreateIdentity(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() createIdentityMock := func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(`{ "address": "0xd606f05a2a980f58737aa913553c8d6eac8b", "username": "dmerrill", "publicKey": "67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9"}`)) } serverMock := func() *httptest.Server { handler := http.NewServeMux() handler.HandleFunc("/identities", createIdentityMock) srv := httptest.NewServer(handler) return srv } server := serverMock() defer server.Close() cfg.On("GetString", mock.Anything, mock.Anything).Return( // "https://td4uiovozc.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER server.URL, ) testUsername := "dmerrill" mockKeychain.On( "GetStoredPublicKey", ).Return(mockPubKey, nil) identity, err := sv.CreateIdentity(context.Background(), testUsername) 
assert.Nil(t, err) assert.NotNil(t, identity) assert.Equal(t, identity.PublicKey, mockPubKeyHex) assert.Equal(t, identity.Username, testUsername) } func TestService_CreateIdentity_OnError(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() createIdentityMock := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) _, _ = w.Write([]byte(`{ "message": "Validation Error: An identity with the given username already exists"}`)) } serverMock := func() *httptest.Server { handler := http.NewServeMux() handler.HandleFunc("/identities", createIdentityMock) srv := httptest.NewServer(handler) return srv } server := serverMock() defer server.Close() cfg.On("GetString", mock.Anything, mock.Anything).Return( // "https://td4uiovozc.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER server.URL, ) testUsername := "dmerrill" mockKeychain.On( "GetStoredPublicKey", ).Return(mockPubKey, nil) identity, err := sv.CreateIdentity(context.Background(), testUsername) assert.Nil(t, identity) assert.NotNil(t, err) assert.Equal(t, err, errors.New("Validation Error: An identity with the given username already exists")) } func TestService_GetIdentityByUsername(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() createIdentityMock := func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(`{ "address": "0xd606f05a2a980f58737aa913553c8d6eac8b", "username": "dmerrill", "publicKey": "67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9"}`)) } serverMock := func() *httptest.Server { handler := http.NewServeMux() handler.HandleFunc("/identities/username/dmerrill", createIdentityMock) srv := httptest.NewServer(handler) return srv } server := serverMock() defer server.Close() cfg.On("GetString", mock.Anything, mock.Anything).Return( // "https://td4uiovozc.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER server.URL, ) testUsername := "dmerrill" identity, err := 
sv.GetIdentityByUsername(context.Background(), testUsername) assert.Nil(t, err) assert.NotNil(t, identity) assert.NotNil(t, identity.Address) assert.NotNil(t, identity.PublicKey) assert.Equal(t, identity.Username, testUsername) } func TestService_GetIdentityByUsername_OnError(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() createIdentityMock := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) _, _ = w.Write([]byte(`{ "message": "Not Found Error: Identity with username dmerrill1 not found." }`)) } serverMock := func() *httptest.Server { handler := http.NewServeMux() handler.HandleFunc("/identities/username/dmerrill1", createIdentityMock) srv := httptest.NewServer(handler) return srv } server := serverMock() defer server.Close() cfg.On("GetString", mock.Anything, mock.Anything).Return( // "https://td4uiovozc.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER server.URL, ) testUsername := "dmerrill1" identity, err := sv.GetIdentityByUsername(context.Background(), testUsername) assert.Nil(t, identity) assert.NotNil(t, err) assert.Equal(t, err, errors.New("Not Found Error: Identity with username dmerrill1 not found.")) } func TestService_GetPublicKey(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() mockKeychain.On( "GetStoredPublicKey", ).Return(mockPubKey, nil) pub, err := sv.GetPublicKey(context.Background()) assert.Nil(t, err) assert.NotNil(t, pub) assert.Equal(t, pub, mockPubKeyHex) } func TestService_BackupAndRestore(t *testing.T) { sv, getTestDir, tearDown := initTestService(t) defer tearDown() testDir := getTestDir() mockKeychain.On( "GetStoredKeyPairInLibP2PFormat", ).Return(mockPrivKey, mockPubKey, nil) ctx := context.Background() path := testDir.fileNames[0] err := sv.CreateLocalKeysBackup(ctx, path) backup, _ := ioutil.ReadFile(path) assert.Nil(t, err) assert.NotNil(t, backup) mockKeychain.On("ImportExistingKeyPair", mock.Anything, 
mock.Anything).Return(nil) textileClient.On("RestoreDB", mock.Anything).Return(nil) err = sv.RecoverKeysByLocalBackup(ctx, path) assert.Nil(t, err) mockKeychain.AssertCalled(t, "ImportExistingKeyPair", mockPrivKey, "") } func TestService_VaultBackup(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() pass := "strawberry123" uuid := "c907e7ef-7b36-4ab1-8a56-f788d7526a2c" backupType := domain.PASSWORD ctx := context.Background() mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle" mockKeychain.On( "GetStoredKeyPairInLibP2PFormat", ).Return(mockPrivKey, mockPubKey, nil) mockKeychain.On("GetStoredMnemonic").Return(mnemonic, nil) mockVault.On("Store", uuid, pass, backupType, mock.Anything, mock.Anything).Return(nil, nil) mockHub.On("GetTokensWithCache", mock.Anything).Return(&hub.AuthTokens{ AppToken: "", HubToken: "", Key: "", Msg: "", Sig: "", }, nil) err := sv.BackupKeysByPassphrase(ctx, uuid, pass, backupType) assert.Nil(t, err) mockVault.AssertCalled(t, "Store", uuid, pass, backupType, mock.Anything, mock.Anything) } func TestService_VaultRestore(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() pass := "strawberry123" uuid := "c907e7ef-7b36-4ab1-8a56-f788d7526a2c" ctx := context.Background() mnemonic := "clog chalk blame black uncover frame before decide tuition maple crowd uncle" mockItem := vault.VaultItem{ ItemType: vault.PrivateKeyWithMnemonic, Value: mockPrivKeyHex + "___" + mnemonic, } mockItems := []vault.VaultItem{mockItem} mockVault.On("Retrieve", uuid, pass, domain.PASSWORD).Return(mockItems, nil) mockKeychain.On("ImportExistingKeyPair", mock.Anything, mock.Anything).Return(nil) textileClient.On("RestoreDB", mock.Anything).Return(nil) err := sv.RecoverKeysByPassphrase(ctx, uuid, pass, domain.PASSWORD) assert.Nil(t, err) mockKeychain.AssertCalled(t, "ImportExistingKeyPair", mockPrivKey, mnemonic) } func TestService_UnshareFilesViaPublicKey_Works(t *testing.T) { sv, _, tearDown 
:= initTestService(t) defer tearDown() ctx := context.Background() textileClient.On("IsHealthy").Return(true) textileClient.On("GetModel").Return(mockModel) textileClient.On( "ManageShareFilesViaPublicKey", ctx, []domain.FullPath{}, []crypto.PubKey{}, [][]byte{}, domain.DeleteRoleAction, ).Return(nil) err := sv.UnshareFilesViaPublicKey(ctx, []domain.FullPath{}, []crypto.PubKey{}) assert.Nil(t, err) } func TestService_UnshareFilesViaPublicKey_Fails_IFTextileIsNotInitialized(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() ctx := context.Background() expectedErr := errors.New("textile is not initialized") errChan := make(chan error, 1) errChan <- expectedErr textileClient.On("WaitForHealthy").Return(errChan) textileClient.On("IsHealthy").Return(false) err := sv.UnshareFilesViaPublicKey(ctx, []domain.FullPath{}, []crypto.PubKey{}) assert.EqualError(t, err, expectedErr.Error()) } func TestService_HandleSharedFilesInvitation_FailIfInvitationNotFound(t *testing.T) { sv, _, tearDown := initTestService(t) ctx := context.Background() defer tearDown() textileClient.On("IsHealthy").Return(true) textileClient.On("GetMailAsNotifications", mock.Anything, "", 1). Return(nil, errors.New("failed fetching")) err := sv.HandleSharedFilesInvitation(ctx, "", true) assert.EqualError(t, err, "invitation not found") } func TestService_HandleSharedFilesInvitation_Accepts_Correctly(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() // setup ctx := context.Background() invitationId := "random-invitation-uuid" acceptInvite := true expectedInvitation := domain.Invitation{ InviterPublicKey: "b7a3c12dc0c8c748ab07525b701122b88bd78f600c76342d27f25e5f92444cde", Status: domain.PENDING, ItemPaths: []domain.FullPath{ { DbId: "random-db-id", Bucket: "personal", Path: "/", }, }, } textileClient.On("IsHealthy").Return(true) textileClient.On("GetMailAsNotifications", mock.Anything, invitationId, 1). 
Return([]*domain.Notification{ { NotificationType: domain.INVITATION, InvitationValue: expectedInvitation, }, }, nil) textileClient.On("AcceptSharedFilesInvitation", mock.Anything, expectedInvitation). Return(expectedInvitation, nil) textileClient.On("SendMessage", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) // execute err := sv.HandleSharedFilesInvitation(ctx, invitationId, acceptInvite) assert.NoError(t, err, "HandleSharedFilesInvitation failed") } func TestService_HandleSharedFilesInvitation_Rejects_Correctly(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() // setup ctx := context.Background() invitationId := "random-invitation-uuid" acceptInvite := false expectedInvitation := domain.Invitation{ InviterPublicKey: "b7a3c12dc0c8c748ab07525b701122b88bd78f600c76342d27f25e5f92444cde", Status: domain.PENDING, ItemPaths: []domain.FullPath{ { DbId: "random-db-id", Bucket: "personal", Path: "/", }, }, } textileClient.On("IsHealthy").Return(true) textileClient.On("GetMailAsNotifications", mock.Anything, invitationId, 1). Return([]*domain.Notification{ { NotificationType: domain.INVITATION, InvitationValue: expectedInvitation, }, }, nil) textileClient.On("RejectSharedFilesInvitation", mock.Anything, expectedInvitation). Return(expectedInvitation, nil) textileClient.On("SendMessage", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) // execute err := sv.HandleSharedFilesInvitation(ctx, invitationId, acceptInvite) assert.NoError(t, err, "HandleSharedFilesInvitation failed") } func TestService_OpenSharedFile_ShouldFail_When_PasswordCannotBeFetched(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() // setup ctx := context.Background() testFilename := "image.jpg" testHash := "bafkreidhby4wyrc3cr6hfsg54x6nequylzdhn254nep7z3g7adfkyddlcy" emptyPassword := "" textileClient.On("IsHealthy").Return(true) textileClient.On("GetPublicReceivedFile", mock.Anything, testHash, true). 
Return(nil, "", errors.New("not found")) // test _, err := sv.OpenSharedFile(ctx, testHash, emptyPassword, testFilename) // validate assert.Error(t, err, "OpenSharedFile should fail for non existent password") assert.Contains(t, err.Error(), "password is required to open this file", "OpenSharedFile should fail") } func TestService_OpenSharedFile_Should_AddOpenedFileToSharedWithMeList(t *testing.T) { sv, _, tearDown := initTestService(t) defer tearDown() // setup ctx := context.Background() testFilename := "letter.txt" expectedFileContent := "This is a love letter to the dweb. Be great" testHash := "bafkreidhby4wyrc3cr6hfsg54x6nequylzdhn254nep7z3g7adfkyddlcy" testPassword := "super-secret" emptyPassword := "" textileClient.On("IsHealthy").Return(true) textileClient.On("GetPublicReceivedFile", mock.Anything, testHash, true). Return(&domain.SharedDirEntry{}, testPassword, nil) textileClient.On("DownloadPublicItem", mock.Anything, mock.Anything). Return(encryptString(expectedFileContent, testPassword), nil) textileClient.On("AcceptSharedFileLink", mock.Anything, testHash, testPassword, testFilename, fmt.Sprintf("%d", len(expectedFileContent))). 
		Return(&domain.SharedDirEntry{}, nil)

	// test (using empty password, so testPassword would be fetch from textileClient)
	result, err := sv.OpenSharedFile(ctx, testHash, emptyPassword, testFilename)

	// validate
	assert.NoError(t, err, "OpenSharedFile should not fail")
	actualFileContent, err := ioutil.ReadFile(result.Location)
	assert.NoError(t, err, "Failed to read decrypted file")
	assert.Equal(t, expectedFileContent, string(actualFileContent))
}

// encryptString encrypts content with password and returns a reader over the
// resulting ciphertext (counterpart of the decryption done by OpenSharedFile).
func encryptString(content, password string) io.ReadCloser {
	reader, err := dcrypto.NewEncrypterWithPassword(strings.NewReader(content), []byte(password))
	if err != nil {
		panic(err)
	}
	return ioutil.NopCloser(reader)
}

================================================
FILE: core/spacefs/fs.go
================================================
package spacefs

import (
	"context"
	"syscall"

	"github.com/FleekHQ/space-daemon/core/fsds"
)

// SpaceFS represents the filesystem that FUSE interacts with.
// It implements the FSOps interface
// and is responsible for managing file access, encryption and decryption.
type SpaceFS struct {
	store fsds.FSDataSource
}

// compile-time assertion that SpaceFS satisfies FSOps
var _ = FSOps(&SpaceFS{})

// New initializes a SpaceFS instance using store as its source of information.
func New(store fsds.FSDataSource) *SpaceFS {
	return &SpaceFS{
		store: store,
	}
}

// Root implements the FSOps Root function
// It returns the root directory of the file
func (fs *SpaceFS) Root(ctx context.Context) (DirEntryOps, error) {
	entry, err := fs.store.Get(ctx, "/")
	if err != nil {
		return nil, err
	}

	return &SpaceDirectory{
		fs:    fs,
		entry: entry,
	}, nil
}

// LookupPath implements the FsOps interface for looking up information
// in a directory
// NOTE(review): the underlying error is mapped to ENOENT regardless of cause —
// FUSE callers only see "not found".
func (fs *SpaceFS) LookupPath(ctx context.Context, path string) (DirEntryOps, error) {
	entry, err := fs.store.Get(ctx, path)
	if err != nil {
		return nil, syscall.ENOENT
	}

	if entry.IsDir() {
		return &SpaceDirectory{
			fs:    fs,
			entry: entry,
		}, nil
	}

	return &SpaceFile{
		fs:    fs,
		entry: entry,
	}, nil
}

// CreateEntry creates a new directory entry (file or folder, per req.Mode)
// and wraps it in the matching DirEntryOps implementation.
func (fs *SpaceFS) CreateEntry(ctx context.Context, req CreateDirEntry) (DirEntryOps, error) {
	entry, err := fs.store.CreateEntry(ctx, req.Path, req.Mode)
	if err != nil {
		return nil, syscall.ENOENT
	}

	if entry.IsDir() {
		return &SpaceDirectory{
			fs:    fs,
			entry: entry,
		}, nil
	}

	return &SpaceFile{
		fs:    fs,
		entry: entry,
	}, nil
}

// RenameEntry should rename the directory entry from old to new
func (fs *SpaceFS) RenameEntry(ctx context.Context, req RenameDirEntry) error {
	return fs.store.RenameEntry(ctx, req.OldPath, req.NewPath)
}

// DeleteEntry should delete the item at the path
func (fs *SpaceFS) DeleteEntry(ctx context.Context, path string) error {
	return fs.store.DeleteEntry(ctx, path)
}

// Open a file at specified path
func (fs *SpaceFS) Open(ctx context.Context, path string, mode FileHandlerMode) (FileHandler, error) {
	result, err := fs.store.Open(ctx, path)
	return result, err
}

// SpaceDirectory is a directory managed by space
type SpaceDirectory struct {
	fs    *SpaceFS
	entry *fsds.DirEntry
}

var _ = DirEntryOps(&SpaceDirectory{})
var _ = DirOps(&SpaceDirectory{})

// Path implements DirEntryOps Path() and return the path of the directory
func (dir *SpaceDirectory) Path() string {
	return dir.entry.Path()
}

// Attribute implements DirEntryOps Attribute() and fetches the metadata of the directory
func (dir *SpaceDirectory) Attribute(ctx context.Context) (DirEntryAttribute, error) {
	return dir.entry, nil
}

// ReadDir implements DirOps ReadDir and returns the list of entries in a directory
func (dir *SpaceDirectory) ReadDir(ctx context.Context) ([]DirEntryOps, error) {
	childrenEntries, err := dir.fs.store.GetChildren(ctx, dir.entry.Path())
	if err != nil {
		return nil, syscall.ENOENT
	}

	var result []DirEntryOps
	for _, entry := range childrenEntries {
		if entry.IsDir() {
			result = append(result, &SpaceDirectory{
				fs:    dir.fs,
				entry: entry,
			})
		} else {
			result = append(result, &SpaceFile{
				fs:    dir.fs,
				entry: entry,
			})
		}
	}

	return result, nil
}

// SpaceFile is a file managed by space
type SpaceFile struct {
	fs    *SpaceFS
	entry *fsds.DirEntry
}

var _ = FileOps(&SpaceFile{})

// Path implements DirEntryOps Path() and return the path of the directory
func (f *SpaceFile) Path() string {
	return f.entry.Path()
}

// Attribute implements DirEntryOps Attribute() and fetches the metadata of the directory
func (f *SpaceFile) Attribute(ctx context.Context) (DirEntryAttribute, error) {
	fileInfo, err := f.fs.store.Open(ctx, f.Path())
	if err != nil {
		return nil, err
	}

	stats, err := fileInfo.Stats(ctx)
	if err != nil {
		return nil, err
	}

	return stats, nil
}

// Open implements FileOps Open
// It should download/cache the content of the file and return a fileHandler
// that can read the cached content.
func (f *SpaceFile) Open(ctx context.Context, mode FileHandlerMode) (FileHandler, error) {
	fileHandler, err := f.fs.Open(ctx, f.entry.Path(), mode)
	return fileHandler, err
}

// Truncate resizes the file at this entry's path to size bytes.
func (f *SpaceFile) Truncate(ctx context.Context, size uint64) error {
	fileInfo, err := f.fs.store.Open(ctx, f.Path())
	if err != nil {
		return err
	}

	return fileInfo.Truncate(ctx, size)
}

================================================
FILE: core/spacefs/fs_test.go
================================================
package spacefs

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

//
func TestSpaceFS_LookupPath(t *testing.T) {
	assert.Equal(t, true, true)
	// ctx := context.Background()
	// memStore, err := fsds.NewIpfsDataSource(ctx)
	// if err != nil {
	// 	t.Fatal(err)
	// }
	//
	// fs, err := New(ctx, memStore)
	// if err != nil {
	// 	t.Fatal(err)
	// }
	//
	// result, err := fs.LookupPath("/static")
	// if err != nil {
	// 	t.Fatal(err)
	// }
	//
	// log.Printf("Path %s", result.Path())
	//
	// result, err = fs.LookupPath("/static/js/2.b4ef1316.chunk.js")
	// if err != nil {
	// 	t.Fatal(err)
	// }
	// log.Printf("Path %s", result.Path())
	// attr, err := result.Attribute()
	// if err != nil {
	// 	t.Fatal(err)
	// }
	// log.Printf("Name %s", attr.Name())
	// log.Printf("IsDir %v", attr.IsDir())
	// log.Printf("Size %d", attr.Size())
	//
	// result, err =
fs.LookupPath("/static/js") // if err != nil { // t.Fatal(err) // } // // dirOps, ok := result.(DirOps) // if !ok { // t.Fatal(errors.New("result is not a DirOps")) // } // jsDirectory, err := dirOps.ReadDir() // if err != nil { // t.Fatal(err) // } // for _, dir := range jsDirectory { // dirAttr, err := dir.Attribute() // if err != nil { // t.Fatal(err) // } // log.Printf("\nName: %s\nIs Dir: %v\nSize: %d\n", dirAttr.Name(), dirAttr.IsDir(), dirAttr.Size()) // } } ================================================ FILE: core/spacefs/interfaces.go ================================================ package spacefs import ( "context" "os" "time" ) type FileHandlerMode uint8 const ( ReadMode = FileHandlerMode(0) WriteMode ) // DirEntryAttribute similar to the FileInfo in the os.Package type DirEntryAttribute interface { Name() string // base name of the file Size() uint64 // length in bytes for files; can be anything for directories Mode() os.FileMode // file mode bits Uid() uint32 // user id of owner of entry Gid() uint32 // group id of owner of entry Ctime() time.Time // creation time ModTime() time.Time // modification time IsDir() bool } // DirEntryOps are the list of actions to be invoked on a directry entry // A directory entry is either a file or a folder. 
// See DirOps and FileOps for operations specific to those types type DirEntryOps interface { // Path should return the absolute path string for directory or file // Directory path's should end in `/` Path() string // Attribute should return the metadata information for the file Attribute(ctx context.Context) (DirEntryAttribute, error) } // DirOps are the list of actions that can be done on a directory type DirOps interface { DirEntryOps ReadDir(ctx context.Context) ([]DirEntryOps, error) } // FileHandler is in charge of reading, writing and closing access to a file // It should handle locking and track read and write offsets till it is closed type FileHandler interface { Read(ctx context.Context, data []byte, offset int64) (int, error) Write(ctx context.Context, data []byte, offset int64) (int, error) Close(ctx context.Context) error } // FileOps are the list of actions that can be done on a file type FileOps interface { DirEntryOps Open(ctx context.Context, mode FileHandlerMode) (FileHandler, error) Truncate(ctx context.Context, size uint64) error } type CreateDirEntry struct { Path string Mode os.FileMode } type RenameDirEntry struct { OldPath string NewPath string } // FSOps represents the filesystem operations type FSOps interface { // Root should return the root directory entry Root(ctx context.Context) (DirEntryOps, error) // LookupPath should return the directory entry at that particular path LookupPath(ctx context.Context, path string) (DirEntryOps, error) // Open a file at specific path, with specified mode Open(ctx context.Context, path string, mode FileHandlerMode) (FileHandler, error) // CreateEntry should create an directory entry and return either a FileOps or DirOps entry // depending on the mode CreateEntry(ctx context.Context, req CreateDirEntry) (DirEntryOps, error) // RenameEntry should rename the directory entry from old to new RenameEntry(ctx context.Context, req RenameDirEntry) error // DeleteEntry should delete the item at the path 
DeleteEntry(ctx context.Context, path string) error } ================================================ FILE: core/store/store.go ================================================ package store import ( "errors" "fmt" "os" "runtime" s "strings" "github.com/FleekHQ/space-daemon/core/util" "github.com/FleekHQ/space-daemon/core" "github.com/FleekHQ/space-daemon/log" badger "github.com/dgraph-io/badger" ) const DefaultRootDir = "~/.fleek-space" const BadgerFileName = "db" type store struct { rootDir string db *badger.DB isOpen bool } var _ = core.Component(store{}) type Store interface { Open() error Close() error Set(key []byte, value []byte) error SetString(key string, value string) error Get(key []byte) ([]byte, error) Remove(key []byte) error DropAll() error IsOpen() bool KeysWithPrefix(prefix string) ([]string, error) } type storeOptions struct { rootDir string } var defaultStoreOptions = storeOptions{ rootDir: DefaultRootDir, } // Idea taken from here https://medium.com/soon-london/variadic-configuration-functions-in-go-8cef1c97ce99 type Option func(o *storeOptions) func New(opts ...Option) *store { o := defaultStoreOptions for _, opt := range opts { opt(&o) } log.Info(fmt.Sprintf("using path %s for store", o.rootDir)) store := &store{ rootDir: o.rootDir, isOpen: false, } return store } func (store *store) Open() error { if store.isOpen { log.Warn("Trying to open an already open") return nil } rootDir, err := util.ResolvePath(s.Join([]string{store.rootDir, BadgerFileName}, "/")) if err != nil { return err } // We create the directory in case it doesn't exist yet if err := os.MkdirAll(rootDir, os.ModePerm); err != nil { return err } db, err := badger.Open( badger.DefaultOptions(rootDir). WithEventLogging(false). 
WithTruncate(runtime.GOOS == "windows"), ) if err != nil { return err } store.db = db store.isOpen = true return nil } func (store store) IsOpen() bool { return store.isOpen } func (store *store) Close() error { if !store.isOpen { return nil } err := store.db.Close() if err != nil { return err } store.isOpen = false return nil } // Testing that store is correctly working func (store *store) hotInit() { if err := store.Set([]byte("A"), []byte("B")); err != nil { log.Error("error", err) return } if val, err := store.Get([]byte("A")); err != nil { log.Error("error", err) } else { log.Info("Got store response") log.Info(string(val)) } } // Helper function for setting store path func WithPath(path string) Option { return func(o *storeOptions) { if path != "" { o.rootDir = path } } } func (store *store) getDb() (*badger.DB, error) { if store.isOpen == false { return nil, errors.New("Database has not been opened yet") } return store.db, nil } // Stores a key/value pair in the db. func (store *store) Set(key []byte, value []byte) error { db, err := store.getDb() if err != nil { return err } updateHandler := func(txn *badger.Txn) error { e := badger.NewEntry(key, value) err := txn.SetEntry(e) return err } if err := db.Update(updateHandler); err != nil { return err } return nil } // Removes a key/value pair in the db. func (store *store) Remove(key []byte) error { db, err := store.getDb() if err != nil { return err } removeHandler := func(txn *badger.Txn) error { err := txn.Delete(key) return err } if err := db.Update(removeHandler); err != nil { return err } return nil } func (store *store) SetString(key string, value string) error { return store.Set([]byte(key), []byte(value)) } // Given a key, retrieves the stored value. If the key is not found returns ErrKeyNotFound. 
func (store *store) Get(key []byte) ([]byte, error) { db, err := store.getDb() if err != nil { return nil, err } var valCopy []byte transactionHandler := func(txn *badger.Txn) error { if item, err := txn.Get(key); err != nil { return err } else { err = item.Value(func(val []byte) error { // Copying or parsing val is valid. valCopy = append([]byte{}, val...) return nil }) if err != nil { return err } return nil } } if err = db.View(transactionHandler); err != nil { return nil, err } return valCopy, nil } // Returns keys in the store filtered by prefix func (store store) KeysWithPrefix(prefix string) ([]string, error) { db, err := store.getDb() if err != nil { return nil, err } keys := make([]string, 0) db.View(func(txn *badger.Txn) error { it := txn.NewIterator(badger.DefaultIteratorOptions) defer it.Close() prefix := []byte(prefix) for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { item := it.Item() k := item.Key() keys = append(keys, string(k)) } return nil }) return keys, nil } func (store store) Shutdown() error { return store.Close() } func (store store) DropAll() error { db, err := store.getDb() if err != nil { return err } return db.DropAll() } ================================================ FILE: core/sync/fs.go ================================================ package sync import ( "context" "fmt" "os" ipfspath "github.com/ipfs/interface-go-ipfs-core/path" "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/log" ) func (h *watcherHandler) OnCreate(ctx context.Context, path string, fileInfo os.FileInfo) { log.Info( "FS Handler: OnCreate", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileName:%s", fileInfo.Name()), ) // TODO: Synchronizer lock check should ensure that no other operation is currently ongoing // with this path or its parent folder var result ipfspath.Resolved var newRoot ipfspath.Path var err error watchInfo, exists := h.bs.getOpenFileBucketSlugAndPath(path) if !exists { msg := fmt.Sprintf("error: could not 
find path %s", path) log.Error(msg, fmt.Errorf(msg)) return } bucketSlug := watchInfo.BucketSlug bucketPath := watchInfo.BucketPath b, err := h.bs.textileClient.GetBucket(ctx, bucketSlug, nil) if err != nil { msg := fmt.Sprintf("error: could not find bucket with slug %s", bucketSlug) log.Error(msg, fmt.Errorf(msg)) return } if fileInfo.IsDir() { existsOnTextile, err := b.DirExists(ctx, path) if err != nil { log.Error("Could not check if folder exists on textile", err) return } if existsOnTextile { log.Info("Folder alerady exists on textile") return } result, newRoot, err = b.CreateDirectory(ctx, path) } else { existsOnTextile, err := b.FileExists(ctx, path) if err != nil { log.Error("Could not check if file exists on textile", err) return } if existsOnTextile { log.Info("File alerady exists on textile") return } fileReader, err := os.Open(path) if err != nil { log.Error("Could not open file for upload", err) return } result, newRoot, err = b.UploadFile(ctx, bucketPath, fileReader) } if err != nil { log.Error("Uploading to textile failed", err, fmt.Sprintf("path:%s", path)) return } if err = result.IsValid(); err != nil { log.Error("Uploading to textile not valid", err, fmt.Sprintf("path:%s", path)) return } log.Info( "Successfully uploaded item to textile", fmt.Sprintf("bucketPath:%s", bucketPath), fmt.Sprintf("itemCid:%s", result.Cid()), fmt.Sprintf("rootCid:%s", newRoot.String()), ) // TODO: Update synchronizer/store (maybe in a defer function) } func (h *watcherHandler) OnRemove(ctx context.Context, path string, fileInfo os.FileInfo) { log.Info("FS Handler: OnRemove", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileName:%s", fileInfo.Name())) // TODO: Also synchronizer lock check here watchInfo, exists := h.bs.getOpenFileBucketSlugAndPath(path) if !exists { msg := fmt.Sprintf("error: could not find path %s", path) log.Error(msg, fmt.Errorf(msg)) return } bucketSlug := watchInfo.BucketSlug bucketPath := watchInfo.BucketPath b, err := 
h.bs.textileClient.GetBucket(ctx, bucketSlug, nil) if err != nil { msg := fmt.Sprintf("error: could not find bucket with slug %s", bucketSlug) log.Error(msg, fmt.Errorf(msg)) return } _, err = b.DeleteDirOrFile(ctx, bucketPath) if err != nil { log.Error("Deleting from textile failed", err, fmt.Sprintf("path:%s", path)) return } log.Info( "Successfully deleted item from textile", fmt.Sprintf("path:%s", path), ) // TODO: Update synchronizer/store (maybe in a defer function) } // OnWrite is invoked when a new file is created or files content is updated func (h *watcherHandler) OnWrite(ctx context.Context, path string, fileInfo os.FileInfo) { log.Info("FS Handler: OnWrite", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileName:%s", fileInfo.Name())) watchInfo, exists := h.bs.getOpenFileBucketSlugAndPath(path) if !exists { msg := fmt.Sprintf("error: could not find path %s", path) log.Error(msg, fmt.Errorf(msg)) return } var b textile.Bucket var err error bucketSlug := watchInfo.BucketSlug bucketPath := watchInfo.BucketPath if watchInfo.IsRemote { b, err = h.bs.textileClient.GetBucket(ctx, bucketSlug, &textile.GetBucketForRemoteFileInput{ Bucket: bucketSlug, DbID: watchInfo.DbId, Path: watchInfo.BucketPath, }) } else { b, err = h.bs.textileClient.GetBucket(ctx, bucketSlug, nil) } if err != nil { msg := fmt.Sprintf("error: could not find bucket with slug %s", bucketSlug) log.Error(msg, fmt.Errorf(msg)) return } fileReader, err := os.Open(path) if err != nil { log.Error("Could not open file for upload", err) return } _, _, err = b.UploadFile(ctx, bucketPath, fileReader) if err != nil { msg := fmt.Sprintf("error: could not sync file at path %s to bucket %s as %s", path, bucketSlug, bucketPath) log.Error(msg, fmt.Errorf(msg)) return } msg := fmt.Sprintf("success syncing file at path %s to bucket %s as %s", path, bucketSlug, bucketPath) log.Printf(msg) } func (h *watcherHandler) OnRename(ctx context.Context, path string, fileInfo os.FileInfo, oldPath string) { log.Info( 
"Watcher Handler: OnRename", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileName:%s", fileInfo.Name()), fmt.Sprintf("path:%s", oldPath), ) h.OnRemove(ctx, oldPath, fileInfo) h.OnCreate(ctx, path, fileInfo) } func (h *watcherHandler) OnMove(ctx context.Context, path string, fileInfo os.FileInfo, oldPath string) { log.Info( "Watcher Handler: OnMove", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileName:%s", fileInfo.Name()), fmt.Sprintf("path:%s", oldPath), ) h.OnRemove(ctx, oldPath, fileInfo) h.OnCreate(ctx, path, fileInfo) } ================================================ FILE: core/sync/notifier_default.go ================================================ package sync import ( "github.com/FleekHQ/space-daemon/core/events" ) type defaultNotifier struct{} func (d defaultNotifier) SendFileEvent(event events.FileEvent) { return } func (d defaultNotifier) SendTextileEvent(event events.TextileEvent) { return } ================================================ FILE: core/sync/sync.go ================================================ package sync import ( "context" "encoding/json" "errors" "fmt" "github.com/FleekHQ/space-daemon/core/events" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/space/services" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/log" "golang.org/x/sync/errgroup" "github.com/FleekHQ/space-daemon/core/watcher" ) var ( ErrAddFileWatch = errors.New("error adding file to watch") ) const ( OpenFilesKeyPrefix = "openFiles#" ReverseOpenFilesKeyPrefix = "reverseOpenFiles#" ) type GrpcNotifier interface { SendFileEvent(event events.FileEvent) SendTextileEvent(event events.TextileEvent) } type BucketSynchronizer interface { WaitForReady() chan bool Start(ctx context.Context) error Shutdown() error RegisterNotifier(notifier GrpcNotifier) AddFileWatch(addFileInfo domain.AddWatchFile) error GetOpenFilePath(bucketSlug, bucketPath, dbID, cid string) 
(string, bool) } type TextileNotifier interface { SendTextileEvent(event events.TextileEvent) } // Implementation to handle events from FS type watcherHandler struct { client textile.Client bs *bucketSynchronizer } // Implementation to handle events from textile type textileHandler struct { notifier TextileNotifier bs *bucketSynchronizer } type bucketSynchronizer struct { folderWatcher watcher.FolderWatcher textileClient textile.Client fh *watcherHandler th *textileHandler notifier GrpcNotifier store store.Store ready chan bool } // Creates a new bucketSynchronizer instancelistenerEventHandler func New( folderWatcher watcher.FolderWatcher, textileClient textile.Client, store store.Store, notifier GrpcNotifier, ) *bucketSynchronizer { return &bucketSynchronizer{ folderWatcher: folderWatcher, textileClient: textileClient, fh: nil, th: nil, notifier: notifier, store: store, ready: make(chan bool), } } // Starts the folder watcher and the textile watcher. func (bs *bucketSynchronizer) Start(ctx context.Context) error { if bs.notifier == nil { log.Printf("using default notifier to start bucket sync") bs.notifier = &defaultNotifier{} } bs.fh = &watcherHandler{ client: bs.textileClient, bs: bs, } bs.folderWatcher.RegisterHandler(bs.fh) g, newCtx := errgroup.WithContext(ctx) g.Go(func() error { log.Debug("Starting watcher in bucketsync") return bs.folderWatcher.Watch(newCtx) }) // add open files to watcher keys, err := bs.store.KeysWithPrefix(OpenFilesKeyPrefix) if err != nil { log.Error("error getting keys from store", err) return err } log.Debug("start watching open files ...") for _, k := range keys { if fi, err := bs.getOpenFileInfo(k); err == nil { if services.PathExists(fi.LocalPath) { if err := bs.folderWatcher.AddFile(fi.LocalPath); err != nil { log.Error(fmt.Sprintf("error opening file at %s", fi.LocalPath), err) // remove fileInfo from store for cleanup bs.removeFileInfo(fi) } } } } bs.ready <- true err = g.Wait() if err != nil { return err } return nil } func 
(bs *bucketSynchronizer) WaitForReady() chan bool { return bs.ready } func (bs *bucketSynchronizer) Shutdown() error { // add shutdown logic here log.Debug("shutting down folder watcher in bucketsync") bs.folderWatcher.Close() log.Debug("shutting down textile thread listener in bucketsync") close(bs.ready) return nil } func (bs *bucketSynchronizer) RegisterNotifier(notifier GrpcNotifier) { bs.notifier = notifier } // TODO: add GC code logic to open files to cleanup // Adds a file to watcher list to keep track of func (bs *bucketSynchronizer) AddFileWatch(addFileInfo domain.AddWatchFile) error { if addFileInfo.LocalPath == "" { return ErrAddFileWatch } if addFileInfo.BucketKey == "" { return ErrAddFileWatch } if addFileInfo.BucketPath == "" { return ErrAddFileWatch } err := bs.addFileInfoToStore(addFileInfo) if err != nil { return err } err = bs.folderWatcher.AddFile(addFileInfo.LocalPath) if err != nil { return err } return nil } func (bs *bucketSynchronizer) GetOpenFilePath(bucketSlug, bucketPath, dbID, cid string) (string, bool) { var fi domain.AddWatchFile var err error reversKey := getOpenFileReverseKey(bucketSlug, bucketPath, dbID, cid) if fi, err = bs.getOpenFileInfo(reversKey); err != nil { return "", false } if fi.LocalPath == "" { return "", false } return fi.LocalPath, true } func getOpenFileKey(localPath string) string { return OpenFilesKeyPrefix + localPath } func getOpenFileReverseKey(bucketSlug, bucketPath, dbID, cid string) string { return ReverseOpenFilesKeyPrefix + bucketSlug + ":" + bucketPath + ":" + dbID + ":" + cid } func (bs *bucketSynchronizer) getOpenFileBucketSlugAndPath(localPath string) (domain.AddWatchFile, bool) { var fi domain.AddWatchFile var err error if fi, err = bs.getOpenFileInfo(getOpenFileKey(localPath)); err != nil { return domain.AddWatchFile{}, false } if fi.BucketSlug == "" { return domain.AddWatchFile{}, false } return fi, true } // Helper function to set open file info in the store func (bs *bucketSynchronizer) 
addFileInfoToStore(addFileInfo domain.AddWatchFile) error { out, err := json.Marshal(addFileInfo) if err != nil { return err } if err := bs.store.SetString(getOpenFileKey(addFileInfo.LocalPath), string(out)); err != nil { return err } reverseKey := getOpenFileReverseKey(addFileInfo.BucketSlug, addFileInfo.BucketPath, addFileInfo.DbId, addFileInfo.Cid) if err := bs.store.SetString(reverseKey, string(out)); err != nil { return err } return nil } // Helper function to remove file information from store func (bs *bucketSynchronizer) removeFileInfo(addFileInfo domain.AddWatchFile) error { if err := bs.store.Remove([]byte(getOpenFileKey(addFileInfo.LocalPath))); err != nil { return err } reverseKey := getOpenFileReverseKey(addFileInfo.BucketSlug, addFileInfo.BucketPath, addFileInfo.DbId, addFileInfo.Cid) if err := bs.store.Remove([]byte(reverseKey)); err != nil { return err } return nil } // Helper function to retrieve open file info from store func (bs *bucketSynchronizer) getOpenFileInfo(key string) (domain.AddWatchFile, error) { var fi []byte var err error if fi, err = bs.store.Get([]byte(key)); err != nil { return domain.AddWatchFile{}, err } var fileInfo domain.AddWatchFile if err := json.Unmarshal(fi, &fileInfo); err != nil { return domain.AddWatchFile{}, err } return fileInfo, nil } ================================================ FILE: core/sync/textile.go ================================================ package sync import ( "encoding/json" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/events" "github.com/FleekHQ/space-daemon/log" tc "github.com/textileio/go-threads/api/client" ) func (h *textileHandler) OnCreate(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Info("Default Listener Handler: OnCreate") instance := &bucket.BucketData{} if err := json.Unmarshal(listenEvent.Action.Instance, instance); err != nil { log.Error("failed to unmarshal listen result: %v", err) } evt := 
events.NewTextileEvent(instance.Name) h.notifier.SendTextileEvent(evt) } func (h *textileHandler) OnRemove(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Info("Default Listener Handler: OnRemove") instance := &bucket.BucketData{} if err := json.Unmarshal(listenEvent.Action.Instance, instance); err != nil { log.Error("failed to unmarshal listen result: %v", err) } evt := events.NewTextileEvent(instance.Name) h.notifier.SendTextileEvent(evt) } func (h *textileHandler) OnSave(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Info("Default Listener Handler: OnSave") instance := &bucket.BucketData{} if err := json.Unmarshal(listenEvent.Action.Instance, instance); err != nil { log.Error("failed to unmarshal listen result: %v", err) } evt := events.NewTextileEvent(instance.Name) h.notifier.SendTextileEvent(evt) } ================================================ FILE: core/sync/textile_test.go ================================================ package sync import ( "testing" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/mocks" "github.com/stretchr/testify/mock" tc "github.com/textileio/go-threads/api/client" ) func TestTextileHandler_OnCreate(t *testing.T) { n := new(mocks.TextileNotifier) th := &textileHandler{ notifier: n, } b := []byte(`{"Key":"bafzbeid2zp544qy6ktwdlr5xxsmsioclbxj42dkbqckm35e6l5biqlo3tq","Name":"test-bucket-1"}`) buck := &bucket.BucketData{} action := tc.Action{ Collection: "buckets", Type: 1, InstanceID: "dummy-id", Instance: b, } evt := &tc.ListenEvent{ Action: action, } n.On("SendTextileEvent", mock.Anything).Return() th.OnCreate(buck, evt) n.AssertExpectations(t) } func TestTextileHandler_OnRemove(t *testing.T) { n := new(mocks.TextileNotifier) th := &textileHandler{ notifier: n, } b := []byte(`{"Key":"bafzbeid2zp544qy6ktwdlr5xxsmsioclbxj42dkbqckm35e6l5biqlo3tq","Name":"test-bucket-1"}`) buck := &bucket.BucketData{} action := tc.Action{ Collection: "buckets", Type: 1, 
InstanceID: "dummy-id", Instance: b, } evt := &tc.ListenEvent{ Action: action, } n.On("SendTextileEvent", mock.Anything).Return() th.OnRemove(buck, evt) n.AssertExpectations(t) } func TestTextileHandler_OnSave(t *testing.T) { n := new(mocks.TextileNotifier) th := &textileHandler{ notifier: n, } b := []byte(`{"Key":"bafzbeid2zp544qy6ktwdlr5xxsmsioclbxj42dkbqckm35e6l5biqlo3tq","Name":"test-bucket-1"}`) buck := &bucket.BucketData{} action := tc.Action{ Collection: "buckets", Type: 1, InstanceID: "dummy-id", Instance: b, } evt := &tc.ListenEvent{ Action: action, } n.On("SendTextileEvent", mock.Anything).Return() th.OnSave(buck, evt) n.AssertExpectations(t) } ================================================ FILE: core/textile/README.md ================================================ # Textile Wrappers This package contains wrappers around Textile Threads and Buckets. ## Usage ### Initialization and Startup Initialize Space's Textile Client by runing ```go client := textile.NewClient(store) client.Start(ctx, config) ``` This will start the Textile connection, try to authenticate to the Hub, create a thread for metadata and a default bucket if there's none. #### TODO - Separate the initialization logic into a new function that can be called from an imported "wallet". That way the initialized metadata and initial buckets can be pulled from the Hub if they exist. ## Internal State Textile Client holds the following objects: - store: A connection to the store. Used to fetch keys and store the meta thread threadID. - threads: A connection to Textile's Thread Client, initiated after startup. - bucketsClient: A connection to Textile's Bucket Client, initiated after startup. - netc: Wraps Textile network operations. - isRunning: Boolean that is set to true if the initialization after calling Start finished successfully. - Ready: A channel that gets emitted to after startup. - cfg: A reference to the config object. 
- isConnectedToHub: A boolean indicating if the initial Hub connection and authorization succeeded. If false, bucket operations will not be replicated on the Hub. After initialization, Textile Client's state is mainly stored in the "meta thread". This meta thread stores a collection of the buckets the user has created or joined. This meta thread can be synced and joined in case the user wants to go cross-platform. The thread ID of the meta thread is stored in the local store. Operations over the meta thread are done in `collections.go`. Creating and joining buckets (`bucket_factory.go`) adds a bucket instance to the meta thread. Listing and getting buckets query the meta thread to obtain the bucket's threadID and name. Using this info, these methods instantiate a Bucket object (`./bucket/bucket.go`), which exposes methods to do in bucket operations such as listing and adding files to a bucket. ## Hub authentication Currently, we attempt to connect to the Hub on initialization. In coming releases, we might switch that so that it can be toggled on and off from the API. If the Hub connection succeeds, all bucket operations will include the auth token in the calls to Textile's bucket client. This will trigger Hub replication. The auth token is obtained by signing a challenge received from the Hub using the user private key. If the challenge is signed correctly, the Hub returns a non-expiring auth token that we store so that we don't need to re-authenticate. The logic for authenticating and prepending the keys before bucket operations can be seen in `bucket_factory.go` in the method `getBucketContext`. It creates a Context instance that includes all the necessary information for accessing the correct thread and include the correct auth token. 
================================================ FILE: core/textile/account.go ================================================ package textile import ( "context" "github.com/FleekHQ/space-daemon/core/textile/utils" ) func (tc *textileClient) DeleteAccount(ctx context.Context) error { if err := tc.requiresRunning(); err != nil { return err } // delete local buckets bucks, err := tc.ListBuckets(ctx) if err != nil { return err } for _, b := range bucks { bs, err := tc.GetModel().FindBucket(ctx, b.Slug()) if err != nil { return err } dbid, err := b.GetThreadID(ctx) if err != nil { return err } ctx, _, err = tc.getBucketContext(ctx, utils.CastDbIDToString(*dbid), b.Slug(), false, bs.EncryptionKey) err = tc.bucketsClient.Remove(ctx, b.Key()) if err != nil { return err } ctx, _, err = tc.getBucketContext(ctx, bs.RemoteDbID, b.Slug(), true, bs.EncryptionKey) err = tc.hb.Remove(ctx, bs.RemoteBucketKey) if err != nil { return err } } // disable sync tc.DisableSync() // stop backgroundjobs tc.sync.Shutdown() // stop listener tc.DeleteListeners(ctx) return nil } ================================================ FILE: core/textile/buckd.go ================================================ package textile import ( "context" "fmt" "os" "time" connmgr "github.com/libp2p/go-libp2p-connmgr" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/log" "github.com/textileio/textile/v2/cmd" "github.com/textileio/textile/v2/core" ) var IpfsAddr string var MaxThreadsConn int var MinThreadsConn int type TextileBuckd struct { textile *core.Textile IsRunning bool Ready chan bool cfg config.Config } func NewBuckd(cfg config.Config) *TextileBuckd { return &TextileBuckd{ Ready: make(chan bool), cfg: cfg, } } func (tb *TextileBuckd) Start(ctx context.Context) error { IpfsAddr = tb.cfg.GetString(config.Ipfsaddr, "/ip4/127.0.0.1/tcp/5001") MinThreadsConn = tb.cfg.GetInt(config.MinThreadsConnection, 50) MaxThreadsConn = tb.cfg.GetInt(config.MaxThreadsConnection, 100) addrAPI := 
cmd.AddrFromStr(tb.cfg.GetString(config.BuckdApiMaAddr, "/ip4/127.0.0.1/tcp/3006")) addrAPIProxy := cmd.AddrFromStr(tb.cfg.GetString(config.BuckdApiProxyMaAddr, "/ip4/127.0.0.1/tcp/3007")) addrThreadsHost := cmd.AddrFromStr(tb.cfg.GetString(config.BuckdThreadsHostMaAddr, "/ip4/0.0.0.0/tcp/4006")) addrIpfsAPI := cmd.AddrFromStr(IpfsAddr) gatewayPort := tb.cfg.GetInt(config.BuckdGatewayPort, 8006) addrGatewayHost := cmd.AddrFromStr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gatewayPort)) addrGatewayURL := fmt.Sprintf("http://127.0.0.1:%d", gatewayPort) buckdPath := tb.cfg.GetString(config.BuckdPath, "") if buckdPath == "" { homeDir, err := os.UserHomeDir() if err != nil { return err } buckdPath = homeDir + "/.buckd" log.Debug("No Buckd Path provided. Using default.", "path:"+buckdPath) } textile, err := core.NewTextile(ctx, core.Config{ RepoPath: buckdPath + "/repo", CollectionRepoPath: buckdPath + "/collections", AddrAPI: addrAPI, AddrAPIProxy: addrAPIProxy, AddrThreadsHost: addrThreadsHost, AddrIPFSAPI: addrIpfsAPI, AddrGatewayHost: addrGatewayHost, AddrGatewayURL: addrGatewayURL, //AddrPowergateAPI: addrPowergateApi, //UseSubdomains: config.Viper.GetBool("gateway.subdomains"), //DNSDomain: dnsDomain, //DNSZoneID: dnsZoneID, //DNSToken: dnsToken, ThreadsConnManager: connmgr.NewConnManager(MinThreadsConn, MaxThreadsConn, time.Second*20), Debug: false, }) if err != nil { return err } textile.Bootstrap() log.Info("Welcome to bucket", fmt.Sprintf("peerID:%s", textile.HostID().String())) log.Info("Sleeping for 5s to wait for buckd grpc ports to listen ...") time.Sleep(5 * time.Second) tb.textile = textile tb.IsRunning = true tb.Ready <- true return nil } func (tb *TextileBuckd) WaitForReady() chan bool { return tb.Ready } func (tb *TextileBuckd) Stop() error { tb.IsRunning = false err := tb.textile.Close(true) if err != nil { return err } return nil } func (tb *TextileBuckd) Shutdown() error { close(tb.Ready) return tb.Stop() } ================================================ 
FILE: core/textile/bucket/bucket.go ================================================ package bucket import ( "context" "io" "sync" "github.com/ipfs/interface-go-ipfs-core/path" "github.com/textileio/go-threads/core/thread" bucketsClient "github.com/textileio/textile/v2/api/bucketsd/client" bucketsproto "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/buckets" ) type BucketData struct { Key string `json:"_id"` Name string `json:"name"` Path string `json:"path"` DNSRecord string `json:"dns_record,omitempty"` //Archives Archives `json:"archives"` CreatedAt int64 `json:"created_at"` UpdatedAt int64 `json:"updated_at"` } type DirEntries bucketsproto.ListPathResponse type BucketsClient interface { PushPath(ctx context.Context, key, pth string, reader io.Reader, opts ...bucketsClient.Option) (result path.Resolved, root path.Resolved, err error) PullPath(ctx context.Context, key, pth string, writer io.Writer, opts ...bucketsClient.Option) error ListPath(ctx context.Context, key, pth string) (*bucketsproto.ListPathResponse, error) RemovePath(ctx context.Context, key, pth string, opts ...bucketsClient.Option) (path.Resolved, error) ListIpfsPath(ctx context.Context, ipfsPath path.Path) (*bucketsproto.ListIpfsPathResponse, error) PushPathAccessRoles(ctx context.Context, key, path string, roles map[string]buckets.Role) error } type EachFunc = func(ctx context.Context, b *Bucket, path string) error type BucketInterface interface { Slug() string Key() string GetData() BucketData GetContext(ctx context.Context) (context.Context, *thread.ID, error) GetClient() BucketsClient GetThreadID(ctx context.Context) (*thread.ID, error) DirExists(ctx context.Context, path string) (bool, error) FileExists(ctx context.Context, path string) (bool, error) UpdatedAt(ctx context.Context, path string) (int64, error) UploadFile( ctx context.Context, path string, reader io.Reader, ) (result path.Resolved, root path.Path, err error) DownloadFile( ctx context.Context, 
path string, reader io.Reader, ) (result path.Resolved, root path.Path, err error) GetFile( ctx context.Context, path string, w io.Writer, ) error CreateDirectory( ctx context.Context, path string, ) (result path.Resolved, root path.Path, err error) ListDirectory( ctx context.Context, path string, ) (*DirEntries, error) DeleteDirOrFile( ctx context.Context, path string, ) (path.Resolved, error) ItemsCount( ctx context.Context, path string, withRecursive bool, ) (int32, error) Each( ctx context.Context, path string, iterator EachFunc, withRecursive bool, ) (int, error) } type Notifier interface { OnUploadFile(bucketSlug string, bucketPath string, result path.Resolved, root path.Path) } // NOTE: all write operations should use the lock for the bucket to keep consistency // TODO: Maybe read operations dont need a lock, needs testing // struct for implementing bucket interface type Bucket struct { lock sync.RWMutex root *bucketsproto.Root bucketsClient BucketsClient getBucketContext GetBucketContextFn notifier Notifier } func (b *Bucket) Slug() string { return b.GetData().Name } type GetBucketContextFn func(context.Context, string) (context.Context, *thread.ID, error) func New( root *bucketsproto.Root, getBucketContext GetBucketContextFn, bucketsClient BucketsClient, ) *Bucket { return &Bucket{ root: root, bucketsClient: bucketsClient, getBucketContext: getBucketContext, notifier: nil, } } func (b *Bucket) Key() string { return b.GetData().Key } func (b *Bucket) GetData() BucketData { return BucketData{ Key: b.root.Key, Name: b.root.Name, Path: b.root.Path, DNSRecord: "", CreatedAt: b.root.CreatedAt, UpdatedAt: b.root.UpdatedAt, } } func (b *Bucket) GetContext(ctx context.Context) (context.Context, *thread.ID, error) { return b.getBucketContext(ctx, b.root.Name) } func (b *Bucket) GetClient() BucketsClient { return b.bucketsClient } func (b *Bucket) GetThreadID(ctx context.Context) (*thread.ID, error) { _, threadID, err := b.GetContext(ctx) if err != nil { return nil, 
err
	}

	return threadID, nil
}

// AttachNotifier registers n to be informed of local write operations
// (see Notifier) so local changes can be synced to the remote node.
func (b *Bucket) AttachNotifier(n Notifier) {
	b.notifier = n
}
================================================
FILE: core/textile/bucket/bucket_dir.go
================================================
package bucket

import (
	"bytes"
	"context"
	"regexp"
	"strings"

	"github.com/FleekHQ/space-daemon/core/textile/utils"
	"github.com/FleekHQ/space-daemon/log"

	"github.com/ipfs/interface-go-ipfs-core/path"
)

// Keep file is added to empty directories
var keepFileName = ".keep"

// DirExists reports whether a directory exists at path by attempting to
// list it and inspecting the error.
//
// NOTE(review): this takes b.lock.RLock and then calls ListDirectory, which
// RLocks again. sync.RWMutex documents recursive read-locking as
// deadlock-prone when a writer is waiting in between — confirm whether the
// outer lock is actually needed here.
func (b *Bucket) DirExists(ctx context.Context, path string) (bool, error) {
	b.lock.RLock()
	defer b.lock.RUnlock()

	_, err := b.ListDirectory(ctx, path)
	log.Debug("returned from bucket call")
	if err != nil {
		// NOTE: not sure if this is the best approach but didnt
		// want to loop over items each time
		// A "no link named ... under ..." error means the directory simply
		// does not exist, which is a negative answer rather than a failure.
		match, _ := regexp.MatchString(".*no link named.*under.*", err.Error())
		if match {
			return false, nil
		}
		log.Info("error doing list path on non existent directoy: ", err.Error())

		// Since a nil would be interpreted as a false
		return false, err
	}

	return true, nil
}

// CreateDirectory creates an empty directory
// Because textile doesn't support empty directory an empty .keep file is created
// in the directory
func (b *Bucket) CreateDirectory(ctx context.Context, path string) (result path.Resolved, root path.Path, err error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	ctx, _, err = b.GetContext(ctx)
	if err != nil {
		return nil, nil, err
	}

	// append .keep file to the end of the directory
	emptyDirPath := strings.TrimRight(path, "/") + "/" + keepFileName
	return b.bucketsClient.PushPath(ctx, b.Key(), emptyDirPath, &bytes.Buffer{})
}

// ListDirectory returns a list of items in a particular directory
func (b *Bucket) ListDirectory(ctx context.Context, path string) (*DirEntries, error) {
	b.lock.RLock()
	defer b.lock.RUnlock()

	ctx, _, err := b.GetContext(ctx)
	if err != nil {
		return nil, err
	}

	result, err := b.bucketsClient.ListPath(ctx, b.Key(), path)
	if err != nil {
		return nil, err
	}

	return
(*DirEntries)(result), err } // DeleteDirOrFile will delete file or directory at path func (b *Bucket) DeleteDirOrFile(ctx context.Context, path string) (path.Resolved, error) { b.lock.Lock() defer b.lock.Unlock() ctx, _, err := b.GetContext(ctx) if err != nil { return nil, err } return b.bucketsClient.RemovePath(ctx, b.Key(), path) } // return the recursive items count for a path func (b *Bucket) ItemsCount(ctx context.Context, path string, withRecursive bool) (int32, error) { b.lock.RLock() defer b.lock.RUnlock() var count int32 dir, err := b.ListDirectory(ctx, path) if err != nil { return 0, err } count = dir.Item.ItemsCount if withRecursive { for _, item := range dir.Item.Items { if utils.IsMetaFileName(item.Name) { continue } if item.IsDir { n, err := b.ItemsCount(ctx, item.Path, withRecursive) if err != nil { return 0, err } count += n } } } return count, nil } // iterate over the bucket func (b *Bucket) Each(ctx context.Context, path string, iterator EachFunc, withRecursive bool) (int, error) { b.lock.RLock() defer b.lock.RUnlock() var count int dir, err := b.ListDirectory(ctx, path) if err != nil { return 0, err } for _, item := range dir.Item.Items { if utils.IsMetaFileName(item.Name) { continue } currItemPath := item.Name if path != "" { currItemPath = path + "/" + currItemPath } var n int if withRecursive && item.IsDir { if n, err = b.Each(ctx, currItemPath, iterator, withRecursive); err != nil { return 0, err } count += n continue } if err := iterator(ctx, b, currItemPath); err != nil { return 0, err } count += n } return count, nil } ================================================ FILE: core/textile/bucket/bucket_file.go ================================================ package bucket import ( "context" "io" "regexp" "time" "github.com/opentracing/opentracing-go" "github.com/FleekHQ/space-daemon/log" "github.com/ipfs/interface-go-ipfs-core/path" ) func (b *Bucket) FileExists(ctx context.Context, pth string) (bool, error) { b.lock.RLock() defer 
b.lock.RUnlock()

	ctx, _, err := b.GetContext(ctx)
	if err != nil {
		return false, err
	}
	listPathRes, err := b.bucketsClient.ListPath(ctx, b.GetData().Key, pth)
	if err != nil {
		return false, err
	}
	ctxWithDeadline, ctxCancel := context.WithDeadline(ctx, time.Now().Add(3*time.Second))
	defer ctxCancel()
	// Call ListIpfsPath with deadline to avoid waiting too much for DHT to resolve
	_, err = b.bucketsClient.ListIpfsPath(ctxWithDeadline, path.New(listPathRes.Item.Cid))
	if err != nil {
		// A "no link named ... under ..." error means the path does not
		// exist — report false with no error rather than a failure.
		match, _ := regexp.MatchString(".*no link named.*under.*", err.Error())
		if match {
			return false, nil
		}
		// Since a nil would be interpreted as a false
		return false, err
	}
	return true, nil
}

// UpdatedAt returns the last-updated timestamp recorded in the bucket
// metadata for the entry at pth.
func (b *Bucket) UpdatedAt(ctx context.Context, pth string) (int64, error) {
	b.lock.RLock()
	defer b.lock.RUnlock()

	ctx, _, err := b.GetContext(ctx)
	if err != nil {
		return 0, err
	}
	response, err := b.bucketsClient.ListPath(ctx, b.GetData().Key, pth)
	if err != nil {
		return 0, err
	}
	return response.Item.Metadata.UpdatedAt, nil
}

// UploadFile pushes the content read from reader to path inside the bucket
// and, when a notifier is attached, reports the write so it can be synced
// to the remote node.
func (b *Bucket) UploadFile(
	ctx context.Context,
	path string,
	reader io.Reader,
) (result path.Resolved, root path.Path, err error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "Bucket.UploadFile")
	defer span.Finish()

	b.lock.Lock()
	defer b.lock.Unlock()

	ctx, _, err = b.GetContext(ctx)
	if err != nil {
		return nil, nil, err
	}

	result, root, err = b.bucketsClient.PushPath(ctx, b.Key(), path, reader)
	if err != nil {
		return nil, nil, err
	}

	if b.notifier != nil {
		b.notifier.OnUploadFile(b.Slug(), path, result, root)
	}

	return result, root, nil
}

// DownloadFile writes the content of reader to path, like UploadFile, but
// without firing the notifier.
//
// NOTE(review): despite its name this method calls PushPath (a write), not
// PullPath — presumably so restore/download flows can write into the bucket
// without triggering sync notifications. Confirm the naming is intentional.
func (b *Bucket) DownloadFile(ctx context.Context, path string, reader io.Reader) (result path.Resolved, root path.Path, err error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	ctx, _, err = b.GetContext(ctx)
	if err != nil {
		return nil, nil, err
	}

	result, root, err = b.bucketsClient.PushPath(ctx, b.Key(), path, reader)
	if err != nil {
		return nil, nil, err
	}

	// no notification
	return result, root, nil
}

// GetFile pulls path from bucket writing it to
writer if it's a file. func (b *Bucket) GetFile(ctx context.Context, path string, w io.Writer) error { b.lock.RLock() defer b.lock.RUnlock() ctx, _, err := b.GetContext(ctx) if err != nil { return err } if err := b.bucketsClient.PullPath(ctx, b.Key(), path, w); err != nil { log.Error("error in GetFile from textile client", err) return err } return nil } ================================================ FILE: core/textile/bucket/crypto/crypto.go ================================================ package crypto import ( "bytes" b64 "encoding/base64" "errors" "io" "io/ioutil" "strings" ) func parseKeys(key []byte) (aesKey, iv, hmacKey []byte, err error) { if len(key) != aesKeySize+ivKeySize+hmacKeySize { return nil, nil, nil, errors.New("unsupported encryption keys provided.") } return key[:aesKeySize], key[aesKeySize:(aesKeySize + ivKeySize)], key[(aesKeySize + ivKeySize):], nil } // EncryptPathItems returns an encrypted path and a Reader that reads the encrypted data from the // plain reader passed into the function. 
// Encrypted data is AES-CTR of the data + HMAC-SHA512 of the encrypted data
// (hashFunc in this package is sha512.New).
//
// NOTE: key must be an 80 byte long key: 32 bytes AES key + 16 bytes IV +
// 32 bytes HMAC key (see parseKeys). The earlier "64 byte" note was wrong —
// parseKeys rejects a 64 byte key.
// To decrypt the result of this function use the DecryptPathItems function
func EncryptPathItems(key []byte, path string, plainReader io.Reader) (string, io.Reader, error) {
	// split key into key and secret
	// use key and secret and IV

	// encrypt path: each path segment is encrypted independently so the
	// directory structure (number of segments) is preserved.
	aesKey, iv, hmacKey, err := parseKeys(key)
	if err != nil {
		return "", nil, err
	}

	encryptedPath := ""
	pathParts := strings.Split(path, "/")
	pathPartsLen := len(pathParts)
	for i, pathItem := range pathParts {
		if pathItem == "" {
			continue
		}

		encryptedPathReader, err := NewEncryptReader(
			strings.NewReader(pathItem),
			aesKey,
			iv,
			hmacKey,
		)
		if err != nil {
			return "", nil, err
		}

		// base64url-encode so the ciphertext is safe to use as a path segment
		encryptedPathItem, err := readAsBase64Strings(encryptedPathReader)
		if err != nil {
			return "", nil, err
		}

		encryptedPath += encryptedPathItem
		if i != pathPartsLen-1 {
			encryptedPath += "/"
		}
	}

	// encrypt data
	var encryptedReader io.Reader
	if plainReader != nil {
		var err error
		encryptedReader, err = NewEncryptReader(
			plainReader,
			aesKey,
			iv,
			hmacKey,
		)
		if err != nil {
			return "", nil, err
		}
	}

	return encryptedPath, encryptedReader, nil
}

// DecryptPathItems returns a decrypted path string and an io.Reader that reads the decrypted data from the
// encrypted reader passed into the function.
// To only decrypt a path, pass in an empty byte buffer as the reader and check only for the string result.
//
// NOTE: key must be an 80 byte long key: 32 bytes AES key + 16 bytes IV +
// 32 bytes HMAC key (see parseKeys). The earlier "64 byte" note was wrong —
// parseKeys rejects a 64 byte key, and the tests use an 80 byte key.
func DecryptPathItems(key []byte, path string, encryptedReader io.Reader) (string, io.ReadCloser, error) {
	// decrypt path
	aesKey, iv, hmacKey, err := parseKeys(key)
	if err != nil {
		return "", nil, err
	}

	decryptedPath := ""
	pathParts := strings.Split(path, "/")
	pathPartsLen := len(pathParts)
	for i, pathItem := range pathParts {
		if pathItem == "" {
			continue
		}

		// each path segment was base64url-encoded at encryption time
		encryptedEntryNameBytes, err := bytesFromBase64Strings(pathItem)
		if err != nil {
			return "", nil, err
		}

		decryptedPathReader, err := NewDecryptReader(
			bytes.NewBuffer(encryptedEntryNameBytes),
			aesKey,
			iv,
			hmacKey,
		)
		if err != nil {
			return "", nil, err
		}

		decryptedPathItem, err := readBufferString(decryptedPathReader)
		if err != nil {
			return "", nil, err
		}

		decryptedPath += decryptedPathItem
		if i != pathPartsLen-1 {
			decryptedPath += "/"
		}
	}

	// decrypt data
	var decryptedReader io.ReadCloser
	if encryptedReader != nil {
		var err error
		decryptedReader, err = NewDecryptReader(
			encryptedReader,
			aesKey,
			iv,
			hmacKey,
		)
		if err != nil {
			return "", nil, err
		}
	}

	return decryptedPath, decryptedReader, nil
}

// readBufferString drains buf and returns its content as a string.
func readBufferString(buf io.Reader) (string, error) {
	builder := new(strings.Builder)
	_, err := io.Copy(builder, buf)
	if err != nil {
		return "", err
	}

	return builder.String(), nil
}

// readAsBase64Strings drains buf and returns its content encoded as a
// base64url string.
func readAsBase64Strings(buf io.Reader) (string, error) {
	data, err := ioutil.ReadAll(buf)
	if err != nil {
		return "", err
	}

	encodedData := b64.URLEncoding.EncodeToString(data)
	return encodedData, nil
}

// bytesFromBase64Strings decodes a base64url string back into raw bytes.
func bytesFromBase64Strings(data string) ([]byte, error) {
	decodedData, err := b64.URLEncoding.DecodeString(data)
	if err != nil {
		return nil, err
	}

	return decodedData, nil
}
================================================
FILE: core/textile/bucket/crypto/crypto_test.go
================================================
package crypto

import (
	"bytes"
	"crypto/rand"
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
)

// 32 byte AES key + 16 byte IV + 32 byte HMAC key (see parseKeys)
var validKeysSize = 80

func Test_EncryptPathItems_Fails_For_InvalidKeys(t *testing.T)
{ assert := require.New(t) key := make([]byte, 64) _, _ = rand.Read(key) _, _, err := EncryptPathItems(key, "", bytes.NewBufferString("")) assert.Error(err, "Encrypt Path Item with wrong key should fail") } func Test_DecryptPathItems_Fails_For_InvalidKeys(t *testing.T) { assert := require.New(t) key := make([]byte, 64) _, _ = rand.Read(key) _, _, err := DecryptPathItems(key, "", bytes.NewBufferString("")) assert.Error(err, "Decrypt Path Item with wrong key should fail") } func Test_EncryptPathItems_Works_With_DecryptPathItems(t *testing.T) { assert := require.New(t) key := make([]byte, validKeysSize) _, _ = rand.Read(key) plainPath := "smiggle/was/here" plainData := "This is the original unencrypted data" encryptedPath, encryptedReader, err := EncryptPathItems(key, plainPath, bytes.NewBufferString(plainData)) assert.NoError(err, "Error encrypting Path Items") assert.NotEqual(encryptedPath, plainPath, "Encrypted Path is equal to Plain Path") decryptedPath, decryptedReader, err := DecryptPathItems(key, encryptedPath, encryptedReader) assert.NoError(err, "Error decrypting Path Items") assert.Equal(plainPath, decryptedPath, "Plain Path is not equal to decrypted path") decryptedData, err := ioutil.ReadAll(decryptedReader) assert.NoError(err) assert.Equal(plainData, bytes.NewBuffer(decryptedData).String(), "Plain data is not equal to decrypted data") } func Test_EncryptPathItems_And_DecryptPathItems_Work_With_TopLevel_Files(t *testing.T) { assert := require.New(t) key := make([]byte, validKeysSize) _, _ = rand.Read(key) plainPath := "single_directory_entry" encryptedPath, _, err := EncryptPathItems(key, plainPath, nil) assert.NoError(err, "Error encrypting Path Items") assert.NotEqual(encryptedPath, plainPath, "Encrypted Path is equal to Plain Path") decryptedPath, _, err := DecryptPathItems(key, encryptedPath, nil) assert.NoError(err, "Error decrypting Path Items") assert.Equal(plainPath, decryptedPath, "Plain Path is not equal to decrypted path") } 
================================================
FILE: core/textile/bucket/crypto/decrypter.go
================================================
package crypto

import (
	"bufio"
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"errors"
	"io"
	"os"

	"github.com/odeke-em/go-utils/tmpfile"
)

const _16KB = 16 * 1024

// DecryptErr is returned when HMAC verification fails or the input is too
// short to contain the trailing MAC.
var DecryptErr = errors.New("message corrupt or incorrect keys")

// NewDecryptReader creates an io.ReadCloser wrapping an io.Reader using the keys and iv
// to decode the content using AES and verify HMAC.
func NewDecryptReader(r io.Reader, aesKey, iv, hmacKey []byte) (io.ReadCloser, error) {
	return newDecryptReader(r, aesKey, iv, hmacKey)
}

// newDecryptReader spools r's ciphertext to a temp file while computing its
// HMAC, verifies the MAC appended to the end of the stream, then returns a
// reader that decrypts the spooled ciphertext with AES-CTR.
//
// NOTE(review): the whole input is consumed and buffered to disk before the
// first Read — decryption is not streaming end-to-end.
func newDecryptReader(r io.Reader, aesKey []byte, iv []byte, hmacKey []byte) (io.ReadCloser, error) {
	mac := make([]byte, hmacSize)
	h := hmac.New(hashFunc, hmacKey)
	dst, err := tmpfile.New(&tmpfile.Context{
		Dir:    os.TempDir(),
		Suffix: "space-encrypted-",
	})
	if err != nil {
		return nil, err
	}
	// If there is an error, try to delete the temp file.
	defer func() {
		if err != nil {
			_ = dst.Done()
		}
	}()
	b, err := aes.NewCipher(aesKey)
	if err != nil {
		return nil, err
	}
	d := &decryptReader{
		tmpFile: dst,
		sReader: &cipher.StreamReader{R: dst, S: cipher.NewCTR(b, iv)},
	}
	// Everything written goes both into the running HMAC and the temp file.
	w := io.MultiWriter(h, dst)
	buf := bufio.NewReaderSize(r, _16KB)
	for {
		// Peek so the last hmacSize bytes (the MAC) can be held back from
		// the hash/temp-file writer until EOF is reached.
		b, err := buf.Peek(_16KB)
		if err != nil && err != io.EOF {
			return nil, err
		}
		if err == io.EOF {
			left := buf.Buffered()
			if left < hmacSize {
				return nil, DecryptErr
			}
			// The final hmacSize bytes are the MAC of everything before them.
			copy(mac, b[left-hmacSize:left])
			_, err = io.CopyN(w, buf, int64(left-hmacSize))
			if err != nil {
				return nil, err
			}
			break
		}
		// Not at EOF yet: forward all but a MAC-sized tail, which stays
		// buffered in case the stream ends on the next iteration.
		_, err = io.CopyN(w, buf, _16KB-hmacSize)
		if err != nil {
			return nil, err
		}
	}
	if !hmac.Equal(mac, h.Sum(nil)) {
		return nil, DecryptErr
	}
	// Rewind so reads start from the beginning of the spooled ciphertext.
	if _, err = dst.Seek(0, 0); err != nil {
		return nil, err
	}
	return d, nil
}

// decryptReader wraps a io.Reader decrypting its content.
type decryptReader struct {
	tmpFile *tmpfile.TmpFile
	sReader *cipher.StreamReader
}

// Read implements io.Reader.
func (d *decryptReader) Read(dst []byte) (int, error) {
	return d.sReader.Read(dst)
}

// Close implements io.Closer.
// Done removes the temp file backing the decrypted stream.
func (d *decryptReader) Close() error {
	return d.tmpFile.Done()
}
================================================
FILE: core/textile/bucket/crypto/encrypter.go
================================================
package crypto

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/sha512"
	"errors"
	"hash"
	"io"
)

// Layout of the combined 80 byte key consumed by parseKeys; hmacSize is the
// length of the SHA-512 MAC appended to the ciphertext.
const (
	aesKeySize  = 32
	ivKeySize   = 16
	hmacKeySize = 32
	hmacSize    = 64
)

var hashFunc = sha512.New

// hashReadWriter hashes on write and on read finalizes the hash and returns it.
// Writes after a Read will return an error.
type hashReadWriter struct {
	hash hash.Hash
	done bool
	sum  io.Reader
}

// Write implements io.Writer
func (h *hashReadWriter) Write(p []byte) (int, error) {
	if h.done {
		return 0, errors.New("writing to hashReadWriter after read is not allowed")
	}
	return h.hash.Write(p)
}

// Read implements io.Reader.
// The first Read finalizes the hash; subsequent reads drain the digest.
func (h *hashReadWriter) Read(p []byte) (int, error) {
	if !h.done {
		h.done = true
		h.sum = bytes.NewReader(h.hash.Sum(nil))
	}
	return h.sum.Read(p)
}

// NewEncryptReader returns an io.Reader wrapping the provided io.Reader.
func NewEncryptReader(r io.Reader, aesKey, iv, hmacKey []byte) (io.Reader, error) { if len(aesKey) != aesKeySize { return nil, errors.New("encryption key has incorrect length") } if len(iv) != ivKeySize { return nil, errors.New("encryption initialization vector size has incorrect length") } if len(hmacKey) != hmacKeySize { return nil, errors.New("encryption hmac key has incorrect length") } return newEncryptReader(r, aesKey, iv, hmacKey) } func newEncryptReader(r io.Reader, aesKey, iv, hmacKey []byte) (io.Reader, error) { b, err := aes.NewCipher(aesKey) if err != nil { return nil, err } h := hmac.New(hashFunc, hmacKey) hr := &hashReadWriter{hash: h} sr := &cipher.StreamReader{R: r, S: cipher.NewCTR(b, iv)} return io.MultiReader(io.TeeReader(sr, hr), hr), nil } ================================================ FILE: core/textile/bucket_factory.go ================================================ package textile import ( "context" "errors" "fmt" "github.com/FleekHQ/space-daemon/core/textile/common" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/space/domain" ma "github.com/multiformats/go-multiaddr" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/log" "github.com/alecthomas/jsonschema" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" bc "github.com/textileio/textile/v2/api/bucketsd/client" buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/cmd" tdb "github.com/textileio/textile/v2/threaddb" ) func NotFound(slug string) error { return errors.New(fmt.Sprintf("bucket %s not found", slug)) } type GetBucketForRemoteFileInput struct { Path string DbID string Bucket string } // Gets a wrapped bucket // remoteFile is optional. 
Include if looking for wrappers for remote buckets (mainly used for received files) func (tc *textileClient) GetBucket(ctx context.Context, slug string, remoteFile *GetBucketForRemoteFileInput) (Bucket, error) { if err := tc.requiresRunning(); err != nil { return nil, err } return tc.getBucket(ctx, slug, remoteFile) } // Gets a wrapped bucket // remoteFile is optional. Include if looking for wrappers for remote buckets (mainly used for received files) func (tc *textileClient) getBucket(ctx context.Context, slug string, remoteFile *GetBucketForRemoteFileInput) (Bucket, error) { var root *buckets_pb.Root getContextFn := tc.getOrCreateBucketContext bucketsClient := tc.bucketsClient var err error if remoteFile == nil { _, root, err = tc.getBucketRootFromSlug(ctx, slug) } else { root, getContextFn, err = tc.getBucketRootFromReceivedFile(ctx, remoteFile) bucketsClient = tc.hb } if err != nil { return nil, err } b := bucket.New( root, getContextFn, tc.getSecureBucketsClient(bucketsClient), ) // Attach a notifier if the bucket is local // So that local ops can be synced to the remote node if remoteFile == nil && tc.notifier != nil { b.AttachNotifier(tc.notifier) } return b, nil } func (tc *textileClient) getBucketForMirror(ctx context.Context, slug string) (Bucket, error) { root, getContextFn, _, err := tc.getBucketRootForMirror(ctx, slug) if err != nil { return nil, err } b := bucket.New( root, getContextFn, tc.getSecureBucketsClient(tc.hb), ) return b, nil } func (tc *textileClient) GetDefaultBucket(ctx context.Context) (Bucket, error) { return tc.GetBucket(ctx, defaultPersonalBucketSlug, nil) } func (tc *textileClient) getBucketContext(ctx context.Context, sDbID string, bucketSlug string, ishub bool, enckey []byte) (context.Context, *thread.ID, error) { dbID, err := utils.ParseDbIDFromString(sDbID) if err != nil { log.Error("Error casting thread id", err) return nil, nil, err } ctx, err = utils.GetThreadContext(ctx, bucketSlug, *dbID, ishub, tc.kc, tc.hubAuth, nil) if 
err != nil { return nil, nil, err } ctx = common.NewBucketEncryptionKeyContext(ctx, enckey) return ctx, dbID, err } // Returns a context that works for accessing a bucket func (tc *textileClient) getOrCreateBucketContext(ctx context.Context, bucketSlug string) (context.Context, *thread.ID, error) { m := tc.GetModel() bucketSchema, notFoundErr := m.FindBucket(ctx, bucketSlug) if notFoundErr == nil { // This means the bucket was already present in the schema var err error var dbID *thread.ID ctx, dbID, err = tc.getBucketContext(ctx, bucketSchema.DbID, bucketSlug, false, bucketSchema.EncryptionKey) if err != nil { return nil, nil, err } return ctx, dbID, err } // We need to create the thread and store it in the collection log.Debug("getOrCreateBucketContext: Thread ID not found in meta store. Generating a new one...") dbID := thread.NewIDV1(thread.Raw, 32) log.Debug("getOrCreateBucketContext: Creating Thread DB for bucket " + bucketSlug + " at db " + dbID.String()) managedKey, err := tc.kc.GetManagedThreadKey(getBucketThreadManagedKey(bucketSlug)) if err != nil { return nil, nil, err } pk, _, err := tc.kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return nil, nil, err } if err := tc.threads.NewDB(ctx, dbID, db.WithNewManagedThreadKey(managedKey), db.WithNewManagedLogKey(pk)); err != nil { return nil, nil, err } log.Debug("getOrCreateBucketContext: Thread DB Created") bucketSchema, err = m.CreateBucket(ctx, bucketSlug, utils.CastDbIDToString(dbID)) if err != nil { return nil, nil, err } bucketCtx, _, err := tc.getBucketContext(ctx, utils.CastDbIDToString(dbID), bucketSlug, false, bucketSchema.EncryptionKey) if err != nil { return nil, nil, err } return bucketCtx, &dbID, err } func (tc *textileClient) ListBuckets(ctx context.Context) ([]Bucket, error) { if err := tc.requiresRunning(); err != nil { return nil, err } return tc.listBuckets(ctx) } func (tc *textileClient) listBuckets(ctx context.Context) ([]Bucket, error) { bucketList, err := 
tc.GetModel().ListBuckets(ctx) if err != nil { return nil, err } result := make([]Bucket, 0) for _, b := range bucketList { // Skip listing the mirror bucket if b.Slug == defaultPersonalMirrorBucketSlug { continue } bucketObj, err := tc.getBucket(ctx, b.Slug, nil) if err != nil { return nil, err } result = append(result, bucketObj) } return result, nil } func (tc *textileClient) getBucketRootFromReceivedFile(ctx context.Context, file *GetBucketForRemoteFileInput) (*buckets_pb.Root, bucket.GetBucketContextFn, error) { receivedFile, err := tc.GetModel().FindReceivedFile(ctx, file.DbID, file.Bucket, file.Path) if err != nil { return nil, nil, err } getCtxFn := func(ctx context.Context, slug string) (context.Context, *thread.ID, error) { return tc.getBucketContext(ctx, receivedFile.DbID, receivedFile.Bucket, true, receivedFile.EncryptionKey) } remoteCtx, _, err := getCtxFn(ctx, receivedFile.Bucket) if err != nil { return nil, nil, err } sbs := tc.getSecureBucketsClient(tc.hb) b, err := sbs.ListPath(remoteCtx, receivedFile.BucketKey, receivedFile.Path) if err != nil { return nil, nil, err } if b != nil { return b.GetRoot(), getCtxFn, nil } return nil, nil, NotFound(receivedFile.Bucket) } func (tc *textileClient) getBucketRootForMirror(ctx context.Context, slug string) (*buckets_pb.Root, bucket.GetBucketContextFn, string, error) { bucket, err := tc.GetModel().FindBucket(ctx, slug) if err != nil { return nil, nil, "", err } getCtxFn := func(ctx context.Context, slug string) (context.Context, *thread.ID, error) { return tc.getBucketContext(ctx, bucket.RemoteDbID, bucket.RemoteBucketSlug, true, bucket.EncryptionKey) } remoteCtx, _, err := getCtxFn(ctx, bucket.RemoteBucketSlug) if err != nil { return nil, nil, "", err } sbs := tc.getSecureBucketsClient(tc.hb) b, err := sbs.ListPath(remoteCtx, bucket.RemoteBucketKey, "") if err != nil { return nil, nil, "", err } if b != nil { return b.GetRoot(), getCtxFn, bucket.RemoteBucketSlug, nil } return nil, nil, "", 
NotFound(bucket.RemoteBucketSlug) } func (tc *textileClient) getBucketRootFromSlug(ctx context.Context, slug string) (context.Context, *buckets_pb.Root, error) { ctx, _, err := tc.getOrCreateBucketContext(ctx, slug) if err != nil { return nil, nil, err } bucketListReply, err := tc.bucketsClient.List(ctx) if err != nil { return nil, nil, err } for _, root := range bucketListReply.Roots { if root.Name == slug { return ctx, root, nil } } return nil, nil, NotFound(slug) } // Creates a bucket. func (tc *textileClient) CreateBucket(ctx context.Context, bucketSlug string) (Bucket, error) { if err := tc.requiresRunning(); err != nil { return nil, err } return tc.createBucket(ctx, bucketSlug) } func (tc *textileClient) createBucket(ctx context.Context, bucketSlug string) (Bucket, error) { log.Debug("Creating a new bucket with slug " + bucketSlug) var err error m := tc.GetModel() if b, _ := tc.getBucket(ctx, bucketSlug, nil); b != nil { return b, nil } ctx, dbID, err := tc.getOrCreateBucketContext(ctx, bucketSlug) if err != nil { return nil, err } log.Debug("Creating Bucket in db " + dbID.String()) // create bucket b, err := tc.bucketsClient.Create(ctx, bc.WithName(bucketSlug)) if err != nil { return nil, err } // We store the bucket in a meta thread so that we can later fetch a list of all buckets log.Debug("Bucket " + bucketSlug + " created. 
Storing metadata.") schema, err := m.CreateBucket(ctx, bucketSlug, utils.CastDbIDToString(*dbID)) if err != nil { return nil, err } tc.sync.NotifyBucketCreated(schema.Slug, schema.EncryptionKey) tc.sync.NotifyBucketRestore(bucketSlug) newB := bucket.New( b.Root, tc.getOrCreateBucketContext, tc.getSecureBucketsClient(tc.bucketsClient), ) return newB, nil } func (tc *textileClient) ShareBucket(ctx context.Context, bucketSlug string) (*db.Info, error) { bs, err := tc.GetModel().FindBucket(ctx, bucketSlug) if err != nil { return nil, err } dbID, err := utils.ParseDbIDFromString(bs.DbID) b, err := tc.threads.GetDBInfo(ctx, *dbID) // replicate to the hub hubma := tc.cfg.GetString(config.TextileHubMa, "") if hubma == "" { return nil, fmt.Errorf("no textile hub set") } if _, err := tc.netc.AddReplicator(ctx, *dbID, cmd.AddrFromStr(hubma)); err != nil { log.Error("Unable to replicate on the hub: ", err) // proceeding still because local/public IP // addresses could be used to join thread } return &b, err } func (tc *textileClient) joinBucketViaAddress(ctx context.Context, address string, key thread.Key, bucketSlug string, opts ...db.NewManagedOption) error { multiaddress, err := ma.NewMultiaddr(address) if err != nil { log.Error("Unable to parse multiaddr", err) return err } var ( schema *jsonschema.Schema indexes = []db.Index{{ Path: "path", }} ) reflector := jsonschema.Reflector{ExpandedStruct: true} schema = reflector.Reflect(&tdb.Bucket{}) newDbOpts := []db.NewManagedOption{db.WithNewManagedCollections(db.CollectionConfig{ Name: "buckets", Schema: schema, Indexes: indexes, })} newDbOpts = append(newDbOpts, opts...) err = tc.threads.NewDBFromAddr(ctx, multiaddress, key, newDbOpts...) 
if err != nil {
		log.Error("Unable to join addr", err)
		return err
	}

	dbID, err := thread.FromAddr(multiaddress)
	if err != nil {
		return err
	}

	newBucket, err := tc.GetModel().UpsertBucket(ctx, bucketSlug, utils.CastDbIDToString(dbID))
	if err != nil {
		return err
	}

	newBucketCtx, _, err := tc.getBucketContext(ctx, utils.CastDbIDToString(dbID), bucketSlug, false, newBucket.EncryptionKey)
	if err != nil {
		return err
	}

	// Create bucket in buckets client in case it's not already there
	// (best-effort: an "already exists" error is expected and ignored).
	tc.bucketsClient.Create(newBucketCtx, bc.WithName(bucketSlug))

	return nil
}

// JoinBucket joins the thread backing slug using the addresses and key
// advertised in ti, falling back to joining via the hub when none of the
// advertised addresses work. Returns true when the thread was joined.
func (tc *textileClient) JoinBucket(ctx context.Context, slug string, ti *domain.ThreadInfo) (bool, error) {
	k, err := thread.KeyFromString(ti.Key)
	// BUGFIX: this error was previously ignored (immediately shadowed),
	// which allowed proceeding with a zero-value thread key.
	if err != nil {
		return false, fmt.Errorf("Unable to parse thread key")
	}

	// BUGFIX: guard against an empty address list before indexing it below.
	if len(ti.Addresses) == 0 {
		return false, fmt.Errorf("No addresses provided to join bucket")
	}

	// get the DB ID from the first ma
	ma1, err := ma.NewMultiaddr(ti.Addresses[0])
	if err != nil {
		return false, fmt.Errorf("Unable to parse multiaddr")
	}
	dbID, err := thread.FromAddr(ma1)
	if err != nil {
		return false, fmt.Errorf("Unable to parse db id")
	}

	for _, a := range ti.Addresses {
		if err := tc.joinBucketViaAddress(ctx, a, k, slug); err != nil {
			continue
		}

		return true, nil
	}

	log.Info("unable to join any advertised addresses, so joining via the hub instead")

	// if it reached here then no addresses worked, try the hub
	hubAddr := tc.cfg.GetString(config.TextileHubMa, "") + "/thread/" + dbID.String()
	if err := tc.joinBucketViaAddress(ctx, hubAddr, k, slug); err != nil {
		log.Error("error joining bucket from hub", err)
		return false, err
	}

	return true, nil
}

// ToggleBucketBackup flips the backup flag on bucketSlug and notifies the
// synchronizer so backups start or stop accordingly. Returns the new state.
func (tc *textileClient) ToggleBucketBackup(ctx context.Context, bucketSlug string, bucketBackup bool) (bool, error) {
	bucketSchema, err := tc.GetModel().BucketBackupToggle(ctx, bucketSlug, bucketBackup)
	if err != nil {
		return false, err
	}

	if bucketSchema.Backup {
		tc.sync.NotifyBucketBackupOn(bucketSlug)
	} else {
		tc.sync.NotifyBucketBackupOff(bucketSlug)
	}

	return bucketSchema.Backup, nil
}

// BucketBackupRestore queues a restore of bucketSlug from its hub backup.
func (tc *textileClient) BucketBackupRestore(ctx context.Context, bucketSlug string) error {
	tc.sync.NotifyBucketRestore(bucketSlug)
return nil } func (tc *textileClient) IsBucketBackup(ctx context.Context, bucketSlug string) bool { bucketSchema, err := tc.GetModel().FindBucket(ctx, bucketSlug) if err != nil { return false } return bucketSchema.Backup } func GetDefaultBucketSlug() string { return defaultPersonalBucketSlug } func GetDefaultMirrorBucketSlug() string { return defaultPersonalMirrorBucketSlug } // Attempts to restore buckets from a hub replication // Returns nil if there's nothing to restore or the restoration succeeded func (tc *textileClient) restoreBuckets(ctx context.Context) error { bucketList, err := tc.GetModel().ListBuckets(ctx) if err != nil { return err } if len(bucketList) == 0 && tc.shouldForceRestore { return errors.New("No buckets ready for restore") } dbs, err := tc.threads.ListDBs(ctx) if err != nil { return err } threadsInitialized := true for _, b := range bucketList { dbID, err := utils.ParseDbIDFromString(b.DbID) if err != nil { return err } if _, ok := dbs[*dbID]; !ok { threadsInitialized = false } } // Buckets already initialized if threadsInitialized { return nil } hubCtx, err := tc.getHubCtx(ctx) if err != nil { return err } hubmaStr := tc.cfg.GetString(config.TextileHubMa, "") pk, _, err := tc.kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return err } // Check if there's a bucket replicated on the hub for _, b := range bucketList { dbID, err := utils.ParseDbIDFromString(b.DbID) if err != nil { return err } _, err = tc.hnetc.GetThread(hubCtx, *dbID) replThreadExists := err == nil if replThreadExists { hubmaWithThreadID := hubmaStr + "/thread/" + dbID.String() managedKey, err := tc.kc.GetManagedThreadKey(getBucketThreadManagedKey(b.Slug)) if err != nil { return err } err = tc.joinBucketViaAddress( ctx, hubmaWithThreadID, managedKey, b.Slug, db.WithNewManagedBackfillBlock(true), db.WithNewManagedLogKey(pk), db.WithNewManagedThreadKey(managedKey), ) if err != nil { log.Error("could not join replicated bucket", err) } if err != nil && tc.shouldForceRestore { 
return err } } } return nil } func getBucketThreadManagedKey(bucketSlug string) string { return "bucketKey_" + bucketSlug } ================================================ FILE: core/textile/client.go ================================================ package textile import ( "context" "crypto/tls" "errors" "fmt" "strings" "sync" "time" manet "github.com/multiformats/go-multiaddr/net" "github.com/FleekHQ/space-daemon/core/search" "github.com/FleekHQ/space-daemon/config" httpapi "github.com/ipfs/go-ipfs-http-client" iface "github.com/ipfs/interface-go-ipfs-core" "github.com/FleekHQ/space-daemon/core/keychain" db "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/core/textile/model" "github.com/FleekHQ/space-daemon/core/textile/notifier" synchronizer "github.com/FleekHQ/space-daemon/core/textile/sync" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/core/util/address" "github.com/FleekHQ/space-daemon/log" ma "github.com/multiformats/go-multiaddr" threadsClient "github.com/textileio/go-threads/api/client" nc "github.com/textileio/go-threads/net/api/client" bucketsClient "github.com/textileio/textile/v2/api/bucketsd/client" "github.com/textileio/textile/v2/api/common" uc "github.com/textileio/textile/v2/api/usersd/client" "github.com/textileio/textile/v2/cmd" mail "github.com/textileio/textile/v2/mail/local" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) const healthcheckFailuresBeforeUnhealthy = 3 var HealthcheckMaxRetriesReachedErr = errors.New(fmt.Sprintf("textile client not initialized after %d attempts", healthcheckFailuresBeforeUnhealthy)) type textileClient struct { store db.Store kc keychain.Keychain threads *threadsClient.Client ht *threadsClient.Client bucketsClient *bucketsClient.Client mb Mailbox hb *bucketsClient.Client filesSearchEngine search.FilesSearchEngine isRunning 
bool
	isInitialized      bool
	isSyncInitialized  bool
	Ready              chan bool
	keypairDeleted     chan bool
	shuttingDown       chan bool
	onHealthy          chan error
	onInitialized      chan bool
	cfg                config.Config
	isConnectedToHub   bool
	netc               *nc.Client
	hnetc              *nc.Client
	uc                 UsersClient
	mailEvents         chan mail.MailboxEvent
	hubAuth            hub.HubAuth
	mbNotifier         GrpcMailboxNotifier
	failedHealthchecks int
	sync               synchronizer.Synchronizer
	notifier           bucket.Notifier
	ipfsClient         iface.CoreAPI
	dbListeners        map[string]Listener
	shouldForceRestore bool
	healthcheckMutex   *sync.Mutex
}

// Creates a new Textile Client
func NewClient(
	store db.Store,
	kc keychain.Keychain,
	hubAuth hub.HubAuth,
	uc UsersClient,
	mb Mailbox,
	search search.FilesSearchEngine,
) *textileClient {
	return &textileClient{
		store:              store,
		kc:                 kc,
		threads:            nil,
		bucketsClient:      nil,
		mb:                 mb,
		netc:               nil,
		hnetc:              nil,
		uc:                 uc,
		ht:                 nil,
		hb:                 nil,
		isRunning:          false,
		isInitialized:      false,
		isSyncInitialized:  false,
		Ready:              make(chan bool),
		keypairDeleted:     make(chan bool),
		shuttingDown:       make(chan bool),
		onHealthy:          make(chan error),
		onInitialized:      make(chan bool),
		mailEvents:         make(chan mail.MailboxEvent),
		isConnectedToHub:   false,
		hubAuth:            hubAuth,
		mbNotifier:         nil,
		failedHealthchecks: 0,
		sync:               nil,
		notifier:           nil,
		dbListeners:        make(map[string]Listener),
		shouldForceRestore: false,
		healthcheckMutex:   &sync.Mutex{},
		filesSearchEngine:  search,
	}
}

// WaitForReady returns a channel that fires once the client has started.
func (tc *textileClient) WaitForReady() chan bool {
	return tc.Ready
}

// WaitForInitialized returns a channel that fires once default resources exist.
func (tc *textileClient) WaitForInitialized() chan bool {
	return tc.onInitialized
}

// Returns an error if it exceeds the max amount of attempts
func (tc *textileClient) WaitForHealthy() chan error {
	return tc.onHealthy
}

func (tc *textileClient) IsInitialized() bool {
	return tc.isInitialized
}

// Healthy means initialized and connected to hub
func (tc *textileClient) IsHealthy() bool {
	return tc.isInitialized && tc.isConnectedToHub
}

// requiresRunning guards operations that need a started and initialized client.
func (tc *textileClient) requiresRunning() error {
	// FIX: replaced non-idiomatic `x == false` comparisons with `!x`.
	if !tc.isRunning || !tc.isInitialized {
		return errors.New("ran an operation that requires starting and initializing textileClient first")
	}
	return nil
}

func (tc *textileClient) getHubCtx(ctx context.Context) (context.Context, error) {
	ctx, err := tc.hubAuth.GetHubContext(ctx)
	if err != nil {
		return nil, err
	}
	return ctx, nil
}

// initializeSync wires up the synchronizer with accessors for local and
// mirror buckets, restores its persisted queue and starts it.
func (tc *textileClient) initializeSync(ctx context.Context) {
	getLocalBucketFn := func(ctx context.Context, slug string) (bucket.BucketInterface, error) {
		return tc.getBucket(ctx, slug, nil)
	}

	getMirrorBucketFn := func(ctx context.Context, slug string) (bucket.BucketInterface, error) {
		return tc.getBucketForMirror(ctx, slug)
	}

	tc.sync = synchronizer.New(
		tc.store,
		tc.GetModel(),
		tc.kc,
		tc.hubAuth,
		tc.hb,
		tc.ht,
		tc.netc,
		tc.cfg,
		getMirrorBucketFn,
		getLocalBucketFn,
		tc.getBucketContext,
		tc.addListener,
	)

	tc.notifier = notifier.New(tc.sync)

	if err := tc.sync.RestoreQueue(); err != nil {
		log.Warn("Could not restore Textile synchronizer queue. Queue will start fresh.")
	}

	tc.isSyncInitialized = true

	tc.sync.Start(ctx)
}

// Starts the Textile Client
func (tc *textileClient) start(ctx context.Context, cfg config.Config) error {
	tc.cfg = cfg
	auth := common.Credentials{}
	var opts []grpc.DialOption

	opts = append(opts, grpc.WithInsecure())
	opts = append(opts, grpc.WithPerRPCCredentials(auth))

	var threads *threadsClient.Client
	var buckets *bucketsClient.Client
	var netc *nc.Client

	// by default it goes to local threads now
	addrAPI := cmd.AddrFromStr(tc.cfg.GetString(config.BuckdApiMaAddr, "/ip4/127.0.0.1/tcp/3006"))
	_, host, err := manet.DialArgs(addrAPI)
	if err != nil {
		return errors.New("invalid bucket daemon host provided: " + err.Error())
	}

	log.Debug("Creating buckets client in " + host)
	if b, err := bucketsClient.NewClient(host, opts...); err != nil {
		cmd.Fatal(err)
	} else {
		buckets = b
	}

	log.Debug("Creating threads client in " + host)
	if t, err := threadsClient.NewClient(host, opts...); err != nil {
		cmd.Fatal(err)
	} else {
		threads = t
	}

	if n, err := nc.NewClient(host, opts...); err != nil {
		cmd.Fatal(err)
	} else {
		netc = n
	}
ipfsNodeAddr := cfg.GetString(config.Ipfsnodeaddr, "/ip4/127.0.0.1/tcp/5001")
	if ipfsNodeAddr == "" {
		ipfsNodeAddr = "/ip4/127.0.0.1/tcp/5001"
	}

	multiAddr, err := ma.NewMultiaddr(ipfsNodeAddr)
	if err != nil {
		cmd.Fatal(err)
	}

	if ic, err := httpapi.NewApi(multiAddr); err != nil {
		cmd.Fatal(err)
	} else {
		tc.ipfsClient = ic
	}

	tc.bucketsClient = buckets
	tc.threads = threads
	tc.netc = netc
	tc.ht = getHubThreadsClient(tc.cfg.GetString(config.TextileHubTarget, ""))
	tc.hb = getHubBucketClient(tc.cfg.GetString(config.TextileHubTarget, ""))
	tc.hnetc = getHubNetworkClient(tc.cfg.GetString(config.TextileHubTarget, ""))

	tc.isRunning = true

	tc.healthcheck(ctx)

	tc.Ready <- true

	// Repeating healthcheck
	for {
		timeAfterNextCheck := 60 * time.Second

		// Do more frequent checks if the client is not initialized/running
		// (FIX: replaced `x == false` comparisons with idiomatic `!x`)
		if !tc.isConnectedToHub || !tc.isInitialized {
			timeAfterNextCheck = 3 * time.Second
		}

		// If it's trying to shutdown we return right away
		if !tc.isRunning {
			return nil
		}

		select {
		case <-time.After(timeAfterNextCheck):
			tc.healthcheck(ctx)
		// If we get notified that the keypair got deleted, start checking right away
		case <-tc.keypairDeleted:
			tc.healthcheck(ctx)
		// If it's trying to shutdown we return right away
		case <-ctx.Done():
			return nil
		case <-tc.shuttingDown:
			return nil
		}
	}
}

// checkHubConnection verifies hub credentials and, on the first successful
// connection, sets up the mailbox and starts listening for messages.
func (tc *textileClient) checkHubConnection(ctx context.Context) error {
	// Get the public key to see if we have any
	// Reject right away if not
	_, err := tc.kc.GetStoredPublicKey()
	if err != nil {
		tc.isConnectedToHub = false
		return err
	}

	// Attempt to connect to the Hub
	hubctx, err := tc.getHubCtx(ctx)
	if err != nil {
		tc.isConnectedToHub = false
		log.Error("Could not connect to Textile Hub. Starting in offline mode.", err)
		return err
	}

	if !tc.isConnectedToHub {
		// setup mailbox
		mailbox, err := tc.setupOrCreateMailBox(hubctx)
		if err != nil {
			log.Error("Unable to setup mailbox", err)
			tc.isConnectedToHub = false
			return err
		}
		tc.mb = mailbox

		if err := tc.listenForMessages(hubctx); err != nil {
			tc.isConnectedToHub = false
			log.Error("Could not listen for mailbox messages", err)
			return err
		}
	}

	tc.isConnectedToHub = true

	return nil
}

// getHubTargetOpts builds gRPC dial options for the hub target, enabling TLS
// when the host looks like it uses port 443.
func getHubTargetOpts(host string) []grpc.DialOption {
	auth := common.Credentials{}
	var opts []grpc.DialOption
	// NOTE(review): substring match on "443" is a heuristic; a host such as
	// "example:4433" would also match — confirm this is intended.
	if strings.Contains(host, "443") {
		creds := credentials.NewTLS(&tls.Config{})
		opts = append(opts, grpc.WithTransportCredentials(creds))
		auth.Secure = true
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	opts = append(opts, grpc.WithPerRPCCredentials(auth))
	return opts
}

// CreateUserClient returns a hub users client, exiting on dial failure.
func CreateUserClient(host string) UsersClient {
	opts := getHubTargetOpts(host)

	users, err := uc.NewClient(host, opts...)
	if err != nil {
		cmd.Fatal(err)
	}

	return users
}

func getHubThreadsClient(host string) *threadsClient.Client {
	opts := getHubTargetOpts(host)

	tc, err := threadsClient.NewClient(host, opts...)
	if err != nil {
		cmd.Fatal(err)
	}

	return tc
}

func getHubNetworkClient(host string) *nc.Client {
	opts := getHubTargetOpts(host)

	n, err := nc.NewClient(host, opts...)
	if err != nil {
		cmd.Fatal(err)
	}

	return n
}

func getHubBucketClient(host string) *bucketsClient.Client {
	opts := getHubTargetOpts(host)

	tc, err := bucketsClient.NewClient(host, opts...)
if err != nil { cmd.Fatal(err) } return tc } func (tc *textileClient) initialize(ctx context.Context) error { err := tc.restoreBuckets(ctx) if err != nil { return err } buckets, err := tc.listBuckets(ctx) if err != nil { return err } pub, _ := tc.kc.GetStoredPublicKey() if pub != nil { address := address.DeriveAddress(pub) log.Debug("Initializing Textile client", fmt.Sprintf("address:%s", address)) } // Create default bucket if it doesnt exist defaultBucketExists := false for _, b := range buckets { if b.Slug() == defaultPersonalBucketSlug { defaultBucketExists = true } } if defaultBucketExists == false { _, err := tc.createBucket(ctx, defaultPersonalBucketSlug) if err != nil { log.Error("Error creating default bucket", err) return err } } if err = tc.initSearchIndex(ctx); err != nil { log.Error("Error initializing files search index", err) return err } if tc.sync != nil { tc.sync.NotifyBucketStartup(defaultPersonalBucketSlug) } _, err = tc.createDefaultPublicBucket(ctx) if err != nil { log.Warn("Failed to create default public bucket", "err:"+err.Error()) } tc.isInitialized = true // Non-blocking channel send in case there are no listeners registered select { case tc.onInitialized <- true: log.Debug("Notifying Textile Client init ready") default: // Do nothing } log.Debug("Textile Client initialized successfully") return nil } // Starts a Textile Client and also initializes default resources for it (default bucket and metathread). 
// Then leaves the process running to attempt to connect or to initialize if it's not already initialized func (tc *textileClient) Start(ctx context.Context, cfg config.Config) error { // Start Textile Client return tc.start(ctx, cfg) } // Used by delete account so we can disable it so it gets // enabled again during startup func (tc *textileClient) DisableSync() { tc.isSyncInitialized = false } // Closes connection to Textile func (tc *textileClient) Shutdown() error { tc.shuttingDown <- true tc.isRunning = false tc.isInitialized = false tc.isSyncInitialized = false tc.shouldForceRestore = false // Close channels close(tc.mailEvents) close(tc.Ready) close(tc.onHealthy) close(tc.keypairDeleted) close(tc.shuttingDown) tc.closeListeners() if err := tc.bucketsClient.Close(); err != nil { return err } if err := tc.threads.Close(); err != nil { return err } tc.sync.Shutdown() tc.bucketsClient = nil tc.threads = nil return nil } // Returns a thread client connection. Requires the client to be running. func (tc *textileClient) GetThreadsConnection() (*threadsClient.Client, error) { if err := tc.requiresRunning(); err != nil { return nil, err } return tc.threads, nil } func (tc *textileClient) IsRunning() bool { return tc.isRunning } func (tc *textileClient) GetFailedHealthchecks() int { return tc.failedHealthchecks } // Checks for connection and initialization needs. func (tc *textileClient) healthcheck(ctx context.Context) { tc.healthcheckMutex.Lock() defer tc.healthcheckMutex.Unlock() log.Debug("Textile Client healthcheck... Start.") if tc.isSyncInitialized == false { tc.initializeSync(ctx) } if tc.isInitialized == false { // NOTE: Initialize does not need a hub connection as remote syncing is done in a background process tc.initialize(ctx) } tc.checkHubConnection(ctx) if len(tc.dbListeners) == 0 { tc.initializeListeners(ctx) } switch { case tc.isInitialized == false: log.Debug("Textile Client healthcheck... 
Not initialized yet.") tc.failedHealthchecks = tc.failedHealthchecks + 1 case tc.isConnectedToHub == false: log.Debug("Textile Client healthcheck... Not connected to hub.") tc.failedHealthchecks = tc.failedHealthchecks + 1 default: log.Debug("Textile Client healthcheck... OK.") tc.failedHealthchecks = 0 // Non-blocking channel send in case there are no listeners registered select { case tc.onHealthy <- nil: log.Debug("Notifying health OK") default: // Do nothing } } if tc.failedHealthchecks >= 3 { // Non-blocking channel send in case there are no listeners registered select { case tc.onHealthy <- HealthcheckMaxRetriesReachedErr: log.Debug("Notifying healthcheck: max attempts surpassed") tc.failedHealthchecks = 0 default: // Do nothing } } } func (tc *textileClient) RemoveKeys(ctx context.Context) error { if err := tc.hubAuth.ClearCache(); err != nil { return err } if err := tc.clearLocalMailbox(); err != nil { return err } tc.isInitialized = false tc.isConnectedToHub = false tc.keypairDeleted <- true metathreadID, err := utils.NewDeterministicThreadID(tc.kc, utils.MetathreadThreadVariant) if err != nil { return err } err = tc.threads.DeleteDB(ctx, metathreadID) if err != nil { return err } return nil } func (tc *textileClient) GetModel() model.Model { return model.New( tc.store, tc.kc, tc.threads, tc.ht, tc.hubAuth, tc.cfg, tc.netc, tc.hnetc, tc.shouldForceRestore, tc.filesSearchEngine, ) } func (tc *textileClient) getSecureBucketsClient(baseClient *bucketsClient.Client) *SecureBucketClient { isRemote := baseClient == tc.hb return NewSecureBucketsClient(baseClient, tc.kc, tc.store, tc.threads, tc.ipfsClient, isRemote, tc.cfg) } func (tc *textileClient) requiresHubConnection() error { if err := tc.requiresRunning(); err != nil { return err } if tc.isConnectedToHub == false || tc.mb == nil { return errors.New("ran an operation that requires connection to hub") } return nil } func (tc *textileClient) AttachSynchronizerNotifier(notif synchronizer.EventNotifier) { 
tc.sync.AttachNotifier(notif) } // Initializes dbs from a backup. Returns error if it can't initialize func (tc *textileClient) RestoreDB(ctx context.Context) error { tc.healthcheckMutex.Lock() defer tc.healthcheckMutex.Unlock() tc.shouldForceRestore = true err := tc.initialize(ctx) tc.shouldForceRestore = false if err != nil { tc.kc.DeleteKeypair() return err } return nil } ================================================ FILE: core/textile/common/common.go ================================================ package common import "context" // NewBucketEncryptionKeyContext adds the encryption key to the context // which is used to encrypt and decrypt requests to buckets client. func NewBucketEncryptionKeyContext(ctx context.Context, key []byte) context.Context { if key == nil || len(key) == 0 { return ctx } return context.WithValue(ctx, "bucketEncryptionKey", key) } // BucketEncryptionKeyFromContext returns the bucket encryption key from a context. func BucketEncryptionKeyFromContext(ctx context.Context) ([]byte, bool) { key, ok := ctx.Value("bucketEncryptionKey").([]byte) return key, ok } ================================================ FILE: core/textile/event_handler.go ================================================ package textile import ( "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/sync" "github.com/FleekHQ/space-daemon/log" iface "github.com/ipfs/interface-go-ipfs-core" tc "github.com/textileio/go-threads/api/client" ) // EventHandler type EventHandler interface { OnCreate(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) OnRemove(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) OnSave(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) } // Implements EventHandler and defaults to logging actions performed type defaultListenerHandler struct{} func (h *defaultListenerHandler) OnCreate(bucketData *bucket.BucketData, listenEvent 
*tc.ListenEvent) { log.Info("Default Listener Handler: OnCreate") } func (h *defaultListenerHandler) OnRemove(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Info("Default Listener Handler: OnRemove") } func (h *defaultListenerHandler) OnSave(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Info("Default Listener Handler: OnSave") } type restorerListenerHandler struct { synchronizer sync.Synchronizer st store.Store ipfsClient iface.CoreAPI } func newRestorerListenerHandler(synchronizer sync.Synchronizer, st store.Store, ipfsClient iface.CoreAPI) *restorerListenerHandler { return &restorerListenerHandler{ synchronizer: synchronizer, st: st, ipfsClient: ipfsClient, } } func (h *restorerListenerHandler) OnCreate(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Debug("Restorer Listener Handler: OnCreate") h.synchronizer.NotifyBucketRestore(bucketData.Name) } func (h *restorerListenerHandler) OnRemove(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Debug("Restorer Listener Handler: OnRemove") h.synchronizer.NotifyBucketRestore(bucketData.Name) } func (h *restorerListenerHandler) OnSave(bucketData *bucket.BucketData, listenEvent *tc.ListenEvent) { log.Debug("Restorer Listener Handler: OnSave") h.synchronizer.NotifyBucketRestore(bucketData.Name) } ================================================ FILE: core/textile/hub/hmacTestKey ================================================ #5K+~ew{Z(T(P.ZGwb="=.!r.O͚gЀ ================================================ FILE: core/textile/hub/hub_auth.go ================================================ package hub import ( "bytes" "context" b64 "encoding/base64" "encoding/json" "errors" "sync" "time" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/log" "github.com/dgrijalva/jwt-go" mbase "github.com/multiformats/go-multibase" 
"github.com/textileio/go-threads/core/thread" "github.com/textileio/textile/v2/api/common" "golang.org/x/net/websocket" ) type sentMessageData struct { Signature string `json:"sig"` PublicKey string `json:"pubkey"` } type outMessage struct { Action string `json:"action"` Data sentMessageData `json:"data"` } type inMessageChallengeValue struct { Type string `json:"type"` Data []byte `json:"data"` } type inMessageChallenge struct { Type string `json:"type"` Value inMessageChallengeValue `json:"value"` } type inMessageTokenValue struct { Token string `json:"token"` Key string `json:"key"` Msg string `json:"msg"` Sig string `json:"sig"` AppToken string `json:"appToken"` } type inMessageToken struct { Type string `json:"type"` Value inMessageTokenValue `json:"value"` } type AuthTokens struct { HubToken string Key string Sig string AppToken string Msg string } type HubAuth interface { GetTokensWithCache(ctx context.Context) (*AuthTokens, error) GetHubContext(ctx context.Context) (context.Context, error) ClearCache() error } type hub struct { st store.Store kc keychain.Keychain cfg config.Config fetchTokensMutex *sync.Mutex } func New(st store.Store, kc keychain.Keychain, cfg config.Config) *hub { return &hub{ st: st, kc: kc, cfg: cfg, fetchTokensMutex: &sync.Mutex{}, } } const tokensStoreKey = "hubTokens" func isTokenExpired(t string) bool { token, _, err := new(jwt.Parser).ParseUnverified(t, jwt.MapClaims{}) if err != nil { return true } claims, ok := token.Claims.(jwt.MapClaims) if !ok { return true } var expiryTime time.Time switch exp := claims["exp"].(type) { case float64: expiryTime = time.Unix(int64(exp), 0) case json.Number: v, err := exp.Int64() if err != nil { return true } expiryTime = time.Unix(v, 0) } now := time.Now() return expiryTime.Before(now) } func (h *hub) retrieveTokens() (*inMessageTokenValue, error) { stored, err := h.st.Get([]byte(tokensStoreKey)) if err != nil { return nil, err } tokens := &inMessageTokenValue{} tokensBytes := 
bytes.NewBuffer(stored) if err := json.NewDecoder(tokensBytes).Decode(tokens); err != nil { return nil, err } expired := isTokenExpired(tokens.AppToken) if expired { return nil, errors.New("App token is expired") } return tokens, nil } func (h *hub) storeTokens(tokens *inMessageTokenValue) error { tokensBytes := new(bytes.Buffer) if err := json.NewEncoder(tokensBytes).Encode(tokens); err != nil { return err } if err := h.st.Set([]byte(tokensStoreKey), tokensBytes.Bytes()); err != nil { return err } return nil } // Removes the stored tokens func (h *hub) ClearCache() error { return h.st.Remove([]byte(tokensStoreKey)) } func (h *hub) getTokensThroughChallenge(ctx context.Context) (*inMessageTokenValue, error) { log.Debug("Token Challenge: Connecting through websocket") conn, err := websocket.Dial(h.cfg.GetString(config.SpaceServicesHubAuthURL, ""), "", "http://localhost/") if err != nil { return nil, err } defer conn.Close() log.Debug("Token Challenge: Connected") privateKey, _, err := h.kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return nil, err } identity := thread.NewLibp2pIdentity(privateKey) pub := identity.GetPublic().String() // Request a challenge (a payload we need to sign) log.Debug("Token Challenge: Sending token request with pub key" + pub) tokenRequest := &outMessage{ Action: "token", Data: sentMessageData{ PublicKey: identity.GetPublic().String(), }, } err = websocket.JSON.Send(conn, tokenRequest) if err != nil { return nil, err } challenge := inMessageChallenge{} if err := websocket.JSON.Receive(conn, &challenge); err != nil { return nil, err } log.Debug("Token Challenge: Received challenge") solution, err := identity.Sign(ctx, challenge.Value.Data) if err != nil { return nil, err } signature := b64.StdEncoding.EncodeToString(solution) // Send back channel solution solMessage := &outMessage{ Action: "challenge", Data: sentMessageData{ Signature: signature, PublicKey: pub, }, } log.Debug("Token Challenge: Sending signature") err = 
websocket.JSON.Send(conn, solMessage) if err != nil { return nil, err } // Receive the token var token inMessageToken for token.Type != "token" { currToken := inMessageToken{} if err := websocket.JSON.Receive(conn, &token); err != nil { return nil, err } if currToken.Type == "token" { token = currToken } } if token.Type == "token" { log.Debug("Token Challenge: Received token successfully") return &token.Value, nil } return nil, errors.New("Did not receive a correct token challenge response") } func (h *hub) GetTokensWithCache(ctx context.Context) (*AuthTokens, error) { h.fetchTokensMutex.Lock() defer h.fetchTokensMutex.Unlock() if tokensInStore, _ := h.retrieveTokens(); tokensInStore != nil { return &AuthTokens{ HubToken: tokensInStore.Token, AppToken: tokensInStore.AppToken, Key: tokensInStore.Key, Sig: tokensInStore.Sig, Msg: tokensInStore.Msg, }, nil } tokens, err := h.getTokensThroughChallenge(ctx) if err != nil { return nil, err } if err := h.storeTokens(tokens); err != nil { return nil, err } return &AuthTokens{ HubToken: tokens.Token, AppToken: tokens.AppToken, Key: tokens.Key, Sig: tokens.Sig, Msg: tokens.Msg, }, nil } func (h *hub) GetHubContext(ctx context.Context) (context.Context, error) { tokens, err := h.GetTokensWithCache(ctx) if err != nil { return nil, err } _, sig, err := mbase.Decode(tokens.Sig) if err != nil { return nil, err } ctx = common.NewAPIKeyContext(ctx, tokens.Key) ctx = common.NewAPISigContext(ctx, tokens.Msg, sig) tok := thread.Token(tokens.HubToken) ctx = thread.NewTokenContext(ctx, tok) return ctx, nil } ================================================ FILE: core/textile/hub/hub_auth_test.go ================================================ package hub import ( "io/ioutil" "testing" "time" "github.com/dgrijalva/jwt-go" "github.com/stretchr/testify/assert" ) var hmacTestKey, _ = ioutil.ReadFile("hmacTestKey") func TestHubAuth_isTokenExpiredTrue(t *testing.T) { token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ "exp": 
time.Now().AddDate(0, 0, -1).Unix(), "iat": time.Now().Unix(), }) tokenStr, _ := token.SignedString(hmacTestKey) exp := isTokenExpired(tokenStr) assert.Equal(t, true, exp) } func TestHubAuth_isTokenExpiredFalse(t *testing.T) { token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ "exp": time.Now().AddDate(0, 0, 1).Unix(), "iat": time.Now().Unix(), }) tokenStr, _ := token.SignedString(hmacTestKey) exp := isTokenExpired(tokenStr) assert.Equal(t, false, exp) } ================================================ FILE: core/textile/listener.go ================================================ package textile import ( "context" "errors" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/textileio/go-threads/api/client" threadsClient "github.com/textileio/go-threads/api/client" ) func (tc *textileClient) Listen(ctx context.Context, dbID, threadName string) (<-chan threadsClient.ListenEvent, error) { db, err := utils.ParseDbIDFromString(dbID) if err != nil { return nil, err } newCtx, err := utils.GetThreadContext(ctx, "", *db, true, tc.kc, tc.hubAuth, tc.ht) if err != nil { return nil, err } return tc.ht.Listen(newCtx, *db, nil) } func (tc *textileClient) addListener(ctx context.Context, bucketSlug string) error { if err := tc.requiresHubConnection(); err != nil { return err } handler := newRestorerListenerHandler(tc.sync, tc.store, tc.ipfsClient) handlers := []EventHandler{handler} listener := NewListener(tc, bucketSlug, handlers) tc.dbListeners[bucketSlug] = listener go func() { err := listener.Listen(ctx) if err != nil { // Remove element from map as it's not listening anymore delete(tc.dbListeners, bucketSlug) } }() return nil } func (tc *textileClient) DeleteListeners(ctx context.Context) { for k, _ := range tc.dbListeners { delete(tc.dbListeners, k) } } func (tc *textileClient) initializeListeners(ctx context.Context) error { if err := tc.requiresHubConnection(); err != nil { return err } tc.closeListeners() buckets, err := tc.listBuckets(ctx) 
if err != nil { return err } for _, bucket := range buckets { tc.addListener(ctx, bucket.Slug()) } return nil } func (tc *textileClient) closeListeners() { for key, listener := range tc.dbListeners { listener.Close() delete(tc.dbListeners, key) } } type listener struct { client Client bucketSlug string handlers []EventHandler shutdown chan bool isRunning bool } func NewListener(client Client, bucketSlug string, handlers []EventHandler) *listener { return &listener{ client: client, bucketSlug: bucketSlug, handlers: handlers, shutdown: make(chan bool), isRunning: false, } } func (l *listener) Listen(ctx context.Context) error { bucketSchema, err := l.client.GetModel().FindBucket(ctx, l.bucketSlug) if bucketSchema == nil || bucketSchema.RemoteDbID == "" { return errors.New("Bucket does not have a linked mirror bucket") } bucket, err := l.client.GetBucket(ctx, l.bucketSlug, nil) if err != nil { return err } bucketData := bucket.GetData() eventChan, err := l.client.Listen(ctx, bucketSchema.RemoteDbID, bucketSchema.RemoteBucketSlug) if err != nil { return err } l.isRunning = true defer func() { l.isRunning = false }() Loop: for { select { case ev := <-eventChan: if ev.Err != nil { return ev.Err } if !l.client.IsRunning() { return nil } for _, handler := range l.handlers { switch ev.Action.Type { case client.ActionCreate: handler.OnCreate(&bucketData, &ev) case client.ActionSave: handler.OnSave(&bucketData, &ev) case client.ActionDelete: handler.OnRemove(&bucketData, &ev) } } case <-l.shutdown: break Loop } } return nil } func (l *listener) Close() { if !l.isRunning { return } l.shutdown <- true } ================================================ FILE: core/textile/mailbox.go ================================================ package textile import ( "context" "encoding/json" "errors" "os" "os/user" "path/filepath" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/textile/model" 
"github.com/FleekHQ/space-daemon/log"
	crypto "github.com/libp2p/go-libp2p-crypto"
	"github.com/textileio/go-threads/core/thread"
	"github.com/textileio/textile/v2/api/usersd/client"
	"github.com/textileio/textile/v2/cmd"
	mail "github.com/textileio/textile/v2/mail/local"
)

// GrpcMailboxNotifier pushes mailbox notifications out over gRPC.
type GrpcMailboxNotifier interface {
	SendNotificationEvent(notif *domain.Notification)
}

const mailboxSetupFlagStoreKey = "mailboxSetupFlag"

type UsersClient interface {
	ListInboxMessages(ctx context.Context, opts ...client.ListOption) ([]client.Message, error)
	SendMessage(ctx context.Context, from thread.Identity, to thread.PubKey, body []byte) (msg client.Message, err error)
	SetupMailbox(ctx context.Context) (mailbox thread.ID, err error)
}

type Mailbox interface {
	ListInboxMessages(ctx context.Context, opts ...client.ListOption) ([]client.Message, error)
	SendMessage(ctx context.Context, to thread.PubKey, body []byte) (msg client.Message, err error)
	WatchInbox(ctx context.Context, mevents chan<- mail.MailboxEvent, offline bool) (<-chan cmd.WatchState, error)
	Identity() thread.Identity
}

// parseMessage decrypts raw mailbox messages and maps them into domain
// notifications, enriching invitations with their accepted/rejected state.
func (tc *textileClient) parseMessage(ctx context.Context, msgs []client.Message) ([]*domain.Notification, error) {
	ns := make([]*domain.Notification, 0)

	ids := []string{}
	for _, n := range msgs {
		ids = append(ids, n.ID)
	}

	fileschemas, err := tc.GetModel().FindReceivedFilesByIds(ctx, ids)
	if err != nil {
		return nil, err
	}

	// make map so we dont have to iterate each time
	fsmap := make(map[string]*model.ReceivedFileSchema)
	for _, fs := range fileschemas {
		fsmap[fs.InvitationId] = fs
	}

	for _, msg := range msgs {
		p, err := msg.Open(ctx, tc.mb.Identity())
		if err != nil {
			return nil, err
		}

		b := &domain.MessageBody{}
		err = json.Unmarshal(p, b)
		if err != nil {
			log.Error("Error parsing message into MessageBody type", err)
			// returning generic notification since body was not able to be parsed
			n := &domain.Notification{
				ID:        msg.ID,
				Body:      string(p),
				CreatedAt: msg.CreatedAt.Unix(),
				ReadAt:    msg.ReadAt.Unix(),
			}
			ns = append(ns, n)
			continue
		}

		n := &domain.Notification{
			ID:               msg.ID,
			Body:             string(p),
			NotificationType: (*b).Type,
			CreatedAt:        msg.CreatedAt.Unix(),
			ReadAt:           msg.ReadAt.Unix(),
		}

		switch (*b).Type {
		case domain.INVITATION:
			i := &domain.Invitation{}
			err := json.Unmarshal((*b).Body, i)
			if err != nil {
				return nil, err
			}

			if fsmap[msg.ID] == nil {
				i.Status = domain.PENDING
			} else {
				if fsmap[msg.ID].Accepted {
					i.Status = domain.ACCEPTED
				} else {
					i.Status = domain.REJECTED
				}
			}

			i.InvitationID = msg.ID
			n.InvitationValue = *i
			n.RelatedObject = *i
		case domain.USAGEALERT:
			u := &domain.UsageAlert{}
			err := json.Unmarshal((*b).Body, u)
			if err != nil {
				return nil, err
			}
			n.UsageAlertValue = *u
			n.RelatedObject = *u
		case domain.REVOKED_INVITATION:
			invite := domain.RevokedInvitation{}
			if err := json.Unmarshal((*b).Body, &invite); err != nil {
				return nil, err
			}

			// NOTE: current, this would run every time this notification is fetched
			// we can further optimize this later to prevent unnecessary calls, but for now
			// it would run asynchronously.
			go func() {
				// FIX: use a goroutine-local err instead of assigning to the
				// enclosing loop's err (a data race with later iterations).
				if err := tc.GetModel().DeleteReceivedFiles(ctx, invite.ItemPaths, invite.Keys); err != nil {
					log.Error("Failed to delete revoked files", err)
				}
			}()

			n.RevokedInvitationValue = invite
			n.RelatedObject = invite
		default:
		}

		ns = append(ns, n)
	}

	return ns, nil
}

// SendMessage sends an arbitrary payload to another user's mailbox.
func (tc *textileClient) SendMessage(ctx context.Context, recipient crypto.PubKey, body []byte) (*client.Message, error) {
	if err := tc.requiresHubConnection(); err != nil {
		return nil, err
	}

	var err error
	ctx, err = tc.getHubCtx(ctx)
	if err != nil {
		return nil, err
	}

	msg, err := tc.mb.SendMessage(ctx, thread.NewLibp2pPubKey(recipient), body)
	if err != nil {
		return nil, err
	}

	return &msg, nil
}

// GetMailAsNotifications pages through the inbox and maps each message into a
// domain notification.
func (tc *textileClient) GetMailAsNotifications(ctx context.Context, seek string, limit int) ([]*domain.Notification, error) {
	if err := tc.requiresHubConnection(); err != nil {
		return nil, err
	}

	var err error
	ctx, err = tc.getHubCtx(ctx)
	if err != nil {
		return nil, err
	}

	notifs, err := tc.mb.ListInboxMessages(ctx, client.WithSeek(seek), client.WithLimit(limit))
	if err != nil {
		return nil, err
	}

	ns, err := tc.parseMessage(ctx, notifs)
	if err != nil {
		return nil, err
	}

	return ns, nil
}

type handleMessage func(context.Context, interface{}) error

// listenForMessages processes incoming mailbox events in a background goroutine.
func (tc *textileClient) listenForMessages(ctx context.Context) error {
	if tc.mbNotifier == nil {
		return errors.New("no mailbox notifier, run AttachMailboxNotifier first")
	}
	log.Info("Starting to listen for mailbox messages")
	var err error
	ctx, err = tc.getHubCtx(ctx)
	if err != nil {
		return err
	}

	// Handle mailbox events as they arrive
	go func() {
		for e := range tc.mailEvents {
			switch e.Type {
			case mail.NewMessage:
				// handle new message
				log.Info("Received mail: " + e.MessageID.String())
				// need to fetch the message again because the event
				// payload doesn't have the full deets, will remove
				// once its fixed on txl end
				msg, err := tc.mb.ListInboxMessages(ctx, client.WithSeek(e.MessageID.String()), client.WithLimit(1))
				if err != nil {
					return
				}
				p, err := tc.parseMessage(ctx,
msg) if err != nil { log.Error("Unable to parse incoming message: ", err) } tc.mbNotifier.SendNotificationEvent(p[0]) case mail.MessageRead: // handle message read (inbox only) case mail.MessageDeleted: // handle message deleted } } }() // Start watching (the third param indicates we want to keep watching when offline) go func() { state, err := tc.mb.WatchInbox(ctx, tc.mailEvents, true) if err != nil { log.Error("Unable to watch mailbox, ", err) return } // TODO: handle connectivity state if needed for s := range state { log.Info("received inbox watch state: " + s.State.String()) } }() return nil } // Attachs a handler for mailbox notification events func (tc *textileClient) AttachMailboxNotifier(notif GrpcMailboxNotifier) { tc.mbNotifier = notif } func (tc *textileClient) createMailBox(ctx context.Context, maillib *mail.Mail, mbpath string) (*mail.Mailbox, error) { // create priv, _, err := tc.kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return nil, err } id := thread.NewLibp2pIdentity(priv) mailbox, err := maillib.NewMailbox(ctx, mail.Config{ Path: mbpath, Identity: id, APIKey: tc.cfg.GetString(config.TextileUserKey, ""), APISecret: tc.cfg.GetString(config.TextileUserSecret, ""), }) if err != nil { return nil, err } tc.store.Set([]byte(mailboxSetupFlagStoreKey), []byte("true")) return mailbox, nil } func (tc *textileClient) getMailboxPath() string { usr, _ := user.Current() mbpath := filepath.Join( tc.cfg.GetString(config.SpaceStorePath, filepath.Join(usr.HomeDir, ".fleek-space/textile/mail")), "textile", "mail", ) return mbpath } func (tc *textileClient) setupOrCreateMailBox(ctx context.Context) (*mail.Mailbox, error) { maillib := mail.NewMail(cmd.NewClients(tc.cfg.GetString(config.TextileHubTarget, ""), true), mail.DefaultConfConfig()) mbpath := tc.getMailboxPath() var mailbox *mail.Mailbox dbid, err := tc.store.Get([]byte(mailboxSetupFlagStoreKey)) if err == nil && len(dbid) > 0 { // restore mailbox, err = maillib.GetLocalMailbox(ctx, mbpath) if err != 
nil { return nil, err } } else { mailbox, err = tc.createMailBox(ctx, maillib, mbpath) if err != nil { return nil, err } } mid := mailbox.Identity() log.Info("Mailbox identity: " + mid.GetPublic().String()) return mailbox, nil } func (tc *textileClient) clearLocalMailbox() error { mbpath := tc.getMailboxPath() return os.RemoveAll(mbpath) } ================================================ FILE: core/textile/mailbox_test.go ================================================ package textile_test import ( "encoding/hex" "testing" tc "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/mocks" crypto "github.com/libp2p/go-libp2p-crypto" ) var ( cfg *mocks.Config st *mocks.Store mockUc *mocks.UsersClient mockKc *mocks.Keychain mockPubKey crypto.PubKey mockPrivKey crypto.PrivKey mockMb *mocks.Mailbox mockHubAuth *mocks.HubAuth mockSearch *mocks.FilesSearchEngine ) type TearDown func() func initTestMailbox(t *testing.T) (tc.Client, TearDown) { st = new(mocks.Store) mockKc = new(mocks.Keychain) mockHubAuth = new(mocks.HubAuth) mockUc = new(mocks.UsersClient) mockMb = new(mocks.Mailbox) mockSearch = new(mocks.FilesSearchEngine) client := tc.NewClient(st, mockKc, mockHubAuth, mockUc, mockMb, mockSearch) mockPubKeyHex := "67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9" mockPrivKeyHex := "dd55f8921f90fdf31c6ef9ad86bd90605602fd7d32dc8ea66ab72deb6a82821c67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9" pubKeyBytes, _ := hex.DecodeString(mockPubKeyHex) privKeyBytes, _ := hex.DecodeString(mockPrivKeyHex) mockPubKey, _ = crypto.UnmarshalEd25519PublicKey(pubKeyBytes) mockPrivKey, _ = crypto.UnmarshalEd25519PrivateKey(privKeyBytes) tearDown := func() { st = nil client = nil mockUc = nil mockKc = nil } return client, tearDown } func TestSendMessage(t *testing.T) { // tc, tearDown := initTestMailbox(t) // defer tearDown() // assert.NotNil(t, tc) // _, rp, _ := crypto.GenerateEd25519Key(nil) // mockKc.On( // 
"GetStoredKeyPairInLibP2PFormat", // ).Return(mockPrivKey, mockPubKey, nil) // msg := uc.Message{ // ID: "testid", // } // mockMb.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(msg, nil) // mockHubAuth.On("GetHubContext", mock.Anything).Return(context.Background(), nil) // body := "mockbody" // rmsg, err := tc.SendMessage(context.Background(), rp, []byte(body)) // assert.NotNil(t, rmsg) // assert.Nil(t, err) // mockMb.AssertCalled(t, "SendMessage", context.Background(), thread.NewLibp2pPubKey(rp), []byte(body)) // assert.Equal(t, msg.ID, rmsg.ID) } // func TestSendMessageFailGettingSenderKey(t *testing.T) { // tc, tearDown := initTestMailbox(t) // defer tearDown() // assert.NotNil(t, tc) // _, rp, _ := crypto.GenerateEd25519Key(nil) // mockKc.On( // "GetStoredKeyPairInLibP2PFormat", // ).Return(nil, nil, keychain.ErrKeyPairNotFound) // msg := uc.Message{ // ID: "testid", // } // mockHubAuth.On("GetHubContext", mock.Anything).Return(context.Background(), nil) // mockMb.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(msg, nil) // body := "mockbody" // rmsg, err := tc.SendMessage(context.Background(), rp, []byte(body)) // assert.Nil(t, rmsg) // assert.NotNil(t, err) // assert.Equal(t, keychain.ErrKeyPairNotFound, err) // mockMb.AssertNotCalled(t, "SendMessage", mock.Anything, mock.Anything, mock.Anything) // } func TestSendMessageFailureOnHub(t *testing.T) { // tc, tearDown := initTestMailbox(t) // defer tearDown() // assert.NotNil(t, tc) // _, rp, _ := crypto.GenerateEd25519Key(nil) // errToRet := errors.New("failed sending message at the hub") // mockKc.On( // "GetStoredKeyPairInLibP2PFormat", // ).Return(mockPrivKey, mockPubKey, nil) // msg := uc.Message{} // mockMb.On("SendMessage", mock.Anything, mock.Anything, mock.Anything).Return(msg, errToRet) // mockHubAuth.On("GetHubContext", mock.Anything).Return(context.Background(), nil) // body := "mockbody" // rmsg, err := tc.SendMessage(context.Background(), rp, 
[]byte(body)) // assert.Nil(t, rmsg) // assert.NotNil(t, err) // assert.Equal(t, errToRet, err) } ================================================ FILE: core/textile/mirror.go ================================================ package textile import ( "context" "fmt" "github.com/FleekHQ/space-daemon/log" ) const mirrorThreadKeyName = "mirrorV1" func (tc *textileClient) IsMirrorFile(ctx context.Context, path, bucketSlug string) bool { mirrorFile, _ := tc.GetModel().FindMirrorFileByPathAndBucketSlug(ctx, path, bucketSlug) if mirrorFile != nil { return true } return false } // set mirror file as backup // return true if mirror file is a backup func (tc *textileClient) isMirrorBackupFile(ctx context.Context, path, bucketSlug string) bool { mf, err := tc.GetModel().FindMirrorFileByPathAndBucketSlug(ctx, path, bucketSlug) if err != nil { log.Error(fmt.Sprintf("Error checking if path=%+v bucketSlug=%+v is a mirror backup file", path, bucketSlug), err) return false } if mf == nil { log.Warn(fmt.Sprintf("mirror file (path=%+v bucketSlug=%+v) does not exist", path, bucketSlug)) return false } return mf.Backup == true } ================================================ FILE: core/textile/model/buckets.go ================================================ package model import ( "context" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/log" "github.com/pkg/errors" "github.com/textileio/go-threads/api/client" core "github.com/textileio/go-threads/core/db" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" "github.com/textileio/go-threads/util" ) type BucketSchema struct { ID core.InstanceID `json:"_id"` Slug string `json:"slug"` Backup bool `json:"backup"` EncryptionKey []byte `json:"hub_key"` DbID string *MirrorBucketSchema } const bucketModelName = "BucketMetadata" // 32 bytes aes key + 16 bytes salt/IV + 32 bytes HMAC key const BucketEncryptionKeyLength = 32 + 16 + 32 var errBucketNotFound = errors.New("Bucket 
not found")

// CreateBucket stores a new BucketMetadata record for bucketSlug bound to the
// given threadDB dbID. If a record for the slug already exists it is returned
// unchanged. A fresh random encryption key (AES key + salt/IV + HMAC key, see
// BucketEncryptionKeyLength) is generated, Backup defaults to true, and the
// mirror fields start out empty.
func (m *model) CreateBucket(ctx context.Context, bucketSlug, dbID string) (*BucketSchema, error) {
	log.Debug("Model.CreateBucket: Storing bucket " + bucketSlug)
	// Idempotency: a nil error from FindBucket means the bucket already exists.
	if existingBucket, err := m.FindBucket(ctx, bucketSlug); err == nil {
		log.Debug("Model.CreateBucket: Bucket already in collection")
		return existingBucket, nil
	}
	log.Debug("Model.CreateBucket: Initializing db")
	metaCtx, metaDbID, err := m.initBucketModel(ctx)
	// NOTE(review): initBucketModel returns a nil dbID whenever err != nil, so
	// this `&&` behaves like a plain error check; FindBucket uses `||` for the
	// same pattern — consider unifying the two styles.
	if err != nil && metaDbID == nil {
		return nil, err
	}
	bucketEncryptionKey, err := utils.RandBytes(BucketEncryptionKeyLength)
	if err != nil {
		return nil, errors.Wrap(err, "failed to generate bucket encryption key")
	}
	newInstance := &BucketSchema{
		Slug:          bucketSlug,
		ID:            "",
		DbID:          dbID,
		Backup:        true,
		EncryptionKey: bucketEncryptionKey,
		MirrorBucketSchema: &MirrorBucketSchema{
			HubAddr:          "",
			RemoteBucketKey:  "",
			RemoteDbID:       "",
			RemoteBucketSlug: "",
		},
	}
	instances := client.Instances{newInstance}
	log.Debug("Model.CreateBucket: Creating instance")
	res, err := m.threads.Create(metaCtx, *metaDbID, bucketModelName, instances)
	if err != nil {
		return nil, err
	}
	log.Debug("Model.CreateBucket: stored bucket with dbid " + newInstance.DbID)
	id := res[0]
	// Return a copy populated with the instance ID assigned by threadDB.
	// NOTE(review): EncryptionKey is not copied into the returned schema —
	// confirm whether callers rely on it being present here.
	return &BucketSchema{
		Slug:   newInstance.Slug,
		ID:     core.InstanceID(id),
		DbID:   newInstance.DbID,
		Backup: newInstance.Backup,
		MirrorBucketSchema: &MirrorBucketSchema{
			HubAddr:          newInstance.MirrorBucketSchema.HubAddr,
			RemoteBucketKey:  newInstance.MirrorBucketSchema.RemoteBucketKey,
			RemoteDbID:       newInstance.MirrorBucketSchema.RemoteDbID,
			RemoteBucketSlug: newInstance.MirrorBucketSchema.RemoteBucketSlug,
		},
	}, nil
}

// UpsertBucket updates the DbID of an existing bucket record, or creates the
// record when none exists for bucketSlug.
func (m *model) UpsertBucket(ctx context.Context, bucketSlug, dbID string) (*BucketSchema, error) {
	metaCtx, metaDbID, err := m.initBucketModel(ctx)
	if err != nil && metaDbID == nil {
		return nil, err
	}
	if existingBucket, err := m.FindBucket(ctx, bucketSlug); err == nil {
		existingBucket.DbID = dbID
		instances := client.Instances{existingBucket}
		// NOTE(review): the Save error is discarded, so a failed update is
		// reported as success — confirm whether that is intentional.
		m.threads.Save(metaCtx, *metaDbID, bucketModelName, instances)
return existingBucket, nil } return m.CreateBucket(ctx, bucketSlug, dbID) } func (m *model) BucketBackupToggle(ctx context.Context, bucketSlug string, backup bool) (*BucketSchema, error) { metaCtx, metaDbID, err := m.initBucketModel(ctx) if err != nil && metaDbID == nil { return nil, err } bucket, err := m.FindBucket(ctx, bucketSlug) if err != nil { return nil, err } bucket.Backup = backup instances := client.Instances{bucket} err = m.threads.Save(metaCtx, *metaDbID, bucketModelName, instances) if err != nil { return nil, err } return bucket, nil } func (m *model) FindBucket(ctx context.Context, bucketSlug string) (*BucketSchema, error) { metaCtx, dbID, err := m.initBucketModel(ctx) if err != nil || dbID == nil { return nil, err } rawBuckets, err := m.threads.Find(metaCtx, *dbID, bucketModelName, db.Where("slug").Eq(bucketSlug), &BucketSchema{}) if err != nil { return nil, err } if rawBuckets == nil { return nil, errBucketNotFound } buckets := rawBuckets.([]*BucketSchema) if len(buckets) == 0 { return nil, errBucketNotFound } return buckets[0], nil } func (m *model) ListBuckets(ctx context.Context) ([]*BucketSchema, error) { metaCtx, dbID, err := m.initBucketModel(ctx) if err != nil && dbID == nil { return nil, err } rawBuckets, err := m.threads.Find(metaCtx, *dbID, bucketModelName, &db.Query{}, &BucketSchema{}) if rawBuckets == nil { return []*BucketSchema{}, nil } buckets := rawBuckets.([]*BucketSchema) return buckets, nil } func (m *model) initBucketModel(ctx context.Context) (context.Context, *thread.ID, error) { metaCtx, dbID, err := m.getMetaThreadContext(ctx) if err != nil { return nil, nil, err } m.threads.NewCollection(metaCtx, *dbID, GetBucketCollectionConfig()) return metaCtx, dbID, nil } func GetBucketCollectionConfig() db.CollectionConfig { return db.CollectionConfig{ Name: bucketModelName, Schema: util.SchemaFromInstance(&BucketSchema{}, false), Indexes: []db.Index{{ Path: "slug", Unique: true, }}, } } ================================================ 
FILE: core/textile/model/mirror_file.go ================================================ package model import ( "context" "errors" "fmt" "path/filepath" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/log" "github.com/textileio/go-threads/api/client" core "github.com/textileio/go-threads/core/db" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" "github.com/textileio/go-threads/util" ) type MirrorFileSchema struct { ID core.InstanceID `json:"_id"` Path string `json:"path"` BucketSlug string `json:"bucket_slug"` Backup bool `json:"backup"` Shared bool `json:"shared"` BackupInProgress bool `json:"backupInProgress"` RestoreInProgress bool `json:"restoreInProgress"` DbID string } type MirrorBucketSchema struct { RemoteDbID string `json:"remoteDbId"` RemoteBucketKey string `json:"remoteBucketKey"` HubAddr string `json:"HubAddr"` RemoteBucketSlug string `json:"remoteBucketSlug"` } const mirrorFileModelName = "MirrorFile" var errMirrorFileNotFound = errors.New("Mirror file not found") var errMirrorFileAlreadyExists = errors.New("Mirror file already exists") func (m *model) CreateMirrorBucket(ctx context.Context, bucketSlug string, mirrorBucket *MirrorBucketSchema) (*BucketSchema, error) { metaCtx, metaDbID, err := m.initBucketModel(ctx) if err != nil && metaDbID == nil { return nil, err } bucket, err := m.FindBucket(ctx, bucketSlug) if err != nil { return nil, err } bucket.RemoteDbID = mirrorBucket.RemoteDbID bucket.HubAddr = mirrorBucket.HubAddr bucket.RemoteBucketKey = mirrorBucket.RemoteBucketKey bucket.RemoteBucketSlug = mirrorBucket.RemoteBucketSlug instances := client.Instances{bucket} err = m.threads.Save(metaCtx, *metaDbID, bucketModelName, instances) if err != nil { return nil, err } return bucket, nil } func (m *model) FindMirrorFileByPaths(ctx context.Context, paths []string) (map[string]*MirrorFileSchema, error) { metaCtx, dbID, err := m.initMirrorFileModel(ctx) if err != nil || dbID == nil 
{ return nil, err } var qry *db.Query for i, path := range paths { if i == 0 { qry = db.Where("path").Eq(filepath.Clean(path)) } else { qry = qry.Or(db.Where("path").Eq(filepath.Clean(path))) } } rawMirrorFiles, err := m.threads.Find(metaCtx, *dbID, mirrorFileModelName, qry, &MirrorFileSchema{}) if err != nil { return nil, err } if rawMirrorFiles == nil { return nil, nil } mirror_files := rawMirrorFiles.([]*MirrorFileSchema) if len(mirror_files) == 0 { return nil, nil } mirror_map := make(map[string]*MirrorFileSchema) for _, mirror_file := range mirror_files { mirror_map[mirror_file.Path] = mirror_file } return mirror_map, nil } // Finds the metadata of a file that has been shared to the user func (m *model) FindMirrorFileByPathAndBucketSlug(ctx context.Context, path, bucketSlug string) (*MirrorFileSchema, error) { metaCtx, dbID, err := m.initMirrorFileModel(ctx) if err != nil || dbID == nil { return nil, err } rawMirrorFiles, err := m.threads.Find(metaCtx, *dbID, mirrorFileModelName, db.Where("path").Eq(path), &MirrorFileSchema{}) if err != nil { return nil, err } if rawMirrorFiles == nil { return nil, nil } mirror_files := rawMirrorFiles.([]*MirrorFileSchema) if len(mirror_files) == 0 { return nil, nil } log.Debug("Model.FindMirrorFileByPathAndBucketSlug: returning mirror file with dbid " + mirror_files[0].DbID) return mirror_files[0], nil } // create a new mirror file func (m *model) CreateMirrorFile(ctx context.Context, mirrorFile *domain.MirrorFile) (*MirrorFileSchema, error) { metaCtx, metaDbID, err := m.initMirrorFileModel(ctx) if err != nil && metaDbID == nil { return nil, err } mf, err := m.FindMirrorFileByPathAndBucketSlug(ctx, mirrorFile.Path, mirrorFile.BucketSlug) if err != nil { return nil, err } if mf != nil { return nil, errMirrorFileAlreadyExists } newInstance := &MirrorFileSchema{ Path: mirrorFile.Path, BucketSlug: mirrorFile.BucketSlug, Backup: mirrorFile.Backup, BackupInProgress: mirrorFile.BackupInProgress, RestoreInProgress: 
mirrorFile.RestoreInProgress, Shared: mirrorFile.Shared, } instances := client.Instances{newInstance} res, err := m.threads.Create(metaCtx, *metaDbID, mirrorFileModelName, instances) if err != nil { return nil, err } id := res[0] return &MirrorFileSchema{ Path: newInstance.Path, BucketSlug: newInstance.BucketSlug, Backup: newInstance.Backup, BackupInProgress: newInstance.BackupInProgress, RestoreInProgress: newInstance.RestoreInProgress, Shared: newInstance.Shared, ID: core.InstanceID(id), DbID: newInstance.DbID, }, nil } // update existing mirror file func (m *model) UpdateMirrorFile(ctx context.Context, mirrorFile *MirrorFileSchema) (*MirrorFileSchema, error) { metaCtx, metaDbID, err := m.initMirrorFileModel(ctx) if err != nil && metaDbID == nil { return nil, err } mf, err := m.FindMirrorFileByPathAndBucketSlug(ctx, mirrorFile.Path, mirrorFile.BucketSlug) if err != nil { return nil, err } if mf == nil { return nil, errMirrorFileNotFound } existingInstance := mirrorFile instances := client.Instances{existingInstance} err = m.threads.Save(metaCtx, *metaDbID, mirrorFileModelName, instances) if err != nil { return nil, err } log.Debug(fmt.Sprintf("saved mirror file (%+v)", mirrorFile)) return mf, nil } func (m *model) initMirrorFileModel(ctx context.Context) (context.Context, *thread.ID, error) { metaCtx, dbID, err := m.getMetaThreadContext(ctx) if err != nil { return nil, nil, err } m.threads.NewCollection(metaCtx, *dbID, GetMirrorFileCollectionConfig()) // Migrates db by adding new fields between old version of the daemon and a new one m.threads.UpdateCollection(metaCtx, *dbID, db.CollectionConfig{ Name: mirrorFileModelName, Schema: util.SchemaFromInstance(&MirrorFileSchema{}, false), }) return metaCtx, dbID, nil } func GetMirrorFileCollectionConfig() db.CollectionConfig { return db.CollectionConfig{ Name: mirrorFileModelName, Schema: util.SchemaFromInstance(&MirrorFileSchema{}, false), Indexes: []db.Index{{ Path: "path", Unique: true, // TODO: multicolumn index 
}}, } } ================================================ FILE: core/textile/model/model.go ================================================ package model import ( "context" "github.com/FleekHQ/space-daemon/core/search" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/core/textile/utils" threadsClient "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" nc "github.com/textileio/go-threads/net/api/client" ) const metaThreadName = "metathreadV1" type model struct { st store.Store kc keychain.Keychain threads *threadsClient.Client hubAuth hub.HubAuth cfg config.Config netc *nc.Client hnetc *nc.Client ht *threadsClient.Client shouldForceRestore bool fsearch search.FilesSearchEngine } type Model interface { CreateBucket(ctx context.Context, bucketSlug, dbID string) (*BucketSchema, error) UpsertBucket(ctx context.Context, bucketSlug, dbID string) (*BucketSchema, error) BucketBackupToggle(ctx context.Context, bucketSlug string, backup bool) (*BucketSchema, error) FindBucket(ctx context.Context, bucketSlug string) (*BucketSchema, error) ListBuckets(ctx context.Context) ([]*BucketSchema, error) CreateReceivedFileViaInvitation( ctx context.Context, file domain.FullPath, invitationId string, accepted bool, key []byte, sharedBy string, ) (*ReceivedFileSchema, error) CreateReceivedFileViaPublicLink( ctx context.Context, ipfsHash string, password string, filename string, filesize string, accepted bool, ) (*ReceivedFileSchema, error) CreateSentFileViaInvitation( ctx context.Context, file domain.FullPath, invitationId string, key []byte, ) (*SentFileSchema, error) FindReceivedFile(ctx context.Context, remoteDbID, bucket, path string) (*ReceivedFileSchema, error) 
FindPublicLinkReceivedFile(ctx context.Context, ipfsHash string) (*ReceivedFileSchema, error) FindSentFile(ctx context.Context, remoteDbID, bucket, path string) (*SentFileSchema, error) CreateSharedPublicKey(ctx context.Context, pubKey string) (*SharedPublicKeySchema, error) ListSharedPublicKeys(ctx context.Context) ([]*SharedPublicKeySchema, error) CreateMirrorBucket(ctx context.Context, bucketSlug string, mirrorBucket *MirrorBucketSchema) (*BucketSchema, error) FindMirrorFileByPathAndBucketSlug(ctx context.Context, path, bucketSlug string) (*MirrorFileSchema, error) CreateMirrorFile(ctx context.Context, mirrorFile *domain.MirrorFile) (*MirrorFileSchema, error) UpdateMirrorFile(ctx context.Context, mirrorFile *MirrorFileSchema) (*MirrorFileSchema, error) ListReceivedFiles(ctx context.Context, accepted bool, seek string, limit int) ([]*ReceivedFileSchema, error) ListSentFiles(ctx context.Context, seek string, limit int) ([]*SentFileSchema, error) ListReceivedPublicFiles(ctx context.Context, cidHash string, accepted bool) ([]*ReceivedFileSchema, error) DeleteReceivedFiles(ctx context.Context, paths []domain.FullPath, keys [][]byte) error FindMirrorFileByPaths(ctx context.Context, paths []string) (map[string]*MirrorFileSchema, error) FindReceivedFilesByIds(ctx context.Context, ids []string) ([]*ReceivedFileSchema, error) InitSearchIndexCollection(ctx context.Context) error UpdateSearchIndexRecord( ctx context.Context, name, path string, itemType SearchItemType, bucketSlug, dbId string, ) (*SearchIndexRecord, error) QuerySearchIndex(ctx context.Context, query string) ([]*SearchIndexRecord, error) DeleteSearchIndexRecord(ctx context.Context, name, path, bucketSlug, dbId string) error } func New( st store.Store, kc keychain.Keychain, threads *threadsClient.Client, ht *threadsClient.Client, hubAuth hub.HubAuth, cfg config.Config, netc *nc.Client, hnetc *nc.Client, shouldForceRestore bool, search search.FilesSearchEngine, ) *model { return &model{ st: st, kc: kc, threads: 
threads, hubAuth: hubAuth, cfg: cfg, netc: netc, hnetc: hnetc, ht: ht, shouldForceRestore: shouldForceRestore, fsearch: search, } } func (m *model) findOrCreateMetaThreadID(ctx context.Context) (*thread.ID, error) { return utils.FindOrCreateDeterministicThread( ctx, utils.MetathreadThreadVariant, metaThreadName, m.kc, m.st, m.threads, m.cfg, m.netc, m.hnetc, m.hubAuth, m.shouldForceRestore, GetAllCollectionConfigs(), ) } func (m *model) getMetaThreadContext(ctx context.Context) (context.Context, *thread.ID, error) { var err error var dbID *thread.ID if dbID, err = m.findOrCreateMetaThreadID(ctx); err != nil { return nil, nil, err } metathreadCtx, err := utils.GetThreadContext(ctx, metaThreadName, *dbID, false, m.kc, m.hubAuth, m.threads) if err != nil { return nil, nil, err } return metathreadCtx, dbID, nil } func GetAllCollectionConfigs() []db.CollectionConfig { return []db.CollectionConfig{ GetBucketCollectionConfig(), GetMirrorFileCollectionConfig(), GetReceivedFileCollectionConfig(), GetSentFileCollectionConfig(), GetSharedPublicKeyCollectionConfig(), } } ================================================ FILE: core/textile/model/received_file.go ================================================ package model import ( "context" "errors" "time" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/log" "github.com/textileio/go-threads/api/client" core "github.com/textileio/go-threads/core/db" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" "github.com/textileio/go-threads/util" ) type ReceivedFileViaPublicLinkSchema struct { PublicIpfsHash string `json:"publicIpfsHash"` FilePassword string `json:"filePassword"` FileName string `json:"fileName"` FileSize string `json:"fileSize"` } type ReceivedFileViaInvitationSchema struct { DbID string `json:"dbId"` Bucket string `json:"bucket"` Path string `json:"path"` InvitationId string `json:"invitationId"` BucketKey string `json:"bucketKey"` EncryptionKey 
[]byte `json:"encryptionKey"` SharedBy string `json:"sharedBy"` } // ReceivedFileSchema represents data of files shared with a user // A file is shared with a user either by direct invite to the user or through a publicly accessible link type ReceivedFileSchema struct { ID core.InstanceID `json:"_id"` Accepted bool `json:"accepted"` CreatedAt int64 `json:"created_at"` ReceivedFileViaInvitationSchema ReceivedFileViaPublicLinkSchema } func (r ReceivedFileSchema) IsPublicLinkReceived() bool { return r.InvitationId == "" } const receivedFileModelName = "ReceivedFile" var errReceivedFileNotFound = errors.New("Received file not found") // Creates the metadata for a file that has been shared to the user func (m *model) CreateReceivedFileViaInvitation( ctx context.Context, file domain.FullPath, invitationID string, accepted bool, key []byte, inviterPubKey string, ) (*ReceivedFileSchema, error) { log.Debug("Model.CreateReceivedFileViaInvitation: Storing received file", "file:"+file.Path) if existingFile, err := m.FindReceivedFile(ctx, file.DbId, file.Bucket, file.Path); err == nil { log.Debug("Model.CreateReceivedFileViaInvitation: Bucket already in collection") return existingFile, nil } newInstance := &ReceivedFileSchema{ ID: "", ReceivedFileViaInvitationSchema: ReceivedFileViaInvitationSchema{ DbID: file.DbId, Bucket: file.Bucket, Path: file.Path, InvitationId: invitationID, BucketKey: file.BucketKey, EncryptionKey: key, SharedBy: inviterPubKey, }, Accepted: accepted, CreatedAt: time.Now().UnixNano(), } return m.createReceivedFile(ctx, newInstance) } func (m *model) CreateReceivedFileViaPublicLink( ctx context.Context, ipfsHash string, password string, filename string, fileSize string, accepted bool, ) (*ReceivedFileSchema, error) { log.Debug( "Model.CreateReceivedFileViaPublicLink: Storing received file", "hash:"+ipfsHash, "filename:"+filename, ) if existingFile, err := m.FindPublicLinkReceivedFile(ctx, ipfsHash); err == nil { 
log.Debug("Model.CreateReceivedFileViaPublicLink: similar file already shared with user") return existingFile, nil } newInstance := &ReceivedFileSchema{ ReceivedFileViaPublicLinkSchema: ReceivedFileViaPublicLinkSchema{ PublicIpfsHash: ipfsHash, FilePassword: password, FileName: filename, FileSize: fileSize, }, ReceivedFileViaInvitationSchema: ReceivedFileViaInvitationSchema{ EncryptionKey: []byte(""), }, Accepted: accepted, CreatedAt: time.Now().UnixNano(), } return m.createReceivedFile(ctx, newInstance) } func (m *model) createReceivedFile(ctx context.Context, instance *ReceivedFileSchema) (*ReceivedFileSchema, error) { metaCtx, metaDbID, err := m.initReceivedFileModel(ctx) if err != nil && metaDbID == nil { return nil, err } instances := client.Instances{instance} res, err := m.threads.Create(metaCtx, *metaDbID, receivedFileModelName, instances) if err != nil { return nil, err } log.Debug("Model.createReceivedFile: stored received file", "instance_id:"+res[0]) id := res[0] return &ReceivedFileSchema{ ID: core.InstanceID(id), ReceivedFileViaInvitationSchema: instance.ReceivedFileViaInvitationSchema, ReceivedFileViaPublicLinkSchema: instance.ReceivedFileViaPublicLinkSchema, Accepted: instance.Accepted, CreatedAt: instance.CreatedAt, }, nil } func (m *model) FindReceivedFilesByIds(ctx context.Context, ids []string) ([]*ReceivedFileSchema, error) { metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return nil, err } var qry *db.Query for i, id := range ids { if i == 0 { qry = db.Where("invitationId").Eq(id) } else { qry = qry.Or(db.Where("invitationId").Eq(id)) } } fileSchemasRaw, err := m.threads.Find(metaCtx, *dbID, receivedFileModelName, qry, &ReceivedFileSchema{}) if err != nil { return nil, err } fileSchemas := fileSchemasRaw.([]*ReceivedFileSchema) return fileSchemas, nil } // Finds the metadata of a file that has been shared to the user func (m *model) FindReceivedFile(ctx context.Context, remoteDbID, bucket, path string) 
(*ReceivedFileSchema, error) { metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return nil, err } rawFiles, err := m.threads.Find(metaCtx, *dbID, receivedFileModelName, db.Where("dbId").Eq(remoteDbID).And("bucket").Eq(bucket).And("path").Eq(path), &ReceivedFileSchema{}) if err != nil { return nil, err } if rawFiles == nil { return nil, errReceivedFileNotFound } files := rawFiles.([]*ReceivedFileSchema) if len(files) == 0 { return nil, errReceivedFileNotFound } log.Debug("Model.FindReceivedFile: returning file with path " + files[0].Path) return files[0], nil } func (m *model) FindPublicLinkReceivedFile(ctx context.Context, ipfsHash string) (*ReceivedFileSchema, error) { metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return nil, err } rawFiles, err := m.threads.Find( metaCtx, *dbID, receivedFileModelName, db.Where("publicIpfsHash").Eq(ipfsHash), &ReceivedFileSchema{}, ) if err != nil { return nil, err } if rawFiles == nil { return nil, errReceivedFileNotFound } files := rawFiles.([]*ReceivedFileSchema) if len(files) == 0 { return nil, errReceivedFileNotFound } log.Debug("Model.findPublicLinkReceivedFile: returning file with hash " + files[0].PublicIpfsHash) return files[0], nil } // Lists the metadata of files received by the user // use accepted bool to look up for either accepted or rejected files // If seek == "", will start looking from the beginning. If it's an existing ID it will start looking from that ID. 
func (m *model) ListReceivedFiles(ctx context.Context, accepted bool, seek string, limit int) ([]*ReceivedFileSchema, error) { metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return nil, err } query := db.Where("accepted").Eq(accepted).LimitTo(limit) if seek != "" { query = query.SeekID(core.InstanceID(seek)) } rawFiles, err := m.threads.Find(metaCtx, *dbID, receivedFileModelName, query, &ReceivedFileSchema{}) if err != nil { return nil, err } if rawFiles == nil { return []*ReceivedFileSchema{}, nil } files := rawFiles.([]*ReceivedFileSchema) return files, nil } func (m *model) DeleteReceivedFiles(ctx context.Context, paths []domain.FullPath, keys [][]byte) error { if len(paths) == 0 { return nil } metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return err } // build find query var findQuery *db.Query for i, path := range paths { q := db.Where("dbId").Eq(path.DbId). And("bucket").Eq(path.BucketKey). And("path").Eq(path.Path). 
And("encryptionKey").Eq(keys[i]) if findQuery == nil { findQuery = q } else { findQuery = findQuery.Or(q) } } rawFiles, err := m.threads.Find(metaCtx, *dbID, receivedFileModelName, findQuery, &ReceivedFileSchema{}) if err != nil { return err } if rawFiles == nil { return nil } // extract instance ids from result files := rawFiles.([]*ReceivedFileSchema) instanceIds := make([]string, len(files)) for i, file := range files { instanceIds[i] = file.ID.String() } return m.threads.Delete(metaCtx, *dbID, receivedFileModelName, instanceIds) } func (m *model) ListReceivedPublicFiles( ctx context.Context, cidHash string, accepted bool, ) ([]*ReceivedFileSchema, error) { metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return nil, err } query := db.Where("accepted").Eq(accepted).And("publicIpfsHash").Eq(cidHash) rawFiles, err := m.threads.Find(metaCtx, *dbID, receivedFileModelName, query, &ReceivedFileSchema{}) if err != nil { return nil, err } if rawFiles == nil { return []*ReceivedFileSchema{}, nil } files := rawFiles.([]*ReceivedFileSchema) return files, nil } func (m *model) initReceivedFileModel(ctx context.Context) (context.Context, *thread.ID, error) { metaCtx, dbID, err := m.getMetaThreadContext(ctx) if err != nil { return nil, nil, err } if err := m.threads.NewCollection(metaCtx, *dbID, GetReceivedFileCollectionConfig()); err != nil { log.Debug("initReceivedFileModel: collection already exists") } return metaCtx, dbID, nil } func GetReceivedFileCollectionConfig() db.CollectionConfig { return db.CollectionConfig{ Name: receivedFileModelName, Schema: util.SchemaFromInstance(&ReceivedFileSchema{}, false), } } ================================================ FILE: core/textile/model/received_file_test.go ================================================ package model import ( "testing" "github.com/stretchr/testify/assert" ) func TestReceivedFileSchema_IsPublicLinkReceived_ShouldBeFalse_For_InvitationId(t *testing.T) { schema := 
ReceivedFileSchema{
		ReceivedFileViaInvitationSchema: ReceivedFileViaInvitationSchema{
			DbID:          "some-db-id",
			Bucket:        "personal-mirror",
			Path:          "/",
			InvitationId:  "some-invitation-id",
			BucketKey:     "",
			EncryptionKey: []byte(""),
		},
	}

	assert.False(t, schema.IsPublicLinkReceived(), "received file should not be public")
}

================================================
FILE: core/textile/model/search.go
================================================
package model

import (
	"context"
	"path"
	"strings"

	"github.com/FleekHQ/space-daemon/core/search"
	"github.com/FleekHQ/space-daemon/log"
)

// SearchItemType distinguishes the kinds of items stored in the search index.
type SearchItemType string

const (
	FileItem      SearchItemType = "FILE"
	DirectoryItem SearchItemType = "DIRECTORY"

	// DefaultSearchResultLimit caps the number of results QuerySearchIndex returns.
	DefaultSearchResultLimit int = 20
)

// SearchIndexRecord aliases the search engine's record type for use by the model.
type SearchIndexRecord search.IndexRecord

// InitSearchIndexCollection starts the underlying file-search engine.
func (m *model) InitSearchIndexCollection(ctx context.Context) error {
	log.Debug("Model.InitSearchIndexCollection: Initializing db")
	return m.fsearch.Start()
}

// UpdateSearchIndexRecord inserts an entry for the given item into the
// file-search index and returns the stored record.
func (m *model) UpdateSearchIndexRecord(
	ctx context.Context,
	name, itemPath string,
	itemType SearchItemType,
	bucketSlug, dbId string,
) (*SearchIndexRecord, error) {
	log.Debug("Model.UpdateSearchIndexRecord: Initializing db")
	if instance, err := m.fsearch.InsertFileData(ctx, &search.InsertIndexRecord{
		ItemName: name,
		// store the extension without its leading dot
		ItemExtension: strings.Replace(path.Ext(name), ".", "", -1),
		ItemPath:      itemPath,
		ItemType:      string(itemType),
		BucketSlug:    bucketSlug,
		DbId:          dbId,
	}); err != nil {
		return nil, err
	} else {
		return (*SearchIndexRecord)(instance), nil
	}
}

// QuerySearchIndex runs a free-text query against the file-search index,
// returning at most DefaultSearchResultLimit records.
func (m *model) QuerySearchIndex(ctx context.Context, query string) ([]*SearchIndexRecord, error) {
	res, err := m.fsearch.QueryFileData(ctx, query, DefaultSearchResultLimit)
	if err != nil {
		return nil, err
	}

	result := make([]*SearchIndexRecord, len(res))
	for i, item := range res {
		result[i] = (*SearchIndexRecord)(item)
	}

	return result, nil
}

// DeleteSearchIndexRecord updates the fsearch index by deleting records that match the name and path.
func (m *model) DeleteSearchIndexRecord(ctx context.Context, name, path, bucketSlug, dbId string) error {
	return m.fsearch.DeleteFileData(ctx, &search.DeleteIndexRecord{
		ItemName:   name,
		ItemPath:   path,
		BucketSlug: bucketSlug,
		DbId:       dbId,
	})
}

================================================
FILE: core/textile/model/sent_file.go
================================================
package model

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/FleekHQ/space-daemon/core/space/domain"
	"github.com/FleekHQ/space-daemon/log"
	"github.com/textileio/go-threads/api/client"
	core "github.com/textileio/go-threads/core/db"
	"github.com/textileio/go-threads/core/thread"
	"github.com/textileio/go-threads/db"
	"github.com/textileio/go-threads/util"
)

// SentFileViaInvitationSchema holds the sharing details for a file sent
// through an invitation.
type SentFileViaInvitationSchema struct {
	DbID          string `json:"dbId"`
	Bucket        string `json:"bucket"`
	Path          string `json:"path"`
	InvitationId  string `json:"invitationId"`
	BucketKey     string `json:"bucketKey"`
	EncryptionKey []byte `json:"encryptionKey"`
}

// SentFileSchema represents data of files shared by the user
type SentFileSchema struct {
	ID        core.InstanceID `json:"_id"`
	CreatedAt int64           `json:"created_at"`

	SentFileViaInvitationSchema
}

const sentFileModelName = "SentFile"

var errSentFileNotFound = errors.New("Sent file not found")

// Creates the metadata for a file that has been shared by the user
func (m *model) CreateSentFileViaInvitation(
	ctx context.Context,
	file domain.FullPath,
	invitationID string,
	key []byte,
) (*SentFileSchema, error) {
	log.Debug(fmt.Sprintf("Model.CreateSentFileViaInvitation: Storing sent file file=%+v", file))
	// Idempotency: reuse the existing entry when this file was already shared.
	if existingFile, err := m.FindSentFile(ctx, file.DbId, file.Bucket, file.Path); err == nil {
		log.Debug("Model.CreateSentFileViaInvitation: file already in the collection")
		return existingFile, nil
	}

	newInstance := &SentFileSchema{
		ID: "",
		SentFileViaInvitationSchema: SentFileViaInvitationSchema{
			DbID:         file.DbId,
			Bucket:       file.Bucket,
			Path:         file.Path,
			InvitationId: invitationID,
			BucketKey:    file.BucketKey,
EncryptionKey: key, }, CreatedAt: time.Now().UnixNano(), } return m.createSentFile(ctx, newInstance) } func (m *model) createSentFile(ctx context.Context, instance *SentFileSchema) (*SentFileSchema, error) { metaCtx, metaDbID, err := m.initSentFileModel(ctx) if err != nil && metaDbID == nil { return nil, err } instances := client.Instances{instance} res, err := m.threads.Create(metaCtx, *metaDbID, sentFileModelName, instances) if err != nil { return nil, err } log.Debug(fmt.Sprintf("Model.createSentFile: stored sent file res=%+v", res)) id := res[0] return &SentFileSchema{ ID: core.InstanceID(id), SentFileViaInvitationSchema: instance.SentFileViaInvitationSchema, CreatedAt: instance.CreatedAt, }, nil } // Finds the metadata of a file that has been shared by the user func (m *model) FindSentFile(ctx context.Context, remoteDbID, bucket, path string) (*SentFileSchema, error) { metaCtx, dbID, err := m.initSentFileModel(ctx) if err != nil || dbID == nil { return nil, err } rawFiles, err := m.threads.Find(metaCtx, *dbID, sentFileModelName, db.Where("dbId").Eq(remoteDbID).And("bucket").Eq(bucket).And("path").Eq(path), &SentFileSchema{}) if err != nil { return nil, err } if rawFiles == nil { return nil, errSentFileNotFound } files := rawFiles.([]*SentFileSchema) if len(files) == 0 { return nil, errSentFileNotFound } log.Debug(fmt.Sprintf("Model.FindSentFile: returning files=%+v", files)) return files[0], nil } // Lists the metadata of files sent by the user // If seek == "", will start looking from the beginning. If it's an existing ID it will start looking from that ID. 
func (m *model) ListSentFiles(ctx context.Context, seek string, limit int) ([]*SentFileSchema, error) { metaCtx, dbID, err := m.initSentFileModel(ctx) if err != nil || dbID == nil { return nil, err } query := db.OrderByID().LimitTo(limit) if seek != "" { query = query.SeekID(core.InstanceID(seek)) } rawFiles, err := m.threads.Find(metaCtx, *dbID, sentFileModelName, query, &SentFileSchema{}) if err != nil { return nil, err } if rawFiles == nil { return []*SentFileSchema{}, nil } files := rawFiles.([]*SentFileSchema) return files, nil } // XXX: this is to reuse the builders in the sharing.go func (sf *SentFileSchema) ReceivedFileSchema() *ReceivedFileSchema { return &ReceivedFileSchema{ ID: sf.ID, CreatedAt: sf.CreatedAt, ReceivedFileViaInvitationSchema: ReceivedFileViaInvitationSchema{ DbID: sf.DbID, Bucket: sf.Bucket, Path: sf.Path, InvitationId: sf.InvitationId, BucketKey: sf.BucketKey, EncryptionKey: sf.EncryptionKey, }, } } func (m *model) initSentFileModel(ctx context.Context) (context.Context, *thread.ID, error) { metaCtx, dbID, err := m.getMetaThreadContext(ctx) if err != nil { return nil, nil, err } if err := m.threads.NewCollection(metaCtx, *dbID, GetSentFileCollectionConfig()); err != nil { log.Debug("initSentFileModel: collection already exists") } return metaCtx, dbID, nil } func GetSentFileCollectionConfig() db.CollectionConfig { return db.CollectionConfig{ Name: sentFileModelName, Schema: util.SchemaFromInstance(&SentFileSchema{}, false), } } ================================================ FILE: core/textile/model/shared_public_key.go ================================================ package model import ( "context" "errors" "time" "github.com/FleekHQ/space-daemon/log" "github.com/textileio/go-threads/api/client" core "github.com/textileio/go-threads/core/db" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" "github.com/textileio/go-threads/util" ) type SharedPublicKeySchema struct { ID core.InstanceID `json:"_id"` DbID 
string `json:"dbId"` PublicKey string `json:"public_key"` UpdatedAt int64 `json:"updated_at"` CreatedAt int64 `json:"created_at"` } const sharedPublicKeyModel = "SharedPublicKey" var errSharedPublicKeyNotFound = errors.New("Shared public key not found") // Creates the metadata for a shared public key func (m *model) CreateSharedPublicKey(ctx context.Context, pubKey string) (*SharedPublicKeySchema, error) { log.Debug("Model.CreateSharedPublicKey: Storing shared public key " + pubKey) if existingPublicKey, err := m.FindSharedPublicKey(ctx, pubKey); err == nil { log.Debug("Model.CreateSharedPublicKey: Shared public key already in collection") return existingPublicKey, nil } log.Debug("Model.CreateSharedPublicKey: Initializing db") metaCtx, metaDbID, err := m.initSharedPublicKey(ctx) if err != nil && metaDbID == nil { return nil, err } now := time.Now().UnixNano() newInstance := &SharedPublicKeySchema{ ID: "", PublicKey: pubKey, UpdatedAt: now, CreatedAt: now, } instances := client.Instances{newInstance} log.Debug("Model.CreateSharedPublicKey: Creating instance") res, err := m.threads.Create(metaCtx, *metaDbID, sharedPublicKeyModel, instances) if err != nil { return nil, err } log.Debug("Model.CreateSharedPublicKey: stored shared public key " + newInstance.PublicKey) id := res[0] return &SharedPublicKeySchema{ ID: core.InstanceID(id), DbID: newInstance.DbID, PublicKey: newInstance.PublicKey, UpdatedAt: newInstance.UpdatedAt, CreatedAt: newInstance.CreatedAt, }, nil } // Finds the metadata of a shared public key func (m *model) FindSharedPublicKey(ctx context.Context, pubKey string) (*SharedPublicKeySchema, error) { metaCtx, dbID, err := m.initReceivedFileModel(ctx) if err != nil || dbID == nil { return nil, err } rawKeys, err := m.threads.Find(metaCtx, *dbID, sharedPublicKeyModel, db.Where("public_key").Eq(pubKey), &SharedPublicKeySchema{}) if err != nil { return nil, err } if rawKeys == nil { return nil, errReceivedFileNotFound } files := 
rawKeys.([]*SharedPublicKeySchema) if len(files) == 0 { return nil, errReceivedFileNotFound } log.Debug("Model.FindReceivedFile: returning shared public key " + files[0].PublicKey) return files[0], nil } func (m *model) initSharedPublicKey(ctx context.Context) (context.Context, *thread.ID, error) { metaCtx, dbID, err := m.getMetaThreadContext(ctx) if err != nil { return nil, nil, err } managedKey, err := m.kc.GetManagedThreadKey(metaThreadName) if err != nil { log.Error("error getting managed thread key", err) return nil, nil, err } if err = m.threads.NewDB(metaCtx, *dbID, db.WithNewManagedThreadKey(managedKey)); err != nil { log.Debug("initSharedPublicKey: db already exists") } if err := m.threads.NewCollection(metaCtx, *dbID, GetSharedPublicKeyCollectionConfig()); err != nil { log.Debug("initSharedPublicKey: collection already exists") } return metaCtx, dbID, nil } const listSharedPublicKeysLimit = 128 func (m *model) ListSharedPublicKeys(ctx context.Context) ([]*SharedPublicKeySchema, error) { metaCtx, dbID, err := m.initSharedPublicKey(ctx) if err != nil && dbID == nil { return nil, err } query := &db.Query{} query.Limit = listSharedPublicKeysLimit query.Sort.FieldPath = "CreatedAt" query.Sort.Desc = false rawKeys, err := m.threads.Find(metaCtx, *dbID, sharedPublicKeyModel, query, &SharedPublicKeySchema{}) if rawKeys == nil { return []*SharedPublicKeySchema{}, nil } keys := rawKeys.([]*SharedPublicKeySchema) return keys, nil } func GetSharedPublicKeyCollectionConfig() db.CollectionConfig { return db.CollectionConfig{ Name: sharedPublicKeyModel, Schema: util.SchemaFromInstance(&SharedPublicKeySchema{}, false), } } ================================================ FILE: core/textile/notifier/notifier.go ================================================ package notifier import ( "github.com/FleekHQ/space-daemon/core/textile/sync" "github.com/ipfs/interface-go-ipfs-core/path" ) type Notifier struct { s sync.Synchronizer } func New(s sync.Synchronizer) *Notifier { 
return &Notifier{
		s: s,
	}
}

// OnUploadFile notifies the synchronizer that an item was added to a bucket.
// The resolved path arguments are currently unused; the signature matches the
// bucket notifier interface.
func (n *Notifier) OnUploadFile(bucketSlug string, bucketPath string, result path.Resolved, root path.Path) {
	n.s.NotifyItemAdded(bucketSlug, bucketPath)
}

================================================
FILE: core/textile/public.go
================================================
package textile

import (
	"context"
	"io"

	"github.com/opentracing/opentracing-go"

	"github.com/FleekHQ/space-daemon/config"
	"github.com/FleekHQ/space-daemon/core/ipfs"
	"github.com/ipfs/go-cid"
	"github.com/pkg/errors"
	api_buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb"

	"github.com/FleekHQ/space-daemon/core/textile/bucket"
	"github.com/FleekHQ/space-daemon/core/textile/utils"

	"github.com/FleekHQ/space-daemon/log"
	"github.com/textileio/go-threads/core/thread"
	bc "github.com/textileio/textile/v2/api/bucketsd/client"
)

// Get a public bucket on hub. Public bucket has no encryption and its content should be accessible directly via ipfs/ipns
// Only use this bucket for items that is okay to be publicly shared
func (tc *textileClient) GetPublicShareBucket(ctx context.Context) (Bucket, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "TextileClient.GetPublicShareBucket")
	defer span.Finish()

	if err := tc.requiresRunning(); err != nil {
		return nil, err
	}

	return tc.getOrCreatePublicBucket(ctx, defaultPublicShareBucketSlug)
}

// createDefaultPublicBucket creates the default public-share bucket on the hub
// and wraps it in the bucket abstraction.
func (tc *textileClient) createDefaultPublicBucket(ctx context.Context) (Bucket, error) {
	ctx, dbId, err := tc.getPublicShareBucketContext(ctx, defaultPublicShareBucketSlug)
	if err != nil {
		return nil, err
	}

	bucketRoot, err := tc.createPublicBucket(ctx, *dbId, defaultPublicShareBucketSlug)
	if err != nil {
		return nil, err
	}

	newB := bucket.New(
		bucketRoot,
		tc.getPublicShareBucketContext,
		tc.hb,
	)
	return newB, nil
}

// getOrCreatePublicBucket returns the hub bucket with the given slug,
// creating it when it does not exist yet.
func (tc *textileClient) getOrCreatePublicBucket(ctx context.Context, bucketSlug string) (Bucket, error) {
	ctx, dbId, err := tc.getPublicShareBucketContext(ctx, bucketSlug)
	if err != nil {
		return nil, err
	}

	// find if bucket exists
	buckets, err := tc.hb.List(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get public bucket")
	}
	if buckets != nil {
		for _, bucketRoot := range buckets.Roots {
			if bucketRoot.Name == bucketSlug {
				return bucket.New(
					bucketRoot,
					tc.getPublicShareBucketContext,
					tc.hb,
				), nil
			}
		}
	}

	// else create bucketRoot
	bucketRoot, err := tc.createPublicBucket(ctx, *dbId, bucketSlug)
	if err != nil {
		return nil, err
	}

	newB := bucket.New(
		bucketRoot,
		tc.getPublicShareBucketContext,
		tc.hb,
	)
	return newB, nil
}

// getPublicShareBucketContext builds a hub context scoped to the public share
// thread for the given bucket slug.
func (tc *textileClient) getPublicShareBucketContext(ctx context.Context, bucketSlug string) (context.Context, *thread.ID, error) {
	dbId, err := tc.getPublicShareThread(ctx)
	if err != nil {
		return nil, nil, err
	}

	ctx, err = utils.GetThreadContext(ctx, bucketSlug, dbId, true, tc.kc, tc.hubAuth, nil)
	if err != nil {
		return nil, nil, err
	}

	return ctx, &dbId, nil
}

// Creates a public bucket for current user.
func (tc *textileClient) createPublicBucket(ctx context.Context, dbId thread.ID, bucketSlug string) (*api_buckets_pb.Root, error) {
	log.Debug("Creating a new public bucket")

	hubCtx, _, err := tc.getBucketContext(ctx, utils.CastDbIDToString(dbId), bucketSlug, true, nil)
	if err != nil {
		return nil, err
	}

	// public buckets are created unencrypted (WithPrivate(false))
	b, err := tc.hb.Create(hubCtx, bc.WithName(bucketSlug), bc.WithPrivate(false))
	if err != nil {
		return nil, err
	}

	return b.Root, nil
}

const publicShareThreadStoreKey = "publicSharedThreadKey"

// Creates a remote hub thread for the public sharing bucket
func (tc *textileClient) getPublicShareThread(ctx context.Context) (thread.ID, error) {
	// check if db id already exists
	storedDbId, err := tc.store.Get([]byte(publicShareThreadStoreKey))
	if err == nil {
		return thread.Cast(storedDbId)
	}

	// else create new db
	ctx, err = tc.getHubCtx(ctx)
	if err != nil {
		return thread.Undef, err
	}

	dbId := thread.NewIDV1(thread.Raw, 32)
	if err := tc.ht.NewDB(ctx, dbId); err != nil {
		return thread.Undef, err
	}
	log.Debug("Public share thread created")

	// persist the new thread ID so subsequent calls reuse it
	err = tc.store.Set([]byte(publicShareThreadStoreKey), []byte(dbId))
	if err != nil {
		return thread.Undef, errors.Wrap(err, "failed to persist public share thread")
	}

	return dbId, nil
}

// DownloadPublicGatewayItem download a cid content from the hubs public gateway
func (tc *textileClient) DownloadPublicItem(ctx context.Context, cid cid.Cid) (io.ReadCloser, error) {
	gatewayUrl := tc.cfg.GetString(config.TextileHubGatewayUrl, "https://hub.textile.io")
	return ipfs.DownloadIpfsItemViaGateway(ctx, gatewayUrl, cid)
}

================================================
FILE: core/textile/search.go
================================================
package textile

import "context"

// initSearchIndex initializes the model's search index collection.
func (tc *textileClient) initSearchIndex(ctx context.Context) error {
	err := tc.GetModel().InitSearchIndexCollection(ctx)
	return err
}

================================================
FILE: core/textile/secure_bucket_client.go
================================================
package textile

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"regexp"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/FleekHQ/space-daemon/config"
	"github.com/FleekHQ/space-daemon/core/ipfs"
	"github.com/FleekHQ/space-daemon/core/keychain"
	"github.com/FleekHQ/space-daemon/core/store"
	"github.com/FleekHQ/space-daemon/core/textile/common"
	"github.com/FleekHQ/space-daemon/core/textile/utils"
	"github.com/FleekHQ/space-daemon/log"

	"github.com/FleekHQ/space-daemon/core/textile/bucket/crypto"
	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"

	"github.com/ipfs/go-cid"
	ipfsfiles "github.com/ipfs/go-ipfs-files"
	iface "github.com/ipfs/interface-go-ipfs-core"
	"github.com/ipfs/interface-go-ipfs-core/options"
	"github.com/ipfs/interface-go-ipfs-core/path"
	threadsClient "github.com/textileio/go-threads/api/client"
	bc "github.com/textileio/textile/v2/api/bucketsd/client"
	bucketsClient "github.com/textileio/textile/v2/api/bucketsd/client"
	bucketspb
"github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/buckets" ) var textileRelPathRegex = regexp.MustCompile(`/ip[f|n]s/[^/]*(?P/.*)`) // SecureBucketClient implements the BucketsClient Interface // It encrypts data being pushed to the underlying textile client // and also decrypts response from the underlying textile client type SecureBucketClient struct { client *bucketsClient.Client kc keychain.Keychain st store.Store threads *threadsClient.Client ipfsClient iface.CoreAPI isRemote bool cfg config.Config } func NewSecureBucketsClient( client *bucketsClient.Client, kc keychain.Keychain, st store.Store, threads *threadsClient.Client, ipfsClient iface.CoreAPI, isRemote bool, cfg config.Config, ) *SecureBucketClient { return &SecureBucketClient{ client: client, kc: kc, st: st, threads: threads, ipfsClient: ipfsClient, isRemote: isRemote, cfg: cfg, } } func (s *SecureBucketClient) PushPath(ctx context.Context, key, path string, reader io.Reader, opts ...bc.Option) (result path.Resolved, root path.Resolved, err error) { path = cleanBucketPath(path) encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return nil, nil, err } // encrypt path before uploading encryptedPath, encryptedReader, err := s.encryptPathData(ctx, encryptionKey, path, reader) if err != nil { return nil, nil, err } return s.client.PushPath(ctx, key, encryptedPath, encryptedReader) // For now ignoring parsing results since it not being used downstream // but putting a TODO here in the meantime } func (s *SecureBucketClient) PushPathAccessRoles(ctx context.Context, key, path string, roles map[string]buckets.Role) error { encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return err } encryptedPath, _, err := s.encryptPathData(ctx, encryptionKey, path, nil) if err != nil { return err } return s.client.PushPathAccessRoles(ctx, key, encryptedPath, roles) } func (s *SecureBucketClient) PullPathAccessRoles(ctx context.Context, key, path string) 
(map[string]buckets.Role, error) { encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return nil, err } encryptedPath, _, err := s.encryptPathData(ctx, encryptionKey, path, nil) if err != nil { return nil, err } return s.client.PullPathAccessRoles(ctx, key, encryptedPath) } func (s *SecureBucketClient) PullPath(ctx context.Context, key, path string, writer io.Writer, opts ...bc.Option) error { encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return err } encryptedPath, _, err := s.encryptPathData(ctx, encryptionKey, path, nil) if err != nil { return err } errs := make(chan error) pipeReader, pipeWriter := io.Pipe() // pipe the writes from buckets to reader to be decrypted go func() { defer pipeWriter.Close() if err := s.racePullFile(ctx, key, encryptedPath, pipeWriter, opts...); err != nil { errs <- err } }() go func() { defer close(errs) _, r, err := s.decryptPathData(ctx, encryptionKey, "", pipeReader) if err != nil { errs <- err return } defer r.Close() // copy decrypted reads to original writer if _, err := io.Copy(writer, r); err != nil { errs <- err return } }() err = <-errs return err } func (s *SecureBucketClient) overwriteDecryptedItem(ctx context.Context, item *bucketspb.PathItem) error { encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return err } if utils.IsMetaFileName(item.Name) { return nil } // decrypt file name item.Name, _, err = s.decryptPathData(ctx, encryptionKey, item.Name, nil) if err != nil { return err } // decrypts file path matchedPaths := textileRelPathRegex.FindStringSubmatch(item.Path) if len(matchedPaths) > 1 { item.Path, _, err = s.decryptPathData(ctx, encryptionKey, matchedPaths[1], nil) if err != nil { return err } } // Item size is generally (content size + hmac (64 bytes)) if item.Size >= 64 { item.Size = item.Size - 64 } return nil } func (s *SecureBucketClient) ListIpfsPath(ctx context.Context, pth path.Path) (*bucketspb.ListIpfsPathResponse, error) { return 
s.client.ListIpfsPath(ctx, pth) } func (s *SecureBucketClient) ListPath(ctx context.Context, key, path string) (*bucketspb.ListPathResponse, error) { path = cleanBucketPath(path) encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return nil, err } encryptedPath, _, err := s.encryptPathData(ctx, encryptionKey, path, nil) if err != nil { return nil, err } result, err := s.client.ListPath(ctx, key, encryptedPath) if err != nil { return nil, err } // decrypt result items for _, item := range result.Item.Items { err = s.overwriteDecryptedItem(ctx, item) if err != nil { // Don't error on a single file not decrypted log.Debug(fmt.Sprintf("Error decrypting a file: %s", err.Error())) } } // decrypt root item err = s.overwriteDecryptedItem(ctx, result.Item) if err != nil { // Don't error on a single file not decrypted log.Debug(fmt.Sprintf("Error decrypting a file: %s", err.Error())) } return result, nil } func (s *SecureBucketClient) RemovePath(ctx context.Context, key, path string, opts ...bc.Option) (path.Resolved, error) { path = cleanBucketPath(path) encryptionKey, err := s.getBucketEncryptionKey(ctx) if err != nil { return nil, err } // encrypt path before submitting delete encryptedPath, _, err := s.encryptPathData(ctx, encryptionKey, path, nil) if err != nil { return nil, err } return s.client.RemovePath(ctx, key, encryptedPath, opts...) 
} func (s *SecureBucketClient) getBucketEncryptionKey(ctx context.Context) ([]byte, error) { if key, exists := common.BucketEncryptionKeyFromContext(ctx); exists { return key, nil } return nil, errors.New("bucket encryption key missing") } func (s *SecureBucketClient) encryptPathData( ctx context.Context, key []byte, path string, dataReader io.Reader, ) (string, io.Reader, error) { return crypto.EncryptPathItems(key, path, dataReader) } func (s *SecureBucketClient) decryptPathData( ctx context.Context, key []byte, path string, dataReader io.Reader, ) (string, io.ReadCloser, error) { return crypto.DecryptPathItems(key, path, dataReader) } // Cleans path used to access data in buckets // Currently only removes prefix path if exists. // would later include logic to normalize paths from other operating systems like windows func cleanBucketPath(path string) string { return strings.TrimPrefix(path, "/") } type pathPullingFn func(context.Context, string, string, io.Writer, ...bc.Option) (bool, error) type pullSuccessResponse struct { file *os.File shouldCache bool } func getTempFileName(encPath string) string { tempFilePath := sha256.Sum256([]byte(encPath)) return hex.EncodeToString(tempFilePath[:]) } func (s *SecureBucketClient) racePullFile(ctx context.Context, key, encPath string, w io.Writer, opts ...bc.Option) error { pullers := []pathPullingFn{s.pullFileFromLocal, s.pullFileFromDHT, s.pullFileFromClient} var pullSuccessClosed uint32 var pullSuccessMutex sync.Mutex pullSuccess := make(chan *pullSuccessResponse) defer func() { pullSuccessMutex.Lock() defer pullSuccessMutex.Unlock() atomic.StoreUint32(&pullSuccessClosed, 1) close(pullSuccess) }() errc := make(chan error) ctxWithCancel, cancelPulls := context.WithCancel(ctx) pendingFns := len(pullers) erroredFns := 0 for _, fn := range pullers { f, err := ioutil.TempFile("", "*-"+getTempFileName(encPath)) if err != nil { cancelPulls() return err } defer f.Close() defer os.Remove(f.Name()) go func(fn pathPullingFn, f 
*os.File) { shouldCache, err := fn(ctxWithCancel, key, encPath, f, opts...) if err != nil { errc <- err return } chanRes := &pullSuccessResponse{ file: f, shouldCache: shouldCache, } if ctxWithCancel.Err() != nil { errc <- ctxWithCancel.Err() return } pullSuccessMutex.Lock() defer pullSuccessMutex.Unlock() if atomic.LoadUint32(&pullSuccessClosed) == 0 { pullSuccess <- chanRes } errc <- nil }(fn, f) } var pullErr error // Wait for either all pullers to fail or for one to succeed go func() { for { select { case err := <-errc: pendingFns-- if err != nil { erroredFns++ pullErr = err } if pendingFns <= 0 && erroredFns >= len(pullers) { // All functions failed. Stop waiting pullSuccess <- nil } if pendingFns <= 0 { close(errc) return } } } }() pullResponse := <-pullSuccess cancelPulls() // Return error if all pull functions failed if erroredFns >= len(pullers) || pullResponse == nil { return pullErr } finalFile := pullResponse.file shouldCache := pullResponse.shouldCache // Copy pulled file to upstream writer resErrc := make(chan error) defer close(resErrc) go func() { from, err := os.Open(finalFile.Name()) if err != nil { resErrc <- err return } defer from.Close() _, err = io.Copy(w, from) resErrc <- err }() // Copy pulled file to local cache cacheErrc := make(chan error) defer close(cacheErrc) go func() { var err error if !shouldCache { cacheErrc <- nil return } from, err := os.Open(finalFile.Name()) if err != nil { cacheErrc <- err return } defer from.Close() p, err := s.ipfsClient.Unixfs().Add( ctx, ipfsfiles.NewReaderFile(from), options.Unixfs.Pin(false), // Turn to true when we enable DHT discovery options.Unixfs.Progress(false), options.Unixfs.CidVersion(1), ) if err != nil { cacheErrc <- err return } bucketPath, err := s.client.ListPath(ctx, key, encPath) if err != nil { cacheErrc <- err return } encCid := bucketPath.Item.Cid cidBinary := p.Cid().Bytes() err = s.st.Set(getFileCacheKey(encCid), cidBinary) cacheErrc <- err }() if err := <-resErrc; err != nil { 
return err } if err := <-cacheErrc; err != nil { return err } return nil } const FileCachePrefix = "file_cache" func getFileCacheKey(encCid string) []byte { return []byte(FileCachePrefix + ":" + encCid) } func (s *SecureBucketClient) pullFileFromClient(ctx context.Context, key, encPath string, w io.Writer, opts ...bc.Option) (shouldCache bool, err error) { shouldCache = true if s.isRemote == false { // File already in local bucket shouldCache = false } if err = s.client.PullPath(ctx, key, encPath, w, opts...); err != nil { return false, err } return shouldCache, nil } var errNoLocalClient = errors.New("No cache client available") func (s *SecureBucketClient) pullFileFromLocal(ctx context.Context, key, encPath string, w io.Writer, opts ...bc.Option) (shouldCache bool, err error) { shouldCache = false bucketPath, err := s.client.ListPath(ctx, key, encPath) if err != nil { return false, err } encCid := bucketPath.Item.Cid cidBinary, err := s.st.Get(getFileCacheKey(encCid)) if cidBinary == nil || err != nil { return false, errors.New("CID not stored in local cache") } _, c, err := cid.CidFromBytes(cidBinary) if err != nil { return false, err } node, err := s.ipfsClient.Unixfs().Get(ctx, path.New(c.String())) if err != nil { return false, err } defer node.Close() file := ipfsfiles.ToFile(node) if file == nil { return false, errors.New("File is a directory") } if _, err := io.Copy(w, file); err != nil { return false, err } return shouldCache, nil } func (s *SecureBucketClient) pullFileFromDHT(ctx context.Context, key, encPath string, w io.Writer, opts ...bc.Option) (shouldCache bool, err error) { shouldCache = false bucketPath, err := s.client.ListPath(ctx, key, encPath) if err != nil { return false, err } encCid := bucketPath.Item.Cid cid, err := cid.Decode(encCid) if err != nil { return false, err } ipfsAddr := s.cfg.GetString(config.Ipfsaddr, "/ip4/127.0.0.1/tcp/5001") maddr, err := ma.NewMultiaddr(ipfsAddr) if err != nil { log.Error(fmt.Sprintf("Unable to parse IPFS 
Multiaddr: %s", ipfsAddr), err) return false, err } _, host, err := manet.DialArgs(maddr) if err != nil { log.Error(fmt.Sprintf("Unable to dial IPFS Multiaddr: %+v", maddr), err) return false, err } reader, err := ipfs.DownloadIpfsItem(ctx, host, cid) if err != nil { log.Error(fmt.Sprintf("Unable to download IPFS CID %s from host %s", cid.String(), host), err) return false, err } if _, err := io.Copy(w, reader); err != nil { return false, err } return shouldCache, nil } const cacheBucketThreadName = "cache_bucket" ================================================ FILE: core/textile/sharing.go ================================================ package textile import ( "context" "encoding/hex" "fmt" "path/filepath" "strconv" "strings" "time" "github.com/FleekHQ/space-daemon/core/textile/model" "github.com/pkg/errors" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/util/address" "github.com/FleekHQ/space-daemon/log" crypto "github.com/libp2p/go-libp2p-crypto" "github.com/textileio/go-threads/core/thread" "github.com/textileio/textile/v2/buckets" ) func (tc *textileClient) ManageShareFilesViaPublicKey( ctx context.Context, paths []domain.FullPath, pubkeys []crypto.PubKey, keys [][]byte, role domain.SharedFilesRoleAction, ) error { var err error ctx, err = tc.getHubCtx(ctx) if err != nil { return err } var bucketRole buckets.Role switch role { case domain.ReadWriteRoleAction: // NOTE: setting to admin because receiving user // should be able to see members and reshare // as well bucketRole = buckets.Admin case domain.DeleteRoleAction: bucketRole = buckets.None default: return errors.New("unsupported shared files role") } for i, pth := range paths { ctx, _, err = tc.getBucketContext(ctx, pth.DbId, pth.Bucket, true, keys[i]) if err != nil { return err } log.Info("Adding roles", "path:"+pth.Path, "role:"+bucketRole.String()) roles := make(map[string]buckets.Role) for _, pk := range pubkeys { tpk := thread.NewLibp2pPubKey(pk) 
roles[tpk.String()] = bucketRole } sbc := tc.getSecureBucketsClient(tc.hb) err := sbc.PushPathAccessRoles(ctx, pth.BucketKey, pth.Path, roles) if err != nil { return err } } return nil } var errInvitationNotPending = errors.New("invitation is no more pending") var errInvitationAlreadyAccepted = errors.New("invitation is already accepted") var errInvitationAlreadyRejected = errors.New("invitation is already rejected") func (tc *textileClient) AcceptSharedFilesInvitation( ctx context.Context, invitation domain.Invitation, ) (domain.Invitation, error) { if invitation.Status == domain.ACCEPTED { return domain.Invitation{}, errInvitationAlreadyAccepted } if invitation.Status != domain.PENDING { return domain.Invitation{}, errInvitationNotPending } err := tc.createReceivedFiles(ctx, invitation, true) if err != nil { return domain.Invitation{}, err } invitation.Status = domain.ACCEPTED return invitation, nil } func (tc *textileClient) RejectSharedFilesInvitation( ctx context.Context, invitation domain.Invitation, ) (domain.Invitation, error) { if invitation.Status == domain.REJECTED { return domain.Invitation{}, errInvitationAlreadyRejected } if invitation.Status != domain.PENDING { return domain.Invitation{}, errInvitationNotPending } err := tc.createReceivedFiles(ctx, invitation, false) if err != nil { return domain.Invitation{}, err } invitation.Status = domain.REJECTED return invitation, nil } func (tc *textileClient) createReceivedFiles( ctx context.Context, invitation domain.Invitation, accepted bool, ) error { if len(invitation.ItemPaths) != len(invitation.Keys) { return errors.New("size of encryption keys does not match all items shared") } // TODO: Make this is call a transaction on threads so any failure can be easily reverted var allErr error for i, path := range invitation.ItemPaths { encryptionKeys := []byte("") if accepted { encryptionKeys = invitation.Keys[i] } receivedFile, err := tc.GetModel().CreateReceivedFileViaInvitation(ctx, path, 
invitation.InvitationID, accepted, encryptionKeys, invitation.InviterPublicKey) // compose each create error if err != nil { if allErr == nil { allErr = errors.Wrap(err, "Failed to accept some invitations") } allErr = errors.Wrap(err, allErr.Error()) } else { if accepted { tc.sync.NotifyIndexItemAdded(receivedFile.Bucket, receivedFile.Path, receivedFile.DbID) } } } return allErr } func (tc *textileClient) AcceptSharedFileLink( ctx context.Context, cidHash, password, filename, fileSize string, ) (*domain.SharedDirEntry, error) { receivedFile, err := tc.GetModel().CreateReceivedFileViaPublicLink(ctx, cidHash, password, filename, fileSize, true) if err != nil { return nil, err } return tc.buildPublicLinkSharedDirEntry(ctx, receivedFile) } func (tc *textileClient) GetPublicReceivedFile( ctx context.Context, cidHash string, accepted bool, ) (*domain.SharedDirEntry, string, error) { files, err := tc.GetModel().ListReceivedPublicFiles(ctx, cidHash, accepted) if err != nil { return nil, "", err } if len(files) == 0 { return nil, "", errors.New("not found") } entry, err := tc.buildPublicLinkSharedDirEntry(ctx, files[0]) if err != nil { return nil, "", err } return entry, files[0].FilePassword, nil } func (tc *textileClient) GetReceivedFiles( ctx context.Context, accepted bool, seek string, limit int, ) ([]*domain.SharedDirEntry, string, error) { files, err := tc.GetModel().ListReceivedFiles(ctx, accepted, seek, limit) if err != nil { return nil, "", err } items := []*domain.SharedDirEntry{} if len(files) == 0 { return items, "", nil } var res *domain.SharedDirEntry for _, file := range files { if file.IsPublicLinkReceived() { res, err = tc.buildPublicLinkSharedDirEntry(ctx, file) } else { res, err = tc.buildInvitationSharedDirEntry(ctx, file, false) } if err != nil { return nil, "", err } items = append(items, res) } offset := files[len(files)-1].ID.String() return items, offset, nil } func (tc *textileClient) GetSentFiles( ctx context.Context, seek string, limit int, ) 
([]*domain.SharedDirEntry, string, error) { files, err := tc.GetModel().ListSentFiles(ctx, seek, limit) if err != nil { return nil, "", err } items := []*domain.SharedDirEntry{} if len(files) == 0 { return items, "", nil } var res *domain.SharedDirEntry for _, file := range files { res, err = tc.buildInvitationSharedDirEntry(ctx, file.ReceivedFileSchema(), true) if err != nil { return nil, "", err } items = append(items, res) } offset := files[len(files)-1].ID.String() return items, offset, nil } func (tc *textileClient) buildPublicLinkSharedDirEntry( ctx context.Context, file *model.ReceivedFileSchema, ) (*domain.SharedDirEntry, error) { res := &domain.SharedDirEntry{ FileInfo: domain.FileInfo{ IpfsHash: file.PublicIpfsHash, LocallyAvailable: false, BackedUp: true, BackupInProgress: false, DirEntry: domain.DirEntry{ Path: file.FileName, IsDir: false, Name: file.FileName, SizeInBytes: file.FileSize, FileExtension: strings.Replace(filepath.Ext(file.FileName), ".", "", -1), Created: time.Unix(0, file.CreatedAt).Format(time.RFC3339), Updated: time.Unix(0, file.CreatedAt).Format(time.RFC3339), }, }, Members: []domain.Member{}, IsPublicLink: true, SharedBy: file.SharedBy, } return res, nil } func (tc *textileClient) buildInvitationSharedDirEntry( ctx context.Context, file *model.ReceivedFileSchema, isSentFiles bool, ) (*domain.SharedDirEntry, error) { ctx, _, err := tc.getBucketContext(ctx, file.DbID, file.Bucket, true, file.EncryptionKey) if err != nil { return nil, err } sbc := tc.getSecureBucketsClient(tc.hb) f, err := sbc.ListPath(ctx, file.BucketKey, file.Path) if err != nil { return nil, err } ipfsHash := f.Item.Cid name := f.Item.Name isDir := false size := f.GetItem().Size ext := strings.Replace(filepath.Ext(name), ".", "", -1) updatedAt := f.GetItem().Metadata.UpdatedAt rs, err := sbc.PullPathAccessRoles(ctx, file.BucketKey, file.Path) if err != nil { // TEMP: returning empty members list until we // fix it on textile side //return nil, "", err rs = 
make(map[string]buckets.Role) } members := make([]domain.Member, 0) for pubk, _ := range rs { key := &thread.Libp2pPubKey{} err = key.UnmarshalString(pubk) if err != nil { log.Error(fmt.Sprintf("key.UnmarshalString(pubk=%+v)", pubk), err) return nil, err } pk := key.PubKey b, err := pk.Raw() if err != nil { return nil, err } members = append(members, domain.Member{ Address: address.DeriveAddress(pk), PublicKey: hex.EncodeToString(b), }) } fileBucket := file.Bucket fileDbID := file.DbID if isSentFiles { fileBucket = defaultPersonalBucketSlug fileDbID = "" } res := &domain.SharedDirEntry{ Bucket: fileBucket, DbID: fileDbID, FileInfo: domain.FileInfo{ IpfsHash: ipfsHash, LocallyAvailable: isSentFiles, BackedUp: true, // TODO: Reflect correct state when we add local updates syncing to remote BackupInProgress: false, RestoreInProgress: false, DirEntry: domain.DirEntry{ Path: file.Path, IsDir: isDir, Name: name, SizeInBytes: strconv.FormatInt(size, 10), FileExtension: ext, Created: time.Unix(0, file.CreatedAt).Format(time.RFC3339), Updated: time.Unix(0, updatedAt).Format(time.RFC3339), }, }, SharedBy: file.SharedBy, Members: members, } return res, nil } func (tc *textileClient) GetPathAccessRoles(ctx context.Context, b Bucket, path string) ([]domain.Member, error) { var err error var bucketSlug, bucketKey string bucketSlug = b.Slug() bucket, err := tc.GetModel().FindBucket(ctx, bucketSlug) if err != nil { return nil, err } bucketKey = bucket.RemoteBucketKey hubCtx, _, err := tc.getBucketContext(ctx, bucket.RemoteDbID, bucketSlug, true, bucket.EncryptionKey) if err != nil { return nil, err } sbc := tc.getSecureBucketsClient(tc.hb) rs, err := sbc.PullPathAccessRoles(hubCtx, bucketKey, path) if err != nil { // log.Error(fmt.Sprintf("PullPathAccessRoles not resolved (bucketKey=%s bucketSlug=%s path=%s)", bucketKey, bucketSlug, path), err) return []domain.Member{}, nil } // log.Debug(fmt.Sprintf("PullPathAccessRoles roles=%+v", rs)) members := make([]domain.Member, 0) for 
pubk, _ := range rs { key := &thread.Libp2pPubKey{} err = key.UnmarshalString(pubk) if err != nil { log.Error(fmt.Sprintf("key.UnmarshalString(pubk=%+v)", pubk), err) return nil, err } pk := key.PubKey b, err := pk.Raw() if err != nil { return nil, err } members = append(members, domain.Member{ Address: address.DeriveAddress(pk), PublicKey: hex.EncodeToString(b), }) } return members, nil } // return true if file was shared // XXX: export this func? func (tc *textileClient) isSharedFile(ctx context.Context, bucket Bucket, path string) bool { sbc := tc.getSecureBucketsClient(tc.hb) roles, err := sbc.PullPathAccessRoles(ctx, bucket.Key(), path) if err != nil { return false } pk, err := tc.kc.GetStoredPublicKey() if err != nil { return false } tpk := thread.NewLibp2pPubKey(pk) // shared means other roles than the user delete(roles, tpk.String()) return len(roles) > 0 } ================================================ FILE: core/textile/sync/mirror.go ================================================ package sync import ( "context" "fmt" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/textile/model" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/log" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" bucketsClient "github.com/textileio/textile/v2/api/bucketsd/client" api_buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/buckets" ) const mirrorThreadKeyName = "mirrorV1" func (s *synchronizer) setMirrorFileBackup(ctx context.Context, path, bucketSlug string, isInProgress bool) error { mf, err := s.model.FindMirrorFileByPathAndBucketSlug(ctx, path, bucketSlug) if err != nil { return err } if mf != nil { // update mf.Backup = !isInProgress mf.BackupInProgress = isInProgress _, err = s.model.UpdateMirrorFile(ctx, mf) if err != nil { return err } } else { // create mf := 
&domain.MirrorFile{ Path: path, BucketSlug: bucketSlug, Backup: !isInProgress, BackupInProgress: isInProgress, Shared: false, } _, err := s.model.CreateMirrorFile(ctx, mf) if err != nil { return err } } return nil } // unset mirror file as backup func (s *synchronizer) unsetMirrorFileBackup(ctx context.Context, path, bucketSlug string) error { mf, err := s.model.FindMirrorFileByPathAndBucketSlug(ctx, path, bucketSlug) if err != nil { return err } if mf == nil { log.Warn(fmt.Sprintf("mirror file (path=%+v bucketSlug=%+v) does not exist", path, bucketSlug)) return nil } // do not delete the instance because it might be shared mf.Backup = false mf.BackupInProgress = false if _, err = s.model.UpdateMirrorFile(ctx, mf); err != nil { return err } return nil } func (s *synchronizer) addCurrentUserAsFileOwner(ctx context.Context, bucket, path string) error { bucketModel, err := s.model.FindBucket(ctx, bucket) if err != nil { return err } roles := make(map[string]buckets.Role) pk, err := s.kc.GetStoredPublicKey() if err != nil { return err } tpk := thread.NewLibp2pPubKey(pk) roles[tpk.String()] = buckets.Admin mirror, err := s.getMirrorBucket(ctx, bucket) if err != nil { return err } bucketsClient := mirror.GetClient() bucketCtx, _, err := s.getBucketCtx(ctx, bucketModel.RemoteDbID, bucketModel.RemoteBucketSlug, true, bucketModel.EncryptionKey) if err != nil { return err } return bucketsClient.PushPathAccessRoles(bucketCtx, mirror.GetData().Key, path, roles) } // Creates a mirror bucket. 
func (s *synchronizer) createMirrorBucket(ctx context.Context, slug string, enckey []byte) (*model.MirrorBucketSchema, error) { newSlug := slug + "_mirror" log.Debug("Creating a new mirror bucket with slug " + newSlug) dbID, err := s.createMirrorThread(ctx, newSlug) if err != nil { return nil, err } hubCtx, _, err := s.getBucketCtx(ctx, utils.CastDbIDToString(*dbID), newSlug, true, enckey) if err != nil { return nil, err } existingBuckets, err := s.hubBuckets.List(hubCtx) if err != nil { return nil, err } var root *api_buckets_pb.Root for _, b := range existingBuckets.Roots { if b.Name == newSlug { log.Debug("Mirror bucket with slug " + newSlug + " already exists") root = b break } } if root == nil { createResp, err := s.hubBuckets.Create(hubCtx, bucketsClient.WithName(newSlug)) if err != nil { return nil, err } root = createResp.Root } return &model.MirrorBucketSchema{ RemoteDbID: utils.CastDbIDToString(*dbID), RemoteBucketKey: root.Key, RemoteBucketSlug: newSlug, HubAddr: s.cfg.GetString(config.TextileHubTarget, ""), }, nil } // Creates a remote hub thread for the mirror bucket func (s *synchronizer) createMirrorThread(ctx context.Context, slug string) (*thread.ID, error) { log.Debug("createMirrorThread: Generating a new threadID ...") var err error ctx, err = s.hubAuth.GetHubContext(ctx) if err != nil { return nil, err } dbID, err := utils.NewDeterministicThreadID(s.kc, utils.MirrorBucketVariantGen(slug)) if err != nil { return nil, err } managedKey, err := s.kc.GetManagedThreadKey(mirrorThreadKeyName + "_" + slug) if err != nil { log.Error("error getting managed thread key", err) return nil, err } // If dbID is not found, GetDBInfo returns "thread not found" error info, err := s.hubThreads.GetDBInfo(ctx, dbID) if err == nil { log.Debug("createMirrorThread: Db already exists with name " + info.Name) return &dbID, nil } log.Debug("createMirrorThread: Creating Thread DB for bucket at db " + dbID.String()) if err := s.hubThreads.NewDB(ctx, dbID, 
db.WithNewManagedThreadKey(managedKey)); err != nil { return nil, err } log.Debug("createMirrorThread: Thread DB Created") return &dbID, nil } ================================================ FILE: core/textile/sync/pinning.go ================================================ package sync import ( "context" "io" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/log" ) func (s *synchronizer) uploadFileToRemote(ctx context.Context, bucket, path string) error { mirrorBucket, err := s.getMirrorBucket(ctx, bucket) if err != nil { return err } localBucket, err := s.getBucket(ctx, bucket) if err != nil { return err } return s.uploadFileToBucket(ctx, localBucket, mirrorBucket, path) } func (s *synchronizer) uploadFileToBucket(ctx context.Context, sourceBucket, targetBucket bucket.BucketInterface, path string) error { pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() errc := make(chan error, 1) // go routine for piping go func() { defer close(errc) defer pipeWriter.Close() if err := sourceBucket.GetFile(ctx, path, pipeWriter); err != nil { errc <- err return } errc <- nil }() if _, _, err := targetBucket.UploadFile(ctx, path, pipeReader); err != nil { return err } if err := <-errc; err != nil { return err } if err := s.addCurrentUserAsFileOwner(ctx, targetBucket.Slug(), path); err != nil { // not returning since we dont want to halt the whole process // also acl will still work since they are the owner // of the thread so this is more for showing members view log.Error("Unable to push path access roles for owner", err) } return nil } func (s *synchronizer) downloadFile(ctx context.Context, sourceBucket, targetBucket bucket.BucketInterface, path string) error { pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() errc := make(chan error, 1) // go routine for piping go func() { defer close(errc) defer pipeWriter.Close() if err := sourceBucket.GetFile(ctx, path, 
pipeWriter); err != nil { errc <- err return } errc <- nil }() if _, _, err := targetBucket.DownloadFile(ctx, path, pipeReader); err != nil { return err } if err := <-errc; err != nil { return err } if err := s.addCurrentUserAsFileOwner(ctx, targetBucket.Slug(), path); err != nil { // not returning since we dont want to halt the whole process // also acl will still work since they are the owner // of the thread so this is more for showing members view log.Error("Unable to push path access roles for owner", err) } return nil } // backup all files in a bucket func (s *synchronizer) uploadAllFilesInPath(ctx context.Context, bucket, path string) error { localBucket, err := s.getBucket(ctx, bucket) if err != nil { return err } dir, err := localBucket.ListDirectory(ctx, path) if err != nil { return err } for _, item := range dir.Item.Items { if utils.IsMetaFileName(item.Name) { continue } if item.IsDir { err := s.uploadAllFilesInPath(ctx, bucket, item.Path) if err != nil { return err } continue } // If the current item is a file, we add it to the queue so that it both gets pinned and synced s.NotifyItemAdded(bucket, item.Path) } return nil } func (s *synchronizer) deleteFileFromRemote(ctx context.Context, bucket, path string) (err error) { mirrorBucket, err := s.getMirrorBucket(ctx, bucket) if err != nil { return err } _, err = mirrorBucket.DeleteDirOrFile(ctx, path) if err != nil { return err } return nil } func (s *synchronizer) deleteAllFilesInPath(ctx context.Context, bucket, path string) error { localBucket, err := s.getBucket(ctx, bucket) if err != nil { return err } dir, err := localBucket.ListDirectory(ctx, path) if err != nil { return err } for _, item := range dir.Item.Items { if utils.IsMetaFileName(item.Name) { continue } if item.IsDir { err := s.deleteAllFilesInPath(ctx, bucket, item.Path) if err != nil { return err } continue } // If the current item is a file, we add it to the queue so that it both gets pinned and synced s.NotifyItemRemoved(bucket, 
item.Path) } return nil } ================================================ FILE: core/textile/sync/queue.go ================================================ package sync import ( "container/list" "encoding/json" "fmt" ) const QueueStoreKey = "TextileSyncTaskQueue" type marshalledQueue struct { QueueAsSlice []Task `json:"queueAsSlice"` FileQueueAsSlice []Task `json:"fileQueueAsSlice"` } func (s *synchronizer) enqueueTask(task *Task, queue *list.List) { if s.isTaskEnqueued(task) == false { queue.PushBack(task) s.queueHashMap[task.ID] = task } } func (s *synchronizer) enqueueTaskAtFront(task *Task, queue *list.List) { if s.isTaskEnqueued(task) == false { queue.PushFront(task) s.queueHashMap[task.ID] = task } } func (s *synchronizer) dequeueTask(queue *list.List) *Task { queueItem := queue.Front() s.taskQueue.Remove(queueItem) task := queueItem.Value.(*Task) delete(s.queueHashMap, task.ID) return task } func (s *synchronizer) storeQueue() error { // Store main queue queueAsSlice := []Task{} currEl := s.taskQueue.Front() for currEl != nil { queueAsSlice = append(queueAsSlice, *currEl.Value.(*Task)) currEl = currEl.Next() } // Store file pinning queue fileQueueAsSlice := []Task{} currEl = s.filePinningQueue.Front() for currEl != nil { fileQueueAsSlice = append(fileQueueAsSlice, *currEl.Value.(*Task)) currEl = currEl.Next() } objToMarshal := &marshalledQueue{ QueueAsSlice: queueAsSlice, FileQueueAsSlice: fileQueueAsSlice, } marshalled, err := json.Marshal(objToMarshal) if err != nil { return err } err = s.st.Set([]byte(QueueStoreKey), marshalled) if err != nil { return err } return nil } func (s *synchronizer) restoreQueue() error { queueMutex1 := s.queueMutexMap[s.taskQueue] queueMutex2 := s.queueMutexMap[s.filePinningQueue] queueMutex1.Lock() queueMutex2.Lock() defer queueMutex1.Unlock() defer queueMutex2.Unlock() data, err := s.st.Get([]byte(QueueStoreKey)) if err != nil { return err } queue := &marshalledQueue{} err = json.Unmarshal(data, queue) if err != nil { return 
err } for _, el := range queue.QueueAsSlice { s.enqueueTask(&el, s.taskQueue) } for _, el := range queue.FileQueueAsSlice { s.enqueueTask(&el, s.filePinningQueue) } return nil } func (s *synchronizer) isTaskEnqueued(task *Task) bool { existingTask := s.queueHashMap[task.ID] if existingTask == nil { return false } isPending := existingTask.State == taskQueued || existingTask.State == taskPending if isPending { return true } return false } func (s *synchronizer) queueString(queue *list.List) string { queueName := "buckets" if queue == s.filePinningQueue { queueName = "file pinning" } failed, queued, pending := 0, 0, 0 for curr := queue.Front(); curr != nil; curr = curr.Next() { task := curr.Value.(*Task) switch task.State { case taskPending: pending++ case taskFailed: failed++ case taskQueued: queued++ } } return fmt.Sprintf("Textile sync [%s]: Total: %d, Queued: %d, Pending: %d, Failed: %d", queueName, queue.Len(), queued, pending, failed) } ================================================ FILE: core/textile/sync/restore.go ================================================ package sync import ( "context" "github.com/FleekHQ/space-daemon/core/events" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/utils" "github.com/FleekHQ/space-daemon/log" api_buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" ) // return the targetBucket if path is newer there, srcBucket otherwise func (s *synchronizer) newerBucketPath(ctx context.Context, srcBucket, targetBucket bucket.BucketInterface, path string) (bucket.BucketInterface, error) { targetUpdatedAt, err := targetBucket.UpdatedAt(ctx, path) if err != nil { return nil, err } srcUpdatedAt, err := srcBucket.UpdatedAt(ctx, path) if err != nil { // Path might not exist in src bucket return targetBucket, nil } if srcUpdatedAt >= targetUpdatedAt { return srcBucket, nil } return targetBucket, nil } // restore bucket by downloading files to the local from the mirror bucket func (s 
*synchronizer) restoreBucket(ctx context.Context, bucketSlug string) error { localBucket, err := s.getBucket(ctx, bucketSlug) if err != nil { log.Error("Error in getBucket", err) return err } mirrorBucket, err := s.getMirrorBucket(ctx, bucketSlug) if err != nil { log.Error("Error in getMirrorBucket", err) return err } iterator := func(c context.Context, b *bucket.Bucket, itemPath string) error { exists, _ := localBucket.FileExists(c, itemPath) if exists { newerBucket, err := s.newerBucketPath(c, localBucket, mirrorBucket, itemPath) if err != nil { return err } if newerBucket == localBucket { // do not overwrite: mirror is not newer return nil } } bucketModel, err := s.model.FindBucket(ctx, bucketSlug) if err != nil { return err } item, err := mirrorBucket.ListDirectory(ctx, itemPath) if s.eventNotifier != nil && err == nil { info := utils.MapDirEntryToFileInfo(api_buckets_pb.ListPathResponse(*item), itemPath) info.BackedUp = true info.LocallyAvailable = exists info.RestoreInProgress = true s.eventNotifier.SendFileEvent(events.NewFileEvent(info, events.FileRestoring, bucketSlug, bucketModel.DbID)) } s.NotifyFileRestore(bucketSlug, itemPath) return nil } if _, err = mirrorBucket.Each(ctx, "", iterator, true); err != nil { return err } return nil } ================================================ FILE: core/textile/sync/sync.go ================================================ package sync import ( "context" "github.com/FleekHQ/space-daemon/core/events" ) type EventNotifier interface { SendFileEvent(event events.FileEvent) } type Synchronizer interface { NotifyItemAdded(bucket, path string) NotifyItemRemoved(bucket, path string) NotifyBucketCreated(bucket string, enckey []byte) NotifyBucketBackupOn(bucket string) NotifyBucketBackupOff(bucket string) NotifyBucketRestore(bucket string) NotifyFileRestore(bucket, path string) NotifyBucketStartup(bucket string) NotifyIndexItemAdded(bucket, path, dbId string) Start(ctx context.Context) RestoreQueue() error Shutdown() 
String() string AttachNotifier(EventNotifier) } ================================================ FILE: core/textile/sync/sync_test.go ================================================ package sync_test import ( "context" "errors" sy "sync" "testing" "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/sync" "github.com/FleekHQ/space-daemon/mocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/textileio/go-threads/core/thread" ) var ( mockStore *mocks.Store mockClient *mocks.Client mockModel *mocks.Model mockKeychain *mocks.Keychain mockHubAuth *mocks.HubAuth mockCfg *mocks.Config mockRemoteFile = &textile.GetBucketForRemoteFileInput{ Bucket: "", DbID: "", Path: "", } ) func initSync(t *testing.T) sync.Synchronizer { mockStore = new(mocks.Store) mockModel = new(mocks.Model) mockKeychain = new(mocks.Keychain) mockHubAuth = new(mocks.HubAuth) mockCfg = new(mocks.Config) mockClient = new(mocks.Client) mockStore.On("IsOpen").Return(true) getLocalBucketFn := func(ctx context.Context, slug string) (bucket.BucketInterface, error) { return mockClient.GetBucket(ctx, slug, nil) } getMirrorBucketFn := func(ctx context.Context, slug string) (bucket.BucketInterface, error) { return mockClient.GetBucket(ctx, slug, mockRemoteFile) } addListenerFn := func(ctx context.Context, slug string) error { return nil } getBucketCtxFn := func(ctx context.Context, sDbID string, bucketSlug string, ishub bool, enckey []byte) (context.Context, *thread.ID, error) { return ctx, nil, nil } s := sync.New(mockStore, mockModel, mockKeychain, mockHubAuth, nil, nil, nil, mockCfg, getMirrorBucketFn, getLocalBucketFn, getBucketCtxFn, addListenerFn) return s } var mutex = &sy.Mutex{} func TestSync_ProcessTask(t *testing.T) { mutex.Lock() defer mutex.Unlock() s := initSync(t) ctx := context.Background() s.NotifyItemAdded("Bucket", "path") // Makes the processAddItem and 
processPinFilefail right away mockModel.On("FindBucket", mock.Anything, mock.Anything).Return(nil, errors.New("some error")) // mockClient.On("GetBucket", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("some error")) mockStore.On("Set", mock.Anything, mock.Anything).Return(nil) s.Start(ctx) s.Shutdown() expectedState := "Textile sync [file pinning]: Total: 0, Queued: 0, Pending: 0, Failed: 0\nTextile sync [buckets]: Total: 1, Queued: 1, Pending: 0, Failed: 0\n" assert.Equal(t, expectedState, s.String()) mockModel.AssertExpectations(t) mockClient.AssertExpectations(t) } func TestSync_Restore(t *testing.T) { mutex.Lock() defer mutex.Unlock() s := initSync(t) ctx := context.Background() s.NotifyItemAdded("Bucket", "path") // Makes the processAddItem and processPinFilefail right away mockModel.On("FindBucket", mock.Anything, mock.Anything).Return(nil, errors.New("some error")) mockClient.On("GetBucket", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("some error")) mockStore.On("Set", []byte(sync.QueueStoreKey), mock.Anything).Return(nil) s.Start(ctx) s.Shutdown() ogMockStore := mockStore s2 := initSync(t) // Make Store.Get return the data set previously storeArgs := ogMockStore.Calls[0].Arguments bytes := storeArgs.Get(1) mockStore.On("Get", []byte(sync.QueueStoreKey)).Return(bytes, nil) err := s2.RestoreQueue() mockModel.On("FindBucket", mock.Anything, mock.Anything).Return(nil, errors.New("some error")) mockClient.On("GetBucket", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("some error")) mockStore.On("Set", []byte(sync.QueueStoreKey), mock.Anything).Return(nil) // Note we are not calling NotifyItemAdded therefore the state must be picked from the Restore func s2.Start(ctx) s2.Shutdown() assert.Nil(t, err) assert.Equal(t, s.String(), s2.String()) } ================================================ FILE: core/textile/sync/synchronizer.go ================================================ package sync import 
( "container/list" "context" "encoding/hex" "errors" "fmt" "sync" "time" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/store" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/core/textile/model" "github.com/FleekHQ/space-daemon/log" threadsClient "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" nc "github.com/textileio/go-threads/net/api/client" bucketsClient "github.com/textileio/textile/v2/api/bucketsd/client" ) type GetMirrorBucketFn func(ctx context.Context, slug string) (bucket.BucketInterface, error) type GetBucketFn func(ctx context.Context, slug string) (bucket.BucketInterface, error) type GetBucketCtxFn func(ctx context.Context, sDbID string, bucketSlug string, ishub bool, enckey []byte) (context.Context, *thread.ID, error) type AddBucketListenerFn func(ctx context.Context, bucketSlug string) error const maxParallelTasks = 16 type synchronizer struct { taskQueue *list.List filePinningQueue *list.List queueHashMap map[string]*Task st store.Store model model.Model syncNeeded chan (bool) shuttingDownMap map[*list.List]chan (bool) queueMutexMap map[*list.List]*sync.Mutex getMirrorBucket GetMirrorBucketFn getBucket GetBucketFn getBucketCtx GetBucketCtxFn addBucketListener AddBucketListenerFn kc keychain.Keychain hubAuth hub.HubAuth hubBuckets *bucketsClient.Client hubThreads *threadsClient.Client cfg config.Config netc *nc.Client queueWg *sync.WaitGroup eventNotifier EventNotifier isRunning bool } // Creates a new Synchronizer func New( st store.Store, model model.Model, kc keychain.Keychain, hubAuth hub.HubAuth, hb *bucketsClient.Client, ht *threadsClient.Client, netc *nc.Client, cfg config.Config, getMirrorBucket GetMirrorBucketFn, getBucket GetBucketFn, getBucketCtx GetBucketCtxFn, addBucketListenerFn AddBucketListenerFn, ) *synchronizer { taskQueue 
:= list.New() filePinningQueue := list.New() queueMutexMap := make(map[*list.List]*sync.Mutex) queueMutexMap[taskQueue] = &sync.Mutex{} queueMutexMap[filePinningQueue] = &sync.Mutex{} shuttingDownMap := make(map[*list.List]chan bool) shuttingDownMap[taskQueue] = make(chan bool) shuttingDownMap[filePinningQueue] = make(chan bool) queueWg := &sync.WaitGroup{} return &synchronizer{ taskQueue: taskQueue, filePinningQueue: filePinningQueue, queueHashMap: make(map[string]*Task), st: st, model: model, syncNeeded: make(chan bool), shuttingDownMap: shuttingDownMap, queueMutexMap: queueMutexMap, getMirrorBucket: getMirrorBucket, getBucket: getBucket, getBucketCtx: getBucketCtx, addBucketListener: addBucketListenerFn, kc: kc, hubAuth: hubAuth, hubBuckets: hb, hubThreads: ht, cfg: cfg, netc: netc, queueWg: queueWg, isRunning: false, } } // Notify Textile synchronizer that an add item operation needs to be synced func (s *synchronizer) NotifyItemAdded(bucket, path string) { t := newTask(addItemTask, []string{bucket, path}) s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } // Notify Textile synchronizer that a remove item operation needs to be synced func (s *synchronizer) NotifyItemRemoved(bucket, path string) { t := newTask(removeItemTask, []string{bucket, path}) s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) NotifyBucketCreated(bucket string, enckey []byte) { t := newTask(createBucketTask, []string{bucket, hex.EncodeToString(enckey)}) s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) NotifyBucketBackupOn(bucket string) { t := newTask(bucketBackupOnTask, []string{bucket}) t.Parallelizable = true s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) NotifyBucketBackupOff(bucket string) { t := newTask(bucketBackupOffTask, []string{bucket}) s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) NotifyBucketRestore(bucket string) { t := newTask(bucketRestoreTask, []string{bucket}) 
s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) NotifyFileRestore(bucket, path string) { t := newTask(restoreFileTask, []string{bucket, path}) s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) NotifyBucketStartup(bucket string) { s.NotifyBucketRestore(bucket) s.NotifyBucketBackupOn(bucket) // does nothing if !bucket.Backup s.notifySyncNeeded() } func (s *synchronizer) NotifyIndexItemAdded(bucket, path, dbId string) { t := newTask(addIndexItemTask, []string{bucket, path, dbId}) t.Parallelizable = true t.MaxRetries = 2 s.enqueueTask(t, s.taskQueue) s.notifySyncNeeded() } func (s *synchronizer) notifySyncNeeded() { if !s.isRunning { return } select { case s.syncNeeded <- true: default: } } // Starts the synchronizer, which will constantly be checking if there are syncing tasks pending func (s *synchronizer) Start(ctx context.Context) { s.queueWg.Add(2) s.isRunning = true // Sync loop go func() { s.startSyncLoop(ctx, s.taskQueue) s.queueWg.Done() }() go func() { s.startSyncLoop(ctx, s.filePinningQueue) s.queueWg.Done() }() } // Restores a previously initialized queue func (s *synchronizer) RestoreQueue() error { if err := s.restoreQueue(); err != nil { return err } return nil } func (s *synchronizer) startSyncLoop(ctx context.Context, queue *list.List) { queueMutex := s.queueMutexMap[queue] // Initial sync queueMutex.Lock() s.sync(ctx, queue) queueMutex.Unlock() Loop: for { queueMutex.Lock() timeAfterNextSync := 30 * time.Second select { case <-time.After(timeAfterNextSync): s.sync(ctx, queue) case <-s.syncNeeded: s.sync(ctx, queue) // Break execution in case of shutdown case <-ctx.Done(): queueMutex.Unlock() s.Shutdown() break Loop case <-s.shuttingDownMap[queue]: queueMutex.Unlock() break Loop } queueMutex.Unlock() } } func (s *synchronizer) Shutdown() { s.shuttingDownMap[s.taskQueue] <- true s.shuttingDownMap[s.filePinningQueue] <- true s.queueWg.Wait() if err := s.storeQueue(); err != nil { log.Error("Error 
while storing Textile task queue state", err) } s.isRunning = false close(s.shuttingDownMap[s.taskQueue]) close(s.shuttingDownMap[s.filePinningQueue]) close(s.syncNeeded) } func (s *synchronizer) String() string { queues := []*list.List{s.filePinningQueue, s.taskQueue} res := "" for _, q := range queues { res = res + s.queueString(q) + "\n" } return res } var errMaxRetriesSurpassed = errors.New("max retries surpassed") func (s *synchronizer) executeTask(ctx context.Context, t *Task) error { var err error switch t.Type { case addItemTask: err = s.processAddItem(ctx, t) case removeItemTask: err = s.processRemoveItem(ctx, t) case pinFileTask: err = s.processPinFile(ctx, t) case unpinFileTask: err = s.processUnpinFile(ctx, t) case createBucketTask: err = s.processCreateBucket(ctx, t) case bucketBackupOnTask: err = s.processBucketBackupOn(ctx, t) case bucketBackupOffTask: err = s.processBucketBackupOff(ctx, t) case bucketRestoreTask: err = s.processBucketRestoreTask(ctx, t) case restoreFileTask: err = s.processRestoreFile(ctx, t) case addIndexItemTask: err = s.processAddIndexItemTask(ctx, t) case removeIndexItemTask: err = s.processRemoveIndexItemTask(ctx, t) default: log.Warn("Unexpected action on Textile sync, executeTask") } if err != nil { t.State = taskFailed t.Retries++ // Remove from queue if it surpassed the max amount of retries if t.MaxRetries != -1 && t.Retries > t.MaxRetries { t.State = taskDequeued return errMaxRetriesSurpassed } // Retry task t.State = taskQueued } else { t.State = taskSucceeded } return err } func (s *synchronizer) sync(ctx context.Context, queue *list.List) error { queueName := "buckets" if queue == s.filePinningQueue { queueName = "file pinning" } log.Debug(fmt.Sprintf("Textile sync [%s]: Sync start", queueName)) log.Debug(s.queueString(queue)) parallelTaskCount := 0 ptWg := sync.WaitGroup{} for curr := queue.Front(); curr != nil; curr = curr.Next() { task := curr.Value.(*Task) if task.State != taskQueued { // If task is already in 
process or finished, skip continue } log.Debug(fmt.Sprintf("Textile sync [%s]: Processing task %s", queueName, task.Type)) task.State = taskPending handleExecResult := func(err error) { if err == nil { // Task completed successfully log.Debug(fmt.Sprintf("Textile sync [%s]: task completed succesfully", queueName)) } else { log.Error(fmt.Sprintf("Textile sync [%s]: task failed", queueName), err) } } if task.Parallelizable && parallelTaskCount < maxParallelTasks { parallelTaskCount++ ptWg.Add(1) go func() { err := s.executeTask(ctx, task) handleExecResult(err) parallelTaskCount-- ptWg.Done() }() } else { err := s.executeTask(ctx, task) handleExecResult(err) if err != nil { // Break from the loop (avoid executing next tasks) return err } } } ptWg.Wait() // Remove successful and dequeued tasks from queue curr := queue.Front() for curr != nil { task := curr.Value.(*Task) next := curr.Next() switch task.State { case taskDequeued: queue.Remove(curr) case taskSucceeded: queue.Remove(curr) default: } curr = next } log.Debug(fmt.Sprintf("Textile sync [%s]: Sync end", queueName)) return nil } func (s *synchronizer) AttachNotifier(notif EventNotifier) { s.eventNotifier = notif } ================================================ FILE: core/textile/sync/task-executors.go ================================================ package sync import ( "context" "encoding/hex" "errors" "path" "golang.org/x/sync/errgroup" "github.com/FleekHQ/space-daemon/core/textile/model" api_buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/FleekHQ/space-daemon/log" "github.com/FleekHQ/space-daemon/core/events" "github.com/FleekHQ/space-daemon/core/textile/utils" ) func checkTaskType(t *Task, tp taskType) error { if tp != t.Type { return errors.New("expected different task type at Textile synchronizer") } return nil } func (s *synchronizer) processAddItem(ctx context.Context, task *Task) error { if err := checkTaskType(task, addItemTask); err != nil { return err } bucket := 
task.Args[0] path := task.Args[1] bucketModel, err := s.model.FindBucket(ctx, bucket) if err != nil { return err } mirrorFile, err := s.model.FindMirrorFileByPathAndBucketSlug(ctx, path, bucket) if bucketModel.Backup && mirrorFile == nil { if err := s.setMirrorFileBackup(ctx, path, bucket, true); err != nil { return err } } localBucket, err := s.getBucket(ctx, bucket) if err != nil { return err } item, err := localBucket.ListDirectory(ctx, path) if s.eventNotifier != nil && err == nil { info := utils.MapDirEntryToFileInfo(api_buckets_pb.ListPathResponse(*item), path) info.LocallyAvailable = true info.BackupInProgress = true s.eventNotifier.SendFileEvent(events.NewFileEvent(info, events.FileBackupInProgress, bucket, bucketModel.DbID)) } pft := newTask(pinFileTask, []string{bucket, path}) s.enqueueTask(pft, s.filePinningQueue) s.notifySyncNeeded() s.NotifyIndexItemAdded(bucket, path, "") return nil } func (s *synchronizer) processRemoveItem(ctx context.Context, task *Task) error { if err := checkTaskType(task, removeItemTask); err != nil { return err } bucket := task.Args[0] path := task.Args[1] uft := newTask(unpinFileTask, []string{bucket, path}) s.enqueueTask(uft, s.filePinningQueue) rIndexTask := newTask(removeIndexItemTask, []string{bucket, path, ""}) s.enqueueTask(rIndexTask, s.taskQueue) s.notifySyncNeeded() if err := s.unsetMirrorFileBackup(ctx, path, bucket); err != nil { return err } err := s.deleteFileFromRemote(ctx, bucket, path) return err } func (s *synchronizer) processPinFile(ctx context.Context, task *Task) error { if err := checkTaskType(task, pinFileTask); err != nil { return err } bucket := task.Args[0] path := task.Args[1] if err := s.uploadFileToRemote(ctx, bucket, path); err != nil { return err } s.setMirrorFileBackup(ctx, path, bucket, false) localBucket, err := s.getBucket(ctx, bucket) if err != nil { return err } bucketModel, err := s.model.FindBucket(ctx, bucket) if err != nil { return err } item, err := localBucket.ListDirectory(ctx, path) 
// (continuation of processPinFile: emit a "backup ready" event for the freshly pinned file)
	if s.eventNotifier != nil && err == nil {
		info := utils.MapDirEntryToFileInfo(api_buckets_pb.ListPathResponse(*item), path)
		info.LocallyAvailable = true
		info.BackedUp = true
		s.eventNotifier.SendFileEvent(events.NewFileEvent(info, events.FileBackupReady, bucket, bucketModel.DbID))
	}

	return nil
}

// processUnpinFile removes a file from the remote mirror bucket.
func (s *synchronizer) processUnpinFile(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, unpinFileTask); err != nil {
		return err
	}

	bucket := task.Args[0]
	path := task.Args[1]

	err := s.deleteFileFromRemote(ctx, bucket, path)

	return err
}

// processCreateBucket creates the hub mirror for a local bucket, records it
// in the model and starts listening for changes on the bucket.
func (s *synchronizer) processCreateBucket(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, createBucketTask); err != nil {
		return err
	}

	bucket := task.Args[0]
	enckey, err := hex.DecodeString(task.Args[1])
	if err != nil {
		return err
	}

	mirror, err := s.createMirrorBucket(ctx, bucket, enckey)
	if err != nil {
		// Fix: this error used to be silently dropped whenever mirror was
		// nil, leaving the bucket without a mirror and without a retry.
		return err
	}

	if mirror != nil {
		_, err = s.model.CreateMirrorBucket(ctx, bucket, mirror)
		if err != nil {
			return err
		}
	}

	if err := s.addBucketListener(ctx, bucket); err != nil {
		return err
	}

	return nil
}

// processBucketBackupOn replicates the bucket's thread to the hub and
// uploads all of its files.
func (s *synchronizer) processBucketBackupOn(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, bucketBackupOnTask); err != nil {
		return err
	}

	bucket := task.Args[0]

	bucketModel, err := s.model.FindBucket(ctx, bucket)
	if err != nil {
		return err
	}

	// Backup might have been toggled off again while this task was queued
	// (benign race): nothing to do in that case.
	if !bucketModel.Backup {
		return nil
	}

	dbID, err := utils.ParseDbIDFromString(bucketModel.DbID)
	if err != nil {
		return err
	}

	if err := s.replicateThreadToHub(ctx, dbID); err != nil {
		return err
	}

	return s.uploadAllFilesInPath(ctx, bucket, "")
}

// processBucketBackupOff undoes processBucketBackupOn: dereplicates the
// thread and deletes the bucket's files from the hub.
func (s *synchronizer) processBucketBackupOff(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, bucketBackupOffTask); err != nil {
		return err
	}

	bucket := task.Args[0]

	bucketModel, err := s.model.FindBucket(ctx, bucket)
	if err != nil {
		return err
	}

	// Backup might have been re-enabled while this task was queued (benign
	// race): keep the remote copy in that case.
	if bucketModel.Backup {
		return nil
	}

	dbID, err := utils.ParseDbIDFromString(bucketModel.DbID)
	if err != nil {
		return err
	}

	if err := s.dereplicateThreadFromHub(ctx, dbID); err != nil {
		return err
	}

	return s.deleteAllFilesInPath(ctx, bucket, "")
}

// processBucketRestoreTask restores a bucket from its mirror. If the bucket
// was never replicated, it re-enqueues a createBucketTask at the front of
// the queue and fails so the restore is retried afterwards.
func (s *synchronizer) processBucketRestoreTask(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, bucketRestoreTask); err != nil {
		return err
	}

	bucket := task.Args[0]

	bucketSchema, err := s.model.FindBucket(ctx, bucket)
	if err != nil {
		return err
	}

	if bucketSchema.RemoteDbID == "" {
		t := newTask(createBucketTask, []string{bucket, hex.EncodeToString(bucketSchema.EncryptionKey)})
		s.enqueueTaskAtFront(t, s.taskQueue)

		return errors.New("trying to restore a bucket that has not been replicated. Recreating mirror bucket.")
	}

	if err := s.restoreBucket(ctx, bucket); err != nil {
		return err
	}

	return nil
}

// processRestoreFile copies a file from the mirror bucket into the local
// bucket if the mirror's copy is newer, then emits a FileRestored event.
func (s *synchronizer) processRestoreFile(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, restoreFileTask); err != nil {
		return err
	}

	bucket := task.Args[0]
	path := task.Args[1]

	localBucket, err := s.getBucket(ctx, bucket)
	if err != nil {
		return err
	}

	mirrorBucket, err := s.getMirrorBucket(ctx, bucket)
	if err != nil {
		return err
	}

	newerBucket, err := s.newerBucketPath(ctx, localBucket, mirrorBucket, path)
	if err != nil {
		return err
	}

	if newerBucket == localBucket {
		// do not overwrite: mirror is not newer
		return nil
	}

	// TODO: use timestamp or CID for check
	if err = s.downloadFile(ctx, mirrorBucket, localBucket, path); err != nil {
		return err
	}

	bucketModel, err := s.model.FindBucket(ctx, bucket)
	if err != nil {
		return err
	}

	item, err := mirrorBucket.ListDirectory(ctx, path)
	if s.eventNotifier != nil && err == nil {
		info := utils.MapDirEntryToFileInfo(api_buckets_pb.ListPathResponse(*item), path)
		info.LocallyAvailable = true
		info.BackedUp = true
		s.eventNotifier.SendFileEvent(events.NewFileEvent(info, events.FileRestored, bucket, bucketModel.DbID))
	}

	return err
}

// processAddIndexItemTask adds an item (and, for local files, its parent
// directory) to the search index.
func (s *synchronizer) processAddIndexItemTask(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, addIndexItemTask); err != nil {
// (continuation of processAddIndexItemTask: task-type guard)
		return err
	}

	bucket := task.Args[0]
	itemPath := task.Args[1]
	dbId := task.Args[2]

	if dbId != "" {
		// handle shared file instances
		file, err := s.model.FindReceivedFile(ctx, dbId, itemPath, bucket)
		if err != nil {
			log.Error(
				"ProcessIndexItemTask: unable to find shared file",
				err,
				"dbId:"+dbId,
				"itemPath:"+itemPath,
				"bucket:"+bucket,
			)
			return err
		}

		_, err = s.model.UpdateSearchIndexRecord(ctx, file.FileName, file.Path, model.FileItem, file.Bucket, dbId)
		if err != nil {
			log.Error(
				"ProcessIndexItemTask: failed to index shared file",
				err,
			)
			return err
		}
	} else {
		// Local file: index the file and its parent directory concurrently;
		// either failure fails the task (and triggers a retry upstream).
		erg, ctx := errgroup.WithContext(ctx)

		// index file
		erg.Go(func() error {
			fileName := path.Base(itemPath)
			_, err := s.model.UpdateSearchIndexRecord(ctx, fileName, itemPath, model.FileItem, bucket, "")
			if err != nil {
				log.Error(
					"ProcessIndexItemTask: failed to index file",
					err,
				)
				return err
			}
			return nil
		})

		// index parent dir
		erg.Go(func() error {
			parentPath := path.Dir(itemPath)
			// the bucket root itself is not indexed
			if parentPath == "/" {
				return nil
			}

			dirName := path.Base(parentPath)
			_, err := s.model.UpdateSearchIndexRecord(ctx, dirName, parentPath, model.DirectoryItem, bucket, "")
			if err != nil {
				log.Error(
					"ProcessIndexItemTask: failed to index directory",
					err,
				)
				return err
			}
			return nil
		})

		err := erg.Wait()
		if err != nil {
			return err
		}
	}

	return nil
}

// processRemoveIndexItemTask removes an item from the search index.
func (s *synchronizer) processRemoveIndexItemTask(ctx context.Context, task *Task) error {
	if err := checkTaskType(task, removeIndexItemTask); err != nil {
		return err
	}

	bucket := task.Args[0]
	itemPath := task.Args[1]
	dbId := task.Args[2]

	fileName := path.Base(itemPath)

	return s.model.DeleteSearchIndexRecord(ctx, fileName, itemPath, bucket, dbId)
}

================================================
FILE: core/textile/sync/task.go
================================================
package sync

import (
	"strings"
)

// taskType identifies what kind of sync work a Task represents.
type taskType string

const (
	addItemTask      taskType = "ADD_ITEM"
	removeItemTask   taskType = "REMOVE_ITEM"
	createBucketTask taskType = "CREATE_BUCKET"
	pinFileTask      taskType = "PIN_FILE"
	unpinFileTask       taskType = "UNPIN_FILE"
	bucketBackupOnTask  taskType = "TOGGLE_BACKUP_ON"
	bucketBackupOffTask taskType = "TOGGLE_BACKUP_OFF"
	bucketRestoreTask   taskType = "BUCKET_RESTORE"
	restoreFileTask     taskType = "RESTORE_FILE"
	addIndexItemTask    taskType = "ADD_INDEX_ITEM"
	removeIndexItemTask taskType = "REMOVE_INDEX_ITEM"
)

// taskState tracks a Task through its lifecycle in the sync queues.
type taskState string

const (
	taskQueued    taskState = "QUEUED"
	taskPending   taskState = "PENDING"
	taskSucceeded taskState = "SUCCESS"
	taskFailed    taskState = "FAILED"
	taskDequeued  taskState = "DEQUEUED"
)

// Task is a unit of sync work persisted in the synchronizer's queues.
type Task struct {
	ID             string    `json:"id"`
	State          taskState `json:"state"`
	Type           taskType  `json:"type"`
	Args           []string  `json:"args"`
	Parallelizable bool      `json:"parallelizable"`
	// Set to -1 for infinite retries
	MaxRetries int `json:"maxRetries"`
	Retries    int `json:"retries"`
}

// newTask builds a queued, sequential, infinitely-retried Task. The ID is
// derived from the type plus args, so identical operations share an ID.
func newTask(t taskType, args []string) *Task {
	id := string(t) + "_" + strings.Join(args, "_")

	return &Task{
		ID:             id,
		State:          taskQueued,
		Type:           t,
		Args:           args,
		Parallelizable: false,
		MaxRetries:     -1,
		Retries:        0,
	}
}

================================================
FILE: core/textile/sync/threads.go
================================================
package sync

import (
	"context"
	"fmt"

	"github.com/FleekHQ/space-daemon/config"
	"github.com/textileio/go-threads/core/thread"
	"github.com/textileio/textile/v2/cmd"
)

// replicate a local thread on the hub
func (s *synchronizer) replicateThreadToHub(ctx context.Context, dbID *thread.ID) error {
	hubma := s.cfg.GetString(config.TextileHubMa, "")
	if hubma == "" {
		return fmt.Errorf("no textile hub set")
	}

	_, err := s.netc.AddReplicator(ctx, *dbID, cmd.AddrFromStr(hubma))
	if err != nil {
		return err
	}

	return nil
}

// dereplicate a local thread from the hub
func (s *synchronizer) dereplicateThreadFromHub(ctx context.Context, dbID *thread.ID) error {
	// TODO
	return nil
}

================================================
FILE: core/textile/textile.go
================================================
package textile

import (
	"context"
	"io"
"github.com/ipfs/go-cid" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/textile/bucket" "github.com/FleekHQ/space-daemon/core/textile/model" "github.com/FleekHQ/space-daemon/core/textile/sync" "github.com/libp2p/go-libp2p-core/crypto" "github.com/textileio/go-threads/db" buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/api/usersd/client" threadsClient "github.com/textileio/go-threads/api/client" ) const ( hubTarget = "127.0.0.1:3006" threadsTarget = "127.0.0.1:3006" defaultPersonalBucketSlug = "personal" defaultCacheBucketSlug = "personal_cache" defaultPersonalMirrorBucketSlug = "personal_mirror" defaultPublicShareBucketSlug = "personal_public" ) type BucketRoot buckets_pb.Root type Bucket interface { bucket.BucketInterface } type Client interface { IsRunning() bool IsInitialized() bool IsHealthy() bool GetDefaultBucket(ctx context.Context) (Bucket, error) GetBucket(ctx context.Context, slug string, remoteFile *GetBucketForRemoteFileInput) (Bucket, error) GetThreadsConnection() (*threadsClient.Client, error) GetModel() model.Model ListBuckets(ctx context.Context) ([]Bucket, error) ShareBucket(ctx context.Context, bucketSlug string) (*db.Info, error) JoinBucket(ctx context.Context, slug string, ti *domain.ThreadInfo) (bool, error) CreateBucket(ctx context.Context, bucketSlug string) (Bucket, error) ToggleBucketBackup(ctx context.Context, bucketSlug string, bucketBackup bool) (bool, error) BucketBackupRestore(ctx context.Context, bucketSlug string) error SendMessage(ctx context.Context, recipient crypto.PubKey, body []byte) (*client.Message, error) Shutdown() error WaitForReady() chan bool WaitForHealthy() chan error WaitForInitialized() chan bool Start(ctx context.Context, cfg config.Config) error GetMailAsNotifications(ctx context.Context, seek string, limit int) ([]*domain.Notification, error) ManageShareFilesViaPublicKey( ctx 
context.Context, paths []domain.FullPath, pubkeys []crypto.PubKey, keys [][]byte, role domain.SharedFilesRoleAction, ) error AcceptSharedFilesInvitation(ctx context.Context, invitation domain.Invitation) (domain.Invitation, error) RejectSharedFilesInvitation(ctx context.Context, invitation domain.Invitation) (domain.Invitation, error) AcceptSharedFileLink( ctx context.Context, cidHash, password, filename, fileSize string, ) (*domain.SharedDirEntry, error) RemoveKeys(ctx context.Context) error AttachMailboxNotifier(notif GrpcMailboxNotifier) AttachSynchronizerNotifier(notif sync.EventNotifier) GetReceivedFiles(ctx context.Context, accepted bool, seek string, limit int) ([]*domain.SharedDirEntry, string, error) GetPublicReceivedFile(ctx context.Context, cidHash string, accepted bool) (*domain.SharedDirEntry, string, error) GetSentFiles(ctx context.Context, seek string, limit int) ([]*domain.SharedDirEntry, string, error) GetPathAccessRoles(ctx context.Context, b Bucket, path string) ([]domain.Member, error) GetPublicShareBucket(ctx context.Context) (Bucket, error) DownloadPublicItem(ctx context.Context, cid cid.Cid) (io.ReadCloser, error) GetFailedHealthchecks() int DeleteAccount(ctx context.Context) error Listen(ctx context.Context, dbID, threadName string) (<-chan threadsClient.ListenEvent, error) RestoreDB(ctx context.Context) error DisableSync() } type Buckd interface { Stop() error Start(ctx context.Context) error } type Listener interface { Listen(context.Context) error Close() } ================================================ FILE: core/textile/utils/utils.go ================================================ package utils import ( "context" "crypto/rand" "crypto/sha512" "encoding/base32" "encoding/binary" "encoding/hex" "errors" "path/filepath" "strconv" "strings" "time" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/store" 
"github.com/FleekHQ/space-daemon/core/textile/hub" "github.com/FleekHQ/space-daemon/log" crypto "github.com/libp2p/go-libp2p-crypto" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" nc "github.com/textileio/go-threads/net/api/client" bucketsproto "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/api/common" "github.com/textileio/textile/v2/cmd" "golang.org/x/crypto/pbkdf2" ) func CastDbIDToString(dbID thread.ID) string { bytes := dbID.Bytes() return base32.StdEncoding.EncodeToString(bytes) } func ParseDbIDFromString(dbID string) (*thread.ID, error) { bytes, err := base32.StdEncoding.DecodeString(dbID) if err != nil { return nil, err } id, err := thread.Cast(bytes) if err != nil { return nil, err } return &id, nil } type DeterministicThreadVariant string const ( MetathreadThreadVariant DeterministicThreadVariant = "metathread" MirrorBucketVariant DeterministicThreadVariant = "mirror_bucket" ) func MirrorBucketVariantGen(mirrorBucketSlug string) DeterministicThreadVariant { return DeterministicThreadVariant(string(MirrorBucketVariant) + ":" + mirrorBucketSlug) } func NewDeterministicThreadID(kc keychain.Keychain, threadVariant DeterministicThreadVariant) (thread.ID, error) { size := 32 variant := thread.Raw priv, _, err := kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return thread.ID([]byte{}), err } privInBytes, err := priv.Raw() if err != nil { return thread.ID([]byte{}), err } // Do a key derivation based on the private key, a constant nonce, and the thread variant num := pbkdf2.Key(privInBytes, []byte("threadID"+threadVariant), 256, size, sha512.New) if err != nil { return thread.ID([]byte{}), err } // The following code just concats the key derived from the private key (num) // with some constants such as the thread version and the textile thread variant numlen := len(num) // two 8 bytes (max) numbers plus num buf := make([]byte, 
2*binary.MaxVarintLen64+numlen) n := binary.PutUvarint(buf, thread.V1) n += binary.PutUvarint(buf[n:], uint64(variant)) cn := copy(buf[n:], num) if cn != numlen { panic("copy length is inconsistent") } return thread.ID(buf[:n+numlen]), nil } func getThreadName(userPubKey []byte, bucketSlug string) string { return hex.EncodeToString(userPubKey) + "-" + bucketSlug } // Readies a context to access a thread given its name and dbid func GetThreadContext(parentCtx context.Context, threadName string, dbID thread.ID, hub bool, kc keychain.Keychain, hubAuth hub.HubAuth, threadsClient *tc.Client) (context.Context, error) { var err error ctx := parentCtx // Some threads will be on the hub and some will be local, this flag lets you specify // where it is if hub { ctx, err = hubAuth.GetHubContext(ctx) if err != nil { return nil, err } } var publicKey crypto.PubKey var privKey crypto.PrivKey if privKey, publicKey, err = kc.GetStoredKeyPairInLibP2PFormat(); err != nil { return nil, err } var pubKeyInBytes []byte if pubKeyInBytes, err = publicKey.Bytes(); err != nil { return nil, err } if threadsClient != nil { tok, err := threadsClient.GetToken(ctx, thread.NewLibp2pIdentity(privKey)) if err != nil { return nil, err } ctx = thread.NewTokenContext(ctx, tok) } ctx = common.NewThreadNameContext(ctx, getThreadName(pubKeyInBytes, threadName)) ctx = common.NewThreadIDContext(ctx, dbID) return ctx, nil } // randBytes returns random bytes in a byte slice of size. func RandBytes(size int) ([]byte, error) { b := make([]byte, size) _, err := rand.Read(b) return b, err } var metaFileNames = map[string]bool{ ".textileseed": true, ".textile": true, ".DS_Store": true, ".Trashes": true, ".localized": true, } func IsMetaFileName(pathOrName string) bool { _, name := filepath.Split(pathOrName) return metaFileNames[name] } const threadIDStoreKey = "thread_id" // Returns the store key for a thread ID. It uses the keychain to obtain the public key, since the store key depends on it. 
func getDeterministicthreadStoreKey(kc keychain.Keychain, variant DeterministicThreadVariant) ([]byte, error) { pub, err := kc.GetStoredPublicKey() if err != nil { return nil, err } pubInBytes, err := pub.Raw() if err != nil { return nil, err } result := []byte(threadIDStoreKey + "_" + string(variant)) result = append(result, pubInBytes...) return result, nil } // Finds or creates a thread that's based on the user private key and the specified variant // Using the same private key, variant and thread name will always end up generating the same key func FindOrCreateDeterministicThread( ctx context.Context, variant DeterministicThreadVariant, threadName string, kc keychain.Keychain, st store.Store, threads *tc.Client, cfg config.Config, netc *nc.Client, hnetc *nc.Client, hubAuth hub.HubAuth, shouldForceRestore bool, dbCollectionConfigs []db.CollectionConfig, ) (*thread.ID, error) { storeKey, err := getDeterministicthreadStoreKey(kc, variant) if err != nil { return nil, err } pk, _, err := kc.GetStoredKeyPairInLibP2PFormat() if err != nil { return nil, err } if val, _ := st.Get(storeKey); val != nil { // Cast the stored dbID from bytes to thread.ID if dbID, err := thread.Cast(val); err != nil { return nil, err } else { return &dbID, nil } } // thread id does not exist yet // We need to create an ID that's derived deterministically from the user private key // The reason for this is that the user needs to be able to restore the exact ID when moving across devices. // The only consideration is that we must try to avoid dbID collisions with other users. 
dbID, err := NewDeterministicThreadID(kc, variant) if err != nil { return nil, err } dbIDInBytes := dbID.Bytes() managedKey, err := kc.GetManagedThreadKey(threadName) if err != nil { return nil, err } threadCtx, err := GetThreadContext(ctx, threadName, dbID, false, kc, nil, threads) if err != nil { return nil, err } hubmaStr := cfg.GetString(config.TextileHubMa, "") hubma := cmd.AddrFromStr(hubmaStr) hubmaWithThreadID := hubmaStr + "/thread/" + dbID.String() hubCtx, err := hubAuth.GetHubContext(ctx) if err != nil { return nil, err } _, err = hnetc.GetThread(hubCtx, dbID) replThreadExists := err == nil if !replThreadExists && shouldForceRestore { return nil, err } if replThreadExists { // Try to join remote db in case it was already replicated err = threads.NewDBFromAddr( threadCtx, cmd.AddrFromStr(hubmaWithThreadID), managedKey, db.WithNewManagedBackfillBlock(true), db.WithNewManagedThreadKey(managedKey), db.WithNewManagedName(threadName), db.WithNewManagedLogKey(pk), db.WithNewManagedCollections( dbCollectionConfigs..., ), ) if err == nil || err.Error() == "rpc error: code = Unknown desc = db already exists" || err.Error() == "rpc error: code = Unknown desc = log already exists" { return successfulThreadCreation(st, &dbID, dbIDInBytes, storeKey) } else if shouldForceRestore == true { log.Error("Textile threads require forced restore but there was a restoration issue", err) return nil, err } } err = threads.NewDB(threadCtx, dbID, db.WithNewManagedLogKey(pk), db.WithNewManagedThreadKey(managedKey), db.WithNewManagedName(threadName)) if err != nil && err.Error() != "rpc error: code = Unknown desc = db already exists" { return nil, err } if _, err := netc.AddReplicator(threadCtx, dbID, hubma); err == nil { return successfulThreadCreation(st, &dbID, dbIDInBytes, storeKey) } else { log.Error("error while replicating metathread", err) } return &dbID, nil } func successfulThreadCreation(st store.Store, dbID *thread.ID, dbIDInBytes, storeKey []byte) (*thread.ID, error) { 
// (continuation of successfulThreadCreation: persist the new thread ID)
	if err := st.Set(storeKey, dbIDInBytes); err != nil {
		newErr := errors.New("error while storing thread id: check your local space db accessibility")
		return nil, newErr
	}

	return dbID, nil
}

// MapDirEntryToFileInfo converts a Textile ListPathResponse into the
// domain-level FileInfo used across the daemon. Backup/restore flags start
// out false; callers flip them according to sync state.
func MapDirEntryToFileInfo(entry bucketsproto.ListPathResponse, itemPath string) domain.FileInfo {
	item := entry.Item

	info := domain.FileInfo{
		DirEntry: domain.DirEntry{
			Path:          itemPath,
			IsDir:         item.IsDir,
			Name:          item.Name,
			SizeInBytes:   strconv.FormatInt(item.Size, 10),
			FileExtension: strings.Replace(filepath.Ext(item.Name), ".", "", -1),
			// FIXME: real created at needed
			Created: time.Unix(0, item.Metadata.UpdatedAt).Format(time.RFC3339),
			Updated: time.Unix(0, item.Metadata.UpdatedAt).Format(time.RFC3339),
			Members: []domain.Member{},
		},
		IpfsHash:          item.Cid,
		BackedUp:          false,
		LocallyAvailable:  false,
		BackupInProgress:  false,
		RestoreInProgress: false,
	}

	return info
}

================================================
FILE: core/textile/utils/utils_test.go
================================================
package utils_test

import (
	"encoding/hex"
	"testing"

	"github.com/FleekHQ/space-daemon/core/textile/utils"
	"github.com/FleekHQ/space-daemon/mocks"
	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/stretchr/testify/assert"
)

var (
	mockStore    *mocks.Store
	mockKeychain *mocks.Keychain
	mockPubKey   crypto.PubKey
	mockPrivKey  crypto.PrivKey
)

// initMocks wires a fake store and keychain with a fixed ed25519 key pair so
// thread-ID derivation is deterministic in tests.
func initMocks(t *testing.T) {
	mockStore = new(mocks.Store)
	mockStore.On("IsOpen").Return(true)

	mockKeychain = new(mocks.Keychain)
	mockPubKeyHex := "67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9"
	mockPrivKeyHex := "dd55f8921f90fdf31c6ef9ad86bd90605602fd7d32dc8ea66ab72deb6a82821c67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a9"

	pubKeyBytes, _ := hex.DecodeString(mockPubKeyHex)
	privKeyBytes, _ := hex.DecodeString(mockPrivKeyHex)

	mockPubKey, _ = crypto.UnmarshalEd25519PublicKey(pubKeyBytes)
	mockPrivKey, _ = crypto.UnmarshalEd25519PrivateKey(privKeyBytes)
}

func TestUtils_NewDeterministicThreadID(t *testing.T) {
	initMocks(t)
mockKeychain.On( "GetStoredKeyPairInLibP2PFormat", ).Return(mockPrivKey, mockPubKey, nil) threadID, err := utils.NewDeterministicThreadID(mockKeychain, utils.MetathreadThreadVariant) assert.Nil(t, err) threadIDCopy, err := utils.NewDeterministicThreadID(mockKeychain, utils.MetathreadThreadVariant) assert.Nil(t, err) // Generate a thread ID from a different private key (changed the last char) mockPrivKeyHex := "dd55f8921f90fdf31c6ef9ad86bd90605602fd7d32dc8ea66ab72deb6a82821c67730a6678566ead5911d71304854daddb1fe98a396551a4be01de65da01f3a8" privKeyBytes, _ := hex.DecodeString(mockPrivKeyHex) diffPrivKey, _ := crypto.UnmarshalEd25519PrivateKey(privKeyBytes) newMockKeychain := new(mocks.Keychain) newMockKeychain.On( "GetStoredKeyPairInLibP2PFormat", ).Return(diffPrivKey, nil, nil) diffThreadID, err := utils.NewDeterministicThreadID(newMockKeychain, utils.MetathreadThreadVariant) assert.Nil(t, err) assert.Equal(t, threadID, threadIDCopy) assert.NotEqual(t, threadID, diffThreadID) } ================================================ FILE: core/util/address/PROTOCOL.md ================================================ # Protocols ## Address derivation Public keys are created using `ed25519` elliptic curve algorithm. An address is a hash of the public key to obtain a smaller length representation of it. To derive the address, we: 1- Take the SHA3-256 hash of the public key (end up with 32 bytes string). 2- Drop the first 14 bytes of the hash 3- Convert the remaining 18 bytes to hex and prepend `0x`. 
(38 characters in total)

================================================
FILE: core/util/address/address.go
================================================
package address

import (
	"encoding/hex"

	"github.com/libp2p/go-libp2p-core/crypto"
	"golang.org/x/crypto/sha3"
)

// Returns the address representation of a public key
// If the public key is malformed it returns an empty string
func DeriveAddress(pubKey crypto.PubKey) string {
	pubBytes, err := pubKey.Raw()
	if err != nil {
		return ""
	}

	hf := sha3.New256()
	hf.Write(pubBytes)

	// Get the hex representation of the SHA3-256 hash
	hexHash := hex.EncodeToString(hf.Sum(nil))

	// Drop the first 14 bytes (28 characters)
	trimmed := hexHash[28:]

	return "0x" + trimmed
}

================================================
FILE: core/util/paths.go
================================================
package util

import (
	"os"
	s "strings"

	"github.com/mitchellh/go-homedir"
)

// ResolvePath resolves a path into its full qualified path
// alias like `~` is expanded based on the current user
func ResolvePath(path string) (string, error) {
	fullPath := path

	if home, err := homedir.Dir(); err == nil {
		// If the path contains ~, we replace it with the actual home directory
		// NOTE(review): this replaces every "~" anywhere in the path, not
		// only a leading one — confirm that is intended.
		fullPath = s.Replace(path, "~", home, -1)
	} else {
		return "", err
	}

	return fullPath, nil
}

// DirEntryExists check if the file or directory with the given path exists.
func DirEntryExists(filename string) bool { fi, err := os.Lstat(filename) if fi != nil || (err != nil && !os.IsNotExist(err)) { return true } return false } ================================================ FILE: core/util/rlimit/rlimit_unix.go ================================================ // +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package rlimit import ( "fmt" "math" "syscall" "github.com/FleekHQ/space-daemon/log" ) // Sets rlimit to the maximum allowed value in UNIX systems func SetRLimit() { var rLimit syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) if err != nil { log.Error("Error Getting Rlimit. Please run `ulimit -n 1000` from a privileged user to avoid issues when running the space daemon.", err) return } log.Debug(fmt.Sprintf("Got Rlimit: Cur: %d, Max: %d", rLimit.Cur, rLimit.Max)) // Max allowed value is 10240 even when rLimit.Max can go beyond that rLimit.Cur = uint64(math.Min(10240, float64(rLimit.Max))) err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit) if err != nil { log.Error("Error setting Rlimit. 
Please run `ulimit -n 1000` from a privileged user to avoid issues when running the space daemon.", err) return } log.Debug(fmt.Sprintf("Set Rlimit: Cur: %d, Max: %d", rLimit.Cur, rLimit.Max)) } ================================================ FILE: core/util/rlimit/rlimit_windows.go ================================================ package rlimit // Rlimit not supported on windows func SetRLimit() { return } ================================================ FILE: core/vault/vault.go ================================================ package vault import ( "bytes" "crypto/aes" "crypto/cipher" "crypto/rand" "crypto/sha512" "encoding/base64" "encoding/json" "errors" "io" "io/ioutil" "net/http" "github.com/FleekHQ/space-daemon/core/space/domain" "golang.org/x/crypto/pbkdf2" ) type vault struct { vaultAPIURL string vaultSaltSecret string } type VaultItemType string // Vault item types const ( PrivateKeyWithMnemonic VaultItemType = "PrivateKeyWithMnemonic" ) type VkVersion string const ( VkVersion1 VkVersion = "V1" ) // AES requires key length equal to 16, 24 or 32 bytes const vaultKeyLength = 32 type VaultItem struct { ItemType VaultItemType Value string } type storeVaultRequest struct { Vault string `json:"vault"` Vsk string `json:"vsk"` Type string `json:"type"` } type StoredVault struct { Vault string Vsk string } type retrieveVaultRequest struct { Vsk string `json:"vsk"` Type string `json:"type"` } type retrieveVaultResponse struct { EncryptedVault string `json:"encryptedVault"` } type Vault interface { Store(uuid string, passphrase string, backupType domain.KeyBackupType, apiToken string, items []VaultItem) (*StoredVault, error) Retrieve(uuid string, passphrase string, backupType domain.KeyBackupType) ([]VaultItem, error) } func New(vaultAPIURL string, vaultSaltSecret string) *vault { return &vault{ vaultAPIURL: vaultAPIURL, vaultSaltSecret: vaultSaltSecret, } } func (v *vault) Store(uuid string, passphrase string, backupType domain.KeyBackupType, apiToken string, 
items []VaultItem) (*StoredVault, error) { // Generate vault file vf, err := json.Marshal(items) if err != nil { return nil, err } // Compute vault key vk := v.computeVk(uuid, passphrase, VkVersion1) // Encrypt vault file using vault key encVf, err := encrypt(vf, vk) if err != nil { return nil, err } // Compute vault service key vsk := v.computeVsk(vk, passphrase, VkVersion1) // Submit encrypted file and vsk to vault service storeRequest := &storeVaultRequest{ Vault: base64.RawStdEncoding.EncodeToString(encVf), Vsk: base64.RawStdEncoding.EncodeToString(vsk), Type: domain.KeyBackupType(backupType).String(), } reqJSON, err := json.Marshal(storeRequest) if err != nil { return nil, err } client := &http.Client{ CheckRedirect: http.DefaultClient.CheckRedirect, } req, err := http.NewRequest("POST", v.vaultAPIURL+"/vaults", bytes.NewBuffer(reqJSON)) req.Header.Add("Authorization", apiToken) resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() _, err = parseAPIResponse(resp) if err != nil { return nil, err } result := &StoredVault{ Vault: storeRequest.Vault, Vsk: storeRequest.Vsk, } return result, nil } func (v *vault) Retrieve(uuid string, passphrase string, backupType domain.KeyBackupType) ([]VaultItem, error) { // Compute vault key vk := v.computeVk(uuid, passphrase, VkVersion1) // Compute vault service key vsk := v.computeVsk(vk, passphrase, VkVersion1) // Send retrieve request to vault service reqJSON, err := json.Marshal(&retrieveVaultRequest{ Vsk: base64.RawStdEncoding.EncodeToString(vsk), Type: domain.KeyBackupType(backupType).String(), }) if err != nil { return nil, err } resp, err := http.Post( v.vaultAPIURL+"/vaults/"+uuid, "application/json", bytes.NewBuffer(reqJSON), ) if err != nil { return nil, err } defer resp.Body.Close() body, err := parseAPIResponse(resp) if err != nil { return nil, err } var parsedBody retrieveVaultResponse err = json.Unmarshal(body, &parsedBody) if err != nil { return nil, err } // Decrypt encrypted 
// encrypt seals data with AES-GCM under the given key, prepending the
// randomly generated nonce to the returned ciphertext.
func encrypt(data []byte, key []byte) ([]byte, error) {
	blockCipher, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(blockCipher)
	if err != nil {
		return nil, err
	}

	// Fresh random nonce per message; it travels with the ciphertext.
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}

	return gcm.Seal(nonce, nonce, data, nil), nil
}

// decrypt opens an AES-GCM ciphertext produced by encrypt, expecting
// the nonce to be prepended to the sealed payload.
func decrypt(ciphertext []byte, key []byte) ([]byte, error) {
	blockCipher, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(blockCipher)
	if err != nil {
		return nil, err
	}

	n := gcm.NonceSize()
	if len(ciphertext) < n {
		return nil, errors.New("malformed ciphertext")
	}

	return gcm.Open(nil, ciphertext[:n], ciphertext[n:], nil)
}
errors.New(returnedErr.Message) } return nil, errors.New("Unexpected API error") } return body, nil } ================================================ FILE: core/vault/vault_test.go ================================================ package vault_test import ( "net/http" "net/http/httptest" "testing" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/vault" "github.com/stretchr/testify/assert" ) const testSaltSecret = "someSecret" const testUuid = "c907e7ef-7b36-4ab1-8a56-f788d7526a2c" const testPassphrase = "banana1234" const testAPIToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJwdWJrZXkiOiJhZTRiMmFiNjU4ZmJiNzcyMjE0MDRkNjU3YzZiNzQyZDJlZjdjNTI2YjZhNWE5YzIwMGNjZjkzZmNhMWRjZTYzIiwidXVpZCI6ImM5MDdlN2VmLTdiMzYtNGFiMS04YTU2LWY3ODhkNzUyNmEyYyIsImlhdCI6MTU5ODI4NTA0MSwiZXhwIjoxNjAwODc3MDQxfQ.dgp8UhWCLjsU0SjxXwSb3g0jEurt2jAKPaY3B_eO-qE" func TestVault_StoreAndRetrieve(t *testing.T) { testVaultItems := []vault.VaultItem{ { ItemType: vault.PrivateKeyWithMnemonic, Value: "SomePrivateKey", }, } storeVaultMock := func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(`{}`)) } testBackupType := domain.PASSWORD serverMock := func() *httptest.Server { handler := http.NewServeMux() handler.HandleFunc("/vaults", storeVaultMock) srv := httptest.NewServer(handler) return srv } server := serverMock() defer server.Close() v := vault.New( // "https://f4nmmmkstb.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER server.URL, testSaltSecret, ) storeRequest, err := v.Store(testUuid, testPassphrase, testBackupType, testAPIToken, testVaultItems) assert.Nil(t, err) if err != nil { return } assert.NotNil(t, storeRequest) assert.NotNil(t, storeRequest.Vault) assert.NotEqual(t, "", storeRequest.Vault) assert.NotNil(t, storeRequest.Vsk) assert.NotEqual(t, "", storeRequest.Vsk) retrieveVaultMock := func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(`{"encryptedVault": "` + storeRequest.Vault + `"}`)) } 
// TestVault_StoreServerError verifies that Store surfaces a non-2xx
// API response (401 with a JSON error body) as a Go error and returns
// a nil result instead of a partially populated one.
func TestVault_StoreServerError(t *testing.T) {
	testVaultItems := []vault.VaultItem{
		{
			ItemType: vault.PrivateKeyWithMnemonic,
			Value:    "SomePrivateKey",
		},
	}

	testBackupType := domain.PASSWORD

	// Mock endpoint that always rejects the request with a 401 and an
	// error message in the shape the vault API uses.
	storeVaultMock := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusUnauthorized)
		_, _ = w.Write([]byte(`{ "message": "Unauthorized Error: Authorization token is invalid."}`))
	}

	serverMock := func() *httptest.Server {
		handler := http.NewServeMux()
		handler.HandleFunc("/vaults", storeVaultMock)
		srv := httptest.NewServer(handler)
		return srv
	}

	server := serverMock()
	defer server.Close()

	v := vault.New(
		// "https://f4nmmmkstb.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER
		server.URL,
		testSaltSecret,
	)

	storeRequest, err := v.Store(testUuid, testPassphrase, testBackupType, testAPIToken, testVaultItems)

	// The 401 must be translated into an error and no vault returned.
	assert.NotNil(t, err)
	assert.Nil(t, storeRequest)
}
:= httptest.NewServer(handler) return srv } server := serverMock() defer server.Close() v := vault.New( // "https://f4nmmmkstb.execute-api.us-west-2.amazonaws.com/dev", // UNCOMMENT TO TEST REAL SERVER server.URL, testSaltSecret, ) retrievedItems, err := v.Retrieve(testUuid, testPassphrase, domain.PASSWORD) assert.NotNil(t, err) assert.Nil(t, retrievedItems) } ================================================ FILE: core/watcher/blacklist.go ================================================ //+build !windows package watcher import ( "os" ) // isBlacklisted return true if the file or path is not a supported entry // to trigger file watcher event handler func isBlacklisted(path string, fileInfo os.FileInfo) bool { return fileInfo.Name()[0:1] == "." } ================================================ FILE: core/watcher/blacklist_windows.go ================================================ package watcher import ( "os" "golang.org/x/sys/windows" ) // isBlacklisted return true if the file or path is not a supported entry // to trigger file watcher event handler func isBlacklisted(path string, fileInfo os.FileInfo) bool { pointer, err := windows.UTF16PtrFromString(path) if err != nil { return false } attributes, err := windows.GetFileAttributes(pointer) if err != nil { return false } return attributes&windows.FILE_ATTRIBUTE_HIDDEN != 0 } ================================================ FILE: core/watcher/handler.go ================================================ package watcher import ( "context" "fmt" "os" "github.com/FleekHQ/space-daemon/log" ) // EventHandler type EventHandler interface { OnCreate(ctx context.Context, path string, fileInfo os.FileInfo) OnRemove(ctx context.Context, path string, fileInfo os.FileInfo) OnWrite(ctx context.Context, path string, fileInfo os.FileInfo) OnRename(ctx context.Context, path string, fileInfo os.FileInfo, oldPath string) OnMove(ctx context.Context, path string, fileInfo os.FileInfo, oldPath string) } // Implements EventHandler and 
defaults to logging actions performed type defaultWatcherHandler struct{} func (h *defaultWatcherHandler) OnCreate( ctx context.Context, path string, fileInfo os.FileInfo, ) { log.Info("Default Watcher Handler: OnCreate", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileInfo:%v", fileInfo)) } func (h *defaultWatcherHandler) OnRemove( ctx context.Context, path string, fileInfo os.FileInfo, ) { log.Info("Default Watcher Handler: OnRemove", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileInfo:%v", fileInfo)) } func (h *defaultWatcherHandler) OnWrite( ctx context.Context, path string, fileInfo os.FileInfo, ) { log.Info("Default Watcher Handler: OnWrite", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileInfo:%v", fileInfo)) } func (h *defaultWatcherHandler) OnRename( ctx context.Context, path string, fileInfo os.FileInfo, oldPath string, ) { log.Info( "Default Watcher Handler: OnRename", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileInfo:%v", fileInfo), fmt.Sprintf("path:%s", oldPath), ) } func (h *defaultWatcherHandler) OnMove( ctx context.Context, path string, fileInfo os.FileInfo, oldPath string, ) { log.Info( "Default Watcher Handler: OnMove", fmt.Sprintf("path:%s", path), fmt.Sprintf("fileInfo:%v", fileInfo), fmt.Sprintf("path:%s", oldPath), ) } ================================================ FILE: core/watcher/options.go ================================================ package watcher type watcherOptions struct { paths []string } // Option configuration for the FileWatcher // Use exported Option factory functions type Option func(option *watcherOptions) // WithPaths configures the list of paths the file watcher would be watching recursively. 
// For best results do not include two paths withing the same directory func WithPaths(path ...string) Option { return func(option *watcherOptions) { for _, p := range path { option.paths = append(option.paths, p) } } } ================================================ FILE: core/watcher/watcher.go ================================================ package watcher import ( "context" "errors" "fmt" s "strings" "sync" fsutils "github.com/FleekHQ/space-daemon/core/space/services" "github.com/mitchellh/go-homedir" "time" "github.com/radovskyb/watcher" "github.com/FleekHQ/space-daemon/log" ) var ( ErrFolderPathNotFound = errors.New("could not find a folder path for watcher") ) type FolderWatcher interface { RegisterHandler(handler EventHandler) AddFile(path string) error Watch(ctx context.Context) error Close() } type folderWatcher struct { w *watcher.Watcher lock sync.Mutex publishLock sync.RWMutex options watcherOptions started bool closed bool handlers []EventHandler } // New creates an new instance of folder watcher func New(configs ...Option) (*folderWatcher, error) { options := watcherOptions{} for _, config := range configs { config(&options) } w := watcher.New() for _, path := range options.paths { if home, err := homedir.Dir(); err == nil { // If the root directory contains ~, we replace it with the actual home directory path = s.Replace(path, "~", home, -1) } if path == "" { log.Fatal(ErrFolderPathNotFound) return nil, ErrFolderPathNotFound } err := w.AddRecursive(path) if err != nil { return nil, err } } return &folderWatcher{ w: w, options: options, }, nil } func (fw *folderWatcher) RegisterHandler(handler EventHandler) { fw.publishLock.Lock() defer fw.publishLock.Unlock() fw.handlers = append(fw.handlers, handler) } func (fw *folderWatcher) AddFile(path string) error { if fsutils.IsPathDir(path) { return errors.New(fmt.Sprintf("unable to watch path %s folder is not supporter", path)) } err := fw.w.Add(path) if err != nil { return err } return err } // Watch 
will start listening of changes on the folderWatcher path and trigger the handler with any update events // This is a block operation func (fw *folderWatcher) Watch(ctx context.Context) error { fw.setToStarted() go func() { for { select { case <-fw.w.Closed: log.Debug("Watcher graceful shutdown triggered") return case <-ctx.Done(): fw.Close() case event, ok := <-fw.w.Event: if ok { if len(fw.handlers) == 0 { fw.publishEventToHandler(ctx, &defaultWatcherHandler{}, event) } else { fw.publishEvent(ctx, event) } } case err, ok := <-fw.w.Error: if !ok { return } log.Fatal(err) } } }() log.Debug("Starting watcher", fmt.Sprintf("filePath:%s", fw.options.paths)) // This is blocking err := fw.w.Start(time.Millisecond * 100) fw.started = false if err != nil { return err } return nil } func (fw *folderWatcher) setToStarted() { fw.lock.Lock() defer fw.lock.Unlock() if fw.started { return } fw.started = true } func (fw *folderWatcher) publishEvent(ctx context.Context, event watcher.Event) { fw.publishLock.RLock() defer fw.publishLock.RUnlock() for _, handler := range fw.handlers { fw.publishEventToHandler(ctx, handler, event) } } func (fw *folderWatcher) publishEventToHandler( ctx context.Context, handler EventHandler, event watcher.Event, ) { if isBlacklisted(event.Path, event.FileInfo) { log.Debug("Skipping blacklisted file/folder event") return } switch event.Op { case watcher.Create: handler.OnCreate(ctx, event.Path, event.FileInfo) case watcher.Remove: handler.OnRemove(ctx, event.Path, event.FileInfo) case watcher.Write: handler.OnWrite(ctx, event.Path, event.FileInfo) case watcher.Rename: handler.OnRename(ctx, event.Path, event.FileInfo, event.OldPath) case watcher.Move: handler.OnMove(ctx, event.Path, event.FileInfo, event.OldPath) } } // Close will stop the watching operation and unblock watch calls func (fw *folderWatcher) Close() { fw.lock.Lock() defer fw.lock.Unlock() if !fw.started || fw.closed { return } fw.closed = true fw.w.Close() } func (fw *folderWatcher) 
Shutdown() error { fw.Close() return nil } ================================================ FILE: core/watcher/watcher_test.go ================================================ package watcher import ( "context" "os" "testing" "time" "github.com/stretchr/testify/mock" w "github.com/radovskyb/watcher" ) // TODO: Use mockery to create mocks interface implementations type handlerMock struct { mock.Mock } func (h *handlerMock) OnCreate(ctx context.Context, path string, fileInfo os.FileInfo) { h.Called(ctx, path, fileInfo) } func (h *handlerMock) OnRemove(ctx context.Context, path string, fileInfo os.FileInfo) { h.Called(ctx, path, fileInfo) } func (h *handlerMock) OnWrite(ctx context.Context, path string, fileInfo os.FileInfo) { h.Called(ctx, path, fileInfo) } func (h *handlerMock) OnRename(ctx context.Context, path string, fileInfo os.FileInfo, oldPath string) { h.Called(ctx, path, fileInfo, oldPath) } func (h *handlerMock) OnMove(ctx context.Context, path string, fileInfo os.FileInfo, oldPath string) { h.Called(ctx, path, fileInfo, oldPath) } func isTriggeredEvent(info os.FileInfo) bool { return info.Name() == "triggered event" } func startWatcher(t *testing.T, watchPaths ...string) (context.Context, FolderWatcher, error) { ctx := context.Background() watcher, err := New(WithPaths(watchPaths...)) if err != nil { return nil, nil, err } // execute go func() { err = watcher.Watch(ctx) if err != nil { t.Fatal(err) } }() return ctx, watcher, nil } func TestFolderWatcher_Watch_Triggers_Handler_OnCreate(t *testing.T) { // setup _, watcher, err := startWatcher(t) if err != nil { t.Fatal(err) } handler := new(handlerMock) handler.On("OnCreate", mock.Anything, "-", mock.MatchedBy(isTriggeredEvent)).Return() watcher.RegisterHandler(handler) // trigger event watcher.(*folderWatcher).w.TriggerEvent(w.Create, nil) // wait a few ms for async event to trigger handler <-time.After(time.Millisecond * 100) // assert handler.AssertNumberOfCalls(t, "OnCreate", 1) 
handler.AssertExpectations(t) // cleanup watcher.Close() } func TestFolderWatcher_Watch_Triggers_Handler_OnRemove(t *testing.T) { // setup _, watcher, err := startWatcher(t) if err != nil { t.Fatal(err) } handler := new(handlerMock) handler.On("OnRemove", mock.Anything, "-", mock.MatchedBy(isTriggeredEvent)).Return() watcher.RegisterHandler(handler) // trigger event watcher.(*folderWatcher).w.TriggerEvent(w.Remove, nil) // wait a few ms for async event to trigger handler <-time.After(time.Millisecond * 100) // assert handler.AssertNumberOfCalls(t, "OnRemove", 1) handler.AssertExpectations(t) // cleanup watcher.Close() } ================================================ FILE: coverage/.gitkeep ================================================ ================================================ FILE: devtools/googleapis/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: devtools/googleapis/README.grpc-gateway ================================================ Google APIs ============ Project: Google APIs URL: https://github.com/google/googleapis Revision: 3544ab16c3342d790b00764251e348705991ea4b License: Apache License 2.0 Imported Files --------------- - google/api/annotations.proto - google/api/http.proto - google/api/httpbody.proto Generated Files ---------------- They are generated from the .proto files by protoc-gen-go. - google/api/annotations.pb.go - google/api/http.pb.go ================================================ FILE: devtools/googleapis/google/api/annotations.proto ================================================ // Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package google.api; import "google/api/http.proto"; import "google/protobuf/descriptor.proto"; option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "AnnotationsProto"; option java_package = "com.google.api"; option objc_class_prefix = "GAPI"; extend google.protobuf.MethodOptions { // See `HttpRule`. HttpRule http = 72295728; } ================================================ FILE: devtools/googleapis/google/api/http.proto ================================================ // Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.api; option cc_enable_arenas = true; option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "HttpProto"; option java_package = "com.google.api"; option objc_class_prefix = "GAPI"; // Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. message Http { // A list of HTTP configuration rules that apply to individual API methods. // // **NOTE:** All service configuration rules follow "last one wins" order. 
// When set to true, URL path parameters will be fully URI-decoded except in
// repeated in the URL, as in `...?param=A&param=B`.
})` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the // request body. This enables the following alternative definition of // the update method: // // service Messaging { // rpc UpdateMessage(Message) returns (Message) { // option (google.api.http) = { // put: "/v1/messages/{message_id}" // body: "*" // }; // } // } // message Message { // string message_id = 1; // string text = 2; // } // // // The following HTTP JSON to RPC mapping is enabled: // // HTTP | RPC // -----|----- // `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in // the body. This makes this option more rarely used in practice of // defining REST APIs. The common usage of `*` is in custom methods // which don't use the URL at all for transferring data. // // It is possible to define multiple HTTP methods for one RPC by using // the `additional_bindings` option. Example: // // service Messaging { // rpc GetMessage(GetMessageRequest) returns (Message) { // option (google.api.http) = { // get: "/v1/messages/{message_id}" // additional_bindings { // get: "/v1/users/{user_id}/messages/{message_id}" // } // }; // } // } // message GetMessageRequest { // string message_id = 1; // string user_id = 2; // } // // // This enables the following two alternative HTTP JSON to RPC // mappings: // // HTTP | RPC // -----|----- // `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` // `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` // // # Rules for HTTP mapping // // The rules for mapping HTTP path, query parameters, and body fields // to the request message are as follows: // // 1. The `body` field specifies either `*` or a field path, or is // omitted. 
If omitted, it indicates there is no HTTP request body. // 2. Leaf fields (recursive expansion of nested messages in the // request) can be classified into three types: // (a) Matched in the URL template. // (b) Covered by body (if body is `*`, everything except (a) fields; // else everything under the body field) // (c) All other fields. // 3. URL query parameters found in the HTTP request are mapped to (c) fields. // 4. Any body sent with an HTTP request can contain only (b) fields. // // The syntax of the path template is as follows: // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; // Segment = "*" | "**" | LITERAL | Variable ; // Variable = "{" FieldPath [ "=" Segments ] "}" ; // FieldPath = IDENT { "." IDENT } ; // Verb = ":" LITERAL ; // // The syntax `*` matches a single path segment. The syntax `**` matches zero // or more path segments, which must be the last part of the path except the // `Verb`. The syntax `LITERAL` matches literal text in the path. // // The syntax `Variable` matches part of the URL path as specified by its // template. A variable template must not contain other variables. If a variable // matches a single path segment, its template may be omitted, e.g. `{var}` // is equivalent to `{var=*}`. // // If a variable contains exactly one path segment, such as `"{var}"` or // `"{var=*}"`, when such a variable is expanded into a URL path, all characters // except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the // Discovery Document as `{var}`. // // If a variable contains one or more path segments, such as `"{var=foo/*}"` // or `"{var=**}"`, when such a variable is expanded into a URL path, all // characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables // show up in the Discovery Document as `{+var}`. 
// Determines the URL pattern that is matched by this rule. This pattern can be
string response_body = 12; // Additional HTTP bindings for the selector. Nested bindings must // not contain an `additional_bindings` field themselves (that is, // the nesting may only be one level deep). repeated HttpRule additional_bindings = 11; } // A custom pattern is used for defining custom HTTP verb. message CustomHttpPattern { // The name of this custom HTTP verb. string kind = 1; // The path matched by this custom verb. string path = 2; } ================================================ FILE: devtools/googleapis/google/api/httpbody.proto ================================================ // Copyright 2018 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // syntax = "proto3"; package google.api; import "google/protobuf/any.proto"; option cc_enable_arenas = true; option go_package = "google.golang.org/genproto/googleapis/api/httpbody;httpbody"; option java_multiple_files = true; option java_outer_classname = "HttpBodyProto"; option java_package = "com.google.api"; option objc_class_prefix = "GAPI"; // Message that represents an arbitrary HTTP body. It should only be used for // payload formats that can't be represented as JSON, such as raw binary or // an HTML page. // // // This message can be used both in streaming and non-streaming API methods in // the request as well as the response. 
// // It can be used as a top-level request field, which is convenient if one // wants to extract parameters from either the URL or HTTP template into the // request fields and also want access to the raw HTTP body. // // Example: // // message GetResourceRequest { // // A unique request id. // string request_id = 1; // // // The raw HTTP body is bound to this field. // google.api.HttpBody http_body = 2; // } // // service ResourceService { // rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); // rpc UpdateResource(google.api.HttpBody) returns // (google.protobuf.Empty); // } // // Example with streaming methods: // // service CaldavService { // rpc GetCalendar(stream google.api.HttpBody) // returns (stream google.api.HttpBody); // rpc UpdateCalendar(stream google.api.HttpBody) // returns (stream google.api.HttpBody); // } // // Use of this type only changes how the request and response bodies are // handled, all other features will continue to work unchanged. message HttpBody { // The HTTP Content-Type header value specifying the content type of the body. string content_type = 1; // The HTTP request/response body as raw binary. bytes data = 2; // Application specific response metadata. Must be set in the first response // for streaming APIs. repeated google.protobuf.Any extensions = 3; } ================================================ FILE: devtools/googleapis/google/rpc/code.proto ================================================ // Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.rpc; option go_package = "google.golang.org/genproto/googleapis/rpc/code;code"; option java_multiple_files = true; option java_outer_classname = "CodeProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; // The canonical error codes for Google APIs. // // // Sometimes multiple error codes may apply. Services should return // the most specific error code that applies. For example, prefer // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. // Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. enum Code { // Not an error; returned on success // // HTTP Mapping: 200 OK OK = 0; // The operation was cancelled, typically by the caller. // // HTTP Mapping: 499 Client Closed Request CANCELLED = 1; // Unknown error. For example, this error may be returned when // a `Status` value received from another address space belongs to // an error space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. // // HTTP Mapping: 500 Internal Server Error UNKNOWN = 2; // The client specified an invalid argument. Note that this differs // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). // // HTTP Mapping: 400 Bad Request INVALID_ARGUMENT = 3; // The deadline expired before the operation could complete. For operations // that change the state of the system, this error may be returned // even if the operation has completed successfully. For example, a // successful response from a server could have been delayed long // enough for the deadline to expire. // // HTTP Mapping: 504 Gateway Timeout DEADLINE_EXCEEDED = 4; // Some requested entity (e.g., file or directory) was not found. 
// // Note to server developers: if a request is denied for an entire class // of users, such as gradual feature rollout or undocumented whitelist, // `NOT_FOUND` may be used. If a request is denied for some users within // a class of users, such as user-based access control, `PERMISSION_DENIED` // must be used. // // HTTP Mapping: 404 Not Found NOT_FOUND = 5; // The entity that a client attempted to create (e.g., file or directory) // already exists. // // HTTP Mapping: 409 Conflict ALREADY_EXISTS = 6; // The caller does not have permission to execute the specified // operation. `PERMISSION_DENIED` must not be used for rejections // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` // instead for those errors). `PERMISSION_DENIED` must not be // used if the caller can not be identified (use `UNAUTHENTICATED` // instead for those errors). This error code does not imply the // request is valid or the requested entity exists or satisfies // other pre-conditions. // // HTTP Mapping: 403 Forbidden PERMISSION_DENIED = 7; // The request does not have valid authentication credentials for the // operation. // // HTTP Mapping: 401 Unauthorized UNAUTHENTICATED = 16; // Some resource has been exhausted, perhaps a per-user quota, or // perhaps the entire file system is out of space. // // HTTP Mapping: 429 Too Many Requests RESOURCE_EXHAUSTED = 8; // The operation was rejected because the system is not in a state // required for the operation's execution. For example, the directory // to be deleted is non-empty, an rmdir operation is applied to // a non-directory, etc. // // Service implementors can use the following guidelines to decide // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: // (a) Use `UNAVAILABLE` if the client can retry just the failing call. // (b) Use `ABORTED` if the client should retry at a higher level // (e.g., when a client-specified test-and-set fails, indicating the // client should restart a read-modify-write sequence). 
// (c) Use `FAILED_PRECONDITION` if the client should not retry until // the system state has been explicitly fixed. E.g., if an "rmdir" // fails because the directory is non-empty, `FAILED_PRECONDITION` // should be returned since the client should not retry unless // the files are deleted from the directory. // // HTTP Mapping: 400 Bad Request FAILED_PRECONDITION = 9; // The operation was aborted, typically due to a concurrency issue such as // a sequencer check failure or transaction abort. // // See the guidelines above for deciding between `FAILED_PRECONDITION`, // `ABORTED`, and `UNAVAILABLE`. // // HTTP Mapping: 409 Conflict ABORTED = 10; // The operation was attempted past the valid range. E.g., seeking or // reading past end-of-file. // // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may // be fixed if the system state changes. For example, a 32-bit file // system will generate `INVALID_ARGUMENT` if asked to read at an // offset that is not in the range [0,2^32-1], but it will generate // `OUT_OF_RANGE` if asked to read from an offset past the current // file size. // // There is a fair bit of overlap between `FAILED_PRECONDITION` and // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific // error) when it applies so that callers who are iterating through // a space can easily look for an `OUT_OF_RANGE` error to detect when // they are done. // // HTTP Mapping: 400 Bad Request OUT_OF_RANGE = 11; // The operation is not implemented or is not supported/enabled in this // service. // // HTTP Mapping: 501 Not Implemented UNIMPLEMENTED = 12; // Internal errors. This means that some invariants expected by the // underlying system have been broken. This error code is reserved // for serious errors. // // HTTP Mapping: 500 Internal Server Error INTERNAL = 13; // The service is currently unavailable. This is most likely a // transient condition, which can be corrected by retrying with // a backoff. 
// // See the guidelines above for deciding between `FAILED_PRECONDITION`, // `ABORTED`, and `UNAVAILABLE`. // // HTTP Mapping: 503 Service Unavailable UNAVAILABLE = 14; // Unrecoverable data loss or corruption. // // HTTP Mapping: 500 Internal Server Error DATA_LOSS = 15; } ================================================ FILE: devtools/googleapis/google/rpc/error_details.proto ================================================ // Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.rpc; import "google/protobuf/duration.proto"; option go_package = "google.golang.org/genproto/googleapis/rpc/errdetails;errdetails"; option java_multiple_files = true; option java_outer_classname = "ErrorDetailsProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. // // It's always recommended that clients should use exponential backoff when // retrying. // // Clients should wait until `retry_delay` amount of time has passed since // receiving the error response before retrying. 
// number of retries have been reached or a maximum retry delay cap has been
// // For example, if an RPC failed because it required the Terms of Service to be // acknowledged, it could list the terms of service violation in the // PreconditionFailure message. message PreconditionFailure { // A message type used to describe a single precondition failure. message Violation { // The type of PreconditionFailure. We recommend using a service-specific // enum type to define the supported precondition violation types. For // example, "TOS" for "Terms of Service violation". string type = 1; // The subject, relative to the type, that failed. // For example, "google.com/cloud" relative to the "TOS" type would // indicate which terms of service is being referenced. string subject = 2; // A description of how the precondition failed. Developers can use this // description to understand how to fix the failure. // // For example: "Terms of service not accepted". string description = 3; } // Describes all precondition violations. repeated Violation violations = 1; } // Describes violations in a client request. This error type focuses on the // syntactic aspects of the request. message BadRequest { // A message type used to describe a single bad request field. message FieldViolation { // A path leading to a field in the request body. The value will be a // sequence of dot-separated identifiers that identify a protocol buffer // field. E.g., "field_violations.field" would identify this field. string field = 1; // A description of why the request element is bad. string description = 2; } // Describes all violations in a client request. repeated FieldViolation field_violations = 1; } // Contains metadata about the request that clients can attach when filing a bug // or providing other forms of feedback. message RequestInfo { // An opaque string that should only be interpreted by the service generating // it. For example, it can be used to identify requests in the service's logs. string request_id = 1; // Any data that was used to serve this request. 
For example, an encrypted // stack trace that can be sent back to the service provider for debugging. string serving_data = 2; } // Describes the resource that is being accessed. message ResourceInfo { // A name for the type of resource being accessed, e.g. "sql table", // "cloud storage bucket", "file", "Google calendar"; or the type URL // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". string resource_type = 1; // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. string resource_name = 2; // The owner of the resource (optional). // For example, "user:" or "project:". string owner = 3; // Describes what error is encountered when accessing this resource. // For example, updating a cloud project may require the `writer` permission // on the developer console project. string description = 4; } // Provides links to documentation or for performing an out of band action. // // For example, if a quota check failed with an error indicating the calling // project hasn't enabled the accessed service, this can contain a URL pointing // directly to the right place in the developer console to flip the bit. message Help { // Describes a URL link. message Link { // Describes what the link offers. string description = 1; // The URL of the link. string url = 2; } // URL(s) pointing to additional information on handling the current error. repeated Link links = 1; } // Provides a localized error message that is safe to return to the user // which can be attached to an RPC error. message LocalizedMessage { // The locale used following the specification defined at // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. // Examples are: "en-US", "fr-CH", "es-MX" string locale = 1; // The localized error message in the above locale. 
string message = 2; } ================================================ FILE: devtools/googleapis/google/rpc/status.proto ================================================ // Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.rpc; import "google/protobuf/any.proto"; option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; option java_multiple_files = true; option java_outer_classname = "StatusProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; // The `Status` type defines a logical error model that is suitable for different // programming environments, including REST APIs and RPC APIs. It is used by // [gRPC](https://github.com/grpc). The error model is designed to be: // // - Simple to use and understand for most users // - Flexible enough to meet unexpected needs // // # Overview // // The `Status` message contains three pieces of data: error code, error message, // and error details. The error code should be an enum value of // [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The // error message should be a developer-facing English message that helps // developers *understand* and *resolve* the error. If a localized user-facing // error message is needed, put the localized message in the error details or // localize it in the client. The optional error details may contain arbitrary // information about the error. 
There is a predefined set of error detail types // in the package `google.rpc` that can be used for common error conditions. // // # Language mapping // // The `Status` message is the logical representation of the error model, but it // is not necessarily the actual wire format. When the `Status` message is // exposed in different client libraries and different wire protocols, it can be // mapped differently. For example, it will likely be mapped to some exceptions // in Java, but more likely mapped to some error codes in C. // // # Other uses // // The error model and the `Status` message can be used in a variety of // environments, either with or without APIs, to provide a // consistent developer experience across different environments. // // Example uses of this error model include: // // - Partial errors. If a service needs to return partial errors to the client, // it may embed the `Status` in the normal response to indicate the partial // errors. // // - Workflow errors. A typical workflow has multiple steps. Each step may // have a `Status` message for error reporting. // // - Batch operations. If a client uses batch request and batch response, the // `Status` message should be used directly inside batch response, one for // each error sub-response. // // - Asynchronous operations. If an API call embeds asynchronous operation // results in its response, the status of those operations should be // represented directly using the `Status` message. // // - Logging. If some API errors are stored in logs, the message `Status` could // be used directly after any stripping needed for security/privacy reasons. message Status { // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. int32 code = 1; // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. 
string message = 2; // A list of messages that carry the error details. There is a common set of // message types for APIs to use. repeated google.protobuf.Any details = 3; } ================================================ FILE: docs/crypto/vault.md ================================================ # Vault A vault is used to securely store user private keys based on a master password. It is hosted on the cloud and uses the cryptography described here to assure it can't access the data directly. It works very similarly to password managers. ## Vault data model ### Map uuids to public keys (uses address book service for this) | uuid | public key | |------|------------| | 1 | 0xa | | 2 | 0xb | | 3 | 0xc | ### Maps uuids to vaults | uuid | vault | vskHash | |------|----------------------------------------------------------------------------------|----------| | 1 | Encrypted({ a: somePrivateKey, b: otherPrivateKey, c: anotherPrivateKey }) | 0xabc... | ## Vault Flows ### Storing private keys The client needs to complete a challenge to prove they have access to a given public key. Once they have proven access, the server allows replacing the vault file for a new one. #### Private key signing challenge flow: 1. Client sends to the server their public key 2. Server issues a challenge 3. Client signs the challenge using its private key 4. Server verifies signature matches the public key, returning a JSON Web Token (JWT) #### Storing the private key 1. Client creates the vault file (`vf`), which is a JSON document that maps public keys to their private keys, but can also contain anything we want to store. 2. Client computes its vault key (`vk`). To do this, it runs `PBKDF2(password, salt, iterations, hashingFn)`, where `password` is the master password, `salt` is the user's `uuid`, `iterations` is a high number to prevent brute force (set to 100.000 as of now), and `hashingFn` is SHA512 which is the industry standard for a secure hashing function. 3. 
Using `vk`, client encrypts `vf` using AES, obtaining `vk(vf)`. 4. Client computes the vault service key (`vsk`) by doing key derivation again: `PBKDF2(vk, password, iterations, hashingFn)`, where `password` is the master password. 5. Client submits `vk(vf)`, `vsk` and the JWT back to the server. 6. Server verifies the JWT and successfully stores `vk(vf)` for the user with the given uuid. 7. Server stores `vskHash = PBKDF2(vsk, iterations, hashingFn)` using a really high value for `iterations`. #### Retrieving the private key 1. Client computes `vk` and `vsk` again as in step (2) and (4) of the previous section. 2. Client sends a retrieve request to the server with `vsk` and `uuid` as the params. 3. Server computes `vskHash` as in (7) of the previous section. 4. Server checks `vskHash` matches the one stored. If it does, it returns `vk(vf)`. If not, returns a "Wrong password" error. 5. Client decrypts `vk(vf)` using `vk`, obtaining `vf` back and getting access to its private keys. ## Takeaways - The client only needs to remember the master password and the uuid (which is obtained through a username, so it needs to remember the username). - The server only receives `vsk` and therefore cannot decrypt `vk(vf)` from it alone. It can brute-force `vsk` to obtain `vk`, but given `vk` is a SHA512 hash already, it'd take a billion years. - If a middleman intercepts the client->server message, and somehow gets to decrypt the first layer of protection which is TLS, it can't decrypt `vk(vf)` without `vk`. - The server should implement rate-limiting to protect weak master passwords from being cracked. ================================================ FILE: docs/sharing/types-of-sharing.md ================================================ # Types of Sharing Sharing can happen at the file level or the bucket level. Legacy sharing was done at the bucket level so those interfaces are left for continued usage.
Space app however will now rely on file level access control and sharing hence there will be a mixed set of interfaces for sharing. In bucket sharing, you can share an entire bucket by getting the thread info to share. A bucket holds the file structure and pointers to each file. For each bucket, we use an additional Textile thread. This thread holds meta information around the bucket that's needed for sharing. In this doc you can read about the different types of sharing we support. In file level sharing, a mirror copy of the bucket is made in the hub and only that shared path will be added to that bucket and access controlled via Textile hub's new file level access control feature. ## Bucket sharing The simplest sharing type. When you create a bucket (using the `CreateBucket` gRPC endpoint), a bucket with a single member, the creator of the bucket, will be created. If you use the `ShareBucket` gRPC, you can add all members you want. This is very similar to creating a team, or creating a channel in Slack. ## File level sharing For this, a set of paths are shared and like previously described, a mirror bucket is created with just the paths that are being shared copied over. Furthermore since these files will be on the hub, a Space encryption layer is added where a new key will be used for each file. Finally the file specific key for the paths being shared will be sent via hub inboxing so that there is a way to retrieve and decrypt files shared through a hub without exposing the content to the hub. ## Public File Sharing When calling `GeneratePublicFileLink`, the file is going to be encrypted and uploaded to IPFS. The link will point to a gateway so that anyone with the decryption key will be able to download the file. We are evaluating also creating a bucket around this single file, so that the link can also be used to join the bucket and modify the file collaboratively.
================================================ FILE: examples/ipfsLite/ipfsLite.go ================================================ package main import "log" func main() { log.Println("hello world IPFS!") } ================================================ FILE: examples/textileBucketsClient/README.md ================================================ # textile in Go poc Temporarily, buckets APIs aren't available in local threads so using the hub for all interactions. Once that is merged into threads, then we can use local one. To run the in-process `threadsd` example: 1. Build `go build .` 2. Run `./textileBucketsClient threads` To run the bucket creation with hub example: 1. Set the host, key and secret in set-envs. Use textile-hub-dev.fleek.co for the host. Key and secret should be the one shared in Slack or generated using the `tt` cli (see extra notes below) 2. `source set-envs` 3. `go build .` 4. `./textileBucketsClient hub` ### Textile CLI 1. Download bundle at https://github.com/textileio/textile/releases 2. Extract 3. Go into extracted folder and run `./install` 4. Run `tt init --api=textile-hub-dev.fleek.co:3006` 5. When asked for email validation, hit http://textile-hub-dev.fleek.co:8006/confirm/textilesession to auto validate 6. Run `tt keys create --api=textile-hub-dev.fleek.co:3006` to create keys. 7. 
You can also use all the other `tt` commands pointing to our dev hub by adding the flag `--api=textile-hub-dev.fleek.co:3006` ================================================ FILE: examples/textileBucketsClient/bucket-sync/bucket-sync.go ================================================ package main import ( "bytes" "context" "crypto/rand" "crypto/tls" "log" "os" "strings" "time" "github.com/libp2p/go-libp2p-core/crypto" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" bc "github.com/textileio/textile/v2/api/bucketsd/client" buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/api/common" tb "github.com/textileio/textile/v2/buckets" "github.com/textileio/textile/v2/cmd" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) type TextileBucketRoot buckets_pb.Root func main() { host := os.Getenv("TXL_HUB_TARGET") key := os.Getenv("TXL_USER_KEY") secret := os.Getenv("TXL_USER_SECRET") var threads *tc.Client var buckets *bc.Client var err error auth := common.Credentials{} var opts []grpc.DialOption hubTarget := host threadstarget := host if strings.Contains(host, "443") { creds := credentials.NewTLS(&tls.Config{}) opts = append(opts, grpc.WithTransportCredentials(creds)) auth.Secure = true } else { opts = append(opts, grpc.WithInsecure()) } opts = append(opts, grpc.WithPerRPCCredentials(auth)) buckets, err = bc.NewClient(hubTarget, opts...) if err != nil { cmd.Fatal(err) } threads, err = tc.NewClient(threadstarget, opts...) 
if err != nil { cmd.Fatal(err) } user1, _, err := crypto.GenerateEd25519Key(rand.Reader) if err != nil { log.Println("error creating user1") log.Fatal(err) } user2, _, err := crypto.GenerateEd25519Key(rand.Reader) if err != nil { log.Println("error creating user2") log.Fatal(err) } // user 1 creates bucket and adds file ctx := context.Background() ctx = common.NewAPIKeyContext(ctx, key) ctx, err = common.CreateAPISigContext(ctx, time.Now().Add(time.Minute*2), secret) if err != nil { log.Println("error creating APISigContext") log.Fatal(err) } tok, err := threads.GetToken(ctx, thread.NewLibp2pIdentity(user1)) if err != nil { log.Println("error calling GetToken") log.Fatal(err) } ctx = thread.NewTokenContext(ctx, tok) bucket1name := "testbucket1" ctx = common.NewThreadNameContext(ctx, bucket1name) dbID := thread.NewIDV1(thread.Raw, 32) if err := threads.NewDB(ctx, dbID); err != nil { log.Println("error calling threads.NewDB") log.Fatal(err) } ctx = common.NewThreadIDContext(ctx, dbID) buck, err := buckets.Create(ctx, bc.WithName(bucket1name), bc.WithPrivate(true)) log.Println("created bucket: " + buck.Root.Name) filepath := "file1" f := &bytes.Buffer{} f.Write([]byte("hello space")) _, _, err = buckets.PushPath(ctx, buck.Root.Key, filepath, f) if err != nil { log.Println("error pushing path") log.Fatal(err) } roles := make(map[string]tb.Role) tpk := thread.NewLibp2pPubKey(user2.GetPublic()) roles[tpk.String()] = tb.Admin err = buckets.PushPathAccessRoles(ctx, buck.Root.Key, filepath, roles) if err != nil { log.Println("error sharing path") log.Fatal(err) } // user 2 tries to access ctx1 := context.Background() ctx1 = common.NewAPIKeyContext(ctx1, key) ctx1, err = common.CreateAPISigContext(ctx1, time.Now().Add(time.Minute*2), secret) tok, err = threads.GetToken(ctx1, thread.NewLibp2pIdentity(user2)) ctx1 = thread.NewTokenContext(ctx1, tok) if err != nil { log.Println("error creating context") log.Fatal(err) } ctx1 = common.NewThreadNameContext(ctx1, bucket1name) ctx1 
= common.NewThreadIDContext(ctx1, dbID) var buf bytes.Buffer err = buckets.PullPath(ctx1, buck.Root.Key, filepath, &buf) if err != nil { log.Println("error pulling path") log.Fatal(err) } s := buf.String() log.Println("fetch file content: " + s) } ================================================ FILE: examples/textileBucketsClient/buckets.go ================================================ package main import ( "bytes" "context" "crypto/rand" "crypto/tls" "encoding/json" "errors" "fmt" "log" "net" "net/http" "os" "strings" "time" "github.com/improbable-eng/grpc-web/go/grpcweb" connmgr "github.com/libp2p/go-libp2p-connmgr" crypto "github.com/libp2p/go-libp2p-crypto" ma "github.com/multiformats/go-multiaddr" "github.com/textileio/go-threads/api" tc "github.com/textileio/go-threads/api/client" tpb "github.com/textileio/go-threads/api/pb" tCommon "github.com/textileio/go-threads/common" "github.com/textileio/go-threads/core/thread" netapi "github.com/textileio/go-threads/net/api" netapiclient "github.com/textileio/go-threads/net/api/client" netpb "github.com/textileio/go-threads/net/api/pb" "github.com/textileio/go-threads/util" bc "github.com/textileio/textile/v2/api/bucketsd/client" pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/api/common" uc "github.com/textileio/textile/v2/api/usersd/client" "github.com/textileio/textile/v2/cmd" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) const ctxTimeout = 30 func authCtx(duration time.Duration) (context.Context, context.CancelFunc) { ctx, cancel := context.WithTimeout(context.Background(), duration) return ctx, cancel } // these next 2 helpers are from the lib but wasnt // sure how to export them func threadCtx(duration time.Duration) (context.Context, context.CancelFunc) { ctx, cancel := authCtx(duration) ctx = common.NewThreadIDContext(ctx, getThreadID()) return ctx, cancel } func getThreadID() (id thread.ID) { // get from Space config instead idstr := 
os.Getenv("thread") if idstr != "" { var err error id, err = thread.Decode(idstr) if err != nil { cmd.Fatal(err) } } return } func runThreadsLocally() { hostAddr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/4006") if err != nil { log.Fatal(err) } apiAddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/6006") if err != nil { log.Fatal(err) } apiProxyAddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/6007") if err != nil { log.Fatal(err) } repo := ".threads" debug := false n, err := tCommon.DefaultNetwork( repo, tCommon.WithNetHostAddr(hostAddr), tCommon.WithConnectionManager(connmgr.NewConnManager(100, 400, time.Second*20)), tCommon.WithNetDebug(debug)) if err != nil { log.Fatal(err) } defer n.Close() n.Bootstrap(util.DefaultBoostrapPeers()) service, err := api.NewService(n, api.Config{ RepoPath: repo, Debug: debug, }) if err != nil { log.Fatal(err) } netService, err := netapi.NewService(n, netapi.Config{ Debug: debug, }) if err != nil { log.Fatal(err) } target, err := util.TCPAddrFromMultiAddr(apiAddr) if err != nil { log.Fatal(err) } ptarget, err := util.TCPAddrFromMultiAddr(apiProxyAddr) if err != nil { log.Fatal(err) } server := grpc.NewServer() listener, err := net.Listen("tcp", target) if err != nil { log.Fatal(err) } go func() { tpb.RegisterAPIServer(server, service) netpb.RegisterAPIServer(server, netService) if err := server.Serve(listener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { log.Fatalf("serve error: %v", err) } }() webrpc := grpcweb.WrapServer( server, grpcweb.WithOriginFunc(func(origin string) bool { return true }), grpcweb.WithWebsockets(true), grpcweb.WithWebsocketOriginFunc(func(req *http.Request) bool { return true })) proxy := &http.Server{ Addr: ptarget, } proxy.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if webrpc.IsGrpcWebRequest(r) || webrpc.IsAcceptableGrpcCorsRequest(r) || webrpc.IsGrpcWebSocketRequest(r) { webrpc.ServeHTTP(w, r) } }) go func() { if err := proxy.ListenAndServe(); err != nil && err != 
http.ErrServerClosed { log.Fatalf("proxy error: %v", err) } }() defer func() { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := proxy.Shutdown(ctx); err != nil { log.Fatal(err) } server.GracefulStop() if err := n.Close(); err != nil { log.Fatal(err) } }() fmt.Println("Welcome to Threads!") fmt.Println("Your peer ID is " + n.Host().ID().String()) log.Println("threadsd started") select {} } type Bucket struct { Key string `json:"_id"` Name string `json:"name"` Path string `json:"path"` DNSRecord string `json:"dns_record,omitempty"` //Archives Archives `json:"archives"` CreatedAt int64 `json:"created_at"` UpdatedAt int64 `json:"updated_at"` } func initUser(threads *tc.Client, buckets *bc.Client, users *uc.Client, netclient *netapiclient.Client, user string, bucketSlug string) *pb.CreateResponse { // only needed for hub connections key := os.Getenv("TXL_USER_KEY") secret := os.Getenv("TXL_USER_SECRET") if key == "" || secret == "" { return nil } // TODO: this should be happening in an auth lambda ctx := context.Background() ctx = common.NewAPIKeyContext(ctx, key) var apiSigCtx context.Context var err error if apiSigCtx, err = common.CreateAPISigContext(ctx, time.Now().Add(time.Minute), secret); err != nil { return nil } ctx = apiSigCtx if err != nil { log.Println("error creating APISigContext") log.Fatal(err) } // TODO: get from key manager instead sk, _, err := crypto.GenerateEd25519Key(rand.Reader) // TODO: CTX has to be made from session key received from lambda // ctx on next line needs to be rebuilt from the authorization from the lambda tok, err := threads.GetToken(ctx, thread.NewLibp2pIdentity(sk)) ctx = thread.NewTokenContext(ctx, tok) mid, err := users.SetupMailbox(ctx) if err != nil { log.Println("Unable to setup sender mailbox", err) return nil } log.Println("Sender Mailbox id: ", mid.String()) // generate random recipient rsk, _, err := crypto.GenerateEd25519Key(rand.Reader) id := thread.NewLibp2pIdentity(rsk) 
rctx := context.Background() rctx = common.NewAPIKeyContext(rctx, key) var rapiSigCtx context.Context if rapiSigCtx, err = common.CreateAPISigContext(rctx, time.Now().Add(time.Minute), secret); err != nil { return nil } rctx = rapiSigCtx rtok, err := threads.GetToken(rctx, thread.NewLibp2pIdentity(rsk)) rctx = thread.NewTokenContext(rctx, rtok) mid2, err := users.SetupMailbox(rctx) if err != nil { log.Println("Unable to setup recipient mailbox", err) return nil } log.Println("Recipient Mailbox id: ", mid2.String()) msg, err := users.SendMessage(ctx, thread.NewLibp2pIdentity(sk), id.GetPublic(), []byte("hello")) if err != nil { log.Println("Unable to send", err) return nil } log.Println("msg: " + string(msg.Body)) // create thread ctx = common.NewThreadNameContext(ctx, user+"-"+bucketSlug) dbID := thread.NewIDV1(thread.Raw, 32) // TODO: store threadid in config if err := threads.NewDB(ctx, dbID); err != nil { log.Println("error calling threads.NewDB") log.Fatal(err) } ctx = common.NewThreadIDContext(ctx, dbID) // create bucket buck, err := buckets.Create(ctx, bc.WithName(bucketSlug), bc.WithPrivate(true)) buckets.Create(ctx, bc.WithName(bucketSlug+"2"), bc.WithPrivate(true)) log.Println("finished creating bucket") hostid, err := netclient.GetHostID(ctx) if err != nil { log.Println("error getting HOST ID: ", err) } log.Println("HOSTID: ", hostid) newCtx, cancel := context.WithCancel(ctx) defer cancel() opt := tc.ListenOption{} //listPath on a folder that doesnt exist lp, err := buckets.ListPath(ctx, buck.Root.Key, "random/folderA/doesntexists") if err != nil { log.Println("error doing list path on non existent directoy: ", err) } log.Println("lp1: ", lp) db, err := users.ListThreads(ctx) if err != nil { fmt.Println("error getting list dbs") fmt.Println(err) } fmt.Println("listing dbs") for k, v := range db.GetList() { fmt.Println("looping through thread id: ", k) fmt.Println("db info: ", v) } emptyDirPath := strings.TrimRight("dummy", "/") + "/" + ".keep" _, _, err = 
buckets.PushPath(ctx, buck.Root.Key, emptyDirPath, &bytes.Buffer{}) //listPath on a folder that exists r := strings.NewReader("IPFS test data for reader") r2 := strings.NewReader("IPFS test data ./tfor reader2") buckets.PushPath(ctx, buck.Root.Key, "another/folderB/file1", r) buckets.PushPath(ctx, buck.Root.Key, "another/folderB/file2", r2) lp, err = buckets.ListPath(ctx, buck.Root.Key, "another/folderB") if err != nil { log.Println("error doing list path on non existent directoy: ", err) } log.Println("lp2: ", lp) // put in go routine channel, err := threads.Listen(newCtx, dbID, []tc.ListenOption{opt}) log.Println("finished creating channel") if err != nil { log.Fatalf("failed to call listen: %v", err) } go func() { time.Sleep(time.Second) buckets.Create(ctx, bc.WithName(bucketSlug+"3"), bc.WithPrivate(true)) }() // a separete go routine that keeps checking if msgs are there // and calls handler function val, ok := <-channel if !ok { log.Println("channel no longer active at first events") } else { log.Println("received from channel!!!!") log.Println(val) instance := &Bucket{} if err = json.Unmarshal(val.Action.Instance, instance); err != nil { log.Fatalf("failed to unmarshal listen result: %v", err) } log.Printf("instance: %+v", *instance) } val, ok = <-channel if !ok { log.Println("channel 2 no longer active at first events") } else { log.Println("received 2 from channel!!!!") log.Println(val) } log.Println("finished creating channel") if err != nil { log.Fatalf("failed to call listen: %v", err) } val, ok = <-channel if !ok { log.Println("channel no longer active at first events") } else { log.Println("received from channel!!!!") log.Println(val) } val, ok = <-channel if !ok { log.Println("channel 2 no longer active at first events") } else { log.Println("received 2 from channel!!!!") log.Println(val) } return buck } func main() { mode := os.Args[1] if mode == "threads" { log.Println("running in process threads") runThreadsLocally() return } if mode == "hub" { 
var threads *tc.Client var buckets *bc.Client // might need these for other ops so leaving here as commented // out and below var users *uc.Client // var hub *hc.Client var err error host := os.Getenv("TXL_HUB_TARGET") threadstarget := os.Getenv("TXL_THREADS_TARGET") fmt.Println("hub host: " + host) fmt.Println("threads host: " + threadstarget) auth := common.Credentials{} var opts []grpc.DialOption hubTarget := host if strings.Contains(host, "443") { creds := credentials.NewTLS(&tls.Config{}) opts = append(opts, grpc.WithTransportCredentials(creds)) auth.Secure = true } else { opts = append(opts, grpc.WithInsecure()) } opts = append(opts, grpc.WithPerRPCCredentials(auth)) buckets, err = bc.NewClient(hubTarget, opts...) if err != nil { cmd.Fatal(err) } threads, err = tc.NewClient(threadstarget, opts...) if err != nil { cmd.Fatal(err) } users, err = uc.NewClient(hubTarget, opts...) if err != nil { cmd.Fatal(err) } netclient, err := netapiclient.NewClient(host, opts...) if err != nil { cmd.Fatal(err) } log.Println("Finished client init, calling user init ...") // hub res := initUser(threads, buckets, users, netclient, "test-user", "test-bucket") log.Println(res) } } ================================================ FILE: examples/textileBucketsClient/create-thread-with-key/create-thread-with-key.go ================================================ package main import ( "context" "crypto/ed25519" "fmt" "log" "os" "os/user" ma "github.com/multiformats/go-multiaddr" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" "github.com/textileio/go-threads/db" "github.com/textileio/textile/v2/api/common" "github.com/textileio/textile/v2/cmd" "github.com/textileio/textile/v2/core" "google.golang.org/grpc" ) var IpfsAddr string var MongoUsr string var MongoPw string var MongoHost string var MongoRepSet string const exampleThreadName = "meow" func main() { IpfsAddr = os.Getenv("IPFS_ADDR") MongoUsr = os.Getenv("MONGO_USR") MongoPw = 
os.Getenv("MONGO_PW") MongoHost = os.Getenv("MONGO_HOST") MongoRepSet = os.Getenv("MONGO_REPLICA_SET") addrAPI := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/3006") addrAPIProxy := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/3007") addrThreadsHost := cmd.AddrFromStr("/ip4/0.0.0.0/tcp/4006") addrIpfsAPI := cmd.AddrFromStr(IpfsAddr) addrGatewayHost := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/8006") addrGatewayURL := "http://127.0.0.1:8006" fmt.Println("mongo host: ", MongoHost) addrMongoURI := "mongodb://" + MongoUsr + ":" + MongoPw + "@" + MongoHost + "/?ssl=true&replicaSet=" + MongoRepSet + "&authSource=admin&retryWrites=true&w=majority" usr, err := user.Current() if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() textile, err := core.NewTextile(ctx, core.Config{ RepoPath: usr.HomeDir + "/.buckd/repo", AddrAPI: addrAPI, AddrAPIProxy: addrAPIProxy, AddrThreadsHost: addrThreadsHost, AddrIPFSAPI: addrIpfsAPI, AddrGatewayHost: addrGatewayHost, AddrGatewayURL: addrGatewayURL, AddrMongoURI: addrMongoURI, AddrMongoName: "buckets", Debug: false, }) if err != nil { log.Fatal(err) } defer textile.Close(false) textile.Bootstrap() fmt.Println("Welcome to Buckets!") fmt.Println("Your peer ID is " + textile.HostID().String()) fmt.Println("starting join thread") addr := os.Getenv("JOIN_THREAD_ADDR") key := os.Getenv("JOIN_THREAD_KEY") m1, _ := ma.NewMultiaddr(addr) var threads *tc.Client host := "127.0.0.1:3006" auth := common.Credentials{} var opts []grpc.DialOption threadstarget := host opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithPerRPCCredentials(auth)) threads, err = tc.NewClient(threadstarget, opts...) 
if err != nil { cmd.Fatal(err) } threadCtx := context.Background() k, err := thread.KeyFromString(key) pub, _, err := ed25519.GenerateKey(nil) if err != nil { fmt.Println("error generating key: ", err) return } // no need to crypto.UnmarshalEd25519PublicKey(pub) managedKey, err := thread.KeyFromBytes(pub) if err != nil { fmt.Println("error key from bytes: ", err) return } err = threads.NewDBFromAddr(threadCtx, m1, k, db.WithNewManagedThreadKey(managedKey)) if err != nil { fmt.Println("error new db from addr: ", err) } } ================================================ FILE: examples/textileBucketsClient/join-thread/join-thread.go ================================================ package main import ( "context" "fmt" "log" "os" "os/user" ma "github.com/multiformats/go-multiaddr" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" "github.com/textileio/textile/v2/api/common" "github.com/textileio/textile/v2/cmd" "github.com/textileio/textile/v2/core" "google.golang.org/grpc" ) var IpfsAddr string var MongoUsr string var MongoPw string var MongoHost string var MongoRepSet string func main() { IpfsAddr = os.Getenv("IPFS_ADDR") MongoUsr = os.Getenv("MONGO_USR") MongoPw = os.Getenv("MONGO_PW") MongoHost = os.Getenv("MONGO_HOST") MongoRepSet = os.Getenv("MONGO_REPLICA_SET") addrAPI := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/3006") addrAPIProxy := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/3007") addrThreadsHost := cmd.AddrFromStr("/ip4/0.0.0.0/tcp/4006") addrIpfsAPI := cmd.AddrFromStr(IpfsAddr) addrGatewayHost := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/8006") addrGatewayURL := "http://127.0.0.1:8006" fmt.Println("mongo host: ", MongoHost) addrMongoURI := "mongodb://" + MongoUsr + ":" + MongoPw + "@" + MongoHost + "/?ssl=true&replicaSet=" + MongoRepSet + "&authSource=admin&retryWrites=true&w=majority" usr, err := user.Current() if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() textile, err := 
core.NewTextile(ctx, core.Config{ RepoPath: usr.HomeDir + "/.buckd/repo", AddrAPI: addrAPI, AddrAPIProxy: addrAPIProxy, AddrThreadsHost: addrThreadsHost, AddrIPFSAPI: addrIpfsAPI, AddrGatewayHost: addrGatewayHost, AddrGatewayURL: addrGatewayURL, AddrMongoURI: addrMongoURI, AddrMongoName: "buckets", Debug: false, }) if err != nil { log.Fatal(err) } defer textile.Close(false) textile.Bootstrap() fmt.Println("Welcome to Buckets!") fmt.Println("Your peer ID is " + textile.HostID().String()) fmt.Println("starting join thread") addr := os.Getenv("JOIN_THREAD_ADDR") key := os.Getenv("JOIN_THREAD_KEY") m1, _ := ma.NewMultiaddr(addr) var threads *tc.Client host := "127.0.0.1:3006" auth := common.Credentials{} var opts []grpc.DialOption threadstarget := host opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithPerRPCCredentials(auth)) threads, err = tc.NewClient(threadstarget, opts...) if err != nil { cmd.Fatal(err) } threadCtx := context.Background() k, err := thread.KeyFromString(key) err = threads.NewDBFromAddr(threadCtx, m1, k) if err != nil { fmt.Println("error new db from addr: ", err) } db, err := threads.ListDBs(threadCtx) fmt.Println("about to loop thru dbs: ", db) for k, v := range db { fmt.Println("looping through thread id: ", k) fmt.Println("db info: ", v) } } ================================================ FILE: examples/textileBucketsClient/local-buck/local-buck.go ================================================ package main import ( "context" "fmt" "os" "os/user" "github.com/FleekHQ/space-daemon/log" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" nc "github.com/textileio/go-threads/net/api/client" bc "github.com/textileio/textile/v2/api/bucketsd/client" "github.com/textileio/textile/v2/api/common" "github.com/textileio/textile/v2/cmd" "github.com/textileio/textile/v2/core" "google.golang.org/grpc" ) var IpfsAddr string var MongoUsr string var MongoPw string var MongoHost string var MongoRepSet 
string func main() { IpfsAddr = os.Getenv("IPFS_ADDR") MongoUsr = os.Getenv("MONGO_USR") MongoPw = os.Getenv("MONGO_PW") MongoHost = os.Getenv("MONGO_HOST") MongoRepSet = os.Getenv("MONGO_REPLICA_SET") addrAPI := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/3006") addrAPIProxy := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/3007") addrThreadsHost := cmd.AddrFromStr("/ip4/0.0.0.0/tcp/4006") addrIpfsAPI := cmd.AddrFromStr(IpfsAddr) addrGatewayHost := cmd.AddrFromStr("/ip4/127.0.0.1/tcp/8006") addrGatewayURL := "http://127.0.0.1:8006" fmt.Println("mongo host: ", MongoHost) addrMongoURI := "mongodb://" + MongoUsr + ":" + MongoPw + "@" + MongoHost + "/?ssl=true&replicaSet=" + MongoRepSet + "&authSource=admin&retryWrites=true&w=majority" usr, err := user.Current() if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() textile, err := core.NewTextile(ctx, core.Config{ RepoPath: usr.HomeDir + "/.buckd/repo", AddrAPI: addrAPI, AddrAPIProxy: addrAPIProxy, AddrThreadsHost: addrThreadsHost, AddrIPFSAPI: addrIpfsAPI, AddrGatewayHost: addrGatewayHost, AddrGatewayURL: addrGatewayURL, AddrMongoURI: addrMongoURI, AddrMongoName: "buckets", Debug: false, }) if err != nil { log.Fatal(err) } defer textile.Close(false) textile.Bootstrap() fmt.Println("Welcome to Buckets!") fmt.Println("Your peer ID is " + textile.HostID().String()) // now create a bucket on that thread var threads *tc.Client var buckets *bc.Client var netc *nc.Client host := "127.0.0.1:3006" auth := common.Credentials{} var opts []grpc.DialOption hubTarget := host threadstarget := host opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithPerRPCCredentials(auth)) buckets, err = bc.NewClient(hubTarget, opts...) if err != nil { cmd.Fatal(err) } threads, err = tc.NewClient(threadstarget, opts...) if err != nil { cmd.Fatal(err) } netc, err = nc.NewClient(host, opts...) 
log.Info("Finished client init, calling user init ...") threadCtx := context.Background() threadCtx = common.NewThreadNameContext(threadCtx, "testthreadname") dbID := thread.NewIDV1(thread.Raw, 32) if err := threads.NewDB(threadCtx, dbID); err != nil { log.Info("error calling threads.NewDB") log.Fatal(err) } ctx = common.NewThreadIDContext(threadCtx, dbID) buck, err := buckets.Create(ctx, bc.WithName("personal"), bc.WithPrivate(true)) fmt.Println("info: ", buck) db, err := threads.ListDBs(ctx) fmt.Println("got back from listdbs") for k, v := range db { fmt.Println("looping through thread id: ", k) fmt.Println("db info - Addrs: ", v.Addrs) fmt.Println("db info - Key: ", v.Key) fmt.Println("db info - Name: ", v.Name) // replicate on hub peerid, err := netc.AddReplicator(ctx, dbID, cmd.AddrFromStr(os.Getenv("TXL_HUB_MA"))) if err != nil { fmt.Println("Unable to replicate on the hub: " + err.Error()) } fmt.Println("peerid: ", peerid) } } ================================================ FILE: examples/textileBucketsClient/open-share-file/open-share-file.go ================================================ package main import ( "bytes" "context" "crypto/rand" "crypto/tls" "log" "os" "strings" "time" "github.com/libp2p/go-libp2p-core/crypto" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" bc "github.com/textileio/textile/v2/api/bucketsd/client" buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/api/common" tb "github.com/textileio/textile/v2/buckets" "github.com/textileio/textile/v2/cmd" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) type TextileBucketRoot buckets_pb.Root func main() { host := os.Getenv("TXL_HUB_TARGET") key := os.Getenv("TXL_USER_KEY") secret := os.Getenv("TXL_USER_SECRET") var threads *tc.Client var buckets *bc.Client var err error auth := common.Credentials{} var opts []grpc.DialOption hubTarget := host threadstarget := host if 
strings.Contains(host, "443") { creds := credentials.NewTLS(&tls.Config{}) opts = append(opts, grpc.WithTransportCredentials(creds)) auth.Secure = true } else { opts = append(opts, grpc.WithInsecure()) } opts = append(opts, grpc.WithPerRPCCredentials(auth)) buckets, err = bc.NewClient(hubTarget, opts...) if err != nil { cmd.Fatal(err) } threads, err = tc.NewClient(threadstarget, opts...) if err != nil { cmd.Fatal(err) } user1, _, err := crypto.GenerateEd25519Key(rand.Reader) if err != nil { log.Println("error creating user1") log.Fatal(err) } user2, _, err := crypto.GenerateEd25519Key(rand.Reader) if err != nil { log.Println("error creating user2") log.Fatal(err) } // user 1 creates bucket and adds file ctx := context.Background() ctx = common.NewAPIKeyContext(ctx, key) ctx, err = common.CreateAPISigContext(ctx, time.Now().Add(time.Minute*2), secret) if err != nil { log.Println("error creating APISigContext") log.Fatal(err) } tok, err := threads.GetToken(ctx, thread.NewLibp2pIdentity(user1)) if err != nil { log.Println("error calling GetToken") log.Fatal(err) } ctx = thread.NewTokenContext(ctx, tok) bucket1name := "testbucket1" ctx = common.NewThreadNameContext(ctx, bucket1name) dbID := thread.NewIDV1(thread.Raw, 32) if err := threads.NewDB(ctx, dbID); err != nil { log.Println("error calling threads.NewDB") log.Fatal(err) } ctx = common.NewThreadIDContext(ctx, dbID) buck, err := buckets.Create(ctx, bc.WithName(bucket1name), bc.WithPrivate(true)) log.Println("created bucket: " + buck.Root.Name) filepath := "file1" f := &bytes.Buffer{} f.Write([]byte("hello space")) _, _, err = buckets.PushPath(ctx, buck.Root.Key, filepath, f) if err != nil { log.Println("error pushing path") log.Fatal(err) } roles := make(map[string]tb.Role) tpk := thread.NewLibp2pPubKey(user2.GetPublic()) roles[tpk.String()] = tb.Admin err = buckets.PushPathAccessRoles(ctx, buck.Root.Key, filepath, roles) if err != nil { log.Println("error sharing path") log.Fatal(err) } // user 2 tries to access 
ctx1 := context.Background() ctx1 = common.NewAPIKeyContext(ctx1, key) ctx1, err = common.CreateAPISigContext(ctx1, time.Now().Add(time.Minute*2), secret) tok, err = threads.GetToken(ctx1, thread.NewLibp2pIdentity(user2)) ctx1 = thread.NewTokenContext(ctx1, tok) if err != nil { log.Println("error creating context") log.Fatal(err) } ctx1 = common.NewThreadNameContext(ctx1, bucket1name) ctx1 = common.NewThreadIDContext(ctx1, dbID) var buf bytes.Buffer err = buckets.PullPath(ctx1, buck.Root.Key, filepath, &buf) if err != nil { log.Println("error pulling path") log.Fatal(err) } s := buf.String() log.Println("fetch file content: " + s) } ================================================ FILE: examples/textileBucketsClient/set-envs ================================================ export TXL_HUB_TARGET= export TXL_USER_KEY= export TXL_USER_SECRET= export MONGO_PW= export MONGO_USR= export MONGO_HOST= export KEY_SEED= export THREAD_ID= export IPFS_ADDR= export JOIN_THREAD_ADDR= export JOIN_THREAD_KEY= ================================================ FILE: examples/textileBucketsClient/sync-test/sync-test.go ================================================ package main import ( "bytes" "context" "crypto/ed25519" "encoding/hex" "fmt" "log" "os" "time" "github.com/libp2p/go-libp2p-core/crypto" tc "github.com/textileio/go-threads/api/client" "github.com/textileio/go-threads/core/thread" bc "github.com/textileio/textile/v2/api/bucketsd/client" buckets_pb "github.com/textileio/textile/v2/api/bucketsd/pb" "github.com/textileio/textile/v2/api/common" "github.com/textileio/textile/v2/cmd" "google.golang.org/grpc" ) type TextileBucketRoot buckets_pb.Root func main() { seed := os.Getenv("KEY_SEED") threadID := os.Getenv("THREAD_ID") host := os.Getenv("TXL_HUB_TARGET") key := os.Getenv("TXL_USER_KEY") secret := os.Getenv("TXL_USER_SECRET") var threads *tc.Client var buckets *bc.Client var err error auth := common.Credentials{} var opts []grpc.DialOption hubTarget := host threadstarget 
:= host opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithPerRPCCredentials(auth)) buckets, err = bc.NewClient(hubTarget, opts...) if err != nil { cmd.Fatal(err) } threads, err = tc.NewClient(threadstarget, opts...) if err != nil { cmd.Fatal(err) } ctx := context.Background() ctx = common.NewAPIKeyContext(ctx, key) ctx, err = common.CreateAPISigContext(ctx, time.Now().Add(time.Minute*2), secret) if err != nil { log.Println("error creating APISigContext") log.Fatal(err) } sb, err := hex.DecodeString(seed) pvk := ed25519.NewKeyFromSeed(sb) pbk := make([]byte, 32) copy(pbk, pvk[32:]) var unmarshalledPriv crypto.PrivKey var unmarshalledPub crypto.PubKey if unmarshalledPriv, err = crypto.UnmarshalEd25519PrivateKey(pvk); err != nil { log.Fatal("Cant get libp2p version of priv key") return } if unmarshalledPub, err = crypto.UnmarshalEd25519PublicKey(pbk); err != nil { log.Fatal("Cant get libp2p version of pub key") return } log.Println("got libp2p keys") tok, err := threads.GetToken(ctx, thread.NewLibp2pIdentity(unmarshalledPriv)) ctx = thread.NewTokenContext(ctx, tok) var pubKeyInBytes []byte if pubKeyInBytes, err = unmarshalledPub.Bytes(); err != nil { log.Fatal("Cant get bytes of pubkey") return } ctx = common.NewThreadNameContext(ctx, hex.EncodeToString(pubKeyInBytes)+"-personal") dbBytes, err := hex.DecodeString(threadID) dbID, err := thread.Cast(dbBytes) ctx = common.NewThreadIDContext(ctx, dbID) log.Println("got thread id ctx") bucketList, err := buckets.List(ctx) if err != nil { log.Fatal("Cant get list of buckets", err) return } result := make([]*TextileBucketRoot, 0) for _, r := range bucketList.Roots { log.Println("looping through bucket: ", (*TextileBucketRoot)(r).Name) if (*TextileBucketRoot)(r).Name == "personal" { _, _, err = buckets.PushPath(ctx, (*TextileBucketRoot)(r).Key, fmt.Sprint(int32(time.Now().Unix()))+"synctestfile.md", &bytes.Buffer{}) result = append(result, (*TextileBucketRoot)(r)) } } } 
================================================ FILE: go.mod ================================================ module github.com/FleekHQ/space-daemon go 1.14 replace github.com/textileio/go-threads => github.com/FleekHQ/go-threads v1.0.1-0.20201028195307-d9371c20fe66 replace github.com/textileio/textile/v2 => github.com/FleekHQ/textile/v2 v2.0.0-20201127024116-cee5aaade92c replace github.com/libp2p/go-libp2p-pubsub => github.com/libp2p/go-libp2p-pubsub v0.3.2 replace github.com/libp2p/go-libp2p-core => github.com/libp2p/go-libp2p-core v0.6.1 replace github.com/libp2p/go-libp2p => github.com/libp2p/go-libp2p v0.10.3 replace github.com/libp2p/go-libp2p-swarm => github.com/libp2p/go-libp2p-swarm v0.2.8 require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc github.com/99designs/keyring v1.1.5 github.com/alecthomas/jsonschema v0.0.0-20191017121752-4bb6e3fae4f2 github.com/blevesearch/bleve v1.0.12 github.com/creamdog/gonfig v0.0.0-20160810132730-80d86bfb5a37 github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect github.com/dgraph-io/badger v1.6.2 github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/golang/protobuf v1.4.3 github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 github.com/grpc-ecosystem/grpc-gateway v1.14.6 github.com/hsanjuan/ipfs-lite v1.1.17 // indirect github.com/improbable-eng/grpc-web v0.13.0 github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-ipfs v0.7.0 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-config v0.10.0 github.com/ipfs/go-ipfs-files v0.0.8 github.com/ipfs/go-ipfs-http-client v0.1.0 github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-merkledag v0.3.2 github.com/ipfs/go-path v0.0.8 // indirect github.com/ipfs/go-unixfs v0.2.4 github.com/ipfs/interface-go-ipfs-core v0.4.0 
github.com/jmhodges/levigo v1.0.0 // indirect github.com/joho/godotenv v1.3.0 github.com/keybase/go-kext v0.0.0-20200218013902-e4a86908886a github.com/libp2p/go-libp2p-connmgr v0.2.4 github.com/libp2p/go-libp2p-core v0.7.0 github.com/libp2p/go-libp2p-crypto v0.1.0 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-multiaddr v0.3.1 github.com/multiformats/go-multibase v0.0.3 github.com/multiformats/go-multihash v0.0.14 github.com/odeke-em/go-utils v0.0.0-20170224015737-e8ebaed0777a github.com/onsi/ginkgo v1.14.2 github.com/onsi/gomega v1.10.3 github.com/opentracing/opentracing-go v1.2.0 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.7.1 // indirect github.com/radovskyb/watcher v1.0.7 github.com/rs/cors v1.7.0 github.com/sirupsen/logrus v1.7.0 github.com/stretchr/testify v1.6.1 github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/textileio/dcrypto v0.0.1 github.com/textileio/go-threads v1.0.1 github.com/textileio/textile/v2 v2.1.7 github.com/tyler-smith/go-bip39 v1.0.2 github.com/uber/jaeger-client-go v2.23.1+incompatible golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 google.golang.org/genproto v0.0.0-20200702021140-07506425bd67 google.golang.org/grpc v1.33.1 google.golang.org/protobuf v1.25.0 gorm.io/driver/sqlite v1.1.3 gorm.io/gorm v1.20.5 gotest.tools v2.2.0+incompatible ) ================================================ FILE: go.sum ================================================ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510= bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod 
h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0 h1:69FNAINiZfsEuwH3fKq8QrAAnHz+2m4XL4kVYi5BX0Q= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= 
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= contrib.go.opencensus.io/exporter/jaeger v0.1.0 h1:WNc9HbA38xEQmsI40Tjd/MNU/g8byN2Of7lwIjv0Jdc= contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= contrib.go.opencensus.io/exporter/prometheus v0.2.0/go.mod h1:TYmVAyE8Tn1lyPcltF5IYYfWp2KHu7lQGIZnj8iZMys= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/99designs/keyring v1.1.5 h1:wLv7QyzYpFIyMSwOADq1CLTF9KbjbBfcnfmOGJ64aO4= github.com/99designs/keyring v1.1.5/go.mod h1:7hsVvt2qXgtadGevGJ4ujg+u8m6SpJ5TpHqTozIPqf0= github.com/AlecAivazis/survey/v2 v2.0.7/go.mod h1:mlizQTaPjnR4jcpwRSaSlkbsRfYFEyKgLQvYTzxxiHA= github.com/AndreasBriese/bbloom 
v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190823232136-616930265c33 h1:2/E2IVdZoHh/aCBq4Gchy2MGWkTmbReP46/Wnt9qhKs= github.com/AndreasBriese/bbloom v0.0.0-20190823232136-616930265c33/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/FleekHQ/go-threads v1.0.1-0.20201028195307-d9371c20fe66 h1:JppBUaU4v56XxHj8fOMK4jjYCKCdiEMOrhXibu/Jbs8= github.com/FleekHQ/go-threads v1.0.1-0.20201028195307-d9371c20fe66/go.mod h1:mQgVlEzC++pJ0+EyrhW19y5ZHa8xogdYQq/8pLUvOm4= github.com/FleekHQ/space-daemon v0.0.33/go.mod h1:dpCcfU+b6FP+whLdnPwaSAbcmVGQwYcNedsbEumE/Mc= github.com/FleekHQ/textile/v2 v2.0.0-20201116173414-db43a5d7591f h1:Phw2fWt6eJ2SAVXJNZaGXqQyT3I5HDOxibM5ocqVLvM= github.com/FleekHQ/textile/v2 v2.0.0-20201116173414-db43a5d7591f/go.mod h1:BXhNUwvl7dR0fvNi7xaKTSUEent09vJ7bhNYkMNzQ7c= github.com/FleekHQ/textile/v2 v2.0.0-20201127024116-cee5aaade92c 
h1:Qu2C3VP+G0maRQYrOxyU0HTqVBNXAjh/5Uyw3Y/DIQ8= github.com/FleekHQ/textile/v2 v2.0.0-20201127024116-cee5aaade92c/go.mod h1:telji7Pfrll6wugWd0cSk1u3FSsTESOPfL39r0QTloU= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi 
v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/Stebalien/go-bitfield v0.0.0-20180330043415-076a62f9ce6e/go.mod h1:3oM7gXIttpYDAJXpVNnSCiUMYBLIZ6cb1t+Ip982MRo= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI= github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE= github.com/alecthomas/jsonschema v0.0.0-20191017121752-4bb6e3fae4f2 h1:swGeCLPiUQ647AIRnFxnAHdzlg6IPpmU6QdkOPZINt8= github.com/alecthomas/jsonschema v0.0.0-20191017121752-4bb6e3fae4f2/go.mod h1:Juc2PrI3wtNfUwptSvAIeNx+HrETwHQs6nf+TkOJlOA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apoorvam/goterminal v0.0.0-20180523175556-614d345c47e5/go.mod h1:E7x8aDc3AQzDKjEoIZCt+XYheHk2OkP+p2UgeNjecH8= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/awalterschulze/gographviz v0.0.0-20190522210029-fa59802746ab/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= 
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.29.15/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.32.11 h1:1nYF+Tfccn/hnAZsuwPPMSCVUVnx3j6LKOpx/WhgH0A= github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2 h1:Z0CN0Yb4ig9sGPXkvAQcGJfnrrMQ5QYLCMPRi9iD7YE= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blevesearch/bleve v1.0.12 h1:2qJUSBpU/h1z8x3ERRB5WwpmEpJwoivPqmDpHzv4tuk= github.com/blevesearch/bleve v1.0.12/go.mod h1:G0ErXWdIrUSYZLPoMpS9Z3saTnTsk4ebhPsVv/+0nxk= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= github.com/blevesearch/blevex 
v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= github.com/blevesearch/zap/v11 v11.0.12 h1:ZA+80yajko2tXr1kmbSoVRMCo0mFZAVJmoijjYsZuwc= github.com/blevesearch/zap/v11 v11.0.12/go.mod h1:JLfFhc8DWP01zMG/6VwEY2eAnlJsTN1vDE4S0rC5Y78= github.com/blevesearch/zap/v12 v12.0.12 h1:9eWaL9/2hcjy1VR3lrl/b+kWh5G7w/BkNYI07mWActw= github.com/blevesearch/zap/v12 v12.0.12/go.mod h1:1HrB4hhPfI8u8x4SPYbluhb8xhflpPvvj8EcWImNnJY= github.com/blevesearch/zap/v13 v13.0.4 h1:eoRvJmLeIQUs1mAF+fAFALg1dPHOI1e1KFuXL0I7us4= github.com/blevesearch/zap/v13 v13.0.4/go.mod h1:YdB7UuG7TBWu/1dz9e2SaLp1RKfFfdJx+ulIK5HR1bA= github.com/blevesearch/zap/v14 v14.0.3 h1:ccEv296u6DEUHFF9U4W2E/6/WkbuDrS9/1VJM34SCzA= github.com/blevesearch/zap/v14 v14.0.3/go.mod h1:oObAhcDHw7p1ahiTCqhRkdxdl7UA8qpvX10pSgrTMHc= github.com/blevesearch/zap/v15 v15.0.1 h1:jEism63eY+qdcvwXH0K8MiKhv5tb10T1k7SNx6fauCM= github.com/blevesearch/zap/v15 v15.0.1/go.mod h1:ho0frqAex2ktT9cYFAxQpoQXsxb/KEfdjpx4s49rf/M= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= 
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d h1:QgeLLoPD3kRVmeu/1al9iIpIANMi9O1zXFm8BnYGCJg= github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d/go.mod h1:Jbj8eKecMNwf0KFI75skSUZqMB4UCRcndUScVBTWyUI= github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod 
h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/caarlos0/spin v1.1.0/go.mod h1:HOC4pUvfhjXR2yDt+sEY9dRc2m4CCaK5z5oQYAbzXSA= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/clbanning/x2j 
v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.11.6 h1:gErXaYucoS8aHdmoJnF4RMFiXJH449sk6rCtoP6EhrE= github.com/cloudflare/cloudflare-go v0.11.6/go.mod h1:lmCbgQdBeSQlMv0W0OSqoGgl8aFrgc5oXHhWMt47dh0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= github.com/couchbase/vellum v1.0.2 h1:BrbP0NKiyDdndMPec8Jjhy0U47CZ0Lgx3xUC2r9rZqw= github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 
h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creamdog/gonfig v0.0.0-20160810132730-80d86bfb5a37 h1:1oltS/xFsArksN6n2nXIYU5tkkDBqKgpcOvfPsTepR4= github.com/creamdog/gonfig v0.0.0-20160810132730-80d86bfb5a37/go.mod h1:Hhbh5su1JZ8cglUlxBwQjz0uwtmFhV/0D6DgvU3oT+4= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto 
v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm 
v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk= github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 h1:Y9vTBSsV4hSwPSj4bacAU/eSnV3dAxVpepaghAdhGoQ= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= github.com/drand/drand v0.9.2-0.20200616080806-a94e9c1636a4/go.mod h1:Bu8QYdU0YdB2ZQZezHxabmOIciddiwLRnyV4nuZ2HQE= github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= github.com/drand/kyber v1.1.0/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= github.com/drand/kyber-bls12381 v0.1.0/go.mod h1:N1emiHpm+jj7kMlxEbu3MUyOiooTgNySln564cgD9mk= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvsekhvalnov/jose2go v0.0.0-20180829124132-7f401d37b68a h1:mq+R6XEM6lJX5VlLyZIrUSP8tSuJp82xTK89hvBwJbU= github.com/dvsekhvalnov/jose2go v0.0.0-20180829124132-7f401d37b68a/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302 h1:QV0ZrfBLpFc2KDk+a4LJefDczXnonRwrYrQJY/9L4dA= github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302/go.mod h1:qBlWZqWeVx9BjvqBsnC/8RUlAYpIFmPvgROcw0n1scE= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/filecoin-project/chain-validation v0.0.6-0.20200615191232-6be1a8c6ed09/go.mod h1:HEJn6kOXMNhCNBYNTO/lrEI7wSgqCOR6hN5ecfYUnC8= github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072/go.mod h1:PtH9YP0rURHUKHrKeEBeWg/BqIBMQOz8wtlXlVGREBE= github.com/filecoin-project/filecoin-ffi v0.26.1-0.20200508175440-05b30afeb00d/go.mod h1:vlQ7sDkbrtM70QMJFDvEyTDywY5SvIjadRCUB+76l90= github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d/go.mod h1:XE4rWG1P7zWPaC11Pkn1CVR20stqN52MnMkIrF4q6ZU= github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f/go.mod h1:+If3s2VxyjZn+KGGZIoRXBDSFQ9xL404JBJGf4WhEj0= github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= github.com/filecoin-project/go-address v0.0.2-0.20200504173055-8b6f2fb2b3ef/go.mod h1:SrA+pWVoUivqKOfC+ckVYbx41hWz++HxJcrlmHNnebU= github.com/filecoin-project/go-address 
v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v0.3.0/go.mod h1:cONglGP4s/d+IUQw5mWZrQK+FQATQxr3AXzi4dRh0l4= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= github.com/filecoin-project/go-data-transfer v1.1.0/go.mod h1:ZAH51JZFR8NZC4FPiDPG+swjgui0q6zTMJbztc6pHhY= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= 
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets v0.3.0/go.mod h1:UXsXi43AyUQ5ieb4yIaLgk4PVt7TAbl1UCccuNw+7ds= github.com/filecoin-project/go-fil-markets v1.0.4/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24/go.mod h1:j6zV//WXIIY5kky873Q3iIKt/ViOE8rcijovmpxrXzM= github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types 
v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v0.0.0-20200612181802-4eb3d0c68eba/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/lotus v0.4.0/go.mod h1:RGmcSJ6+0D3vXcBgNk6T7fT9Y5UBZ+Aowse3cTi+yZA= github.com/filecoin-project/lotus v1.1.3/go.mod h1:cyY86E8Z30ka3OtlwUoRAFMBjejNHhZqudX6DfZ0ABg= github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= github.com/filecoin-project/sector-storage v0.0.0-20200618073200-d9de9b7cb4b4/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA= github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-actors v0.6.1/go.mod 
h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.2.0/go.mod h1:rlv5Mx9wUhV8Qsz+vUezZNm+zL4tK08O0HreKKPB2Wc= github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/storage-fsm v0.0.0-20200617183754-4380106d3e94/go.mod h1:q1YCutTSMq/yGYvDPHReT37bPfDLHltnwJutzR9kOY0= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gabriel-vasile/mimetype v1.1.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= github.com/gabriel-vasile/mimetype v1.1.1 h1:qbN9MPuRf3bstHu9zkI9jDWNfH//9+9kHxr9oRBBBOA= github.com/gabriel-vasile/mimetype v1.1.1/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/location v0.0.2 h1:QZKh1+K/LLR4KG/61eIO3b7MLuKi8tytQhV6texLgP4= github.com/gin-contrib/location v0.0.2/go.mod h1:NGoidiRlf0BlA/VKSVp+g3cuSMeTmip/63PhEjRhUAc= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 h1:xLG16iua01X7Gzms9045s2Y2niNpvSY/Zb1oBwgNYZY= github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2/go.mod h1:VhW/Ch/3FhimwZb8Oj+qJmdMmoB8r7lmJ5auRjm50oQ= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey 
v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi v4.0.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4= github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod 
h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod 
h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= 
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4= github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQFEufcolZ95JfU8= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.18 
h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosimple/slug v1.9.0 h1:r5vDcYrFz9BmfIAMC829un9hq7hKM4cHUrsv36LbEqs= github.com/gosimple/slug v1.9.0/go.mod h1:AMZ+sOVe65uByN3kgEyf9WEBKBCSS+dJjMX9x4vDJbg= github.com/gosuri/uilive v0.0.4/go.mod h1:V/epo5LjjlDE5RJUcqx8dbw+zc93y5Ya3yg8tfZ74VI= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= 
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 h1:FlFbCRLd5Jr4iYXZufAvgWN6Ao0JrI5chLINnUXDDr0= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3vH7VqgtMxQ= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub 
v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hsanjuan/ipfs-lite v1.1.12/go.mod h1:YBI2I6SLkPCvVK/MjxNKy6Ihs9rQPPHnvCvdSNlXysE= github.com/hsanjuan/ipfs-lite v1.1.15/go.mod h1:LZffwuToye++XukPW/GUAw5XewT+t2lgZl8h/8RqFfs= github.com/hsanjuan/ipfs-lite v1.1.17 h1:Rk/QZD9TeSbRIm+7FbziZPMXRHr7XNVRx+YRJnLWQLc= github.com/hsanjuan/ipfs-lite v1.1.17/go.mod h1:ZetJanzQEAqWj+OwzIppE/S7x+Azu4WFF6PNMLnQGoY= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod 
h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/improbable-eng/grpc-web v0.12.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= github.com/improbable-eng/grpc-web v0.13.0 h1:7XqtaBWaOCH0cVGKHyvhtcuo6fgW32Y10yRKrDHFHOc= github.com/improbable-eng/grpc-web v0.13.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= github.com/ipfs/go-bitswap v0.0.3/go.mod h1:jadAZYsP/tcRMl47ZhFxhaNuDQoXawT8iHMg+iFoQbg= github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps= github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= github.com/ipfs/go-bitswap v0.2.8/go.mod h1:2Yjog0GMdH8+AsxkE0DI9D2mANaUTxbVVav0pPoZoug= github.com/ipfs/go-bitswap v0.2.13/go.mod h1:SDXpLeKZagyVVc8/z7sGtmM/lz8lyAmSzrUx3Ge3GXw= github.com/ipfs/go-bitswap v0.2.19/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= github.com/ipfs/go-bitswap v0.3.1 
h1:YG2QTj7xEJsSvttivoKl4gBbDYtkxsFoliKtepNncyc= github.com/ipfs/go-bitswap v0.3.1/go.mod h1:AyWWfN3moBzQX0banEtfKOfbXb3ZeoOeXnZGNPV9S6w= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.1/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= github.com/ipfs/go-blockservice v0.1.2/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA= github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod 
h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod 
h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.2.4/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.2.6 h1:Hy8jw4rifxtRDrqpvC1yh36oIyE37KDzsUzlHUPOFiU= github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= github.com/ipfs/go-ds-flatfs v0.4.4/go.mod h1:e4TesLyZoA8k1gV/yCuBTnt2PJtypn4XUlB5n8KQMZY= github.com/ipfs/go-ds-flatfs v0.4.5 h1:4QceuKEbH+HVZ2ZommstJMi3o3II+dWS3IhLaD7IGHs= github.com/ipfs/go-ds-flatfs v0.4.5/go.mod h1:e4TesLyZoA8k1gV/yCuBTnt2PJtypn4XUlB5n8KQMZY= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= github.com/ipfs/go-filestore v0.0.3/go.mod h1:dvXRykFzyyXN2CdNlRGzDAkXMDPyI+D7JE066SiKLSE= github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y= github.com/ipfs/go-fs-lock v0.0.5/go.mod h1:fq8gXFwbi1on9KScveHuVJ2wjuqo5jaDgCtZdKLuCO8= 
github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.1.1/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= github.com/ipfs/go-graphsync v0.5.0 h1:iaByvxq88Ys1KcaQzTS1wmRhNsNEo3SaUiSGqTSbGmM= github.com/ipfs/go-graphsync v0.5.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg= github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e/go.mod h1:giiPqWYCnRBYpNTsJ/EX1ojldX5kTXrXYckSJQ7ko9M= github.com/ipfs/go-hamt-ipld v0.1.1-0.20200605182717-0310ad2b0b1f/go.mod h1:phOFBB7W73N9dg1glcb1fQ9HtQFDUpeyJgatW8ns0bw= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs v0.6.1-0.20200817102359-90a573354af2/go.mod h1:mP0bh6FMyOF6SI6W/LsKvPyP1mxHCsVXQFIBAqnde4s= github.com/ipfs/go-ipfs v0.7.0 h1:8qJkP8PounMHhbWJ+sOij5FV3mlJhP+mhCg2JeDV1mg= github.com/ipfs/go-ipfs v0.7.0/go.mod h1:4UNBZMgbAZ6/+xUZDlMkGxMFPiu1RB67+TaNVvKV7ZQ= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= 
github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blockstore v1.0.2 h1:Z8nUlBHK7wVKPKliQCQR9tLgUtz4J2QRbqFcJrqzM+E= github.com/ipfs/go-ipfs-blockstore v1.0.2/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-cmds v0.1.0/go.mod h1:TiK4e7/V31tuEb8YWDF8lN3qrnDH+BS7ZqWIeYJlAs8= github.com/ipfs/go-ipfs-cmds v0.2.2/go.mod h1:kqlUrp6m2ceoaJe40cXpADCi5aS6NKRn0NIeuLp5CeM= github.com/ipfs/go-ipfs-cmds v0.3.0/go.mod h1:ZgYiWVnCk43ChwoH8hAmI1IRbuVtq3GSTHwtRB/Kqhk= github.com/ipfs/go-ipfs-cmds v0.4.0 h1:xUavIxA9Ts8U6PAHmQBvDGMlGfUrQ13Rymd+5t8LIF4= github.com/ipfs/go-ipfs-cmds v0.4.0/go.mod h1:ZgYiWVnCk43ChwoH8hAmI1IRbuVtq3GSTHwtRB/Kqhk= github.com/ipfs/go-ipfs-config v0.0.11/go.mod h1:wveA8UT5ywN26oKStByzmz1CO6cXwLKKM6Jn/Hfw08I= github.com/ipfs/go-ipfs-config v0.5.3/go.mod h1:nSLCFtlaL+2rbl3F+9D4gQZQbT1LjRKx7TJg/IHz6oM= github.com/ipfs/go-ipfs-config v0.9.0/go.mod h1:GQUxqb0NfkZmEU92PxqqqLVVFTLpoGGUlBaTyDaAqrE= github.com/ipfs/go-ipfs-config v0.10.0 h1:QdTFdqCg3Zpvpz6wHc6B7UGwSnierqq0h8BwyUntjGA= github.com/ipfs/go-ipfs-config v0.10.0/go.mod h1:Ei/FLgHGTdPyqCPK0oPCwGTe8VSnsjJjx7HZqUb6Ry0= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help 
v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= github.com/ipfs/go-ipfs-http-client v0.0.5/go.mod h1:8EKP9RGUrUex4Ff86WhnKU7seEBOtjdgXlY9XHYvYMw= github.com/ipfs/go-ipfs-http-client v0.0.6-0.20200512220018-7002cce28cb1/go.mod h1:h3VsuLMjVWwiTwH03gMfKvlFNur8m8a0HHk3f3KsEeg= github.com/ipfs/go-ipfs-http-client v0.1.0 h1:YrJ+/vqmZF1ignpxfHUaJEax7e4tgbaFCTLfIS5yFZY= github.com/ipfs/go-ipfs-http-client v0.1.0/go.mod h1:8e2dQbntMZKxLfny+tyXJ7bJHZFERp/2vyzZdvkeLMc= github.com/ipfs/go-ipfs-pinner v0.0.4 h1:EmxhS3vDsCK/rZrsgxX0Le9m2drBcGlUd7ah/VyFYVE= github.com/ipfs/go-ipfs-pinner v0.0.4/go.mod h1:s4kFZWLWGDudN8Jyd/GTpt222A12C2snA2+OTdy/7p8= 
github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-provider v0.4.3 h1:k54OHXZcFBkhL6l3GnPS9PfpaLeLqZjVASG1bgfBdfQ= github.com/ipfs/go-ipfs-provider v0.4.3/go.mod h1:rcQBVqfblDQRk5LaCtf2uxuKxMJxvKmF5pLS0pO4au4= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1/go.mod 
h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-git v0.0.3 h1:/YjkjCyo5KYRpW+suby8Xh9Cm/iH9dAgGV6qyZ1dGus= github.com/ipfs/go-ipld-git v0.0.3/go.mod h1:RuvMXa9qtJpDbqngyICCU/d+cmLFXxLsbIclmD0Lcr0= github.com/ipfs/go-ipns v0.0.2 h1:oq4ErrV4hNQ2Eim257RTYRgfOSV/s8BDaf9iIl4NwFs= github.com/ipfs/go-ipns v0.0.2/go.mod h1:WChil4e0/m9cIINWLxZe1Jtf77oz5L05rO2ei/uKJ5U= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2-0.20200609205458-f8d20c392cb7/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 
h1:3bijxqzQ1O9yg7gd7Aqk80oaEvsJ+uXw0zSvi2qR3Jw= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.1.0/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.0/go.mod h1:4pymaZLhSLNVuiCITYrpViD6vmfZ/Ws4n/L9tfNv3S4= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= github.com/ipfs/go-mfs v0.1.2 h1:DlelNSmH+yz/Riy0RjPKlooPg0KML4lXGdLw7uZkfAg= github.com/ipfs/go-mfs v0.1.2/go.mod h1:T1QBiZPEpkPLzDqEJLNnbK55BVKVlNi2a+gVm4diFo0= github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-path v0.0.8 h1:R0k6t9x/pa+g8qzl5apQIPurJFozXhopks3iw3MX+jU= github.com/ipfs/go-path v0.0.8/go.mod h1:VpDkSBKQ9EFQOUgi54Tq/O/tGi8n1RfYNks13M3DEs8= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= github.com/ipfs/go-peertaskqueue 
v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= github.com/ipfs/go-unixfs v0.1.0/go.mod h1:lysk5ELhOso8+Fed9U1QTGey2ocsfaZ18h0NCO2Fj9s= github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/interface-go-ipfs-core v0.2.3/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= github.com/ipfs/interface-go-ipfs-core v0.2.6/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= github.com/ipfs/interface-go-ipfs-core v0.2.7/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= github.com/ipfs/interface-go-ipfs-core v0.3.0/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= github.com/ipfs/interface-go-ipfs-core v0.4.0 h1:+mUiamyHIwedqP8ZgbCIwpy40oX7QcXUbo4CZOeJVJg= github.com/ipfs/interface-go-ipfs-core v0.4.0/go.mod h1:UJBcU6iNennuI05amq3FQ7g0JHUkibHFAfhfUIy927o= github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= github.com/ipfs/iptb-plugins v0.2.2/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339/go.mod h1:eajxljm6I8o3LitnFeVEmucwZmz7+yLSiKce9yYMefg= github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae/go.mod 
h1:2mvxpu4dKRnuH3mj5u6KW/tmRSCcXvy/KYiJ4nC6h4c= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-is-domain v1.0.3/go.mod h1:xbRLRb0S7FgzDBTJlguhDVwLYM/5yNtvktxj2Ttfy7Q= github.com/jbenet/go-is-domain v1.0.5 h1:r92uiHbMEJo9Fkey5pMBtZAzjPQWic0ieo7Jw1jEuQQ= github.com/jbenet/go-is-domain v1.0.5/go.mod h1:xbRLRb0S7FgzDBTJlguhDVwLYM/5yNtvktxj2Ttfy7Q= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= 
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-assets v0.0.0-20160921144138-4f4301a06e15/go.mod h1:Fdm/oWRW+CH8PRbLntksCNtmcCBximKPkVQYvmMl80k= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.7.0 h1:qJ7piXPrjP3mDrfHf5ATkxfLix8ANs226vpo0aACOn0= github.com/jhump/protoreflect v1.7.0/go.mod h1:RZkzh7Hi9J7qT/sPlWnJ/UwZqCJvciFxKDA0UCeltSM= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmhodges/levigo 
v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU= github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/keybase/go-kext v0.0.0-20200218013902-e4a86908886a h1:dHYanXneM8GTWrIW32GWFKFF3YFkNu17tTnkFmA7Jxw= github.com/keybase/go-kext v0.0.0-20200218013902-e4a86908886a/go.mod h1:Vtc6+1lR1vL60Xxv2mpL6qf5rZdagBZ/eOsXHa3cHFE= github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod 
h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.7.0/go.mod 
h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-libp2p v0.10.3 h1:Bc8/VjmC+pICtK6xG8YgVutZvCdK0MsroWCHP+6AdFQ= github.com/libp2p/go-libp2p v0.10.3/go.mod h1:0ER6iPSaPeQjryNgOnm9bLNpMJCYmuw54xJXsVR17eE= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo= github.com/libp2p/go-libp2p-autonat 
v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM= github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw= github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= github.com/libp2p/go-libp2p-connmgr v0.2.1/go.mod h1:JReKEFcgzSHKT9lL3rhYcUtXBs9uMIiMKJGM1tl3xJE= github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= 
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ= github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-gostream v0.2.0/go.mod h1:nN/Aw00orrADXaXgNCeYjCtQrk6eT20PX/G8F12NW/s= github.com/libp2p/go-libp2p-gostream v0.2.1 h1:JjA9roGokaR2BgWmaI/3HQu1/+jSbVVDLatQGnVdGjI= github.com/libp2p/go-libp2p-gostream v0.2.1/go.mod h1:1Mjp3LDmkqICe5tH9yLVNCqFaRTy6OwBvuJV6j1b9Nk= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-http v0.1.5 h1:FfLnzjlEzV4/6UCXCpPXRYZNoGCfogqCFjd7eF0Jbm8= github.com/libp2p/go-libp2p-http v0.1.5/go.mod h1:2YfPjsQxUlBGFQl2u461unkQ7ukwiSs7NX2eSslOJiU= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= github.com/libp2p/go-libp2p-kad-dht v0.7.10/go.mod h1:COi43/398wedaaNzvnbnWQ8qeG629Nu7Aj0WixHmD3A= github.com/libp2p/go-libp2p-kad-dht v0.8.1/go.mod h1:u3rbYbp3CSraAHD5s81CJ3hHozKTud/UOXfAgh93Gek= github.com/libp2p/go-libp2p-kad-dht v0.8.2/go.mod h1:u3rbYbp3CSraAHD5s81CJ3hHozKTud/UOXfAgh93Gek= github.com/libp2p/go-libp2p-kad-dht v0.8.3/go.mod h1:HnYYy8taJWESkqiESd1ngb9XX/XGGsMA5G0Vj2HoSh4= 
github.com/libp2p/go-libp2p-kad-dht v0.9.0/go.mod h1:LEKcCFHxnvypOPaqZ0m6h0fLQ9Y8t1iZMOg7a0aQDD4= github.com/libp2p/go-libp2p-kad-dht v0.10.0/go.mod h1:LEKcCFHxnvypOPaqZ0m6h0fLQ9Y8t1iZMOg7a0aQDD4= github.com/libp2p/go-libp2p-kad-dht v0.11.0 h1:ZLhlmDKsFiOkPhTzfEqBrMy/1Tqx+Dk6UgbHM5//IQM= github.com/libp2p/go-libp2p-kad-dht v0.11.0/go.mod h1:5ojtR2acDPqh/jXf5orWy8YGb8bHQDS+qeDcoscL/PI= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.4.1/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= github.com/libp2p/go-libp2p-mplex v0.3.0 h1:CZyqqKP0BSGQyPLvpRQougbfXaaaJZdGgzhCpJNuNSk= github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-net v0.0.1/go.mod 
h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= 
github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.3.2 h1:k3cJm5JW5mjaWZkobS50sJLJWaB2mBi0HW4eRlE8mSo= github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= github.com/libp2p/go-libp2p-pubsub-router v0.3.0/go.mod h1:6kZb1gGV1yGzXTfyNsi4p+hyt1JnA1OMGHeExTOJR3A= github.com/libp2p/go-libp2p-pubsub-router v0.3.2 h1:BGC4irCUXlwmlCSxnA2DVDNY8JqhfAUUaiq3CZvcddw= github.com/libp2p/go-libp2p-pubsub-router v0.3.2/go.mod h1:G4MAvYzPxhoR0LEBluS9Ow+Nnr/8iDalUN+RNwVgNkY= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.7.1/go.mod h1:TD31to4E5exogR/GWHClXCfkktigjAl5rXSt7HoxNvY= github.com/libp2p/go-libp2p-quic-transport v0.8.0/go.mod h1:F2FG/6Bzz0U6essUVxDzE0s9CrY4XGLbl7QEmDNvU7A= github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E= github.com/libp2p/go-libp2p-quic-transport v0.9.0/go.mod h1:xyY+IgxL0qsW7Kiutab0+NlxM0/p9yRtrGTYsuMWf70= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= 
github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-routing-helpers v0.2.1/go.mod h1:rTLUHlGDZbXHANJAWP2xW7ruPNJLj41/GnCBiR+qgjU= github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg= github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.2.0/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= 
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU= github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-nat 
v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-socket-activation v0.0.2/go.mod h1:KP44C+yZ7gA8sTxavgaD0b8vXVFJwam2CEW0s7+f094= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns= github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= github.com/libp2p/go-testutil v0.0.1/go.mod 
h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381 h1:bqDmpDG49ZRnB5PcgP0RXtQvnMSgIF14M7CBd2shtXs= github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.18.0/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= github.com/lucas-clemente/quic-go v0.18.1 h1:DMR7guC0NtVS8zNZR3IO7NARZvZygkSC56GGtC6cyys= github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod 
h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailgun/mailgun-go/v3 v3.6.4 h1:+cvbZRgLSHivbz/w1iWLmxVl6Bqf4geD2D7QMj4+8PE= github.com/mailgun/mailgun-go/v3 v3.6.4/go.mod h1:ZjVnH8S0dR2BLjvkZc/rxwerdcirzlA12LQDuGAadR0= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/manifoldco/promptui v0.7.0 h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4= github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls v0.10.0 
h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.3 h1:j7a/xn1U6TKA/PHHxqZuzh64CdtRc7rU9M+AvkOl5bA= github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo= github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod 
h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.0 h1:iDwIio/3gk2QtLLEsqU5lInaMzos0hDTz8a6lazSFVw= github.com/mitchellh/mapstructure v1.3.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= 
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.1.4/go.mod 
h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-multistream v0.1.2/go.mod 
h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU= github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/namsral/flag v1.7.4-pre/go.mod h1:OXldTctbM6SWH1K899kPZcf65KxJiD7MsceFUpB5yDo= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod 
h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/odeke-em/go-utils v0.0.0-20170224015737-e8ebaed0777a h1:ID4uUGPTiRgDphmzLbX3+teq4mhp2d6Isw2Tb8cMuJU= github.com/odeke-em/go-utils v0.0.0-20170224015737-e8ebaed0777a/go.mod h1:I31zE0t3yGARXW3nOJIdaNT1BJ2uPHKP0xjmjfRQEVg= github.com/odeke-em/go-uuid v0.0.0-20151221120446-b211d769a9aa h1:XEhClAZN5U0GUTFRgRdPNgAKO4mP++S+zbqXH+Pr9nU= github.com/odeke-em/go-uuid v0.0.0-20151221120446-b211d769a9aa/go.mod h1:omlfAqAAOXYL53jxw8wG+G2xH7NqbkJPlDeGP9YpP6g= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid/v2 v2.0.2 h1:r4fFzBm+bv0wNKNh5eXTwU7i85y5x+uwkxCUTNVQqLc= github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo 
v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc 
v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/ory/dockertest/v3 v3.6.0/go.mod h1:4ZOpj8qBUmh8fcBSVzkH2bws2s91JdGvHUqan4GHEuQ= github.com/ory/dockertest/v3 v3.6.2/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/VbWzUnTNE= github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng= github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod 
h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7WL1zHKK7WMqFQqu72rw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/radovskyb/watcher v1.0.7 h1:AYePLih6dpmS32vlHfhCeli8127LzkIgwJGcwwe8tUE= github.com/radovskyb/watcher v1.0.7/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg= github.com/rainycape/unidecode 
v0.0.0-20150907023854-cb7f23ec59be h1:ta7tUOvsPHVHGom5hKW5VXNc2xZIkfCKP8iaqOyYtUQ= github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be/go.mod h1:MIDFMn7db1kT65GmV94GzpX9Qdi7N/pQlwb+AN8wh+Q= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rhysd/go-github-selfupdate v1.2.2/go.mod h1:khesvSyKcXDUxeySCedFh621iawCks0dS/QnHPcpCws= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif 
v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= github.com/shurcooL/highlight_go 
v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight 
v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag 
v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stripe/stripe-go/v72 v72.10.0 h1:4Rsv7Ts4D2qii2r0gW3qZpvPXRV0W8BpagLgvh7kRjY= github.com/stripe/stripe-go/v72 v72.10.0/go.mod h1:QwqJQtduHubZht9mek5sds9CtQcKFdsykV9ZepRWwo0= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/supranational/blst v0.1.1/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tcnksm/go-gitconfig v0.1.2/go.mod h1:/8EhP4H7oJZdIPyT+/UIsG87kTzrzM4UsLGSItWYCpE= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/textileio/dcrypto v0.0.1 h1:ftXQKd+CAM7a0XFrw+hJqizo+ux8+g5RttKjImZRc7U= github.com/textileio/dcrypto v0.0.1/go.mod h1:rDlYXuL+HQwkyrxOR230zEUouRnlTwH6O5XWoPbmfcE= github.com/textileio/go-assets v0.0.0-20200430191519-b341e634e2b7 h1:J7+UXJT/Ku8ylMqqHH4T+CiHtOSd8woUZjynG3fEwDI= github.com/textileio/go-assets v0.0.0-20200430191519-b341e634e2b7/go.mod h1:j7aKMh8sbbtvttp7V7yCOkHW/pfRtIM/6h+8qEDsLyI= github.com/textileio/go-datastore v0.4.5-0.20200819232101-baa577bf9422 h1:DNpznzcn7pd1Cn2fdKHfPPw62r8Ii4DGVnQRu9Jbvok= github.com/textileio/go-datastore 
v0.4.5-0.20200819232101-baa577bf9422/go.mod h1:/38mp5DMgxCZrb5wpgPlWGXuZ99/ur8wgHDSXU5zCjU= github.com/textileio/go-datastore-extensions v1.0.0/go.mod h1:Pzj9FDRkb55910dr/FX8M7WywvnS26gBgEDez1ZBuLE= github.com/textileio/go-ds-badger v0.2.5-0.20200819232634-de89720b5d6a h1:AdjNdwIWrZAUrXfC9IHG8eKhRlJarXZoN9CmGlfTeLA= github.com/textileio/go-ds-badger v0.2.5-0.20200819232634-de89720b5d6a/go.mod h1:0kLVpG7eeM95s4rS78lQe4eG5DCk+cnU8xas2nPSdZY= github.com/textileio/go-ds-mongo v0.1.2/go.mod h1:9wmGTUr+MWidGxYQe27RuCogEUZ7vnQxZb4GWj7uWL8= github.com/textileio/powergate v0.0.1-beta.13.0.20200703203605-db27e80fa8b5/go.mod h1:D3ImIiFCJSFdNdSO1m1SL+P8kQvW2Jrkc6hAZI8h7Bg= github.com/textileio/powergate v1.2.1 h1:qMqSo/nqN870NRCSrH5AqIQhPm/v9V1HHAiZrFWy5AM= github.com/textileio/powergate v1.2.1/go.mod h1:MFWG3Tm4wv/Sb5fIL+JyM7mDbvZtfdh8ky09VJFnGNg= github.com/textileio/textile v1.0.14 h1:ohEoUHATajh6FdVElYtOlgZI3Y0DFPTAJfMrGzpCquc= github.com/textileio/textile v1.0.14/go.mod h1:W+VB49SnKeZrbXhLIJMe+rFixVYireocJmfrds3EWKo= github.com/textileio/uiprogress v0.0.4/go.mod h1:ijtyLXHP6vw9MbbT4tmCKZonLPE3LN4mD9C/XRJkrgg= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= github.com/tidwall/gjson v1.3.5 h1:2oW9FBNu8qt9jy5URgrzsVx/T/KSn3qn/smJQ0crlDQ= github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= github.com/tidwall/sjson v1.0.4/go.mod 
h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible h1:uArBYHQR0HqLFFAypI7RsWTzPSj/bDpmZZuQjMLSg1A= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod 
h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/bencher 
v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200501232601-351665a6e756/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen 
v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1 h1:ctS9Anw/KozviCCtK6VWMz5kPL9nbQzbQY4yfqlIV4M= github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1/go.mod h1:tKH72zYNt/exx6/5IQO6L9LoQ0rEjd5SbbWaDTs9Zso= github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4/go.mod h1:K+EVq8d5QcQ2At5VECsA+SNZvWefyBXh8TnIsxo1OvQ= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 h1:Y1/FEOpaCpD21WxrmfeIYCFPuVPRCY2XZTWzTNHGw30= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod 
h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c h1:GGsyl0dZ2jJgVT+VvWBf/cNijrHRhkrTjkmp5wg7li0= github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xakep666/mongo-migrate v0.2.1 h1:pRK966a44ujuGMEl73MOzv4MajcH8Q6MWo+TBlxjhvs= github.com/xakep666/mongo-migrate v0.2.1/go.mod h1:pVQysP+es2wX4TaeVd7zLkRZhKMcBqcC/KRyLms6Eyk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= 
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YSlzGNuhOEo= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v3.3.22+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.mongodb.org/mongo-driver v1.0.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.4.0/go.mod h1:llVBH2pkj9HywK0Dtdt6lDikOjFLbceHVu/Rc0iMKLs= go.mongodb.org/mongo-driver v1.4.1 h1:38NSAyDPagwnFpUA/D5SFgbugUYR3NzYRNa4Qk9UxKs= go.mongodb.org/mongo-driver v1.4.1/go.mod h1:llVBH2pkj9HywK0Dtdt6lDikOjFLbceHVu/Rc0iMKLs= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/dig v1.9.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY= go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= go.uber.org/fx v1.12.0/go.mod h1:egT3Kyg1JFYQkvKLZ3EsykxkNrZxgXS+gKoKo7abERY= go.uber.org/fx v1.13.1 h1:CFNTr1oin5OJ0VCZ8EycL3wzF29Jz2g0xe55RFsf2a4= go.uber.org/fx v1.13.1/go.mod h1:bREWhavnedxpJeTq9pQT53BbvwhUv7TcpsOqcH4a+3w= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= 
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod 
h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200427175716-29b57079015a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 h1:5Ftd3YbC/kANXWCBjvppvUmv1BMakgFcBKA7MpYYp4M= golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200428115010-c45acf45369a/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod 
h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200702021140-07506425bd67 h1:4BC1C1i30F3MZeiIO6y6IIo4DxrtOwITK87bQl3lhFA= google.golang.org/genproto v0.0.0-20200702021140-07506425bd67/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.1/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc/examples v0.0.0-20200819190100-f640ae6a4f43/go.mod h1:wQWkdCkP0Pl3MzFPvfqTNUnXA2eIVY4eakDiKJvniKc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ= gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/segmentio/analytics-go.v3 v3.1.0/go.mod h1:4QqqlTlSSpVlWA9/9nDcPw+FkM2yv1NQoYjUbL9/JAw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod 
h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/sqlite v1.1.3 h1:BYfdVuZB5He/u9dt4qDpZqiqDJ6KhPqs5QUqsr/Eeuc= gorm.io/driver/sqlite v1.1.3/go.mod h1:AKDgRWk8lcSQSw+9kxCJnX/yySj8G3rdwYlU57cB45c= gorm.io/gorm v1.20.1/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.20.5 h1:g3tpSF9kggASzReK+Z3dYei1IJODLqNUbOjSuCczY8g= gorm.io/gorm v1.20.5/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= ================================================ FILE: grpc/auth/app_token_auth/app_token_auth.go ================================================ package app_token_auth import ( 
"context" "errors" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/FleekHQ/space-daemon/core/permissions" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) type AppTokenAuth struct { kc keychain.Keychain } func New(kc keychain.Keychain) *AppTokenAuth { return &AppTokenAuth{ kc: kc, } } func (a *AppTokenAuth) Authorize(ctx context.Context, fullMethodName string) (context.Context, error) { if canSkipAuth(fullMethodName) { return ctx, nil } token, err := AuthFromMD(ctx, "AppToken") if err != nil { return nil, err } tokenInfo, err := a.validateToken(token, fullMethodName) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "invalid auth token: %v", err) } newCtx := context.WithValue(ctx, "appToken", tokenInfo) return newCtx, nil } func (a *AppTokenAuth) validateToken(tok, fullMethodName string) (*permissions.AppToken, error) { key, sec, err := permissions.GetKeyAndSecretFromAccessToken(tok) if err != nil { return nil, err } appTok, err := a.kc.GetAppToken(key) if err != nil { return nil, err } if appTok.Secret != sec { return nil, errors.New("app token secret does not match") } authorized := false if appTok.IsMaster { authorized = true } // Check if method is authorized for _, p := range appTok.Permissions { if "/space.SpaceApi/"+p == fullMethodName { authorized = true } } if authorized == false { return nil, errors.New("app token does not grant access to " + fullMethodName) } return appTok, nil } var publicMethods = []string{ "InitializeMasterAppToken", } func canSkipAuth(fullMethodName string) bool { for _, pm := range publicMethods { if "/space.SpaceApi/"+pm == fullMethodName { return true } } return false } ================================================ FILE: grpc/auth/app_token_auth/auth_from_md.go ================================================ package app_token_auth import ( "context" "strings" "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) 
var (
	headerAuthorize = "authorization"
)

// AuthFromMD is a helper function for extracting the :authorization header from the gRPC metadata of the request.
//
// It expects the `:authorization` header to be of a certain scheme (e.g. `basic`, `bearer`), in a
// case-insensitive format (see rfc2617, sec 1.2). If no such authorization is found, or the token
// is of wrong scheme, an error with gRPC status `Unauthenticated` is returned.
func AuthFromMD(ctx context.Context, expectedScheme string) (string, error) {
	val := metautils.ExtractIncoming(ctx).Get(headerAuthorize)
	if val == "" {
		return "", status.Errorf(codes.Unauthenticated, "Request unauthenticated with "+expectedScheme)
	}
	// Header value is "<scheme> <token>"; split only once so the token itself
	// may contain spaces.
	splits := strings.SplitN(val, " ", 2)
	if len(splits) < 2 {
		return "", status.Errorf(codes.Unauthenticated, "Bad authorization string")
	}
	// Scheme comparison is case-insensitive per RFC 2617.
	if !strings.EqualFold(splits[0], expectedScheme) {
		return "", status.Errorf(codes.Unauthenticated, "Request unauthenticated with "+expectedScheme)
	}
	return splits[1], nil
}



================================================
FILE: grpc/auth/middleware/grpc_auth.go
================================================
package grpc_auth

import (
	"context"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// AuthFunc is the pluggable function that performs authentication.
//
// The passed in `Context` will contain the gRPC metadata.MD object (for header-based authentication) and
// the peer.Peer information that can contain transport-based credentials (e.g. `credentials.AuthInfo`).
//
// The returned context will be propagated to handlers, allowing user changes to `Context`. However,
// please make sure that the `Context` returned is a child `Context` of the one passed in.
//
// If error is returned, its `grpc.Code()` will be returned to the user as well as the verbatim message.
// Please make sure you use `codes.Unauthenticated` (lacking auth) and `codes.PermissionDenied`
// (authed, but lacking perms) appropriately.
type AuthFunc func(ctx context.Context, fullMethodName string) (context.Context, error)

// UnaryServerInterceptor returns a new unary server interceptors that performs per-request auth.
func UnaryServerInterceptor(authFunc AuthFunc) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		var newCtx context.Context
		var err error
		newCtx, err = authFunc(ctx, info.FullMethod)
		if err != nil {
			return nil, err
		}
		return handler(newCtx, req)
	}
}

// StreamServerInterceptor returns a new unary server interceptors that performs per-request auth.
func StreamServerInterceptor(authFunc AuthFunc) grpc.StreamServerInterceptor {
	return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		var newCtx context.Context
		var err error
		newCtx, err = authFunc(stream.Context(), info.FullMethod)
		if err != nil {
			return err
		}
		// Wrap the stream so the handler observes the (possibly augmented) context.
		wrapped := grpc_middleware.WrapServerStream(stream)
		wrapped.WrappedContext = newCtx
		return handler(srv, wrapped)
	}
}



================================================
FILE: grpc/grpc.go
================================================
package grpc

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/rs/cors"

	"github.com/improbable-eng/grpc-web/go/grpcweb"

	"github.com/FleekHQ/space-daemon/core/keychain"
	"github.com/FleekHQ/space-daemon/core/space/fuse"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"

	"github.com/FleekHQ/space-daemon/core/space"
	"github.com/FleekHQ/space-daemon/grpc/auth/app_token_auth"
	grpc_auth "github.com/FleekHQ/space-daemon/grpc/auth/middleware"
	"github.com/FleekHQ/space-daemon/grpc/pb"
	"github.com/FleekHQ/space-daemon/log"
	"google.golang.org/grpc"
)

const (
	DefaultGrpcPort = 9999
)

var defaultServerOptions = serverOptions{
	port: DefaultGrpcPort,
}

// serverOptions holds the three listen ports used by the daemon.
type serverOptions struct {
	port          int
	proxyPort     int // port for grpcweb proxy
	restProxyPort int // port for rest api proxy
}

// grpcServer serves the Space API over plain gRPC, grpc-web and REST.
type grpcServer struct {
	opts       *serverOptions
	s          *grpc.Server
	rpcProxy   *http.Server
	restServer *http.Server
	sv         space.Service
	fc         *fuse.Controller
	kc         keychain.Keychain
	// TODO: see if we need to clean this up by gc or handle an array
	fileEventStream         pb.SpaceApi_SubscribeServer
	txlEventStream          pb.SpaceApi_TxlSubscribeServer
	notificationEventStream pb.SpaceApi_NotificationSubscribeServer
	isStarted               bool
	readyCh                 chan bool
}

// Idea taken from here https://medium.com/soon-london/variadic-configuration-functions-in-go-8cef1c97ce99
type ServerOption func(o *serverOptions)

// gRPC server uses Service from core to handle requests
func New(sv space.Service, fc *fuse.Controller, kc keychain.Keychain, opts ...ServerOption) *grpcServer {
	o := defaultServerOptions
	for _, opt := range opts {
		opt(&o)
	}
	srv := &grpcServer{
		opts: &o,
		sv:   sv,
		fc:   fc,
		kc:   kc,
		// buffered so Start's readiness signal never blocks
		readyCh: make(chan bool, 1),
	}
	return srv
}

// Start grpc and api server with provided options
func (srv *grpcServer) Start(ctx context.Context) error {
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", srv.opts.port))
	if err != nil {
		log.Error(fmt.Sprintf("failed to listen on port : %v", srv.opts.port), err)
		return err
	}
	log.Info(fmt.Sprintf("listening on address %s", lis.Addr().String()))

	// Every unary and stream call passes through app-token authorization.
	appTokenAuth := app_token_auth.New(srv.kc)

	srv.s = grpc.NewServer(
		grpc.StreamInterceptor(grpc_auth.StreamServerInterceptor(appTokenAuth.Authorize)),
		grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(appTokenAuth.Authorize)),
	)

	pb.RegisterSpaceApiServer(srv.s, srv)

	if err = srv.startRestProxy(ctx, lis); err != nil {
		return err
	}

	srv.startGrpcWebProxy()

	log.Info(fmt.Sprintf("gRPC server started on Port %v", srv.opts.port))
	srv.isStarted = true
	srv.readyCh <- true

	// this is a blocking function
	return srv.s.Serve(lis)
}

// startRestProxy exposes the gRPC API over REST via grpc-gateway, dialing back
// into the local gRPC listener.
func (srv *grpcServer) startRestProxy(ctx context.Context, lis net.Listener) error {
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := pb.RegisterSpaceApiHandlerFromEndpoint(ctx, mux, lis.Addr().String(),
opts); err != nil { log.Error("Failed to start REST server", err) return err } srv.restServer = &http.Server{ Addr: fmt.Sprintf(":%d", srv.opts.restProxyPort), Handler: mux, } srv.restServer.Handler = cors.AllowAll().Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { log.Debug("Incoming REST Proxy Request", "path:"+r.URL.Path, "method:"+r.Method) mux.ServeHTTP(w, r) })) log.Info("REST server is starting", fmt.Sprintf("port:%v", srv.opts.restProxyPort)) go func() { if err := srv.restServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Error(fmt.Sprintf("REST server failed to start on port %d", srv.opts.restProxyPort), err) } }() return nil } func (srv *grpcServer) startGrpcWebProxy() { webrpcProxy := grpcweb.WrapServer( srv.s, grpcweb.WithOriginFunc(func(origin string) bool { return true }), grpcweb.WithWebsockets(true), grpcweb.WithWebsocketOriginFunc(func(req *http.Request) bool { return true }), ) srv.rpcProxy = &http.Server{ Addr: fmt.Sprintf(":%d", srv.opts.proxyPort), } srv.rpcProxy.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if webrpcProxy.IsGrpcWebRequest(r) || webrpcProxy.IsAcceptableGrpcCorsRequest(r) || webrpcProxy.IsGrpcWebSocketRequest(r) { webrpcProxy.ServeHTTP(w, r) } }) go func() { if err := srv.rpcProxy.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Error("Space grpcweb proxy error", err) } }() log.Info(fmt.Sprintf("gRPC-web proxy server started on Port %d", srv.opts.proxyPort)) } // Helper function for setting port func WithPort(port int) ServerOption { return func(o *serverOptions) { if port != 0 { o.port = port } } } func WithProxyPort(port int) ServerOption { return func(o *serverOptions) { if port != 0 { o.proxyPort = port } } } // WithRestProxyPort configures the REST Proxy port func WithRestProxyPort(port int) ServerOption { return func(o *serverOptions) { if port != 0 { o.restProxyPort = port } } } func (srv *grpcServer) Shutdown() error { if 
!srv.isStarted { return nil } close(srv.readyCh) defer func() { srv.rpcProxy = nil srv.restServer = nil srv.s = nil srv.isStarted = false }() srv.s.GracefulStop() shutdownCtx, _ := context.WithTimeout(context.Background(), 10*time.Second) if err := srv.rpcProxy.Shutdown(shutdownCtx); err != nil { return err } if err := srv.restServer.Shutdown(shutdownCtx); err != nil { return err } return nil } func (srv *grpcServer) WaitForReady() chan bool { return srv.readyCh } ================================================ FILE: grpc/handlers.go ================================================ package grpc import ( "context" "errors" "github.com/FleekHQ/space-daemon/core/events" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/grpc/pb" "github.com/FleekHQ/space-daemon/log" "github.com/golang/protobuf/ptypes/empty" ) var errNotImplemented = errors.New("Not implemented") func (srv *grpcServer) sendFileEvent(event *pb.FileEventResponse) { if srv.fileEventStream != nil { log.Info("sending events to client", event.String()) srv.fileEventStream.Send(event) } } func (srv *grpcServer) SendFileEvent(event events.FileEvent) { dirEntries := mapFileInfoToDirectoryEntry([]domain.FileInfo{event.Info}) entry := dirEntries[0] pe := &pb.FileEventResponse{ Type: mapFileEventToPb(event.Type), Entry: entry, Bucket: event.Bucket, DbId: event.DbID, } srv.sendFileEvent(pe) } func mapFileEventToPb(eventType events.FileEventType) pb.EventType { switch eventType { case events.FileAdded: return pb.EventType_ENTRY_ADDED case events.FileDeleted: return pb.EventType_ENTRY_DELETED case events.FileUpdated: return pb.EventType_ENTRY_UPDATED case events.FileBackupInProgress: return pb.EventType_ENTRY_BACKUP_IN_PROGRESS case events.FileBackupReady: return pb.EventType_ENTRY_BACKUP_READY case events.FileRestoring: return pb.EventType_ENTRY_RESTORE_IN_PROGRESS case events.FileRestored: return pb.EventType_ENTRY_RESTORE_READY case events.FolderAdded: return 
pb.EventType_FOLDER_ADDED
	case events.FolderDeleted:
		return pb.EventType_FOLDER_DELETED
	case events.FolderUpdated:
		return pb.EventType_FOLDER_UPDATED
	default:
		return pb.EventType_ENTRY_ADDED
	}
}

// sendTextileEvent pushes a textile event to the subscribed client, if any.
func (srv *grpcServer) sendTextileEvent(event *pb.TextileEventResponse) {
	if srv.txlEventStream != nil {
		log.Info("sending events to client")
		srv.txlEventStream.Send(event)
	}
}

// SendTextileEvent forwards a textile event notification to the client.
// The payload is currently empty; only the event's occurrence is signaled.
func (srv *grpcServer) SendTextileEvent(event events.TextileEvent) {
	pe := &pb.TextileEventResponse{}
	srv.sendTextileEvent(pe)
}

// ListDirectories lists a bucket's entries recursively from its root.
func (srv *grpcServer) ListDirectories(ctx context.Context, request *pb.ListDirectoriesRequest) (*pb.ListDirectoriesResponse, error) {
	bucketName := request.Bucket
	listMembers := !request.OmitMembers
	entries, err := srv.sv.ListDirs(ctx, "", bucketName, listMembers)
	if err != nil {
		return nil, err
	}
	dirEntries := mapFileInfoToDirectoryEntry(entries)
	res := &pb.ListDirectoriesResponse{
		Entries: dirEntries,
	}
	return res, nil
}

// ListDirectory lists the entries directly under the requested path.
func (srv *grpcServer) ListDirectory(
	ctx context.Context,
	request *pb.ListDirectoryRequest,
) (*pb.ListDirectoryResponse, error) {
	listMembers := !request.OmitMembers
	entries, err := srv.sv.ListDir(ctx, request.GetPath(), request.GetBucket(), listMembers)
	if err != nil {
		return nil, err
	}
	dirEntries := mapFileInfoToDirectoryEntry(entries)
	res := &pb.ListDirectoryResponse{
		Entries: dirEntries,
	}
	return res, nil
}

// mapFileInfoToDirectoryEntry converts domain file infos into protobuf entries.
func mapFileInfoToDirectoryEntry(entries []domain.FileInfo) []*pb.ListDirectoryEntry {
	dirEntries := make([]*pb.ListDirectoryEntry, 0)

	for _, e := range entries {
		members := make([]*pb.FileMember, 0)
		for _, m := range e.Members {
			members = append(members, &pb.FileMember{
				Address:   m.Address,
				PublicKey: m.PublicKey,
			})
		}

		// BackedUp is a bool; the API exposes it as a count (0 or 1).
		var backupCount = 0
		if e.BackedUp {
			backupCount = 1
		}

		dirEntry := &pb.ListDirectoryEntry{
			Path:          e.Path,
			IsDir:         e.IsDir,
			Name:          e.Name,
			SizeInBytes:   e.SizeInBytes,
			Created:       e.Created,
			Updated:       e.Updated,
			FileExtension: e.FileExtension,
			IpfsHash:      e.IpfsHash,
			Members:       members,
			BackupCount:   int64(backupCount),
			IsLocallyAvailable:
e.LocallyAvailable, IsBackupInProgress: e.BackupInProgress, IsRestoreInProgress: e.RestoreInProgress, } dirEntries = append(dirEntries, dirEntry) } return dirEntries } func (srv *grpcServer) Subscribe(empty *empty.Empty, stream pb.SpaceApi_SubscribeServer) error { srv.registerStream(stream) // waits until request is done select { case <-stream.Context().Done(): break } // clean up stream srv.registerStream(nil) log.Info("closing stream") return nil } func (srv *grpcServer) registerStream(stream pb.SpaceApi_SubscribeServer) { srv.fileEventStream = stream } func (srv *grpcServer) TxlSubscribe(empty *empty.Empty, stream pb.SpaceApi_TxlSubscribeServer) error { srv.registerTxlStream(stream) // waits until request is done select { case <-stream.Context().Done(): break } // clean up stream srv.registerTxlStream(nil) log.Info("closing stream") return nil } func (srv *grpcServer) registerTxlStream(stream pb.SpaceApi_TxlSubscribeServer) { srv.txlEventStream = stream } func (srv *grpcServer) OpenFile(ctx context.Context, request *pb.OpenFileRequest) (*pb.OpenFileResponse, error) { fi, err := srv.sv.OpenFile(ctx, request.Path, request.Bucket, request.DbId) if err != nil { return nil, err } return &pb.OpenFileResponse{Location: fi.Location}, nil } func (srv *grpcServer) AddItems(request *pb.AddItemsRequest, stream pb.SpaceApi_AddItemsServer) error { ctx := stream.Context() results, totals, err := srv.sv.AddItems(ctx, request.SourcePaths, request.TargetPath, request.Bucket) if err != nil { return err } notifications := make(chan domain.AddItemResult) done := make(chan struct{}) // push notification stream from out go func() { var completedBytes int64 var completedFiles int64 for res := range notifications { completedFiles++ var r *pb.AddItemsResponse if res.Error != nil { r = &pb.AddItemsResponse{ Result: &pb.AddItemResult{ SourcePath: res.SourcePath, Error: res.Error.Error(), }, TotalFiles: totals.TotalFiles, TotalBytes: totals.TotalBytes, CompletedFiles: completedFiles, 
CompletedBytes: completedBytes, } } else { completedBytes += res.Bytes r = &pb.AddItemsResponse{ Result: &pb.AddItemResult{ SourcePath: res.SourcePath, BucketPath: res.BucketPath, }, TotalFiles: totals.TotalFiles, TotalBytes: totals.TotalBytes, CompletedFiles: completedFiles, CompletedBytes: completedBytes, } } stream.Send(r) } done <- struct{}{} }() // receive results from service for in := range results { select { case notifications <- in: case <-stream.Context().Done(): break } } // close out channel and stream close(notifications) // wait for all notifications to finish <-done log.Printf("closing stream for addFiles") return nil } func (srv *grpcServer) CreateFolder(ctx context.Context, request *pb.CreateFolderRequest) (*pb.CreateFolderResponse, error) { err := srv.sv.CreateFolder(ctx, request.Path, request.Bucket) if err != nil { return nil, err } return &pb.CreateFolderResponse{}, nil } func (srv *grpcServer) RemoveDirOrFile(ctx context.Context, request *pb.RemoveDirOrFileRequest) (*pb.RemoveDirOrFileResponse, error) { err := srv.sv.RemoveDirOrFile(ctx, request.Path, request.Bucket) if err != nil { return nil, err } return &pb.RemoveDirOrFileResponse{}, nil } ================================================ FILE: grpc/handlers_account.go ================================================ package grpc import ( "context" "github.com/FleekHQ/space-daemon/grpc/pb" "github.com/pkg/errors" ) func (srv *grpcServer) DeleteAccount(ctx context.Context, request *pb.DeleteAccountRequest) (*pb.DeleteAccountResponse, error) { if err := srv.fc.Unmount(); err != nil { return nil, errors.Wrap(err, "failed to unmount fuse drive") } if err := srv.sv.TruncateData(ctx); err != nil { return nil, errors.Wrap(err, "error during clean up") } if err := srv.sv.DeleteKeypair(ctx); err != nil { return nil, errors.Wrap(err, "failed to remove keypair") } return &pb.DeleteAccountResponse{}, nil } ================================================ FILE: grpc/handlers_app_token.go 
================================================
package grpc

import (
	"context"

	"github.com/FleekHQ/space-daemon/grpc/pb"
)

// InitializeMasterAppToken creates the daemon's master app token and returns
// its access-token string. This is one of the publicMethods that skip auth.
func (srv *grpcServer) InitializeMasterAppToken(ctx context.Context, request *pb.InitializeMasterAppTokenRequest) (*pb.InitializeMasterAppTokenResponse, error) {
	appToken, err := srv.sv.InitializeMasterAppToken(ctx)
	if err != nil {
		return nil, err
	}

	return &pb.InitializeMasterAppTokenResponse{
		AppToken: appToken.GetAccessToken(),
	}, nil
}

// GenerateAppToken is a stub that always returns errNotImplemented.
func (srv *grpcServer) GenerateAppToken(ctx context.Context, request *pb.GenerateAppTokenRequest) (*pb.GenerateAppTokenResponse, error) {
	// TODO: Implement this when we prioritize adding a third-party app marketplace
	return nil, errNotImplemented
}



================================================
FILE: grpc/handlers_backup.go
================================================
package grpc

import (
	"context"

	"github.com/FleekHQ/space-daemon/grpc/pb"
)

// ToggleBucketBackup enables or disables remote backup for a bucket.
func (srv *grpcServer) ToggleBucketBackup(ctx context.Context, request *pb.ToggleBucketBackupRequest) (*pb.ToggleBucketBackupResponse, error) {
	bucketSlug := request.Bucket
	bucketBackup := request.Backup
	err := srv.sv.ToggleBucketBackup(ctx, bucketSlug, bucketBackup)
	if err != nil {
		return nil, err
	}
	return &pb.ToggleBucketBackupResponse{}, nil
}

// BucketBackupRestore restores a bucket from its remote backup.
func (srv *grpcServer) BucketBackupRestore(ctx context.Context, request *pb.BucketBackupRestoreRequest) (*pb.BucketBackupRestoreResponse, error) {
	bucketSlug := request.Bucket
	err := srv.sv.BucketBackupRestore(ctx, bucketSlug)
	if err != nil {
		return nil, err
	}
	return &pb.BucketBackupRestoreResponse{}, nil
}

// GetUsageInfo is a stub that always returns errNotImplemented.
func (srv *grpcServer) GetUsageInfo(ctx context.Context, request *pb.GetUsageInfoRequest) (*pb.GetUsageInfoResponse, error) {
	return nil, errNotImplemented
}



================================================
FILE: grpc/handlers_central_services.go
================================================
package grpc

import (
	"context"

	"github.com/FleekHQ/space-daemon/grpc/pb"
)

// GetAPISessionTokens returns the hub and services session tokens for the account.
func (srv *grpcServer) GetAPISessionTokens(ctx context.Context, request *pb.GetAPISessionTokensRequest) (*pb.GetAPISessionTokensResponse, error) {
	tokens, err := srv.sv.GetAPISessionTokens(ctx)
	if err != nil {
		return nil, err
	}

	return &pb.GetAPISessionTokensResponse{
		HubToken:      tokens.HubToken,
		ServicesToken: tokens.ServicesToken,
	}, nil
}



================================================
FILE: grpc/handlers_fuse.go
================================================
package grpc

import (
	"context"

	"github.com/opentracing/opentracing-go"

	"github.com/FleekHQ/space-daemon/core/space/fuse"
	"github.com/FleekHQ/space-daemon/grpc/pb"
	"github.com/golang/protobuf/ptypes/empty"
	"github.com/pkg/errors"
)

// ToggleFuseDrive switching on or off a mounted fuse drive
func (srv *grpcServer) ToggleFuseDrive(ctx context.Context, request *pb.ToggleFuseRequest) (*pb.FuseDriveResponse, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "ToggleFuseDrive")
	defer span.Finish()
	if request.MountDrive {
		if err := srv.fc.Mount(); err != nil {
			return nil, errors.Wrap(err, "failed to mount fuse drive")
		}
	} else {
		if err := srv.fc.Unmount(); err != nil {
			return nil, errors.Wrap(err, "failed to unmount fuse drive")
		}
	}

	return srv.GetFuseDriveStatus(ctx, nil)
}

// GetFuseDriveStatus returns the current mounted state
func (srv *grpcServer) GetFuseDriveStatus(ctx context.Context, empty *empty.Empty) (*pb.FuseDriveResponse, error) {
	span, ctx := opentracing.StartSpanFromContext(ctx, "GetFuseDriveStatus")
	defer span.Finish()
	state, err := srv.fc.GetFuseState(ctx)
	if err != nil {
		return nil, err
	}

	return &pb.FuseDriveResponse{
		State:     fuseStateToRpcState(state),
		MountPath: srv.fc.GetMountPath(),
	}, nil
}

// fuseStateToRpcStateMap maps internal fuse states to their protobuf values.
var fuseStateToRpcStateMap = map[fuse.State]pb.FuseState{
	fuse.UNSUPPORTED:   pb.FuseState_UNSUPPORTED,
	fuse.NOT_INSTALLED: pb.FuseState_NOT_INSTALLED,
	fuse.UNMOUNTED:     pb.FuseState_UNMOUNTED,
	fuse.MOUNTED:       pb.FuseState_MOUNTED,
}

func fuseStateToRpcState(state fuse.State) pb.FuseState {
	return fuseStateToRpcStateMap[state]
}
================================================
FILE: grpc/handlers_key_pair.go
================================================
package grpc

import (
	"context"

	"github.com/FleekHQ/space-daemon/grpc/pb"
)

// GenerateKeyPair creates a new keypair without overwriting an existing one
// and returns its recovery mnemonic.
func (srv *grpcServer) GenerateKeyPair(ctx context.Context, request *pb.GenerateKeyPairRequest) (*pb.GenerateKeyPairResponse, error) {
	mnemonic, err := srv.sv.GenerateKeyPair(ctx, false)
	if err != nil {
		return nil, err
	}
	return &pb.GenerateKeyPairResponse{
		Mnemonic: mnemonic,
	}, nil
}

// GenerateKeyPairWithForce creates a new keypair, replacing any existing one,
// and returns its recovery mnemonic.
func (srv *grpcServer) GenerateKeyPairWithForce(ctx context.Context, request *pb.GenerateKeyPairRequest) (*pb.GenerateKeyPairResponse, error) {
	mnemonic, err := srv.sv.GenerateKeyPair(ctx, true)
	if err != nil {
		return nil, err
	}
	return &pb.GenerateKeyPairResponse{
		Mnemonic: mnemonic,
	}, nil
}

// GetPublicKey returns the account's public key.
func (srv *grpcServer) GetPublicKey(ctx context.Context, request *pb.GetPublicKeyRequest) (*pb.GetPublicKeyResponse, error) {
	pub, err := srv.sv.GetPublicKey(ctx)
	if err != nil {
		return nil, err
	}
	return &pb.GetPublicKeyResponse{
		PublicKey: pub,
	}, nil
}

// DeleteKeyPair removes the stored keypair.
func (srv *grpcServer) DeleteKeyPair(ctx context.Context, request *pb.DeleteKeyPairRequest) (*pb.DeleteKeyPairResponse, error) {
	err := srv.sv.DeleteKeypair(ctx)
	if err != nil {
		return nil, err
	}
	return &pb.DeleteKeyPairResponse{}, nil
}

// RestoreKeyPairViaMnemonic restores the keypair from a recovery mnemonic.
func (srv *grpcServer) RestoreKeyPairViaMnemonic(ctx context.Context, request *pb.RestoreKeyPairViaMnemonicRequest) (*pb.RestoreKeyPairViaMnemonicResponse, error) {
	if err := srv.sv.RestoreKeyPairFromMnemonic(ctx, request.Mnemonic); err != nil {
		return nil, err
	}
	return &pb.RestoreKeyPairViaMnemonicResponse{}, nil
}

// GetStoredMnemonic returns the mnemonic stored for the current keypair.
func (srv *grpcServer) GetStoredMnemonic(ctx context.Context, request *pb.GetStoredMnemonicRequest) (*pb.GetStoredMnemonicResponse, error) {
	mnemonic, err := srv.sv.GetMnemonic(ctx)
	if err != nil {
		return nil, err
	}
	return &pb.GetStoredMnemonicResponse{
		Mnemonic: mnemonic,
	}, nil
}



================================================
FILE: grpc/handlers_notif.go
================================================
package grpc

import (
	"context"

	"github.com/FleekHQ/space-daemon/core/space/domain"
	"github.com/FleekHQ/space-daemon/grpc/pb"
	"github.com/FleekHQ/space-daemon/log"
	"github.com/golang/protobuf/ptypes/empty"
)

// mapToPbNotification converts a domain notification to its protobuf form,
// choosing the RelatedObject variant based on the notification type.
func mapToPbNotification(n domain.Notification) *pb.Notification {
	// maybe there is a cooler way to do this (e.g., with reflection)
	switch n.NotificationType {
	case domain.INVITATION:
		inv := n.InvitationValue
		pbpths := make([]*pb.FullPath, 0)
		for _, pth := range n.InvitationValue.ItemPaths {
			pbpth := &pb.FullPath{
				Bucket: pth.Bucket,
				DbId:   pth.DbId,
				Path:   pth.Path,
			}
			pbpths = append(pbpths, pbpth)
		}
		pbinv := &pb.Invitation{
			InvitationID:     n.ID,
			InviterPublicKey: inv.InviterPublicKey,
			Status:           pb.InvitationStatus(inv.Status),
			ItemPaths:        pbpths,
		}
		ro := &pb.Notification_InvitationValue{pbinv}
		parsedNotif := &pb.Notification{
			ID:            n.ID,
			Body:          n.Body,
			ReadAt:        n.ReadAt,
			CreatedAt:     n.CreatedAt,
			RelatedObject: ro,
			Type:          pb.NotificationType(n.NotificationType),
		}
		return parsedNotif
	case domain.USAGEALERT:
		ua := n.UsageAlertValue
		pbua := &pb.UsageAlert{
			Used:    ua.Used,
			Limit:   ua.Limit,
			Message: ua.Message,
		}
		ro := &pb.Notification_UsageAlert{pbua}
		parsedNotif := &pb.Notification{
			ID:            n.ID,
			Body:          n.Body,
			ReadAt:        n.ReadAt,
			CreatedAt:     n.CreatedAt,
			RelatedObject: ro,
			Type:          pb.NotificationType(n.NotificationType),
		}
		return parsedNotif
	case domain.INVITATION_REPLY:
		ir := n.InvitationAcceptValue
		pbir := &pb.InvitationAccept{
			InvitationID: ir.InvitationID,
		}
		ro := &pb.Notification_InvitationAccept{pbir}
		parsedNotif := &pb.Notification{
			ID:            n.ID,
			Body:          n.Body,
			ReadAt:        n.ReadAt,
			CreatedAt:     n.CreatedAt,
			RelatedObject: ro,
			Type:          pb.NotificationType(n.NotificationType),
		}
		return parsedNotif
	case domain.REVOKED_INVITATION:
		pbpths := make([]*pb.FullPath, 0)
		for _, pth := range n.RevokedInvitationValue.ItemPaths {
			pbpth := &pb.FullPath{
				Bucket: pth.Bucket,
				DbId:   pth.DbId,
				Path:   pth.Path,
			}
			pbpths = append(pbpths, pbpth)
		}
		revokedInvite := &pb.RevokedInvitation{
			InviterPublicKey: n.RevokedInvitationValue.InviterPublicKey,
			ItemPaths:        pbpths,
		}
		ro := &pb.Notification_RevokedInvitation{RevokedInvitation: revokedInvite}
		parsedNotif := &pb.Notification{
			ID:            n.ID,
			Body:          n.Body,
			ReadAt:        n.ReadAt,
			CreatedAt:     n.CreatedAt,
			RelatedObject: ro,
			Type:          pb.NotificationType(n.NotificationType),
		}
		return parsedNotif
	default:
		// Unknown types are passed through without a RelatedObject.
		parsedNotif := &pb.Notification{
			ID:        n.ID,
			Body:      n.Body,
			ReadAt:    n.ReadAt,
			CreatedAt: n.CreatedAt,
			Type:      pb.NotificationType(n.NotificationType),
		}
		return parsedNotif
	}
}

// SetNotificationsLastSeenAt stores the timestamp of the last seen notification.
func (srv *grpcServer) SetNotificationsLastSeenAt(ctx context.Context, request *pb.SetNotificationsLastSeenAtRequest) (*pb.SetNotificationsLastSeenAtResponse, error) {
	err := srv.sv.SetNotificationsLastSeenAt(request.Timestamp)
	if err != nil {
		return nil, err
	}
	return &pb.SetNotificationsLastSeenAtResponse{}, nil
}

// GetNotifications returns a page of notifications plus the offset for the
// next page and the last-seen timestamp.
func (srv *grpcServer) GetNotifications(ctx context.Context, request *pb.GetNotificationsRequest) (*pb.GetNotificationsResponse, error) {
	// textile expects int instead of int64 for limit field
	n, err := srv.sv.GetNotifications(ctx, request.Seek, int(request.Limit))
	if err != nil {
		return nil, err
	}

	parsedNotifs := []*pb.Notification{}
	for _, notif := range n {
		parsedNotif := mapToPbNotification(*notif)
		parsedNotifs = append(parsedNotifs, parsedNotif)
	}

	// Next offset is the ID of the last notification in this page.
	var no string
	if len(parsedNotifs) > 0 {
		no = parsedNotifs[len(parsedNotifs)-1].ID
	}

	ls, err := srv.sv.GetNotificationsLastSeenAt()
	if err != nil {
		// error getting last seen at but we dont want to fail the
		// whole request for that
		ls = 0
	}

	return &pb.GetNotificationsResponse{
		Notifications: parsedNotifs,
		NextOffset:    no,
		LastSeenAt:    ls,
	}, nil
}

// ReadNotification is a stub that always returns errNotImplemented.
func (srv *grpcServer) ReadNotification(ctx context.Context, request *pb.ReadNotificationRequest) (*pb.ReadNotificationResponse, error) {
	return nil, errNotImplemented
}

// HandleFilesInvitation accepts or rejects a shared-files invitation.
func (srv *grpcServer) HandleFilesInvitation(
	ctx context.Context,
	request *pb.HandleFilesInvitationRequest,
) (*pb.HandleFilesInvitationResponse, error) {
	err := srv.sv.HandleSharedFilesInvitation(ctx, request.InvitationID, request.Accept)
	if err != nil {
		return nil, err
	}
	return &pb.HandleFilesInvitationResponse{}, nil
}

// NotificationSubscribe registers the caller as the single notification
// listener and blocks until the client disconnects.
func (srv *grpcServer) NotificationSubscribe(empty *empty.Empty, stream pb.SpaceApi_NotificationSubscribeServer) error {
	srv.registerNotificationStream(stream)
	// waits until request is done
	select {
	case <-stream.Context().Done():
		break
	}
	// clean up stream
	srv.registerNotificationStream(nil)
	log.Info("closing stream")
	return nil
}

func (srv *grpcServer) registerNotificationStream(stream pb.SpaceApi_NotificationSubscribeServer) {
	srv.notificationEventStream = stream
}

// sendNotificationEvent pushes a notification event to the subscribed client, if any.
func (srv *grpcServer) sendNotificationEvent(event *pb.NotificationEventResponse) {
	if srv.notificationEventStream != nil {
		log.Info("sending events to client")
		srv.notificationEventStream.Send(event)
	}
}

// SendNotificationEvent maps a domain notification and forwards it to the client.
func (srv *grpcServer) SendNotificationEvent(notif *domain.Notification) {
	parsedNotif := mapToPbNotification(*notif)
	pe := &pb.NotificationEventResponse{
		Notification: parsedNotif,
	}
	srv.sendNotificationEvent(pe)
}



================================================
FILE: grpc/handlers_search.go
================================================
package grpc

import (
	"context"

	"github.com/FleekHQ/space-daemon/grpc/pb"
)

// Search files based on query fields
func (srv *grpcServer) SearchFiles(ctx context.Context, request *pb.SearchFilesRequest) (*pb.SearchFilesResponse, error) {
	// An empty query short-circuits to an empty result set.
	if request.Query == "" {
		return &pb.SearchFilesResponse{
			Entries: []*pb.SearchFilesDirectoryEntry{},
			Query:   request.Query,
		}, nil
	}

	entries, err := srv.sv.SearchFiles(ctx, request.Query)
	if err != nil {
		return nil, err
	}

	searchResponseEntries := make([]*pb.SearchFilesDirectoryEntry, len(entries))
	for i, e := range entries {
		searchResponseEntries[i] = &pb.SearchFilesDirectoryEntry{
			Entry: &pb.ListDirectoryEntry{
				Path:          e.Path,
				IsDir:         e.IsDir,
				Name:          e.Name,
				SizeInBytes:   e.SizeInBytes,
				Created:       e.Created,
				Updated:       e.Updated,
				FileExtension: e.FileExtension,
				IpfsHash:
e.IpfsHash, IsLocallyAvailable: e.LocallyAvailable, IsBackupInProgress: e.BackupInProgress, IsRestoreInProgress: e.RestoreInProgress, }, DbId: e.DbID, Bucket: e.Bucket, } } return &pb.SearchFilesResponse{ Entries: searchResponseEntries, Query: request.Query, }, nil } ================================================ FILE: grpc/handlers_sharing.go ================================================ package grpc import ( "context" "encoding/hex" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/util/address" "github.com/FleekHQ/space-daemon/grpc/pb" "github.com/libp2p/go-libp2p-core/crypto" "github.com/opentracing/opentracing-go" ) func (srv *grpcServer) ShareFilesViaPublicKey(ctx context.Context, request *pb.ShareFilesViaPublicKeyRequest) (*pb.ShareFilesViaPublicKeyResponse, error) { var pks []crypto.PubKey for _, pk := range request.PublicKeys { b, err := hex.DecodeString(pk) if err != nil { return nil, err } p, err := crypto.UnmarshalEd25519PublicKey([]byte(b)) if err != nil { return nil, err } pks = append(pks, p) } var cleanedPaths []domain.FullPath for _, path := range request.Paths { cleanedPath := &domain.FullPath{ Bucket: path.Bucket, Path: path.Path, DbId: path.DbId, } cleanedPaths = append(cleanedPaths, *cleanedPath) } // fail before since actual sharing is irreversible err := srv.sv.AddRecentlySharedPublicKeys(ctx, pks) if err != nil { return nil, err } err = srv.sv.ShareFilesViaPublicKey(ctx, cleanedPaths, pks) if err != nil { return nil, err } return &pb.ShareFilesViaPublicKeyResponse{}, nil } func (srv *grpcServer) UnshareFilesViaPublicKey( ctx context.Context, request *pb.UnshareFilesViaPublicKeyRequest, ) (*pb.UnshareFilesViaPublicKeyResponse, error) { var pks []crypto.PubKey for _, pk := range request.PublicKeys { b, err := hex.DecodeString(pk) if err != nil { return nil, err } p, err := crypto.UnmarshalEd25519PublicKey(b) if err != nil { return nil, err } pks = append(pks, p) } var domainPaths []domain.FullPath 
for _, path := range request.Paths { cleanedPath := domain.FullPath{ Bucket: path.Bucket, Path: path.Path, DbId: path.DbId, } domainPaths = append(domainPaths, cleanedPath) } err := srv.sv.UnshareFilesViaPublicKey(ctx, domainPaths, pks) return &pb.UnshareFilesViaPublicKeyResponse{}, err } func (srv *grpcServer) GetSharedWithMeFiles(ctx context.Context, request *pb.GetSharedWithMeFilesRequest) (*pb.GetSharedWithMeFilesResponse, error) { entries, offset, err := srv.sv.GetSharedWithMeFiles(ctx, request.Seek, int(request.Limit)) if err != nil { return nil, err } dirEntries := make([]*pb.SharedListDirectoryEntry, 0) for _, e := range entries { members := make([]*pb.FileMember, 0) for _, m := range e.Members { members = append(members, &pb.FileMember{ PublicKey: m.PublicKey, Address: m.Address, }) } var backupCount = 0 if e.BackedUp { backupCount = 1 } dirEntry := &pb.SharedListDirectoryEntry{ DbId: e.DbID, Bucket: e.Bucket, SharedBy: e.SharedBy, Entry: &pb.ListDirectoryEntry{ Path: e.Path, IsDir: e.IsDir, Name: e.Name, SizeInBytes: e.SizeInBytes, Created: e.Created, Updated: e.Updated, FileExtension: e.FileExtension, IpfsHash: e.IpfsHash, Members: members, IsLocallyAvailable: e.LocallyAvailable, BackupCount: int64(backupCount), }, IsPublicLink: e.IsPublicLink, } dirEntries = append(dirEntries, dirEntry) } res := &pb.GetSharedWithMeFilesResponse{ Items: dirEntries, NextOffset: offset, } return res, nil } func (srv *grpcServer) GetSharedByMeFiles(ctx context.Context, request *pb.GetSharedByMeFilesRequest) (*pb.GetSharedByMeFilesResponse, error) { entries, offset, err := srv.sv.GetSharedByMeFiles(ctx, request.Seek, int(request.Limit)) if err != nil { return nil, err } dirEntries := make([]*pb.SharedListDirectoryEntry, 0) for _, e := range entries { members := make([]*pb.FileMember, 0) for _, m := range e.Members { members = append(members, &pb.FileMember{ PublicKey: m.PublicKey, Address: m.Address, }) } var backupCount = 0 if e.BackedUp { backupCount = 1 } dirEntry := 
&pb.SharedListDirectoryEntry{ DbId: e.DbID, Bucket: e.Bucket, Entry: &pb.ListDirectoryEntry{ Path: e.Path, IsDir: e.IsDir, Name: e.Name, SizeInBytes: e.SizeInBytes, Created: e.Created, Updated: e.Updated, FileExtension: e.FileExtension, IpfsHash: e.IpfsHash, Members: members, IsLocallyAvailable: e.LocallyAvailable, BackupCount: int64(backupCount), }, IsPublicLink: e.IsPublicLink, } dirEntries = append(dirEntries, dirEntry) } res := &pb.GetSharedByMeFilesResponse{ Items: dirEntries, NextOffset: offset, } return res, nil } func (srv *grpcServer) GeneratePublicFileLink(ctx context.Context, request *pb.GeneratePublicFileLinkRequest) (*pb.GeneratePublicFileLinkResponse, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "GeneratePublicFileLink") defer span.Finish() res, err := srv.sv.GenerateFilesSharingLink(ctx, request.Password, request.ItemPaths, request.Bucket, request.DbId) if err != nil { return nil, err } return &pb.GeneratePublicFileLinkResponse{ Link: res.SpaceDownloadLink, FileCid: res.SharedFileCid, }, nil } func (srv *grpcServer) OpenPublicFile(ctx context.Context, request *pb.OpenPublicFileRequest) (*pb.OpenPublicFileResponse, error) { res, err := srv.sv.OpenSharedFile(ctx, request.FileCid, request.Password, request.Filename) if err != nil { return nil, err } return &pb.OpenPublicFileResponse{ Location: res.Location, }, nil } func (srv *grpcServer) GetRecentlySharedWith(ctx context.Context, request *pb.GetRecentlySharedWithRequest) (*pb.GetRecentlySharedWithResponse, error) { fileMembers := make([]*pb.FileMember, 0) pks, err := srv.sv.RecentlySharedPublicKeys(ctx) if err != nil { return nil, err } for _, pk := range pks { pubBytes, err := pk.Raw() if err != nil { return nil, err } fileMember := &pb.FileMember{ PublicKey: hex.EncodeToString(pubBytes), Address: address.DeriveAddress(pk), } fileMembers = append(fileMembers, fileMember) } res := &pb.GetRecentlySharedWithResponse{ Members: fileMembers, } return res, nil } 
================================================ FILE: grpc/handlers_textile.go ================================================ package grpc import ( "context" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/core/textile" "github.com/FleekHQ/space-daemon/grpc/pb" ) func parseBucket(ctx context.Context, b textile.Bucket) *pb.Bucket { bd := b.GetData() itemsCount, _ := b.ItemsCount(ctx, bd.Path, false) br := &pb.Bucket{ Key: bd.Key, Name: bd.Name, Path: bd.Path, CreatedAt: bd.CreatedAt, UpdatedAt: bd.UpdatedAt, ItemsCount: itemsCount, // TODO: Fill these out from metathread + identity service call Members: []*pb.BucketMember{}, IsPersonalBucket: false, } return br } func (srv *grpcServer) CreateBucket(ctx context.Context, request *pb.CreateBucketRequest) (*pb.CreateBucketResponse, error) { b, err := srv.sv.CreateBucket(ctx, request.Slug) if err != nil { return nil, err } parsedBucket := parseBucket(ctx, b) return &pb.CreateBucketResponse{ Bucket: parsedBucket, }, nil } func (srv *grpcServer) ListBuckets(ctx context.Context, request *pb.ListBucketsRequest) (*pb.ListBucketsResponse, error) { buckets, err := srv.sv.ListBuckets(ctx) if err != nil { return nil, err } parsedBuckets := []*pb.Bucket{} for _, b := range buckets { parsedBucket := parseBucket(ctx, b) parsedBuckets = append(parsedBuckets, parsedBucket) } return &pb.ListBucketsResponse{ Buckets: parsedBuckets, }, nil } func (srv *grpcServer) ShareBucket(ctx context.Context, request *pb.ShareBucketRequest) (*pb.ShareBucketResponse, error) { i, err := srv.sv.ShareBucket(ctx, request.Bucket) if err != nil { return nil, err } ti := &pb.ThreadInfo{ Addresses: i.Addresses, Key: i.Key, } return &pb.ShareBucketResponse{ Threadinfo: ti, }, nil } func (srv *grpcServer) JoinBucket(ctx context.Context, request *pb.JoinBucketRequest) (*pb.JoinBucketResponse, error) { ti := &domain.ThreadInfo{ Addresses: request.Threadinfo.Addresses, Key: request.Threadinfo.Key, } r, err := 
srv.sv.JoinBucket(ctx, request.Bucket, ti) if err != nil { return nil, err } return &pb.JoinBucketResponse{ Result: r, }, nil } ================================================ FILE: grpc/handlers_vault.go ================================================ package grpc import ( "context" "github.com/FleekHQ/space-daemon/core/space/domain" "github.com/FleekHQ/space-daemon/grpc/pb" ) func (srv *grpcServer) BackupKeysByPassphrase(ctx context.Context, request *pb.BackupKeysByPassphraseRequest) (*pb.BackupKeysByPassphraseResponse, error) { resp := &pb.BackupKeysByPassphraseResponse{} err := srv.sv.BackupKeysByPassphrase(ctx, request.Uuid, request.Passphrase, domain.KeyBackupType(request.Type)) return resp, err } func (srv *grpcServer) RecoverKeysByPassphrase(ctx context.Context, request *pb.RecoverKeysByPassphraseRequest) (*pb.RecoverKeysByPassphraseResponse, error) { resp := &pb.RecoverKeysByPassphraseResponse{} err := srv.sv.RecoverKeysByPassphrase(ctx, request.Uuid, request.Passphrase, domain.KeyBackupType(request.Type)) return resp, err } func (srv *grpcServer) CreateLocalKeysBackup(ctx context.Context, request *pb.CreateLocalKeysBackupRequest) (*pb.CreateLocalKeysBackupResponse, error) { resp := &pb.CreateLocalKeysBackupResponse{} err := srv.sv.CreateLocalKeysBackup(ctx, request.PathToKeyBackup) return resp, err } func (srv *grpcServer) RecoverKeysByLocalBackup(ctx context.Context, request *pb.RecoverKeysByLocalBackupRequest) (*pb.RecoverKeysByLocalBackupResponse, error) { resp := &pb.RecoverKeysByLocalBackupResponse{} err := srv.sv.RecoverKeysByLocalBackup(ctx, request.PathToKeyBackup) return resp, err } func (srv *grpcServer) TestKeysPassphrase(ctx context.Context, request *pb.TestKeysPassphraseRequest) (*pb.TestKeysPassphraseResponse, error) { resp := &pb.TestKeysPassphraseResponse{} err := srv.sv.TestPassphrase(ctx, request.Uuid, request.Passphrase) return resp, err } ================================================ FILE: grpc/pb/space.pb.go 
================================================ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.6.1 // source: space.proto package pb import ( context "context" proto "github.com/golang/protobuf/proto" empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 type EventType int32 const ( EventType_ENTRY_ADDED EventType = 0 EventType_ENTRY_DELETED EventType = 1 EventType_ENTRY_UPDATED EventType = 2 EventType_ENTRY_BACKUP_IN_PROGRESS EventType = 3 EventType_ENTRY_BACKUP_READY EventType = 4 EventType_ENTRY_RESTORE_IN_PROGRESS EventType = 5 EventType_ENTRY_RESTORE_READY EventType = 6 EventType_FOLDER_ADDED EventType = 7 EventType_FOLDER_DELETED EventType = 8 EventType_FOLDER_UPDATED EventType = 9 ) // Enum value maps for EventType. 
var ( EventType_name = map[int32]string{ 0: "ENTRY_ADDED", 1: "ENTRY_DELETED", 2: "ENTRY_UPDATED", 3: "ENTRY_BACKUP_IN_PROGRESS", 4: "ENTRY_BACKUP_READY", 5: "ENTRY_RESTORE_IN_PROGRESS", 6: "ENTRY_RESTORE_READY", 7: "FOLDER_ADDED", 8: "FOLDER_DELETED", 9: "FOLDER_UPDATED", } EventType_value = map[string]int32{ "ENTRY_ADDED": 0, "ENTRY_DELETED": 1, "ENTRY_UPDATED": 2, "ENTRY_BACKUP_IN_PROGRESS": 3, "ENTRY_BACKUP_READY": 4, "ENTRY_RESTORE_IN_PROGRESS": 5, "ENTRY_RESTORE_READY": 6, "FOLDER_ADDED": 7, "FOLDER_DELETED": 8, "FOLDER_UPDATED": 9, } ) func (x EventType) Enum() *EventType { p := new(EventType) *p = x return p } func (x EventType) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } func (EventType) Descriptor() protoreflect.EnumDescriptor { return file_space_proto_enumTypes[0].Descriptor() } func (EventType) Type() protoreflect.EnumType { return &file_space_proto_enumTypes[0] } func (x EventType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use EventType.Descriptor instead. func (EventType) EnumDescriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{0} } type KeyBackupType int32 const ( KeyBackupType_PASSWORD KeyBackupType = 0 KeyBackupType_GOOGLE KeyBackupType = 1 KeyBackupType_TWITTER KeyBackupType = 2 KeyBackupType_EMAIL KeyBackupType = 3 ) // Enum value maps for KeyBackupType. 
var ( KeyBackupType_name = map[int32]string{ 0: "PASSWORD", 1: "GOOGLE", 2: "TWITTER", 3: "EMAIL", } KeyBackupType_value = map[string]int32{ "PASSWORD": 0, "GOOGLE": 1, "TWITTER": 2, "EMAIL": 3, } ) func (x KeyBackupType) Enum() *KeyBackupType { p := new(KeyBackupType) *p = x return p } func (x KeyBackupType) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } func (KeyBackupType) Descriptor() protoreflect.EnumDescriptor { return file_space_proto_enumTypes[1].Descriptor() } func (KeyBackupType) Type() protoreflect.EnumType { return &file_space_proto_enumTypes[1] } func (x KeyBackupType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use KeyBackupType.Descriptor instead. func (KeyBackupType) EnumDescriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{1} } type FuseState int32 const ( FuseState_UNSUPPORTED FuseState = 0 FuseState_NOT_INSTALLED FuseState = 1 FuseState_UNMOUNTED FuseState = 2 FuseState_MOUNTED FuseState = 3 ) // Enum value maps for FuseState. var ( FuseState_name = map[int32]string{ 0: "UNSUPPORTED", 1: "NOT_INSTALLED", 2: "UNMOUNTED", 3: "MOUNTED", } FuseState_value = map[string]int32{ "UNSUPPORTED": 0, "NOT_INSTALLED": 1, "UNMOUNTED": 2, "MOUNTED": 3, } ) func (x FuseState) Enum() *FuseState { p := new(FuseState) *p = x return p } func (x FuseState) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } func (FuseState) Descriptor() protoreflect.EnumDescriptor { return file_space_proto_enumTypes[2].Descriptor() } func (FuseState) Type() protoreflect.EnumType { return &file_space_proto_enumTypes[2] } func (x FuseState) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use FuseState.Descriptor instead. 
func (FuseState) EnumDescriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{2} } type NotificationType int32 const ( NotificationType_UNKNOWN NotificationType = 0 NotificationType_INVITATION NotificationType = 1 NotificationType_USAGEALERT NotificationType = 2 NotificationType_INVITATION_REPLY NotificationType = 3 NotificationType_REVOKED_INVITATION NotificationType = 4 ) // Enum value maps for NotificationType. var ( NotificationType_name = map[int32]string{ 0: "UNKNOWN", 1: "INVITATION", 2: "USAGEALERT", 3: "INVITATION_REPLY", 4: "REVOKED_INVITATION", } NotificationType_value = map[string]int32{ "UNKNOWN": 0, "INVITATION": 1, "USAGEALERT": 2, "INVITATION_REPLY": 3, "REVOKED_INVITATION": 4, } ) func (x NotificationType) Enum() *NotificationType { p := new(NotificationType) *p = x return p } func (x NotificationType) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } func (NotificationType) Descriptor() protoreflect.EnumDescriptor { return file_space_proto_enumTypes[3].Descriptor() } func (NotificationType) Type() protoreflect.EnumType { return &file_space_proto_enumTypes[3] } func (x NotificationType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use NotificationType.Descriptor instead. func (NotificationType) EnumDescriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{3} } type InvitationStatus int32 const ( InvitationStatus_PENDING InvitationStatus = 0 InvitationStatus_ACCEPTED InvitationStatus = 1 InvitationStatus_REJECTED InvitationStatus = 2 ) // Enum value maps for InvitationStatus. 
var ( InvitationStatus_name = map[int32]string{ 0: "PENDING", 1: "ACCEPTED", 2: "REJECTED", } InvitationStatus_value = map[string]int32{ "PENDING": 0, "ACCEPTED": 1, "REJECTED": 2, } ) func (x InvitationStatus) Enum() *InvitationStatus { p := new(InvitationStatus) *p = x return p } func (x InvitationStatus) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } func (InvitationStatus) Descriptor() protoreflect.EnumDescriptor { return file_space_proto_enumTypes[4].Descriptor() } func (InvitationStatus) Type() protoreflect.EnumType { return &file_space_proto_enumTypes[4] } func (x InvitationStatus) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use InvitationStatus.Descriptor instead. func (InvitationStatus) EnumDescriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{4} } type SearchFilesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` } func (x *SearchFilesRequest) Reset() { *x = SearchFilesRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SearchFilesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*SearchFilesRequest) ProtoMessage() {} func (x *SearchFilesRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SearchFilesRequest.ProtoReflect.Descriptor instead. 
func (*SearchFilesRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{0} } func (x *SearchFilesRequest) GetQuery() string { if x != nil { return x.Query } return "" } type SearchFilesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Entries []*SearchFilesDirectoryEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` } func (x *SearchFilesResponse) Reset() { *x = SearchFilesResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SearchFilesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*SearchFilesResponse) ProtoMessage() {} func (x *SearchFilesResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SearchFilesResponse.ProtoReflect.Descriptor instead. 
func (*SearchFilesResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{1} } func (x *SearchFilesResponse) GetEntries() []*SearchFilesDirectoryEntry { if x != nil { return x.Entries } return nil } func (x *SearchFilesResponse) GetQuery() string { if x != nil { return x.Query } return "" } type SearchFilesDirectoryEntry struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Entry *ListDirectoryEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` DbId string `protobuf:"bytes,2,opt,name=dbId,proto3" json:"dbId,omitempty"` Bucket string `protobuf:"bytes,3,opt,name=bucket,proto3" json:"bucket,omitempty"` } func (x *SearchFilesDirectoryEntry) Reset() { *x = SearchFilesDirectoryEntry{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SearchFilesDirectoryEntry) String() string { return protoimpl.X.MessageStringOf(x) } func (*SearchFilesDirectoryEntry) ProtoMessage() {} func (x *SearchFilesDirectoryEntry) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SearchFilesDirectoryEntry.ProtoReflect.Descriptor instead. 
func (*SearchFilesDirectoryEntry) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{2} } func (x *SearchFilesDirectoryEntry) GetEntry() *ListDirectoryEntry { if x != nil { return x.Entry } return nil } func (x *SearchFilesDirectoryEntry) GetDbId() string { if x != nil { return x.DbId } return "" } func (x *SearchFilesDirectoryEntry) GetBucket() string { if x != nil { return x.Bucket } return "" } type SetNotificationsLastSeenAtRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (x *SetNotificationsLastSeenAtRequest) Reset() { *x = SetNotificationsLastSeenAtRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SetNotificationsLastSeenAtRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*SetNotificationsLastSeenAtRequest) ProtoMessage() {} func (x *SetNotificationsLastSeenAtRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SetNotificationsLastSeenAtRequest.ProtoReflect.Descriptor instead. 
func (*SetNotificationsLastSeenAtRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{3} } func (x *SetNotificationsLastSeenAtRequest) GetTimestamp() int64 { if x != nil { return x.Timestamp } return 0 } type SetNotificationsLastSeenAtResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *SetNotificationsLastSeenAtResponse) Reset() { *x = SetNotificationsLastSeenAtResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SetNotificationsLastSeenAtResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*SetNotificationsLastSeenAtResponse) ProtoMessage() {} func (x *SetNotificationsLastSeenAtResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SetNotificationsLastSeenAtResponse.ProtoReflect.Descriptor instead. 
func (*SetNotificationsLastSeenAtResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{4} } type GetSharedWithMeFilesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Seek string `protobuf:"bytes,1,opt,name=seek,proto3" json:"seek,omitempty"` Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` } func (x *GetSharedWithMeFilesRequest) Reset() { *x = GetSharedWithMeFilesRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetSharedWithMeFilesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetSharedWithMeFilesRequest) ProtoMessage() {} func (x *GetSharedWithMeFilesRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetSharedWithMeFilesRequest.ProtoReflect.Descriptor instead. 
func (*GetSharedWithMeFilesRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{5} } func (x *GetSharedWithMeFilesRequest) GetSeek() string { if x != nil { return x.Seek } return "" } func (x *GetSharedWithMeFilesRequest) GetLimit() int64 { if x != nil { return x.Limit } return 0 } type GetSharedWithMeFilesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Items []*SharedListDirectoryEntry `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` NextOffset string `protobuf:"bytes,2,opt,name=nextOffset,proto3" json:"nextOffset,omitempty"` } func (x *GetSharedWithMeFilesResponse) Reset() { *x = GetSharedWithMeFilesResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetSharedWithMeFilesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetSharedWithMeFilesResponse) ProtoMessage() {} func (x *GetSharedWithMeFilesResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetSharedWithMeFilesResponse.ProtoReflect.Descriptor instead. 
func (*GetSharedWithMeFilesResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{6} } func (x *GetSharedWithMeFilesResponse) GetItems() []*SharedListDirectoryEntry { if x != nil { return x.Items } return nil } func (x *GetSharedWithMeFilesResponse) GetNextOffset() string { if x != nil { return x.NextOffset } return "" } type GetSharedByMeFilesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Seek string `protobuf:"bytes,1,opt,name=seek,proto3" json:"seek,omitempty"` Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` } func (x *GetSharedByMeFilesRequest) Reset() { *x = GetSharedByMeFilesRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetSharedByMeFilesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetSharedByMeFilesRequest) ProtoMessage() {} func (x *GetSharedByMeFilesRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetSharedByMeFilesRequest.ProtoReflect.Descriptor instead. 
func (*GetSharedByMeFilesRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{7} } func (x *GetSharedByMeFilesRequest) GetSeek() string { if x != nil { return x.Seek } return "" } func (x *GetSharedByMeFilesRequest) GetLimit() int64 { if x != nil { return x.Limit } return 0 } type GetSharedByMeFilesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Items []*SharedListDirectoryEntry `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` NextOffset string `protobuf:"bytes,2,opt,name=nextOffset,proto3" json:"nextOffset,omitempty"` } func (x *GetSharedByMeFilesResponse) Reset() { *x = GetSharedByMeFilesResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetSharedByMeFilesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetSharedByMeFilesResponse) ProtoMessage() {} func (x *GetSharedByMeFilesResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetSharedByMeFilesResponse.ProtoReflect.Descriptor instead. 
func (*GetSharedByMeFilesResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{8} } func (x *GetSharedByMeFilesResponse) GetItems() []*SharedListDirectoryEntry { if x != nil { return x.Items } return nil } func (x *GetSharedByMeFilesResponse) GetNextOffset() string { if x != nil { return x.NextOffset } return "" } type GetUsageInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *GetUsageInfoRequest) Reset() { *x = GetUsageInfoRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetUsageInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetUsageInfoRequest) ProtoMessage() {} func (x *GetUsageInfoRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetUsageInfoRequest.ProtoReflect.Descriptor instead. 
func (*GetUsageInfoRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{9} } type GetUsageInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields LocalStarogeUsed uint64 `protobuf:"varint,1,opt,name=localStarogeUsed,proto3" json:"localStarogeUsed,omitempty"` LocalBandwidthUsed uint64 `protobuf:"varint,2,opt,name=localBandwidthUsed,proto3" json:"localBandwidthUsed,omitempty"` SpaceStorageUsed uint64 `protobuf:"varint,3,opt,name=spaceStorageUsed,proto3" json:"spaceStorageUsed,omitempty"` SpaceBandwidthUsed uint64 `protobuf:"varint,4,opt,name=spaceBandwidthUsed,proto3" json:"spaceBandwidthUsed,omitempty"` UsageQuota uint64 `protobuf:"varint,5,opt,name=usageQuota,proto3" json:"usageQuota,omitempty"` } func (x *GetUsageInfoResponse) Reset() { *x = GetUsageInfoResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetUsageInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetUsageInfoResponse) ProtoMessage() {} func (x *GetUsageInfoResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetUsageInfoResponse.ProtoReflect.Descriptor instead. 
func (*GetUsageInfoResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{10} } func (x *GetUsageInfoResponse) GetLocalStarogeUsed() uint64 { if x != nil { return x.LocalStarogeUsed } return 0 } func (x *GetUsageInfoResponse) GetLocalBandwidthUsed() uint64 { if x != nil { return x.LocalBandwidthUsed } return 0 } func (x *GetUsageInfoResponse) GetSpaceStorageUsed() uint64 { if x != nil { return x.SpaceStorageUsed } return 0 } func (x *GetUsageInfoResponse) GetSpaceBandwidthUsed() uint64 { if x != nil { return x.SpaceBandwidthUsed } return 0 } func (x *GetUsageInfoResponse) GetUsageQuota() uint64 { if x != nil { return x.UsageQuota } return 0 } type ToggleBucketBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` Backup bool `protobuf:"varint,2,opt,name=backup,proto3" json:"backup,omitempty"` } func (x *ToggleBucketBackupRequest) Reset() { *x = ToggleBucketBackupRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ToggleBucketBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ToggleBucketBackupRequest) ProtoMessage() {} func (x *ToggleBucketBackupRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ToggleBucketBackupRequest.ProtoReflect.Descriptor instead. 
func (*ToggleBucketBackupRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{11} } func (x *ToggleBucketBackupRequest) GetBucket() string { if x != nil { return x.Bucket } return "" } func (x *ToggleBucketBackupRequest) GetBackup() bool { if x != nil { return x.Backup } return false } type ToggleBucketBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *ToggleBucketBackupResponse) Reset() { *x = ToggleBucketBackupResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ToggleBucketBackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ToggleBucketBackupResponse) ProtoMessage() {} func (x *ToggleBucketBackupResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ToggleBucketBackupResponse.ProtoReflect.Descriptor instead. 
func (*ToggleBucketBackupResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{12} } type BucketBackupRestoreRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` } func (x *BucketBackupRestoreRequest) Reset() { *x = BucketBackupRestoreRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BucketBackupRestoreRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*BucketBackupRestoreRequest) ProtoMessage() {} func (x *BucketBackupRestoreRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BucketBackupRestoreRequest.ProtoReflect.Descriptor instead. 
func (*BucketBackupRestoreRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{13} } func (x *BucketBackupRestoreRequest) GetBucket() string { if x != nil { return x.Bucket } return "" } type BucketBackupRestoreResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *BucketBackupRestoreResponse) Reset() { *x = BucketBackupRestoreResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BucketBackupRestoreResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*BucketBackupRestoreResponse) ProtoMessage() {} func (x *BucketBackupRestoreResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BucketBackupRestoreResponse.ProtoReflect.Descriptor instead. 
func (*BucketBackupRestoreResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{14} } type ListDirectoriesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` OmitMembers bool `protobuf:"varint,2,opt,name=omitMembers,proto3" json:"omitMembers,omitempty"` } func (x *ListDirectoriesRequest) Reset() { *x = ListDirectoriesRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListDirectoriesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListDirectoriesRequest) ProtoMessage() {} func (x *ListDirectoriesRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListDirectoriesRequest.ProtoReflect.Descriptor instead. 
func (*ListDirectoriesRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{15} } func (x *ListDirectoriesRequest) GetBucket() string { if x != nil { return x.Bucket } return "" } func (x *ListDirectoriesRequest) GetOmitMembers() bool { if x != nil { return x.OmitMembers } return false } type FileMember struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields PublicKey string `protobuf:"bytes,1,opt,name=publicKey,proto3" json:"publicKey,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` } func (x *FileMember) Reset() { *x = FileMember{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FileMember) String() string { return protoimpl.X.MessageStringOf(x) } func (*FileMember) ProtoMessage() {} func (x *FileMember) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FileMember.ProtoReflect.Descriptor instead. 
func (*FileMember) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{16} } func (x *FileMember) GetPublicKey() string { if x != nil { return x.PublicKey } return "" } func (x *FileMember) GetAddress() string { if x != nil { return x.Address } return "" } type ListDirectoryEntry struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` IsDir bool `protobuf:"varint,2,opt,name=isDir,proto3" json:"isDir,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` SizeInBytes string `protobuf:"bytes,4,opt,name=sizeInBytes,proto3" json:"sizeInBytes,omitempty"` Created string `protobuf:"bytes,5,opt,name=created,proto3" json:"created,omitempty"` Updated string `protobuf:"bytes,6,opt,name=updated,proto3" json:"updated,omitempty"` FileExtension string `protobuf:"bytes,7,opt,name=fileExtension,proto3" json:"fileExtension,omitempty"` IpfsHash string `protobuf:"bytes,8,opt,name=ipfsHash,proto3" json:"ipfsHash,omitempty"` IsLocallyAvailable bool `protobuf:"varint,9,opt,name=isLocallyAvailable,proto3" json:"isLocallyAvailable,omitempty"` BackupCount int64 `protobuf:"varint,10,opt,name=backupCount,proto3" json:"backupCount,omitempty"` Members []*FileMember `protobuf:"bytes,11,rep,name=members,proto3" json:"members,omitempty"` IsBackupInProgress bool `protobuf:"varint,12,opt,name=isBackupInProgress,proto3" json:"isBackupInProgress,omitempty"` IsRestoreInProgress bool `protobuf:"varint,13,opt,name=isRestoreInProgress,proto3" json:"isRestoreInProgress,omitempty"` } func (x *ListDirectoryEntry) Reset() { *x = ListDirectoryEntry{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListDirectoryEntry) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListDirectoryEntry) ProtoMessage() {} func 
(x *ListDirectoryEntry) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListDirectoryEntry.ProtoReflect.Descriptor instead. func (*ListDirectoryEntry) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{17} } func (x *ListDirectoryEntry) GetPath() string { if x != nil { return x.Path } return "" } func (x *ListDirectoryEntry) GetIsDir() bool { if x != nil { return x.IsDir } return false } func (x *ListDirectoryEntry) GetName() string { if x != nil { return x.Name } return "" } func (x *ListDirectoryEntry) GetSizeInBytes() string { if x != nil { return x.SizeInBytes } return "" } func (x *ListDirectoryEntry) GetCreated() string { if x != nil { return x.Created } return "" } func (x *ListDirectoryEntry) GetUpdated() string { if x != nil { return x.Updated } return "" } func (x *ListDirectoryEntry) GetFileExtension() string { if x != nil { return x.FileExtension } return "" } func (x *ListDirectoryEntry) GetIpfsHash() string { if x != nil { return x.IpfsHash } return "" } func (x *ListDirectoryEntry) GetIsLocallyAvailable() bool { if x != nil { return x.IsLocallyAvailable } return false } func (x *ListDirectoryEntry) GetBackupCount() int64 { if x != nil { return x.BackupCount } return 0 } func (x *ListDirectoryEntry) GetMembers() []*FileMember { if x != nil { return x.Members } return nil } func (x *ListDirectoryEntry) GetIsBackupInProgress() bool { if x != nil { return x.IsBackupInProgress } return false } func (x *ListDirectoryEntry) GetIsRestoreInProgress() bool { if x != nil { return x.IsRestoreInProgress } return false } type SharedListDirectoryEntry struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Entry *ListDirectoryEntry 
`protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` DbId string `protobuf:"bytes,2,opt,name=dbId,proto3" json:"dbId,omitempty"` Bucket string `protobuf:"bytes,3,opt,name=bucket,proto3" json:"bucket,omitempty"` IsPublicLink bool `protobuf:"varint,4,opt,name=isPublicLink,proto3" json:"isPublicLink,omitempty"` SharedBy string `protobuf:"bytes,5,opt,name=sharedBy,proto3" json:"sharedBy,omitempty"` } func (x *SharedListDirectoryEntry) Reset() { *x = SharedListDirectoryEntry{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *SharedListDirectoryEntry) String() string { return protoimpl.X.MessageStringOf(x) } func (*SharedListDirectoryEntry) ProtoMessage() {} func (x *SharedListDirectoryEntry) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use SharedListDirectoryEntry.ProtoReflect.Descriptor instead. 
func (*SharedListDirectoryEntry) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{18} } func (x *SharedListDirectoryEntry) GetEntry() *ListDirectoryEntry { if x != nil { return x.Entry } return nil } func (x *SharedListDirectoryEntry) GetDbId() string { if x != nil { return x.DbId } return "" } func (x *SharedListDirectoryEntry) GetBucket() string { if x != nil { return x.Bucket } return "" } func (x *SharedListDirectoryEntry) GetIsPublicLink() bool { if x != nil { return x.IsPublicLink } return false } func (x *SharedListDirectoryEntry) GetSharedBy() string { if x != nil { return x.SharedBy } return "" } type ListDirectoriesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Entries []*ListDirectoryEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` } func (x *ListDirectoriesResponse) Reset() { *x = ListDirectoriesResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListDirectoriesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListDirectoriesResponse) ProtoMessage() {} func (x *ListDirectoriesResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListDirectoriesResponse.ProtoReflect.Descriptor instead. 
func (*ListDirectoriesResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{19} } func (x *ListDirectoriesResponse) GetEntries() []*ListDirectoryEntry { if x != nil { return x.Entries } return nil } type ListDirectoryRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` OmitMembers bool `protobuf:"varint,3,opt,name=omitMembers,proto3" json:"omitMembers,omitempty"` } func (x *ListDirectoryRequest) Reset() { *x = ListDirectoryRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListDirectoryRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListDirectoryRequest) ProtoMessage() {} func (x *ListDirectoryRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListDirectoryRequest.ProtoReflect.Descriptor instead. 
func (*ListDirectoryRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{20} } func (x *ListDirectoryRequest) GetPath() string { if x != nil { return x.Path } return "" } func (x *ListDirectoryRequest) GetBucket() string { if x != nil { return x.Bucket } return "" } func (x *ListDirectoryRequest) GetOmitMembers() bool { if x != nil { return x.OmitMembers } return false } type ListDirectoryResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Entries []*ListDirectoryEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` } func (x *ListDirectoryResponse) Reset() { *x = ListDirectoryResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListDirectoryResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListDirectoryResponse) ProtoMessage() {} func (x *ListDirectoryResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListDirectoryResponse.ProtoReflect.Descriptor instead. 
func (*ListDirectoryResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{21} } func (x *ListDirectoryResponse) GetEntries() []*ListDirectoryEntry { if x != nil { return x.Entries } return nil } type CreateBucketRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Slug string `protobuf:"bytes,1,opt,name=slug,proto3" json:"slug,omitempty"` } func (x *CreateBucketRequest) Reset() { *x = CreateBucketRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateBucketRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateBucketRequest) ProtoMessage() {} func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateBucketRequest.ProtoReflect.Descriptor instead. 
func (*CreateBucketRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{22} } func (x *CreateBucketRequest) GetSlug() string { if x != nil { return x.Slug } return "" } type BucketMember struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` PublicKey string `protobuf:"bytes,2,opt,name=publicKey,proto3" json:"publicKey,omitempty"` IsOwner bool `protobuf:"varint,3,opt,name=isOwner,proto3" json:"isOwner,omitempty"` HasJoined bool `protobuf:"varint,4,opt,name=hasJoined,proto3" json:"hasJoined,omitempty"` } func (x *BucketMember) Reset() { *x = BucketMember{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BucketMember) String() string { return protoimpl.X.MessageStringOf(x) } func (*BucketMember) ProtoMessage() {} func (x *BucketMember) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BucketMember.ProtoReflect.Descriptor instead. 
func (*BucketMember) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{23} } func (x *BucketMember) GetAddress() string { if x != nil { return x.Address } return "" } func (x *BucketMember) GetPublicKey() string { if x != nil { return x.PublicKey } return "" } func (x *BucketMember) GetIsOwner() bool { if x != nil { return x.IsOwner } return false } func (x *BucketMember) GetHasJoined() bool { if x != nil { return x.HasJoined } return false } type Bucket struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` CreatedAt int64 `protobuf:"varint,4,opt,name=createdAt,proto3" json:"createdAt,omitempty"` UpdatedAt int64 `protobuf:"varint,5,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` Members []*BucketMember `protobuf:"bytes,6,rep,name=members,proto3" json:"members,omitempty"` IsPersonalBucket bool `protobuf:"varint,7,opt,name=isPersonalBucket,proto3" json:"isPersonalBucket,omitempty"` IsBackupEnabled bool `protobuf:"varint,8,opt,name=isBackupEnabled,proto3" json:"isBackupEnabled,omitempty"` ItemsCount int32 `protobuf:"varint,9,opt,name=itemsCount,proto3" json:"itemsCount,omitempty"` } func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Bucket) String() string { return protoimpl.X.MessageStringOf(x) } func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // 
Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{24} } func (x *Bucket) GetKey() string { if x != nil { return x.Key } return "" } func (x *Bucket) GetName() string { if x != nil { return x.Name } return "" } func (x *Bucket) GetPath() string { if x != nil { return x.Path } return "" } func (x *Bucket) GetCreatedAt() int64 { if x != nil { return x.CreatedAt } return 0 } func (x *Bucket) GetUpdatedAt() int64 { if x != nil { return x.UpdatedAt } return 0 } func (x *Bucket) GetMembers() []*BucketMember { if x != nil { return x.Members } return nil } func (x *Bucket) GetIsPersonalBucket() bool { if x != nil { return x.IsPersonalBucket } return false } func (x *Bucket) GetIsBackupEnabled() bool { if x != nil { return x.IsBackupEnabled } return false } func (x *Bucket) GetItemsCount() int32 { if x != nil { return x.ItemsCount } return 0 } type CreateBucketResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` } func (x *CreateBucketResponse) Reset() { *x = CreateBucketResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateBucketResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateBucketResponse) ProtoMessage() {} func (x *CreateBucketResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateBucketResponse.ProtoReflect.Descriptor instead. 
func (*CreateBucketResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{25} } func (x *CreateBucketResponse) GetBucket() *Bucket { if x != nil { return x.Bucket } return nil } type GenerateKeyPairRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *GenerateKeyPairRequest) Reset() { *x = GenerateKeyPairRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GenerateKeyPairRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GenerateKeyPairRequest) ProtoMessage() {} func (x *GenerateKeyPairRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GenerateKeyPairRequest.ProtoReflect.Descriptor instead. 
func (*GenerateKeyPairRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{26} } type GenerateKeyPairResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Mnemonic string `protobuf:"bytes,1,opt,name=mnemonic,proto3" json:"mnemonic,omitempty"` } func (x *GenerateKeyPairResponse) Reset() { *x = GenerateKeyPairResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GenerateKeyPairResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GenerateKeyPairResponse) ProtoMessage() {} func (x *GenerateKeyPairResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GenerateKeyPairResponse.ProtoReflect.Descriptor instead. 
func (*GenerateKeyPairResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{27} } func (x *GenerateKeyPairResponse) GetMnemonic() string { if x != nil { return x.Mnemonic } return "" } type GetStoredMnemonicRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *GetStoredMnemonicRequest) Reset() { *x = GetStoredMnemonicRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetStoredMnemonicRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetStoredMnemonicRequest) ProtoMessage() {} func (x *GetStoredMnemonicRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetStoredMnemonicRequest.ProtoReflect.Descriptor instead. 
func (*GetStoredMnemonicRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{28} } type GetStoredMnemonicResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Mnemonic string `protobuf:"bytes,1,opt,name=mnemonic,proto3" json:"mnemonic,omitempty"` } func (x *GetStoredMnemonicResponse) Reset() { *x = GetStoredMnemonicResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetStoredMnemonicResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetStoredMnemonicResponse) ProtoMessage() {} func (x *GetStoredMnemonicResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetStoredMnemonicResponse.ProtoReflect.Descriptor instead. 
func (*GetStoredMnemonicResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{29} } func (x *GetStoredMnemonicResponse) GetMnemonic() string { if x != nil { return x.Mnemonic } return "" } type RestoreKeyPairViaMnemonicRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Mnemonic string `protobuf:"bytes,1,opt,name=mnemonic,proto3" json:"mnemonic,omitempty"` } func (x *RestoreKeyPairViaMnemonicRequest) Reset() { *x = RestoreKeyPairViaMnemonicRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RestoreKeyPairViaMnemonicRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*RestoreKeyPairViaMnemonicRequest) ProtoMessage() {} func (x *RestoreKeyPairViaMnemonicRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RestoreKeyPairViaMnemonicRequest.ProtoReflect.Descriptor instead. 
func (*RestoreKeyPairViaMnemonicRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{30} } func (x *RestoreKeyPairViaMnemonicRequest) GetMnemonic() string { if x != nil { return x.Mnemonic } return "" } type RestoreKeyPairViaMnemonicResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *RestoreKeyPairViaMnemonicResponse) Reset() { *x = RestoreKeyPairViaMnemonicResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RestoreKeyPairViaMnemonicResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*RestoreKeyPairViaMnemonicResponse) ProtoMessage() {} func (x *RestoreKeyPairViaMnemonicResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RestoreKeyPairViaMnemonicResponse.ProtoReflect.Descriptor instead. 
func (*RestoreKeyPairViaMnemonicResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{31} } type FileEventResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Type EventType `protobuf:"varint,1,opt,name=type,proto3,enum=space.EventType" json:"type,omitempty"` Entry *ListDirectoryEntry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` Bucket string `protobuf:"bytes,3,opt,name=bucket,proto3" json:"bucket,omitempty"` DbId string `protobuf:"bytes,4,opt,name=dbId,proto3" json:"dbId,omitempty"` } func (x *FileEventResponse) Reset() { *x = FileEventResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FileEventResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*FileEventResponse) ProtoMessage() {} func (x *FileEventResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FileEventResponse.ProtoReflect.Descriptor instead. 
func (*FileEventResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{32} } func (x *FileEventResponse) GetType() EventType { if x != nil { return x.Type } return EventType_ENTRY_ADDED } func (x *FileEventResponse) GetEntry() *ListDirectoryEntry { if x != nil { return x.Entry } return nil } func (x *FileEventResponse) GetBucket() string { if x != nil { return x.Bucket } return "" } func (x *FileEventResponse) GetDbId() string { if x != nil { return x.DbId } return "" } type TextileEventResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` } func (x *TextileEventResponse) Reset() { *x = TextileEventResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *TextileEventResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*TextileEventResponse) ProtoMessage() {} func (x *TextileEventResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use TextileEventResponse.ProtoReflect.Descriptor instead. 
func (*TextileEventResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{33} } func (x *TextileEventResponse) GetBucket() string { if x != nil { return x.Bucket } return "" } type OpenFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` DbId string `protobuf:"bytes,3,opt,name=dbId,proto3" json:"dbId,omitempty"` // optional field to specify shared with me file } func (x *OpenFileRequest) Reset() { *x = OpenFileRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *OpenFileRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*OpenFileRequest) ProtoMessage() {} func (x *OpenFileRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use OpenFileRequest.ProtoReflect.Descriptor instead. 
func (*OpenFileRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{34} } func (x *OpenFileRequest) GetPath() string { if x != nil { return x.Path } return "" } func (x *OpenFileRequest) GetBucket() string { if x != nil { return x.Bucket } return "" } func (x *OpenFileRequest) GetDbId() string { if x != nil { return x.DbId } return "" } type OpenFileResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` } func (x *OpenFileResponse) Reset() { *x = OpenFileResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *OpenFileResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*OpenFileResponse) ProtoMessage() {} func (x *OpenFileResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use OpenFileResponse.ProtoReflect.Descriptor instead. 
func (*OpenFileResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{35} } func (x *OpenFileResponse) GetLocation() string { if x != nil { return x.Location } return "" } type OpenPublicFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields FileCid string `protobuf:"bytes,1,opt,name=fileCid,proto3" json:"fileCid,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` } func (x *OpenPublicFileRequest) Reset() { *x = OpenPublicFileRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *OpenPublicFileRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*OpenPublicFileRequest) ProtoMessage() {} func (x *OpenPublicFileRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use OpenPublicFileRequest.ProtoReflect.Descriptor instead. 
func (*OpenPublicFileRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{36}
}

// GetFileCid returns the FileCid field, or "" if the receiver is nil.
func (x *OpenPublicFileRequest) GetFileCid() string {
	if x != nil {
		return x.FileCid
	}
	return ""
}

// GetPassword returns the Password field, or "" if the receiver is nil.
func (x *OpenPublicFileRequest) GetPassword() string {
	if x != nil {
		return x.Password
	}
	return ""
}

// GetFilename returns the Filename field, or "" if the receiver is nil.
func (x *OpenPublicFileRequest) GetFilename() string {
	if x != nil {
		return x.Filename
	}
	return ""
}

// OpenPublicFileResponse carries a single Location string.
type OpenPublicFileResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
}

func (x *OpenPublicFileResponse) Reset() {
	*x = OpenPublicFileResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[37]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *OpenPublicFileResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*OpenPublicFileResponse) ProtoMessage() {}

func (x *OpenPublicFileResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[37]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use OpenPublicFileResponse.ProtoReflect.Descriptor instead.
func (*OpenPublicFileResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{37}
}

// GetLocation returns the Location field, or "" if the receiver is nil.
func (x *OpenPublicFileResponse) GetLocation() string {
	if x != nil {
		return x.Location
	}
	return ""
}

// AddItemsRequest names source paths on the local filesystem and a
// target path and bucket to add them to.
type AddItemsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// full paths to file or Folder on FS. Needs to be a location available to the daemon
	SourcePaths []string `protobuf:"bytes,1,rep,name=sourcePaths,proto3" json:"sourcePaths,omitempty"`
	// target path in bucket.
	TargetPath string `protobuf:"bytes,2,opt,name=targetPath,proto3" json:"targetPath,omitempty"`
	// The bucket in which to save the item
	Bucket string `protobuf:"bytes,3,opt,name=bucket,proto3" json:"bucket,omitempty"`
}

func (x *AddItemsRequest) Reset() {
	*x = AddItemsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[38]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *AddItemsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AddItemsRequest) ProtoMessage() {}

func (x *AddItemsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[38]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AddItemsRequest.ProtoReflect.Descriptor instead.
func (*AddItemsRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{38}
}

// GetSourcePaths returns the SourcePaths field, or nil if the receiver is nil.
func (x *AddItemsRequest) GetSourcePaths() []string {
	if x != nil {
		return x.SourcePaths
	}
	return nil
}

// GetTargetPath returns the TargetPath field, or "" if the receiver is nil.
func (x *AddItemsRequest) GetTargetPath() string {
	if x != nil {
		return x.TargetPath
	}
	return ""
}

// GetBucket returns the Bucket field, or "" if the receiver is nil.
func (x *AddItemsRequest) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

// AddItemResult reports the outcome for one added item: its source path,
// its resulting bucket path, and an error string (empty on success —
// TODO(review): confirm against the producer).
type AddItemResult struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	SourcePath string `protobuf:"bytes,1,opt,name=sourcePath,proto3" json:"sourcePath,omitempty"`
	BucketPath string `protobuf:"bytes,2,opt,name=bucketPath,proto3" json:"bucketPath,omitempty"`
	Error      string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
}

func (x *AddItemResult) Reset() {
	*x = AddItemResult{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[39]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *AddItemResult) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AddItemResult) ProtoMessage() {}

func (x *AddItemResult) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[39]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AddItemResult.ProtoReflect.Descriptor instead.
func (*AddItemResult) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{39}
}

// GetSourcePath returns the SourcePath field, or "" if the receiver is nil.
func (x *AddItemResult) GetSourcePath() string {
	if x != nil {
		return x.SourcePath
	}
	return ""
}

// GetBucketPath returns the BucketPath field, or "" if the receiver is nil.
func (x *AddItemResult) GetBucketPath() string {
	if x != nil {
		return x.BucketPath
	}
	return ""
}

// GetError returns the Error field, or "" if the receiver is nil.
func (x *AddItemResult) GetError() string {
	if x != nil {
		return x.Error
	}
	return ""
}

// AddItemsResponse pairs one AddItemResult with running totals
// (files/bytes discovered and completed).
type AddItemsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Result         *AddItemResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"`
	TotalFiles     int64          `protobuf:"varint,2,opt,name=totalFiles,proto3" json:"totalFiles,omitempty"`
	TotalBytes     int64          `protobuf:"varint,3,opt,name=totalBytes,proto3" json:"totalBytes,omitempty"`
	CompletedFiles int64          `protobuf:"varint,4,opt,name=completedFiles,proto3" json:"completedFiles,omitempty"`
	CompletedBytes int64          `protobuf:"varint,5,opt,name=completedBytes,proto3" json:"completedBytes,omitempty"`
}

func (x *AddItemsResponse) Reset() {
	*x = AddItemsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[40]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *AddItemsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AddItemsResponse) ProtoMessage() {}

func (x *AddItemsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[40]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AddItemsResponse.ProtoReflect.Descriptor instead.
func (*AddItemsResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{40}
}

// GetResult returns the Result field, or nil if the receiver is nil.
func (x *AddItemsResponse) GetResult() *AddItemResult {
	if x != nil {
		return x.Result
	}
	return nil
}

// GetTotalFiles returns the TotalFiles field, or 0 if the receiver is nil.
func (x *AddItemsResponse) GetTotalFiles() int64 {
	if x != nil {
		return x.TotalFiles
	}
	return 0
}

// GetTotalBytes returns the TotalBytes field, or 0 if the receiver is nil.
func (x *AddItemsResponse) GetTotalBytes() int64 {
	if x != nil {
		return x.TotalBytes
	}
	return 0
}

// GetCompletedFiles returns the CompletedFiles field, or 0 if the receiver is nil.
func (x *AddItemsResponse) GetCompletedFiles() int64 {
	if x != nil {
		return x.CompletedFiles
	}
	return 0
}

// GetCompletedBytes returns the CompletedBytes field, or 0 if the receiver is nil.
func (x *AddItemsResponse) GetCompletedBytes() int64 {
	if x != nil {
		return x.CompletedBytes
	}
	return 0
}

// CreateFolderRequest names a Path and the Bucket in which to create a folder.
type CreateFolderRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// target path in bucket to add new empty folder
	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	// The bucket in which to add the folder
	Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
}

func (x *CreateFolderRequest) Reset() {
	*x = CreateFolderRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[41]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CreateFolderRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CreateFolderRequest) ProtoMessage() {}

func (x *CreateFolderRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[41]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateFolderRequest.ProtoReflect.Descriptor instead.
func (*CreateFolderRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{41}
}

// GetPath returns the Path field, or "" if the receiver is nil.
func (x *CreateFolderRequest) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// GetBucket returns the Bucket field, or "" if the receiver is nil.
func (x *CreateFolderRequest) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

// not sure we need to return anything other than an error if we failed
// CreateFolderResponse has no fields beyond the standard message state.
type CreateFolderResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *CreateFolderResponse) Reset() {
	*x = CreateFolderResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[42]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CreateFolderResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CreateFolderResponse) ProtoMessage() {}

func (x *CreateFolderResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[42]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateFolderResponse.ProtoReflect.Descriptor instead.
func (*CreateFolderResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{42}
}

// BackupKeysByPassphraseRequest carries a Uuid, a Passphrase, and the
// KeyBackupType to use.
type BackupKeysByPassphraseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Uuid       string        `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
	Passphrase string        `protobuf:"bytes,2,opt,name=passphrase,proto3" json:"passphrase,omitempty"`
	Type       KeyBackupType `protobuf:"varint,3,opt,name=type,proto3,enum=space.KeyBackupType" json:"type,omitempty"`
}

func (x *BackupKeysByPassphraseRequest) Reset() {
	*x = BackupKeysByPassphraseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[43]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BackupKeysByPassphraseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BackupKeysByPassphraseRequest) ProtoMessage() {}

func (x *BackupKeysByPassphraseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[43]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BackupKeysByPassphraseRequest.ProtoReflect.Descriptor instead.
func (*BackupKeysByPassphraseRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{43}
}

// GetUuid returns the Uuid field, or "" if the receiver is nil.
func (x *BackupKeysByPassphraseRequest) GetUuid() string {
	if x != nil {
		return x.Uuid
	}
	return ""
}

// GetPassphrase returns the Passphrase field, or "" if the receiver is nil.
func (x *BackupKeysByPassphraseRequest) GetPassphrase() string {
	if x != nil {
		return x.Passphrase
	}
	return ""
}

// GetType returns the Type field; the enum zero value
// (KeyBackupType_PASSWORD) is returned if the receiver is nil.
func (x *BackupKeysByPassphraseRequest) GetType() KeyBackupType {
	if x != nil {
		return x.Type
	}
	return KeyBackupType_PASSWORD
}

// BackupKeysByPassphraseResponse has no fields beyond the standard message state.
type BackupKeysByPassphraseResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *BackupKeysByPassphraseResponse) Reset() {
	*x = BackupKeysByPassphraseResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[44]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BackupKeysByPassphraseResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BackupKeysByPassphraseResponse) ProtoMessage() {}

func (x *BackupKeysByPassphraseResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[44]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BackupKeysByPassphraseResponse.ProtoReflect.Descriptor instead.
func (*BackupKeysByPassphraseResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{44}
}

// RecoverKeysByPassphraseRequest mirrors BackupKeysByPassphraseRequest:
// a Uuid, a Passphrase, and the KeyBackupType.
type RecoverKeysByPassphraseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Uuid       string        `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
	Passphrase string        `protobuf:"bytes,2,opt,name=passphrase,proto3" json:"passphrase,omitempty"`
	Type       KeyBackupType `protobuf:"varint,3,opt,name=type,proto3,enum=space.KeyBackupType" json:"type,omitempty"`
}

func (x *RecoverKeysByPassphraseRequest) Reset() {
	*x = RecoverKeysByPassphraseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[45]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RecoverKeysByPassphraseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RecoverKeysByPassphraseRequest) ProtoMessage() {}

func (x *RecoverKeysByPassphraseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[45]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RecoverKeysByPassphraseRequest.ProtoReflect.Descriptor instead.
func (*RecoverKeysByPassphraseRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{45}
}

// GetUuid returns the Uuid field, or "" if the receiver is nil.
func (x *RecoverKeysByPassphraseRequest) GetUuid() string {
	if x != nil {
		return x.Uuid
	}
	return ""
}

// GetPassphrase returns the Passphrase field, or "" if the receiver is nil.
func (x *RecoverKeysByPassphraseRequest) GetPassphrase() string {
	if x != nil {
		return x.Passphrase
	}
	return ""
}

// GetType returns the Type field; the enum zero value
// (KeyBackupType_PASSWORD) is returned if the receiver is nil.
func (x *RecoverKeysByPassphraseRequest) GetType() KeyBackupType {
	if x != nil {
		return x.Type
	}
	return KeyBackupType_PASSWORD
}

// RecoverKeysByPassphraseResponse has no fields beyond the standard message state.
type RecoverKeysByPassphraseResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *RecoverKeysByPassphraseResponse) Reset() {
	*x = RecoverKeysByPassphraseResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[46]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RecoverKeysByPassphraseResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RecoverKeysByPassphraseResponse) ProtoMessage() {}

func (x *RecoverKeysByPassphraseResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[46]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RecoverKeysByPassphraseResponse.ProtoReflect.Descriptor instead.
func (*RecoverKeysByPassphraseResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{46}
}

// TestKeysPassphraseRequest carries a Uuid and a Passphrase.
type TestKeysPassphraseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Uuid       string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
	Passphrase string `protobuf:"bytes,2,opt,name=passphrase,proto3" json:"passphrase,omitempty"`
}

func (x *TestKeysPassphraseRequest) Reset() {
	*x = TestKeysPassphraseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[47]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TestKeysPassphraseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TestKeysPassphraseRequest) ProtoMessage() {}

func (x *TestKeysPassphraseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[47]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TestKeysPassphraseRequest.ProtoReflect.Descriptor instead.
func (*TestKeysPassphraseRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{47}
}

// GetUuid returns the Uuid field, or "" if the receiver is nil.
func (x *TestKeysPassphraseRequest) GetUuid() string {
	if x != nil {
		return x.Uuid
	}
	return ""
}

// GetPassphrase returns the Passphrase field, or "" if the receiver is nil.
func (x *TestKeysPassphraseRequest) GetPassphrase() string {
	if x != nil {
		return x.Passphrase
	}
	return ""
}

// TestKeysPassphraseResponse has no fields beyond the standard message state.
type TestKeysPassphraseResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *TestKeysPassphraseResponse) Reset() {
	*x = TestKeysPassphraseResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[48]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TestKeysPassphraseResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TestKeysPassphraseResponse) ProtoMessage() {}

func (x *TestKeysPassphraseResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[48]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TestKeysPassphraseResponse.ProtoReflect.Descriptor instead.
func (*TestKeysPassphraseResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{48}
}

// ThreadInfo carries a list of Addresses and a Key.
type ThreadInfo struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"`
	Key       string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
}

func (x *ThreadInfo) Reset() {
	*x = ThreadInfo{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[49]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ThreadInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ThreadInfo) ProtoMessage() {}

func (x *ThreadInfo) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[49]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ThreadInfo.ProtoReflect.Descriptor instead.
func (*ThreadInfo) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{49}
}

// GetAddresses returns the Addresses field, or nil if the receiver is nil.
func (x *ThreadInfo) GetAddresses() []string {
	if x != nil {
		return x.Addresses
	}
	return nil
}

// GetKey returns the Key field, or "" if the receiver is nil.
func (x *ThreadInfo) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// ShareBucketRequest names the Bucket to share.
type ShareBucketRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
}

func (x *ShareBucketRequest) Reset() {
	*x = ShareBucketRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[50]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ShareBucketRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ShareBucketRequest) ProtoMessage() {}

func (x *ShareBucketRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[50]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ShareBucketRequest.ProtoReflect.Descriptor instead.
func (*ShareBucketRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{50}
}

// GetBucket returns the Bucket field, or "" if the receiver is nil.
func (x *ShareBucketRequest) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

// ShareBucketResponse carries the ThreadInfo for the shared bucket.
type ShareBucketResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Threadinfo *ThreadInfo `protobuf:"bytes,1,opt,name=threadinfo,proto3" json:"threadinfo,omitempty"`
}

func (x *ShareBucketResponse) Reset() {
	*x = ShareBucketResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[51]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ShareBucketResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ShareBucketResponse) ProtoMessage() {}

func (x *ShareBucketResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[51]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ShareBucketResponse.ProtoReflect.Descriptor instead.
func (*ShareBucketResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{51}
}

// GetThreadinfo returns the Threadinfo field, or nil if the receiver is nil.
func (x *ShareBucketResponse) GetThreadinfo() *ThreadInfo {
	if x != nil {
		return x.Threadinfo
	}
	return nil
}

// JoinBucketRequest carries the ThreadInfo and Bucket to join.
type JoinBucketRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Threadinfo *ThreadInfo `protobuf:"bytes,1,opt,name=threadinfo,proto3" json:"threadinfo,omitempty"`
	Bucket     string      `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
}

func (x *JoinBucketRequest) Reset() {
	*x = JoinBucketRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[52]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *JoinBucketRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*JoinBucketRequest) ProtoMessage() {}

func (x *JoinBucketRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[52]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use JoinBucketRequest.ProtoReflect.Descriptor instead.
func (*JoinBucketRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{52}
}

// GetThreadinfo returns the Threadinfo field, or nil if the receiver is nil.
func (x *JoinBucketRequest) GetThreadinfo() *ThreadInfo {
	if x != nil {
		return x.Threadinfo
	}
	return nil
}

// GetBucket returns the Bucket field, or "" if the receiver is nil.
func (x *JoinBucketRequest) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

// JoinBucketResponse carries a boolean Result.
type JoinBucketResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"`
}

func (x *JoinBucketResponse) Reset() {
	*x = JoinBucketResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[53]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *JoinBucketResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*JoinBucketResponse) ProtoMessage() {}

func (x *JoinBucketResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[53]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use JoinBucketResponse.ProtoReflect.Descriptor instead.
func (*JoinBucketResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{53}
}

// GetResult returns the Result field, or false if the receiver is nil.
func (x *JoinBucketResponse) GetResult() bool {
	if x != nil {
		return x.Result
	}
	return false
}

// ShareFilesViaPublicKeyRequest pairs recipient PublicKeys with the
// FullPath entries to share.
type ShareFilesViaPublicKeyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	PublicKeys []string    `protobuf:"bytes,1,rep,name=publicKeys,proto3" json:"publicKeys,omitempty"`
	Paths      []*FullPath `protobuf:"bytes,2,rep,name=paths,proto3" json:"paths,omitempty"`
}

func (x *ShareFilesViaPublicKeyRequest) Reset() {
	*x = ShareFilesViaPublicKeyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[54]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ShareFilesViaPublicKeyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ShareFilesViaPublicKeyRequest) ProtoMessage() {}

func (x *ShareFilesViaPublicKeyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[54]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ShareFilesViaPublicKeyRequest.ProtoReflect.Descriptor instead.
func (*ShareFilesViaPublicKeyRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{54}
}

// GetPublicKeys returns the PublicKeys field, or nil if the receiver is nil.
func (x *ShareFilesViaPublicKeyRequest) GetPublicKeys() []string {
	if x != nil {
		return x.PublicKeys
	}
	return nil
}

// GetPaths returns the Paths field, or nil if the receiver is nil.
func (x *ShareFilesViaPublicKeyRequest) GetPaths() []*FullPath {
	if x != nil {
		return x.Paths
	}
	return nil
}

// FullPath addresses one item by DbId, Bucket and Path.
type FullPath struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	DbId string `protobuf:"bytes,1,opt,name=dbId,proto3" json:"dbId,omitempty"`
	// optional field to specify shared with me file
	Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
	Path   string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
}

func (x *FullPath) Reset() {
	*x = FullPath{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[55]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FullPath) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FullPath) ProtoMessage() {}

func (x *FullPath) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[55]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FullPath.ProtoReflect.Descriptor instead.
func (*FullPath) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{55}
}

// GetDbId returns the DbId field, or "" if the receiver is nil.
func (x *FullPath) GetDbId() string {
	if x != nil {
		return x.DbId
	}
	return ""
}

// GetBucket returns the Bucket field, or "" if the receiver is nil.
func (x *FullPath) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

// GetPath returns the Path field, or "" if the receiver is nil.
func (x *FullPath) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// ShareFilesViaPublicKeyResponse has no fields beyond the standard message state.
type ShareFilesViaPublicKeyResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *ShareFilesViaPublicKeyResponse) Reset() {
	*x = ShareFilesViaPublicKeyResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[56]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ShareFilesViaPublicKeyResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ShareFilesViaPublicKeyResponse) ProtoMessage() {}

func (x *ShareFilesViaPublicKeyResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[56]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ShareFilesViaPublicKeyResponse.ProtoReflect.Descriptor instead.
func (*ShareFilesViaPublicKeyResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{56}
}

// UnshareFilesViaPublicKeyRequest pairs recipient PublicKeys with the
// FullPath entries to unshare.
type UnshareFilesViaPublicKeyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	PublicKeys []string    `protobuf:"bytes,1,rep,name=publicKeys,proto3" json:"publicKeys,omitempty"`
	Paths      []*FullPath `protobuf:"bytes,2,rep,name=paths,proto3" json:"paths,omitempty"`
}

func (x *UnshareFilesViaPublicKeyRequest) Reset() {
	*x = UnshareFilesViaPublicKeyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[57]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UnshareFilesViaPublicKeyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UnshareFilesViaPublicKeyRequest) ProtoMessage() {}

func (x *UnshareFilesViaPublicKeyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[57]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnshareFilesViaPublicKeyRequest.ProtoReflect.Descriptor instead.
func (*UnshareFilesViaPublicKeyRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{57}
}

// GetPublicKeys returns the PublicKeys field, or nil if the receiver is nil.
func (x *UnshareFilesViaPublicKeyRequest) GetPublicKeys() []string {
	if x != nil {
		return x.PublicKeys
	}
	return nil
}

// GetPaths returns the Paths field, or nil if the receiver is nil.
func (x *UnshareFilesViaPublicKeyRequest) GetPaths() []*FullPath {
	if x != nil {
		return x.Paths
	}
	return nil
}

// UnshareFilesViaPublicKeyResponse has no fields beyond the standard message state.
type UnshareFilesViaPublicKeyResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *UnshareFilesViaPublicKeyResponse) Reset() {
	*x = UnshareFilesViaPublicKeyResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[58]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UnshareFilesViaPublicKeyResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UnshareFilesViaPublicKeyResponse) ProtoMessage() {}

func (x *UnshareFilesViaPublicKeyResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[58]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnshareFilesViaPublicKeyResponse.ProtoReflect.Descriptor instead.
func (*UnshareFilesViaPublicKeyResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{58}
}

// GeneratePublicFileLinkRequest carries the Bucket, ItemPaths and
// Password for which to generate a public link, plus an optional DbId.
type GeneratePublicFileLinkRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Bucket    string   `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
	ItemPaths []string `protobuf:"bytes,2,rep,name=itemPaths,proto3" json:"itemPaths,omitempty"`
	Password  string   `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"`
	// optional field to specify db id
	// for shared with me files
	DbId string `protobuf:"bytes,4,opt,name=dbId,proto3" json:"dbId,omitempty"`
}

func (x *GeneratePublicFileLinkRequest) Reset() {
	*x = GeneratePublicFileLinkRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[59]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GeneratePublicFileLinkRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GeneratePublicFileLinkRequest) ProtoMessage() {}

func (x *GeneratePublicFileLinkRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[59]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GeneratePublicFileLinkRequest.ProtoReflect.Descriptor instead.
func (*GeneratePublicFileLinkRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{59}
}

// GetBucket returns the Bucket field, or "" if the receiver is nil.
func (x *GeneratePublicFileLinkRequest) GetBucket() string {
	if x != nil {
		return x.Bucket
	}
	return ""
}

// GetItemPaths returns the ItemPaths field, or nil if the receiver is nil.
func (x *GeneratePublicFileLinkRequest) GetItemPaths() []string {
	if x != nil {
		return x.ItemPaths
	}
	return nil
}

// GetPassword returns the Password field, or "" if the receiver is nil.
func (x *GeneratePublicFileLinkRequest) GetPassword() string {
	if x != nil {
		return x.Password
	}
	return ""
}

// GetDbId returns the DbId field, or "" if the receiver is nil.
func (x *GeneratePublicFileLinkRequest) GetDbId() string {
	if x != nil {
		return x.DbId
	}
	return ""
}

// GeneratePublicFileLinkResponse carries the generated Link and the FileCid.
type GeneratePublicFileLinkResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Link    string `protobuf:"bytes,1,opt,name=link,proto3" json:"link,omitempty"`
	FileCid string `protobuf:"bytes,2,opt,name=fileCid,proto3" json:"fileCid,omitempty"`
}

func (x *GeneratePublicFileLinkResponse) Reset() {
	*x = GeneratePublicFileLinkResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[60]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GeneratePublicFileLinkResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GeneratePublicFileLinkResponse) ProtoMessage() {}

func (x *GeneratePublicFileLinkResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[60]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GeneratePublicFileLinkResponse.ProtoReflect.Descriptor instead.
func (*GeneratePublicFileLinkResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{60}
}

// GetLink returns the Link field, or "" if the receiver is nil.
func (x *GeneratePublicFileLinkResponse) GetLink() string {
	if x != nil {
		return x.Link
	}
	return ""
}

// GetFileCid returns the FileCid field, or "" if the receiver is nil.
func (x *GeneratePublicFileLinkResponse) GetFileCid() string {
	if x != nil {
		return x.FileCid
	}
	return ""
}

// ToggleFuseRequest carries the desired MountDrive flag.
type ToggleFuseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	MountDrive bool `protobuf:"varint,1,opt,name=mountDrive,proto3" json:"mountDrive,omitempty"`
}

func (x *ToggleFuseRequest) Reset() {
	*x = ToggleFuseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[61]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ToggleFuseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ToggleFuseRequest) ProtoMessage() {}

func (x *ToggleFuseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[61]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToggleFuseRequest.ProtoReflect.Descriptor instead.
func (*ToggleFuseRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{61}
}

// GetMountDrive returns the MountDrive field, or false if the receiver is nil.
func (x *ToggleFuseRequest) GetMountDrive() bool {
	if x != nil {
		return x.MountDrive
	}
	return false
}

// FuseDriveResponse carries the FuseState and the MountPath of the drive.
type FuseDriveResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	State     FuseState `protobuf:"varint,1,opt,name=state,proto3,enum=space.FuseState" json:"state,omitempty"`
	MountPath string    `protobuf:"bytes,2,opt,name=mountPath,proto3" json:"mountPath,omitempty"`
}

func (x *FuseDriveResponse) Reset() {
	*x = FuseDriveResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[62]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FuseDriveResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FuseDriveResponse) ProtoMessage() {}

func (x *FuseDriveResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[62]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FuseDriveResponse.ProtoReflect.Descriptor instead.
func (*FuseDriveResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{62} } func (x *FuseDriveResponse) GetState() FuseState { if x != nil { return x.State } return FuseState_UNSUPPORTED } func (x *FuseDriveResponse) GetMountPath() string { if x != nil { return x.MountPath } return "" } type ListBucketsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *ListBucketsRequest) Reset() { *x = ListBucketsRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListBucketsRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListBucketsRequest) ProtoMessage() {} func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. 
func (*ListBucketsRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{63}
}

// ListBucketsResponse lists the buckets known to the daemon.
type ListBucketsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"`
}

func (x *ListBucketsResponse) Reset() {
	*x = ListBucketsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[64]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ListBucketsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListBucketsResponse) ProtoMessage() {}

func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[64]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead.
func (*ListBucketsResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{64}
}

func (x *ListBucketsResponse) GetBuckets() []*Bucket {
	if x != nil {
		return x.Buckets
	}
	return nil
}

// Invitation describes a sharing invitation and its current status.
type Invitation struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	InviterPublicKey string           `protobuf:"bytes,1,opt,name=inviterPublicKey,proto3" json:"inviterPublicKey,omitempty"`
	InvitationID     string           `protobuf:"bytes,2,opt,name=invitationID,proto3" json:"invitationID,omitempty"`
	Status           InvitationStatus `protobuf:"varint,4,opt,name=status,proto3,enum=space.InvitationStatus" json:"status,omitempty"`
	ItemPaths        []*FullPath      `protobuf:"bytes,5,rep,name=itemPaths,proto3" json:"itemPaths,omitempty"`
}

func (x *Invitation) Reset() {
	*x = Invitation{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[65]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Invitation) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Invitation) ProtoMessage() {}

func (x *Invitation) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[65]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Invitation.ProtoReflect.Descriptor instead.
func (*Invitation) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{65}
}

func (x *Invitation) GetInviterPublicKey() string {
	if x != nil {
		return x.InviterPublicKey
	}
	return ""
}

func (x *Invitation) GetInvitationID() string {
	if x != nil {
		return x.InvitationID
	}
	return ""
}

func (x *Invitation) GetStatus() InvitationStatus {
	if x != nil {
		return x.Status
	}
	return InvitationStatus_PENDING
}

func (x *Invitation) GetItemPaths() []*FullPath {
	if x != nil {
		return x.ItemPaths
	}
	return nil
}

// UsageAlert notifies about storage usage relative to the account limit.
type UsageAlert struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Used    int64  `protobuf:"varint,1,opt,name=used,proto3" json:"used,omitempty"`
	Limit   int64  `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
	Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
}

func (x *UsageAlert) Reset() {
	*x = UsageAlert{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[66]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UsageAlert) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UsageAlert) ProtoMessage() {}

func (x *UsageAlert) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[66]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UsageAlert.ProtoReflect.Descriptor instead.
func (*UsageAlert) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{66}
}

func (x *UsageAlert) GetUsed() int64 {
	if x != nil {
		return x.Used
	}
	return 0
}

func (x *UsageAlert) GetLimit() int64 {
	if x != nil {
		return x.Limit
	}
	return 0
}

func (x *UsageAlert) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

// InvitationAccept identifies an invitation that was accepted.
// NOTE(review): field number is 2 (not 1) — presumably kept in sync with
// Invitation.InvitationID; confirm against the .proto before renumbering.
type InvitationAccept struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	InvitationID string `protobuf:"bytes,2,opt,name=invitationID,proto3" json:"invitationID,omitempty"`
}

func (x *InvitationAccept) Reset() {
	*x = InvitationAccept{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[67]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *InvitationAccept) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*InvitationAccept) ProtoMessage() {}

func (x *InvitationAccept) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[67]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use InvitationAccept.ProtoReflect.Descriptor instead.
func (*InvitationAccept) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{67}
}

func (x *InvitationAccept) GetInvitationID() string {
	if x != nil {
		return x.InvitationID
	}
	return ""
}

// RevokedInvitation describes an invitation that was revoked by the inviter.
type RevokedInvitation struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	InviterPublicKey string      `protobuf:"bytes,1,opt,name=inviterPublicKey,proto3" json:"inviterPublicKey,omitempty"`
	ItemPaths        []*FullPath `protobuf:"bytes,5,rep,name=itemPaths,proto3" json:"itemPaths,omitempty"`
}

func (x *RevokedInvitation) Reset() {
	*x = RevokedInvitation{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[68]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RevokedInvitation) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RevokedInvitation) ProtoMessage() {}

func (x *RevokedInvitation) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[68]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RevokedInvitation.ProtoReflect.Descriptor instead.
func (*RevokedInvitation) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{68}
}

func (x *RevokedInvitation) GetInviterPublicKey() string {
	if x != nil {
		return x.InviterPublicKey
	}
	return ""
}

func (x *RevokedInvitation) GetItemPaths() []*FullPath {
	if x != nil {
		return x.ItemPaths
	}
	return nil
}

// Notification is a user-facing notification; its payload is one of the
// relatedObject oneof variants below.
type Notification struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ID      string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` // underlying message id from textile
	Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
	Body    string `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
	// Types that are assignable to RelatedObject:
	//	*Notification_InvitationValue
	//	*Notification_UsageAlert
	//	*Notification_InvitationAccept
	//	*Notification_RevokedInvitation
	RelatedObject isNotification_RelatedObject `protobuf_oneof:"relatedObject"`
	Type          NotificationType             `protobuf:"varint,8,opt,name=type,proto3,enum=space.NotificationType" json:"type,omitempty"`
	CreatedAt     int64                        `protobuf:"varint,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"`
	ReadAt        int64                        `protobuf:"varint,10,opt,name=readAt,proto3" json:"readAt,omitempty"`
}

func (x *Notification) Reset() {
	*x = Notification{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[69]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Notification) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Notification) ProtoMessage() {}

func (x *Notification) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[69]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Notification.ProtoReflect.Descriptor instead.
func (*Notification) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{69}
}

func (x *Notification) GetID() string {
	if x != nil {
		return x.ID
	}
	return ""
}

func (x *Notification) GetSubject() string {
	if x != nil {
		return x.Subject
	}
	return ""
}

func (x *Notification) GetBody() string {
	if x != nil {
		return x.Body
	}
	return ""
}

func (m *Notification) GetRelatedObject() isNotification_RelatedObject {
	if m != nil {
		return m.RelatedObject
	}
	return nil
}

func (x *Notification) GetInvitationValue() *Invitation {
	if x, ok := x.GetRelatedObject().(*Notification_InvitationValue); ok {
		return x.InvitationValue
	}
	return nil
}

func (x *Notification) GetUsageAlert() *UsageAlert {
	if x, ok := x.GetRelatedObject().(*Notification_UsageAlert); ok {
		return x.UsageAlert
	}
	return nil
}

func (x *Notification) GetInvitationAccept() *InvitationAccept {
	if x, ok := x.GetRelatedObject().(*Notification_InvitationAccept); ok {
		return x.InvitationAccept
	}
	return nil
}

func (x *Notification) GetRevokedInvitation() *RevokedInvitation {
	if x, ok := x.GetRelatedObject().(*Notification_RevokedInvitation); ok {
		return x.RevokedInvitation
	}
	return nil
}

func (x *Notification) GetType() NotificationType {
	if x != nil {
		return x.Type
	}
	return NotificationType_UNKNOWN
}

func (x *Notification) GetCreatedAt() int64 {
	if x != nil {
		return x.CreatedAt
	}
	return 0
}

func (x *Notification) GetReadAt() int64 {
	if x != nil {
		return x.ReadAt
	}
	return 0
}

// isNotification_RelatedObject is the oneof marker interface for
// Notification.RelatedObject.
type isNotification_RelatedObject interface {
	isNotification_RelatedObject()
}

type Notification_InvitationValue struct {
	InvitationValue *Invitation `protobuf:"bytes,4,opt,name=invitationValue,proto3,oneof"`
}

type Notification_UsageAlert struct {
	UsageAlert *UsageAlert `protobuf:"bytes,5,opt,name=usageAlert,proto3,oneof"`
}

type Notification_InvitationAccept struct {
	InvitationAccept *InvitationAccept `protobuf:"bytes,6,opt,name=invitationAccept,proto3,oneof"`
}

type Notification_RevokedInvitation struct {
	RevokedInvitation *RevokedInvitation `protobuf:"bytes,7,opt,name=revokedInvitation,proto3,oneof"`
}

func (*Notification_InvitationValue) isNotification_RelatedObject() {}

func (*Notification_UsageAlert) isNotification_RelatedObject() {}

func (*Notification_InvitationAccept) isNotification_RelatedObject() {}

func (*Notification_RevokedInvitation) isNotification_RelatedObject() {}

// HandleFilesInvitationRequest accepts or rejects a files-sharing invitation.
type HandleFilesInvitationRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	InvitationID string `protobuf:"bytes,1,opt,name=invitationID,proto3" json:"invitationID,omitempty"`
	Accept       bool   `protobuf:"varint,2,opt,name=accept,proto3" json:"accept,omitempty"`
}

func (x *HandleFilesInvitationRequest) Reset() {
	*x = HandleFilesInvitationRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[70]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *HandleFilesInvitationRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HandleFilesInvitationRequest) ProtoMessage() {}

func (x *HandleFilesInvitationRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[70]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HandleFilesInvitationRequest.ProtoReflect.Descriptor instead.
func (*HandleFilesInvitationRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{70}
}

func (x *HandleFilesInvitationRequest) GetInvitationID() string {
	if x != nil {
		return x.InvitationID
	}
	return ""
}

func (x *HandleFilesInvitationRequest) GetAccept() bool {
	if x != nil {
		return x.Accept
	}
	return false
}

// HandleFilesInvitationResponse is the (empty) response message.
type HandleFilesInvitationResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *HandleFilesInvitationResponse) Reset() {
	*x = HandleFilesInvitationResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[71]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *HandleFilesInvitationResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HandleFilesInvitationResponse) ProtoMessage() {}

func (x *HandleFilesInvitationResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[71]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HandleFilesInvitationResponse.ProtoReflect.Descriptor instead.
func (*HandleFilesInvitationResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{71}
}

// NotificationEventResponse wraps a single Notification for event streams.
type NotificationEventResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Notification *Notification `protobuf:"bytes,1,opt,name=notification,proto3" json:"notification,omitempty"`
}

func (x *NotificationEventResponse) Reset() {
	*x = NotificationEventResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[72]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *NotificationEventResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NotificationEventResponse) ProtoMessage() {}

func (x *NotificationEventResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[72]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NotificationEventResponse.ProtoReflect.Descriptor instead.
func (*NotificationEventResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{72}
}

func (x *NotificationEventResponse) GetNotification() *Notification {
	if x != nil {
		return x.Notification
	}
	return nil
}

// GetNotificationsRequest is a paginated request for notifications
// (seek cursor plus page size).
type GetNotificationsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Seek  string `protobuf:"bytes,1,opt,name=seek,proto3" json:"seek,omitempty"`
	Limit int64  `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
}

func (x *GetNotificationsRequest) Reset() {
	*x = GetNotificationsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[73]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetNotificationsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetNotificationsRequest) ProtoMessage() {}

func (x *GetNotificationsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[73]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationsRequest.ProtoReflect.Descriptor instead.
func (*GetNotificationsRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{73}
}

func (x *GetNotificationsRequest) GetSeek() string {
	if x != nil {
		return x.Seek
	}
	return ""
}

func (x *GetNotificationsRequest) GetLimit() int64 {
	if x != nil {
		return x.Limit
	}
	return 0
}

// GetNotificationsResponse returns a page of notifications and the cursor for
// the next page.
type GetNotificationsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"`
	NextOffset    string          `protobuf:"bytes,2,opt,name=nextOffset,proto3" json:"nextOffset,omitempty"`
	LastSeenAt    int64           `protobuf:"varint,3,opt,name=lastSeenAt,proto3" json:"lastSeenAt,omitempty"`
}

func (x *GetNotificationsResponse) Reset() {
	*x = GetNotificationsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[74]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetNotificationsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetNotificationsResponse) ProtoMessage() {}

func (x *GetNotificationsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[74]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationsResponse.ProtoReflect.Descriptor instead.
func (*GetNotificationsResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{74}
}

func (x *GetNotificationsResponse) GetNotifications() []*Notification {
	if x != nil {
		return x.Notifications
	}
	return nil
}

func (x *GetNotificationsResponse) GetNextOffset() string {
	if x != nil {
		return x.NextOffset
	}
	return ""
}

func (x *GetNotificationsResponse) GetLastSeenAt() int64 {
	if x != nil {
		return x.LastSeenAt
	}
	return 0
}

// ReadNotificationRequest marks the notification with the given ID as read.
type ReadNotificationRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
}

func (x *ReadNotificationRequest) Reset() {
	*x = ReadNotificationRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[75]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ReadNotificationRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ReadNotificationRequest) ProtoMessage() {}

func (x *ReadNotificationRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[75]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReadNotificationRequest.ProtoReflect.Descriptor instead.
func (*ReadNotificationRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{75}
}

func (x *ReadNotificationRequest) GetID() string {
	if x != nil {
		return x.ID
	}
	return ""
}

// ReadNotificationResponse is the (empty) response message.
type ReadNotificationResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *ReadNotificationResponse) Reset() {
	*x = ReadNotificationResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[76]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ReadNotificationResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ReadNotificationResponse) ProtoMessage() {}

func (x *ReadNotificationResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[76]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReadNotificationResponse.ProtoReflect.Descriptor instead.
func (*ReadNotificationResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{76}
}

// GetPublicKeyRequest is the (empty) request for the account public key.
type GetPublicKeyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *GetPublicKeyRequest) Reset() {
	*x = GetPublicKeyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[77]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetPublicKeyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetPublicKeyRequest) ProtoMessage() {}

func (x *GetPublicKeyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[77]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetPublicKeyRequest.ProtoReflect.Descriptor instead.
func (*GetPublicKeyRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{77}
}

// GetPublicKeyResponse returns the account public key.
type GetPublicKeyResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Public key encoded in hex
	PublicKey string `protobuf:"bytes,1,opt,name=publicKey,proto3" json:"publicKey,omitempty"`
}

func (x *GetPublicKeyResponse) Reset() {
	*x = GetPublicKeyResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[78]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetPublicKeyResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetPublicKeyResponse) ProtoMessage() {}

func (x *GetPublicKeyResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[78]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetPublicKeyResponse.ProtoReflect.Descriptor instead.
func (*GetPublicKeyResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{78}
}

func (x *GetPublicKeyResponse) GetPublicKey() string {
	if x != nil {
		return x.PublicKey
	}
	return ""
}

// RecoverKeysByLocalBackupRequest restores keys from a local backup file.
type RecoverKeysByLocalBackupRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	PathToKeyBackup string `protobuf:"bytes,1,opt,name=pathToKeyBackup,proto3" json:"pathToKeyBackup,omitempty"`
}

func (x *RecoverKeysByLocalBackupRequest) Reset() {
	*x = RecoverKeysByLocalBackupRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[79]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RecoverKeysByLocalBackupRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RecoverKeysByLocalBackupRequest) ProtoMessage() {}

func (x *RecoverKeysByLocalBackupRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[79]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RecoverKeysByLocalBackupRequest.ProtoReflect.Descriptor instead.
func (*RecoverKeysByLocalBackupRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{79}
}

func (x *RecoverKeysByLocalBackupRequest) GetPathToKeyBackup() string {
	if x != nil {
		return x.PathToKeyBackup
	}
	return ""
}

// RecoverKeysByLocalBackupResponse is the (empty) response message.
type RecoverKeysByLocalBackupResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *RecoverKeysByLocalBackupResponse) Reset() {
	*x = RecoverKeysByLocalBackupResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[80]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RecoverKeysByLocalBackupResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RecoverKeysByLocalBackupResponse) ProtoMessage() {}

func (x *RecoverKeysByLocalBackupResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[80]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RecoverKeysByLocalBackupResponse.ProtoReflect.Descriptor instead.
func (*RecoverKeysByLocalBackupResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{80}
}

// CreateLocalKeysBackupRequest asks the daemon to back up keys locally.
type CreateLocalKeysBackupRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The path in which to save the backup
	PathToKeyBackup string `protobuf:"bytes,1,opt,name=pathToKeyBackup,proto3" json:"pathToKeyBackup,omitempty"`
}

func (x *CreateLocalKeysBackupRequest) Reset() {
	*x = CreateLocalKeysBackupRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[81]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CreateLocalKeysBackupRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CreateLocalKeysBackupRequest) ProtoMessage() {}

func (x *CreateLocalKeysBackupRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[81]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateLocalKeysBackupRequest.ProtoReflect.Descriptor instead.
func (*CreateLocalKeysBackupRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{81}
}

func (x *CreateLocalKeysBackupRequest) GetPathToKeyBackup() string {
	if x != nil {
		return x.PathToKeyBackup
	}
	return ""
}

// CreateLocalKeysBackupResponse is the (empty) response message.
type CreateLocalKeysBackupResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *CreateLocalKeysBackupResponse) Reset() {
	*x = CreateLocalKeysBackupResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[82]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CreateLocalKeysBackupResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CreateLocalKeysBackupResponse) ProtoMessage() {}

func (x *CreateLocalKeysBackupResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[82]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateLocalKeysBackupResponse.ProtoReflect.Descriptor instead.
func (*CreateLocalKeysBackupResponse) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{82}
}

// DeleteAccountRequest is the (empty) request to delete the account.
type DeleteAccountRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *DeleteAccountRequest) Reset() {
	*x = DeleteAccountRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[83]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *DeleteAccountRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DeleteAccountRequest) ProtoMessage() {}

func (x *DeleteAccountRequest) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[83]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DeleteAccountRequest.ProtoReflect.Descriptor instead.
func (*DeleteAccountRequest) Descriptor() ([]byte, []int) {
	return file_space_proto_rawDescGZIP(), []int{83}
}

// DeleteAccountResponse is the (empty) response message.
type DeleteAccountResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *DeleteAccountResponse) Reset() {
	*x = DeleteAccountResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_space_proto_msgTypes[84]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *DeleteAccountResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DeleteAccountResponse) ProtoMessage() {}

func (x *DeleteAccountResponse) ProtoReflect() protoreflect.Message {
	mi := &file_space_proto_msgTypes[84]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DeleteAccountResponse.ProtoReflect.Descriptor instead.
func (*DeleteAccountResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{84} } type DeleteKeyPairRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *DeleteKeyPairRequest) Reset() { *x = DeleteKeyPairRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteKeyPairRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteKeyPairRequest) ProtoMessage() {} func (x *DeleteKeyPairRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteKeyPairRequest.ProtoReflect.Descriptor instead. func (*DeleteKeyPairRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{85} } type DeleteKeyPairResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *DeleteKeyPairResponse) Reset() { *x = DeleteKeyPairResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteKeyPairResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteKeyPairResponse) ProtoMessage() {} func (x *DeleteKeyPairResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteKeyPairResponse.ProtoReflect.Descriptor instead. 
// NOTE(review): protoc-gen-go generated boilerplate; regenerate from space.proto
// rather than hand-editing. Comments added for navigation only.
// DeleteKeyPairResponse Descriptor (rawDesc index 86).
// GetAPISessionTokensRequest: empty message wired to msgTypes[87].
func (*DeleteKeyPairResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{86} } type GetAPISessionTokensRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *GetAPISessionTokensRequest) Reset() { *x = GetAPISessionTokensRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetAPISessionTokensRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetAPISessionTokensRequest) ProtoMessage() {} func (x *GetAPISessionTokensRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetAPISessionTokensRequest.ProtoReflect.Descriptor instead.
// GetAPISessionTokensRequest Descriptor (index 87). GetAPISessionTokensResponse:
// two string fields (hubToken = field 1, servicesToken = field 2), msgTypes[88].
func (*GetAPISessionTokensRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{87} } type GetAPISessionTokensResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields HubToken string `protobuf:"bytes,1,opt,name=hubToken,proto3" json:"hubToken,omitempty"` ServicesToken string `protobuf:"bytes,2,opt,name=servicesToken,proto3" json:"servicesToken,omitempty"` } func (x *GetAPISessionTokensResponse) Reset() { *x = GetAPISessionTokensResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetAPISessionTokensResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetAPISessionTokensResponse) ProtoMessage() {} func (x *GetAPISessionTokensResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetAPISessionTokensResponse.ProtoReflect.Descriptor instead.
// GetAPISessionTokensResponse Descriptor (index 88) plus nil-safe getters for
// HubToken/ServicesToken. GetRecentlySharedWithRequest: empty message, msgTypes[89].
func (*GetAPISessionTokensResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{88} } func (x *GetAPISessionTokensResponse) GetHubToken() string { if x != nil { return x.HubToken } return "" } func (x *GetAPISessionTokensResponse) GetServicesToken() string { if x != nil { return x.ServicesToken } return "" } type GetRecentlySharedWithRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *GetRecentlySharedWithRequest) Reset() { *x = GetRecentlySharedWithRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetRecentlySharedWithRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetRecentlySharedWithRequest) ProtoMessage() {} func (x *GetRecentlySharedWithRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetRecentlySharedWithRequest.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated boilerplate; regenerate from space.proto
// rather than hand-editing. Comments added for navigation only.
// GetRecentlySharedWithRequest Descriptor (rawDesc index 89).
// GetRecentlySharedWithResponse: repeated FileMember field (members = field 1),
// wired to msgTypes[90].
func (*GetRecentlySharedWithRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{89} } type GetRecentlySharedWithResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Members []*FileMember `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` } func (x *GetRecentlySharedWithResponse) Reset() { *x = GetRecentlySharedWithResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetRecentlySharedWithResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetRecentlySharedWithResponse) ProtoMessage() {} func (x *GetRecentlySharedWithResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetRecentlySharedWithResponse.ProtoReflect.Descriptor instead.
// GetRecentlySharedWithResponse Descriptor (index 90) plus nil-safe GetMembers.
// InitializeMasterAppTokenRequest: empty message wired to msgTypes[91].
func (*GetRecentlySharedWithResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{90} } func (x *GetRecentlySharedWithResponse) GetMembers() []*FileMember { if x != nil { return x.Members } return nil } type InitializeMasterAppTokenRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *InitializeMasterAppTokenRequest) Reset() { *x = InitializeMasterAppTokenRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *InitializeMasterAppTokenRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*InitializeMasterAppTokenRequest) ProtoMessage() {} func (x *InitializeMasterAppTokenRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use InitializeMasterAppTokenRequest.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated boilerplate; regenerate from space.proto
// rather than hand-editing. Comments added for navigation only.
// InitializeMasterAppTokenRequest Descriptor (rawDesc index 91).
// InitializeMasterAppTokenResponse: one string field (appToken = field 1),
// wired to msgTypes[92].
func (*InitializeMasterAppTokenRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{91} } type InitializeMasterAppTokenResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields AppToken string `protobuf:"bytes,1,opt,name=appToken,proto3" json:"appToken,omitempty"` } func (x *InitializeMasterAppTokenResponse) Reset() { *x = InitializeMasterAppTokenResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *InitializeMasterAppTokenResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*InitializeMasterAppTokenResponse) ProtoMessage() {} func (x *InitializeMasterAppTokenResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use InitializeMasterAppTokenResponse.ProtoReflect.Descriptor instead.
// InitializeMasterAppTokenResponse Descriptor (index 92) plus nil-safe GetAppToken.
// AllowedMethod: one string field (methodName = field 1), wired to msgTypes[93].
func (*InitializeMasterAppTokenResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{92} } func (x *InitializeMasterAppTokenResponse) GetAppToken() string { if x != nil { return x.AppToken } return "" } type AllowedMethod struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields MethodName string `protobuf:"bytes,1,opt,name=methodName,proto3" json:"methodName,omitempty"` } func (x *AllowedMethod) Reset() { *x = AllowedMethod{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *AllowedMethod) String() string { return protoimpl.X.MessageStringOf(x) } func (*AllowedMethod) ProtoMessage() {} func (x *AllowedMethod) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use AllowedMethod.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated boilerplate; regenerate from space.proto
// rather than hand-editing. Comments added for navigation only.
// AllowedMethod Descriptor (rawDesc index 93) plus nil-safe GetMethodName.
// GenerateAppTokenRequest: repeated AllowedMethod field (allowedMethods = field 1),
// wired to msgTypes[94].
func (*AllowedMethod) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{93} } func (x *AllowedMethod) GetMethodName() string { if x != nil { return x.MethodName } return "" } type GenerateAppTokenRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields AllowedMethods []*AllowedMethod `protobuf:"bytes,1,rep,name=allowedMethods,proto3" json:"allowedMethods,omitempty"` } func (x *GenerateAppTokenRequest) Reset() { *x = GenerateAppTokenRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GenerateAppTokenRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GenerateAppTokenRequest) ProtoMessage() {} func (x *GenerateAppTokenRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GenerateAppTokenRequest.ProtoReflect.Descriptor instead.
// GenerateAppTokenRequest Descriptor (index 94) plus nil-safe GetAllowedMethods.
// GenerateAppTokenResponse: one string field (appToken = field 1), msgTypes[95].
func (*GenerateAppTokenRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{94} } func (x *GenerateAppTokenRequest) GetAllowedMethods() []*AllowedMethod { if x != nil { return x.AllowedMethods } return nil } type GenerateAppTokenResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields AppToken string `protobuf:"bytes,1,opt,name=appToken,proto3" json:"appToken,omitempty"` } func (x *GenerateAppTokenResponse) Reset() { *x = GenerateAppTokenResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GenerateAppTokenResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GenerateAppTokenResponse) ProtoMessage() {} func (x *GenerateAppTokenResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GenerateAppTokenResponse.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated boilerplate; regenerate from space.proto
// rather than hand-editing. Comments added for navigation only.
// GenerateAppTokenResponse Descriptor (rawDesc index 95) plus nil-safe GetAppToken.
// RemoveDirOrFileRequest: two string fields (path = field 1, bucket = field 2),
// wired to msgTypes[96].
func (*GenerateAppTokenResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{95} } func (x *GenerateAppTokenResponse) GetAppToken() string { if x != nil { return x.AppToken } return "" } type RemoveDirOrFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` } func (x *RemoveDirOrFileRequest) Reset() { *x = RemoveDirOrFileRequest{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RemoveDirOrFileRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*RemoveDirOrFileRequest) ProtoMessage() {} func (x *RemoveDirOrFileRequest) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RemoveDirOrFileRequest.ProtoReflect.Descriptor instead.
// RemoveDirOrFileRequest Descriptor (index 96) plus nil-safe GetPath/GetBucket.
// RemoveDirOrFileResponse: empty message wired to msgTypes[97].
func (*RemoveDirOrFileRequest) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{96} } func (x *RemoveDirOrFileRequest) GetPath() string { if x != nil { return x.Path } return "" } func (x *RemoveDirOrFileRequest) GetBucket() string { if x != nil { return x.Bucket } return "" } type RemoveDirOrFileResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *RemoveDirOrFileResponse) Reset() { *x = RemoveDirOrFileResponse{} if protoimpl.UnsafeEnabled { mi := &file_space_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RemoveDirOrFileResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*RemoveDirOrFileResponse) ProtoMessage() {} func (x *RemoveDirOrFileResponse) ProtoReflect() protoreflect.Message { mi := &file_space_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RemoveDirOrFileResponse.ProtoReflect.Descriptor instead.
func (*RemoveDirOrFileResponse) Descriptor() ([]byte, []int) { return file_space_proto_rawDescGZIP(), []int{97} } var File_space_proto protoreflect.FileDescriptor var file_space_proto_rawDesc = []byte{ 0x0a, 0x0b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2a, 0x0a, 0x12, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x67, 0x0a, 0x13, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x78, 0x0a, 0x19, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x62, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x62, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x41, 0x0a, 0x21, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x65, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x65, 0x65, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x75, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x45, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x65, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x65, 0x65, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x73, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xee, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x72, 0x6f, 0x67, 0x65, 0x55, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x72, 0x6f, 0x67, 0x65, 0x55, 0x73, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x55, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x6e, 
0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x55, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, 0x73, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x55, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x22, 0x4b, 0x0a, 0x19, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x1c, 0x0a, 0x1a, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x0a, 0x1a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x16, 0x4c, 0x69, 0x73, 
0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x6d, 0x69, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x6d, 0x69, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, 0x44, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xcb, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x73, 0x44, 0x69, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x44, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x69, 0x7a, 0x65, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x69, 0x7a, 0x65, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 
0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x70, 0x66, 0x73, 0x48, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x70, 0x66, 0x73, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x69, 0x73, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x73, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x18, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 
0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x62, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x62, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x22, 0x4e, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x6d, 0x69, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x6d, 0x69, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x29, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6c, 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x6c, 0x75, 0x67, 0x22, 0x7e, 0x0a, 0x0c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x73, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x61, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x61, 0x73, 0x4a, 0x6f, 0x69, 0x6e, 0x65, 0x64, 0x22, 0xa3, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x70, 0x64, 
0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2d, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x69, 0x73, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x69, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x22, 
0x1a, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x37, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x22, 0x3e, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x56, 0x69, 0x61, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x56, 0x69, 0x61, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x62, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x04, 0x64, 0x62, 0x49, 0x64, 0x22, 0x2e, 0x0a, 0x14, 0x54, 0x65, 0x78, 0x74, 0x69, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x51, 0x0a, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x62, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x62, 0x49, 0x64, 0x22, 0x2e, 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x69, 0x0a, 0x15, 0x4f, 0x70, 0x65, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x34, 0x0a, 0x16, 0x4f, 0x70, 0x65, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6b, 0x0a, 0x0f, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x65, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xd0, 0x01, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 
0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x41, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7d, 0x0a, 0x1d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x20, 0x0a, 0x1e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4b, 0x65, 0x79, 
0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x1e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x0a, 0x0a, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 
0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x48, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x5e, 0x0a, 0x11, 0x4a, 0x6f, 0x69, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x2c, 0x0a, 0x12, 0x4a, 0x6f, 0x69, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x66, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 
0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, 0x4a, 0x0a, 0x08, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x62, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x62, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x1f, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, 0x22, 0x0a, 0x20, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x4c, 
0x69, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x74, 0x65, 0x6d, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x69, 0x74, 0x65, 0x6d, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x62, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x62, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x1e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x69, 0x64, 0x22, 0x33, 0x0a, 0x11, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x46, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x44, 0x72, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x44, 0x72, 0x69, 0x76, 0x65, 0x22, 0x59, 0x0a, 0x11, 0x46, 0x75, 0x73, 0x65, 0x44, 0x72, 0x69, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 
0x61, 0x74, 0x68, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3e, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x0a, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x74, 0x65, 0x6d, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x69, 0x74, 0x65, 0x6d, 0x50, 0x61, 0x74, 0x68, 0x73, 0x22, 0x50, 0x0a, 0x0a, 0x55, 0x73, 0x61, 0x67, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 
0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x36, 0x0a, 0x10, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x22, 0x6e, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x74, 0x65, 0x6d, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x69, 0x74, 0x65, 0x6d, 0x50, 0x61, 0x74, 0x68, 0x73, 0x22, 0xc5, 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x3d, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x33, 0x0a, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x00, 0x52, 0x10, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x64, 0x41, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x61, 0x64, 0x41, 0x74, 0x42, 0x0f, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x5a, 0x0a, 0x1c, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x69, 0x6c, 
0x65, 0x73, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x22, 0x1f, 0x0a, 0x1d, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0x0a, 0x19, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x65, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x65, 0x65, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x95, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x22, 0x29, 0x0a, 0x17, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x34, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x1f, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x54, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x54, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x22, 0x0a, 0x20, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 
0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x54, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x54, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x1f, 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5f, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x75, 0x62, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x75, 0x62, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, 0x21, 0x0a, 0x1f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x2f, 0x0a, 0x0d, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, 0x17, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x6c, 
0x6c, 0x6f, 0x77, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0x36, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x44, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0xea, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x1d, 0x0a, 0x19, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 
0x05, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x07, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x09, 0x2a, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x57, 0x49, 0x54, 0x54, 0x45, 0x52, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x03, 0x2a, 0x4b, 0x0a, 0x09, 0x46, 0x75, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x4d, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, 0x6d, 0x0a, 0x10, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x56, 0x49, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x53, 0x41, 0x47, 0x45, 0x41, 0x4c, 0x45, 0x52, 0x54, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x49, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x56, 0x4f, 0x4b, 0x45, 0x44, 0x5f, 0x49, 0x4e, 0x56, 0x49, 0x54, 0x41, 
0x54, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x2a, 0x3b, 0x0a, 0x10, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x32, 0xd2, 0x29, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x63, 0x65, 0x41, 0x70, 0x69, 0x12, 0x6d, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x61, 0x6c, 0x6c, 0x12, 0x63, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x72, 0x0a, 0x0f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 
0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x70, 0x61, 0x69, 0x72, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x75, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x12, 0x1f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x70, 0x61, 0x69, 0x72, 0x73, 0x2f, 0x6d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x12, 0x9b, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x56, 0x69, 0x61, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x12, 0x27, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x56, 0x69, 0x61, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x56, 0x69, 0x61, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x20, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x70, 0x61, 0x69, 0x72, 0x73, 0x2f, 0x72, 0x65, 0x73, 
0x74, 0x6f, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x6e, 0x65, 0x6d, 0x6f, 0x6e, 0x69, 0x63, 0x3a, 0x01, 0x2a, 0x12, 0x6a, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x12, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x22, 0x13, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x70, 0x61, 0x69, 0x72, 0x73, 0x2f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x57, 0x69, 0x74, 0x68, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1a, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x70, 0x61, 0x69, 0x72, 0x73, 0x2f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x61, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x22, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x5f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x0c, 0x54, 0x78, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x69, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x65, 0x78, 0x74, 0x69, 0x6c, 0x65, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x08, 0x4f, 0x70, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x16, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x22, 0x0e, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x63, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x4f, 0x72, 0x46, 0x69, 0x6c, 
0x65, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x11, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x2a, 0x09, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x9d, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x24, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x22, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x6e, 0x6b, 0x3a, 0x01, 0x2a, 0x12, 0x7f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x20, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x12, 0x6b, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x53, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 
0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x22, 0x09, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x12, 0x63, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x22, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x60, 0x0a, 0x0f, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x46, 0x75, 0x73, 0x65, 0x44, 0x72, 0x69, 0x76, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x46, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x73, 0x65, 0x44, 0x72, 0x69, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x22, 0x0e, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x46, 0x75, 0x73, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x46, 0x75, 0x73, 0x65, 0x44, 0x72, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x75, 0x73, 0x65, 0x44, 0x72, 0x69, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x10, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0a, 0x12, 0x08, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x75, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x22, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x88, 0x01, 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x12, 0x24, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x22, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x01, 0x2a, 0x12, 0x8c, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x12, 0x25, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x22, 0x17, 0x2f, 0x76, 0x31, 
0x2f, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x3a, 0x01, 0x2a, 0x12, 0x7a, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x12, 0x20, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x50, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x22, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x73, 0x73, 0x70, 0x68, 0x72, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0x86, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x23, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x22, 0x17, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x01, 0x2a, 0x12, 0x90, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x26, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 
0x42, 0x79, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x79, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x3a, 0x01, 0x2a, 0x12, 0x6b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1a, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x67, 0x0a, 0x0a, 0x4a, 0x6f, 0x69, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x22, 0x19, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x8c, 0x01, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1a, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x94, 0x01, 0x0a, 0x18, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x22, 0x1c, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x56, 0x69, 0x61, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x91, 0x01, 0x0a, 0x15, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x49, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x22, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x7b, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0x7b, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x59, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x6e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 
0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x12, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7b, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x22, 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x49, 0x44, 0x7d, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x3a, 0x01, 0x2a, 0x12, 0x68, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x22, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0x70, 0x0a, 0x12, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 
0x6b, 0x75, 0x70, 0x12, 0x20, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f, 0x22, 0x0a, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x01, 0x2a, 0x12, 0x7b, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x12, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x5a, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x11, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x7a, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 
0x6e, 0x73, 0x12, 0x21, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x7e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x12, 0x23, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x9a, 0x01, 0x0a, 0x1a, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x12, 0x28, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x61, 0x73, 0x74, 
0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x22, 0x1c, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x41, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0x5e, 0x0a, 0x0b, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x8c, 0x01, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x26, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x22, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x01, 0x2a, 0x12, 0x6d, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x70, 
0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x22, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_space_proto_rawDescOnce sync.Once file_space_proto_rawDescData = file_space_proto_rawDesc ) func file_space_proto_rawDescGZIP() []byte { file_space_proto_rawDescOnce.Do(func() { file_space_proto_rawDescData = protoimpl.X.CompressGZIP(file_space_proto_rawDescData) }) return file_space_proto_rawDescData } var file_space_proto_enumTypes = make([]protoimpl.EnumInfo, 5) var file_space_proto_msgTypes = make([]protoimpl.MessageInfo, 98) var file_space_proto_goTypes = []interface{}{ (EventType)(0), // 0: space.EventType (KeyBackupType)(0), // 1: space.KeyBackupType (FuseState)(0), // 2: space.FuseState (NotificationType)(0), // 3: space.NotificationType (InvitationStatus)(0), // 4: space.InvitationStatus (*SearchFilesRequest)(nil), // 5: space.SearchFilesRequest (*SearchFilesResponse)(nil), // 6: space.SearchFilesResponse (*SearchFilesDirectoryEntry)(nil), // 7: space.SearchFilesDirectoryEntry (*SetNotificationsLastSeenAtRequest)(nil), // 8: space.SetNotificationsLastSeenAtRequest (*SetNotificationsLastSeenAtResponse)(nil), // 9: space.SetNotificationsLastSeenAtResponse (*GetSharedWithMeFilesRequest)(nil), // 10: space.GetSharedWithMeFilesRequest (*GetSharedWithMeFilesResponse)(nil), // 11: space.GetSharedWithMeFilesResponse (*GetSharedByMeFilesRequest)(nil), // 12: space.GetSharedByMeFilesRequest (*GetSharedByMeFilesResponse)(nil), // 13: space.GetSharedByMeFilesResponse (*GetUsageInfoRequest)(nil), // 14: space.GetUsageInfoRequest 
(*GetUsageInfoResponse)(nil), // 15: space.GetUsageInfoResponse (*ToggleBucketBackupRequest)(nil), // 16: space.ToggleBucketBackupRequest (*ToggleBucketBackupResponse)(nil), // 17: space.ToggleBucketBackupResponse (*BucketBackupRestoreRequest)(nil), // 18: space.BucketBackupRestoreRequest (*BucketBackupRestoreResponse)(nil), // 19: space.BucketBackupRestoreResponse (*ListDirectoriesRequest)(nil), // 20: space.ListDirectoriesRequest (*FileMember)(nil), // 21: space.FileMember (*ListDirectoryEntry)(nil), // 22: space.ListDirectoryEntry (*SharedListDirectoryEntry)(nil), // 23: space.SharedListDirectoryEntry (*ListDirectoriesResponse)(nil), // 24: space.ListDirectoriesResponse (*ListDirectoryRequest)(nil), // 25: space.ListDirectoryRequest (*ListDirectoryResponse)(nil), // 26: space.ListDirectoryResponse (*CreateBucketRequest)(nil), // 27: space.CreateBucketRequest (*BucketMember)(nil), // 28: space.BucketMember (*Bucket)(nil), // 29: space.Bucket (*CreateBucketResponse)(nil), // 30: space.CreateBucketResponse (*GenerateKeyPairRequest)(nil), // 31: space.GenerateKeyPairRequest (*GenerateKeyPairResponse)(nil), // 32: space.GenerateKeyPairResponse (*GetStoredMnemonicRequest)(nil), // 33: space.GetStoredMnemonicRequest (*GetStoredMnemonicResponse)(nil), // 34: space.GetStoredMnemonicResponse (*RestoreKeyPairViaMnemonicRequest)(nil), // 35: space.RestoreKeyPairViaMnemonicRequest (*RestoreKeyPairViaMnemonicResponse)(nil), // 36: space.RestoreKeyPairViaMnemonicResponse (*FileEventResponse)(nil), // 37: space.FileEventResponse (*TextileEventResponse)(nil), // 38: space.TextileEventResponse (*OpenFileRequest)(nil), // 39: space.OpenFileRequest (*OpenFileResponse)(nil), // 40: space.OpenFileResponse (*OpenPublicFileRequest)(nil), // 41: space.OpenPublicFileRequest (*OpenPublicFileResponse)(nil), // 42: space.OpenPublicFileResponse (*AddItemsRequest)(nil), // 43: space.AddItemsRequest (*AddItemResult)(nil), // 44: space.AddItemResult (*AddItemsResponse)(nil), // 45: 
space.AddItemsResponse (*CreateFolderRequest)(nil), // 46: space.CreateFolderRequest (*CreateFolderResponse)(nil), // 47: space.CreateFolderResponse (*BackupKeysByPassphraseRequest)(nil), // 48: space.BackupKeysByPassphraseRequest (*BackupKeysByPassphraseResponse)(nil), // 49: space.BackupKeysByPassphraseResponse (*RecoverKeysByPassphraseRequest)(nil), // 50: space.RecoverKeysByPassphraseRequest (*RecoverKeysByPassphraseResponse)(nil), // 51: space.RecoverKeysByPassphraseResponse (*TestKeysPassphraseRequest)(nil), // 52: space.TestKeysPassphraseRequest (*TestKeysPassphraseResponse)(nil), // 53: space.TestKeysPassphraseResponse (*ThreadInfo)(nil), // 54: space.ThreadInfo (*ShareBucketRequest)(nil), // 55: space.ShareBucketRequest (*ShareBucketResponse)(nil), // 56: space.ShareBucketResponse (*JoinBucketRequest)(nil), // 57: space.JoinBucketRequest (*JoinBucketResponse)(nil), // 58: space.JoinBucketResponse (*ShareFilesViaPublicKeyRequest)(nil), // 59: space.ShareFilesViaPublicKeyRequest (*FullPath)(nil), // 60: space.FullPath (*ShareFilesViaPublicKeyResponse)(nil), // 61: space.ShareFilesViaPublicKeyResponse (*UnshareFilesViaPublicKeyRequest)(nil), // 62: space.UnshareFilesViaPublicKeyRequest (*UnshareFilesViaPublicKeyResponse)(nil), // 63: space.UnshareFilesViaPublicKeyResponse (*GeneratePublicFileLinkRequest)(nil), // 64: space.GeneratePublicFileLinkRequest (*GeneratePublicFileLinkResponse)(nil), // 65: space.GeneratePublicFileLinkResponse (*ToggleFuseRequest)(nil), // 66: space.ToggleFuseRequest (*FuseDriveResponse)(nil), // 67: space.FuseDriveResponse (*ListBucketsRequest)(nil), // 68: space.ListBucketsRequest (*ListBucketsResponse)(nil), // 69: space.ListBucketsResponse (*Invitation)(nil), // 70: space.Invitation (*UsageAlert)(nil), // 71: space.UsageAlert (*InvitationAccept)(nil), // 72: space.InvitationAccept (*RevokedInvitation)(nil), // 73: space.RevokedInvitation (*Notification)(nil), // 74: space.Notification (*HandleFilesInvitationRequest)(nil), // 75: 
space.HandleFilesInvitationRequest (*HandleFilesInvitationResponse)(nil), // 76: space.HandleFilesInvitationResponse (*NotificationEventResponse)(nil), // 77: space.NotificationEventResponse (*GetNotificationsRequest)(nil), // 78: space.GetNotificationsRequest (*GetNotificationsResponse)(nil), // 79: space.GetNotificationsResponse (*ReadNotificationRequest)(nil), // 80: space.ReadNotificationRequest (*ReadNotificationResponse)(nil), // 81: space.ReadNotificationResponse (*GetPublicKeyRequest)(nil), // 82: space.GetPublicKeyRequest (*GetPublicKeyResponse)(nil), // 83: space.GetPublicKeyResponse (*RecoverKeysByLocalBackupRequest)(nil), // 84: space.RecoverKeysByLocalBackupRequest (*RecoverKeysByLocalBackupResponse)(nil), // 85: space.RecoverKeysByLocalBackupResponse (*CreateLocalKeysBackupRequest)(nil), // 86: space.CreateLocalKeysBackupRequest (*CreateLocalKeysBackupResponse)(nil), // 87: space.CreateLocalKeysBackupResponse (*DeleteAccountRequest)(nil), // 88: space.DeleteAccountRequest (*DeleteAccountResponse)(nil), // 89: space.DeleteAccountResponse (*DeleteKeyPairRequest)(nil), // 90: space.DeleteKeyPairRequest (*DeleteKeyPairResponse)(nil), // 91: space.DeleteKeyPairResponse (*GetAPISessionTokensRequest)(nil), // 92: space.GetAPISessionTokensRequest (*GetAPISessionTokensResponse)(nil), // 93: space.GetAPISessionTokensResponse (*GetRecentlySharedWithRequest)(nil), // 94: space.GetRecentlySharedWithRequest (*GetRecentlySharedWithResponse)(nil), // 95: space.GetRecentlySharedWithResponse (*InitializeMasterAppTokenRequest)(nil), // 96: space.InitializeMasterAppTokenRequest (*InitializeMasterAppTokenResponse)(nil), // 97: space.InitializeMasterAppTokenResponse (*AllowedMethod)(nil), // 98: space.AllowedMethod (*GenerateAppTokenRequest)(nil), // 99: space.GenerateAppTokenRequest (*GenerateAppTokenResponse)(nil), // 100: space.GenerateAppTokenResponse (*RemoveDirOrFileRequest)(nil), // 101: space.RemoveDirOrFileRequest (*RemoveDirOrFileResponse)(nil), // 102: 
space.RemoveDirOrFileResponse (*empty.Empty)(nil), // 103: google.protobuf.Empty } var file_space_proto_depIdxs = []int32{ 7, // 0: space.SearchFilesResponse.entries:type_name -> space.SearchFilesDirectoryEntry 22, // 1: space.SearchFilesDirectoryEntry.entry:type_name -> space.ListDirectoryEntry 23, // 2: space.GetSharedWithMeFilesResponse.items:type_name -> space.SharedListDirectoryEntry 23, // 3: space.GetSharedByMeFilesResponse.items:type_name -> space.SharedListDirectoryEntry 21, // 4: space.ListDirectoryEntry.members:type_name -> space.FileMember 22, // 5: space.SharedListDirectoryEntry.entry:type_name -> space.ListDirectoryEntry 22, // 6: space.ListDirectoriesResponse.entries:type_name -> space.ListDirectoryEntry 22, // 7: space.ListDirectoryResponse.entries:type_name -> space.ListDirectoryEntry 28, // 8: space.Bucket.members:type_name -> space.BucketMember 29, // 9: space.CreateBucketResponse.bucket:type_name -> space.Bucket 0, // 10: space.FileEventResponse.type:type_name -> space.EventType 22, // 11: space.FileEventResponse.entry:type_name -> space.ListDirectoryEntry 44, // 12: space.AddItemsResponse.result:type_name -> space.AddItemResult 1, // 13: space.BackupKeysByPassphraseRequest.type:type_name -> space.KeyBackupType 1, // 14: space.RecoverKeysByPassphraseRequest.type:type_name -> space.KeyBackupType 54, // 15: space.ShareBucketResponse.threadinfo:type_name -> space.ThreadInfo 54, // 16: space.JoinBucketRequest.threadinfo:type_name -> space.ThreadInfo 60, // 17: space.ShareFilesViaPublicKeyRequest.paths:type_name -> space.FullPath 60, // 18: space.UnshareFilesViaPublicKeyRequest.paths:type_name -> space.FullPath 2, // 19: space.FuseDriveResponse.state:type_name -> space.FuseState 29, // 20: space.ListBucketsResponse.buckets:type_name -> space.Bucket 4, // 21: space.Invitation.status:type_name -> space.InvitationStatus 60, // 22: space.Invitation.itemPaths:type_name -> space.FullPath 60, // 23: space.RevokedInvitation.itemPaths:type_name -> 
space.FullPath 70, // 24: space.Notification.invitationValue:type_name -> space.Invitation 71, // 25: space.Notification.usageAlert:type_name -> space.UsageAlert 72, // 26: space.Notification.invitationAccept:type_name -> space.InvitationAccept 73, // 27: space.Notification.revokedInvitation:type_name -> space.RevokedInvitation 3, // 28: space.Notification.type:type_name -> space.NotificationType 74, // 29: space.NotificationEventResponse.notification:type_name -> space.Notification 74, // 30: space.GetNotificationsResponse.notifications:type_name -> space.Notification 21, // 31: space.GetRecentlySharedWithResponse.members:type_name -> space.FileMember 98, // 32: space.GenerateAppTokenRequest.allowedMethods:type_name -> space.AllowedMethod 20, // 33: space.SpaceApi.ListDirectories:input_type -> space.ListDirectoriesRequest 25, // 34: space.SpaceApi.ListDirectory:input_type -> space.ListDirectoryRequest 31, // 35: space.SpaceApi.GenerateKeyPair:input_type -> space.GenerateKeyPairRequest 33, // 36: space.SpaceApi.GetStoredMnemonic:input_type -> space.GetStoredMnemonicRequest 35, // 37: space.SpaceApi.RestoreKeyPairViaMnemonic:input_type -> space.RestoreKeyPairViaMnemonicRequest 90, // 38: space.SpaceApi.DeleteKeyPair:input_type -> space.DeleteKeyPairRequest 31, // 39: space.SpaceApi.GenerateKeyPairWithForce:input_type -> space.GenerateKeyPairRequest 82, // 40: space.SpaceApi.GetPublicKey:input_type -> space.GetPublicKeyRequest 103, // 41: space.SpaceApi.Subscribe:input_type -> google.protobuf.Empty 103, // 42: space.SpaceApi.TxlSubscribe:input_type -> google.protobuf.Empty 39, // 43: space.SpaceApi.OpenFile:input_type -> space.OpenFileRequest 101, // 44: space.SpaceApi.RemoveDirOrFile:input_type -> space.RemoveDirOrFileRequest 64, // 45: space.SpaceApi.GeneratePublicFileLink:input_type -> space.GeneratePublicFileLinkRequest 10, // 46: space.SpaceApi.GetSharedWithMeFiles:input_type -> space.GetSharedWithMeFilesRequest 12, // 47: 
space.SpaceApi.GetSharedByMeFiles:input_type -> space.GetSharedByMeFilesRequest 41, // 48: space.SpaceApi.OpenPublicFile:input_type -> space.OpenPublicFileRequest 43, // 49: space.SpaceApi.AddItems:input_type -> space.AddItemsRequest 46, // 50: space.SpaceApi.CreateFolder:input_type -> space.CreateFolderRequest 66, // 51: space.SpaceApi.ToggleFuseDrive:input_type -> space.ToggleFuseRequest 103, // 52: space.SpaceApi.GetFuseDriveStatus:input_type -> google.protobuf.Empty 27, // 53: space.SpaceApi.CreateBucket:input_type -> space.CreateBucketRequest 48, // 54: space.SpaceApi.BackupKeysByPassphrase:input_type -> space.BackupKeysByPassphraseRequest 50, // 55: space.SpaceApi.RecoverKeysByPassphrase:input_type -> space.RecoverKeysByPassphraseRequest 52, // 56: space.SpaceApi.TestKeysPassphrase:input_type -> space.TestKeysPassphraseRequest 86, // 57: space.SpaceApi.CreateLocalKeysBackup:input_type -> space.CreateLocalKeysBackupRequest 84, // 58: space.SpaceApi.RecoverKeysByLocalBackup:input_type -> space.RecoverKeysByLocalBackupRequest 55, // 59: space.SpaceApi.ShareBucket:input_type -> space.ShareBucketRequest 57, // 60: space.SpaceApi.JoinBucket:input_type -> space.JoinBucketRequest 59, // 61: space.SpaceApi.ShareFilesViaPublicKey:input_type -> space.ShareFilesViaPublicKeyRequest 62, // 62: space.SpaceApi.UnshareFilesViaPublicKey:input_type -> space.UnshareFilesViaPublicKeyRequest 75, // 63: space.SpaceApi.HandleFilesInvitation:input_type -> space.HandleFilesInvitationRequest 103, // 64: space.SpaceApi.NotificationSubscribe:input_type -> google.protobuf.Empty 68, // 65: space.SpaceApi.ListBuckets:input_type -> space.ListBucketsRequest 78, // 66: space.SpaceApi.GetNotifications:input_type -> space.GetNotificationsRequest 80, // 67: space.SpaceApi.ReadNotification:input_type -> space.ReadNotificationRequest 88, // 68: space.SpaceApi.DeleteAccount:input_type -> space.DeleteAccountRequest 16, // 69: space.SpaceApi.ToggleBucketBackup:input_type -> 
space.ToggleBucketBackupRequest 18, // 70: space.SpaceApi.BucketBackupRestore:input_type -> space.BucketBackupRestoreRequest 14, // 71: space.SpaceApi.GetUsageInfo:input_type -> space.GetUsageInfoRequest 92, // 72: space.SpaceApi.GetAPISessionTokens:input_type -> space.GetAPISessionTokensRequest 94, // 73: space.SpaceApi.GetRecentlySharedWith:input_type -> space.GetRecentlySharedWithRequest 8, // 74: space.SpaceApi.SetNotificationsLastSeenAt:input_type -> space.SetNotificationsLastSeenAtRequest 5, // 75: space.SpaceApi.SearchFiles:input_type -> space.SearchFilesRequest 96, // 76: space.SpaceApi.InitializeMasterAppToken:input_type -> space.InitializeMasterAppTokenRequest 99, // 77: space.SpaceApi.GenerateAppToken:input_type -> space.GenerateAppTokenRequest 24, // 78: space.SpaceApi.ListDirectories:output_type -> space.ListDirectoriesResponse 26, // 79: space.SpaceApi.ListDirectory:output_type -> space.ListDirectoryResponse 32, // 80: space.SpaceApi.GenerateKeyPair:output_type -> space.GenerateKeyPairResponse 34, // 81: space.SpaceApi.GetStoredMnemonic:output_type -> space.GetStoredMnemonicResponse 36, // 82: space.SpaceApi.RestoreKeyPairViaMnemonic:output_type -> space.RestoreKeyPairViaMnemonicResponse 91, // 83: space.SpaceApi.DeleteKeyPair:output_type -> space.DeleteKeyPairResponse 32, // 84: space.SpaceApi.GenerateKeyPairWithForce:output_type -> space.GenerateKeyPairResponse 83, // 85: space.SpaceApi.GetPublicKey:output_type -> space.GetPublicKeyResponse 37, // 86: space.SpaceApi.Subscribe:output_type -> space.FileEventResponse 38, // 87: space.SpaceApi.TxlSubscribe:output_type -> space.TextileEventResponse 40, // 88: space.SpaceApi.OpenFile:output_type -> space.OpenFileResponse 102, // 89: space.SpaceApi.RemoveDirOrFile:output_type -> space.RemoveDirOrFileResponse 65, // 90: space.SpaceApi.GeneratePublicFileLink:output_type -> space.GeneratePublicFileLinkResponse 11, // 91: space.SpaceApi.GetSharedWithMeFiles:output_type -> space.GetSharedWithMeFilesResponse 13, 
// 92: space.SpaceApi.GetSharedByMeFiles:output_type -> space.GetSharedByMeFilesResponse 42, // 93: space.SpaceApi.OpenPublicFile:output_type -> space.OpenPublicFileResponse 45, // 94: space.SpaceApi.AddItems:output_type -> space.AddItemsResponse 47, // 95: space.SpaceApi.CreateFolder:output_type -> space.CreateFolderResponse 67, // 96: space.SpaceApi.ToggleFuseDrive:output_type -> space.FuseDriveResponse 67, // 97: space.SpaceApi.GetFuseDriveStatus:output_type -> space.FuseDriveResponse 30, // 98: space.SpaceApi.CreateBucket:output_type -> space.CreateBucketResponse 49, // 99: space.SpaceApi.BackupKeysByPassphrase:output_type -> space.BackupKeysByPassphraseResponse 51, // 100: space.SpaceApi.RecoverKeysByPassphrase:output_type -> space.RecoverKeysByPassphraseResponse 53, // 101: space.SpaceApi.TestKeysPassphrase:output_type -> space.TestKeysPassphraseResponse 87, // 102: space.SpaceApi.CreateLocalKeysBackup:output_type -> space.CreateLocalKeysBackupResponse 85, // 103: space.SpaceApi.RecoverKeysByLocalBackup:output_type -> space.RecoverKeysByLocalBackupResponse 56, // 104: space.SpaceApi.ShareBucket:output_type -> space.ShareBucketResponse 58, // 105: space.SpaceApi.JoinBucket:output_type -> space.JoinBucketResponse 61, // 106: space.SpaceApi.ShareFilesViaPublicKey:output_type -> space.ShareFilesViaPublicKeyResponse 63, // 107: space.SpaceApi.UnshareFilesViaPublicKey:output_type -> space.UnshareFilesViaPublicKeyResponse 76, // 108: space.SpaceApi.HandleFilesInvitation:output_type -> space.HandleFilesInvitationResponse 77, // 109: space.SpaceApi.NotificationSubscribe:output_type -> space.NotificationEventResponse 69, // 110: space.SpaceApi.ListBuckets:output_type -> space.ListBucketsResponse 79, // 111: space.SpaceApi.GetNotifications:output_type -> space.GetNotificationsResponse 81, // 112: space.SpaceApi.ReadNotification:output_type -> space.ReadNotificationResponse 89, // 113: space.SpaceApi.DeleteAccount:output_type -> space.DeleteAccountResponse 17, // 114: 
space.SpaceApi.ToggleBucketBackup:output_type -> space.ToggleBucketBackupResponse 19, // 115: space.SpaceApi.BucketBackupRestore:output_type -> space.BucketBackupRestoreResponse 15, // 116: space.SpaceApi.GetUsageInfo:output_type -> space.GetUsageInfoResponse 93, // 117: space.SpaceApi.GetAPISessionTokens:output_type -> space.GetAPISessionTokensResponse 95, // 118: space.SpaceApi.GetRecentlySharedWith:output_type -> space.GetRecentlySharedWithResponse 9, // 119: space.SpaceApi.SetNotificationsLastSeenAt:output_type -> space.SetNotificationsLastSeenAtResponse 6, // 120: space.SpaceApi.SearchFiles:output_type -> space.SearchFilesResponse 97, // 121: space.SpaceApi.InitializeMasterAppToken:output_type -> space.InitializeMasterAppTokenResponse 100, // 122: space.SpaceApi.GenerateAppToken:output_type -> space.GenerateAppTokenResponse 78, // [78:123] is the sub-list for method output_type 33, // [33:78] is the sub-list for method input_type 33, // [33:33] is the sub-list for extension type_name 33, // [33:33] is the sub-list for extension extendee 0, // [0:33] is the sub-list for field type_name } func init() { file_space_proto_init() } func file_space_proto_init() { if File_space_proto != nil { return } if !protoimpl.UnsafeEnabled { file_space_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SearchFilesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SearchFilesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SearchFilesDirectoryEntry); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[3].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*SetNotificationsLastSeenAtRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetNotificationsLastSeenAtResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSharedWithMeFilesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSharedWithMeFilesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSharedByMeFilesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSharedByMeFilesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetUsageInfoRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetUsageInfoResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ToggleBucketBackupRequest); i { case 0: 
return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ToggleBucketBackupResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BucketBackupRestoreRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BucketBackupRestoreResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListDirectoriesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileMember); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListDirectoryEntry); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SharedListDirectoryEntry); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListDirectoriesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_space_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListDirectoryRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListDirectoryResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateBucketRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BucketMember); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateBucketResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GenerateKeyPairRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GenerateKeyPairResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStoredMnemonicRequest); i { case 0: return &v.state 
case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStoredMnemonicResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RestoreKeyPairViaMnemonicRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RestoreKeyPairViaMnemonicResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileEventResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TextileEventResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OpenFileRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OpenFileResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OpenPublicFileRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_space_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OpenPublicFileResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AddItemsRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AddItemResult); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AddItemsResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateFolderRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateFolderResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BackupKeysByPassphraseRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BackupKeysByPassphraseResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RecoverKeysByPassphraseRequest); 
i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RecoverKeysByPassphraseResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TestKeysPassphraseRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TestKeysPassphraseResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ThreadInfo); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShareBucketRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShareBucketResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JoinBucketRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JoinBucketResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_space_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShareFilesViaPublicKeyRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FullPath); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShareFilesViaPublicKeyResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UnshareFilesViaPublicKeyRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UnshareFilesViaPublicKeyResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratePublicFileLinkRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratePublicFileLinkResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ToggleFuseRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { switch 
v := v.(*FuseDriveResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListBucketsRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListBucketsResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Invitation); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UsageAlert); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InvitationAccept); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RevokedInvitation); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Notification); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HandleFilesInvitationRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
// Field exporters for message indices 71-95.
// Same generated protobuf reflection glue as the preceding exporters:
// ordinal 0 exposes state, 1 exposes sizeCache, 2 exposes unknownFields,
// and any other ordinal yields nil. Machine-generated — regenerate with
// protoc rather than hand-editing.
file_space_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*HandleFilesInvitationResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*NotificationEventResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetNotificationsRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetNotificationsResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*ReadNotificationRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*ReadNotificationResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetPublicKeyRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetPublicKeyResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*RecoverKeysByLocalBackupRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*RecoverKeysByLocalBackupResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*CreateLocalKeysBackupRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*CreateLocalKeysBackupResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*DeleteAccountRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*DeleteAccountResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*DeleteKeyPairRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*DeleteKeyPairResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetAPISessionTokensRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetAPISessionTokensResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetRecentlySharedWithRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GetRecentlySharedWithResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*InitializeMasterAppTokenRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*InitializeMasterAppTokenResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*AllowedMethod); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GenerateAppTokenRequest); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
	switch v := v.(*GenerateAppTokenResponse); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}
file_space_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveDirOrFileRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_space_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveDirOrFileResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } file_space_proto_msgTypes[69].OneofWrappers = []interface{}{ (*Notification_InvitationValue)(nil), (*Notification_UsageAlert)(nil), (*Notification_InvitationAccept)(nil), (*Notification_RevokedInvitation)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_space_proto_rawDesc, NumEnums: 5, NumMessages: 98, NumExtensions: 0, NumServices: 1, }, GoTypes: file_space_proto_goTypes, DependencyIndexes: file_space_proto_depIdxs, EnumInfos: file_space_proto_enumTypes, MessageInfos: file_space_proto_msgTypes, }.Build() File_space_proto = out.File file_space_proto_rawDesc = nil file_space_proto_goTypes = nil file_space_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // SpaceApiClient is the client API for SpaceApi service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SpaceApiClient interface { // Get all folder or files in the default bucket. It fetches all subdirectories too. 
ListDirectories(ctx context.Context, in *ListDirectoriesRequest, opts ...grpc.CallOption) (*ListDirectoriesResponse, error) // Get the folder or files in the path directory. // Unlike ListDirectories, this only returns immediate children at path. ListDirectory(ctx context.Context, in *ListDirectoryRequest, opts ...grpc.CallOption) (*ListDirectoryResponse, error) // Generate Key Pair for current account. // This will return error if daemon account already has keypairs GenerateKeyPair(ctx context.Context, in *GenerateKeyPairRequest, opts ...grpc.CallOption) (*GenerateKeyPairResponse, error) GetStoredMnemonic(ctx context.Context, in *GetStoredMnemonicRequest, opts ...grpc.CallOption) (*GetStoredMnemonicResponse, error) // Restores a keypair given a mnemonic. // This will override any existing key pair RestoreKeyPairViaMnemonic(ctx context.Context, in *RestoreKeyPairViaMnemonicRequest, opts ...grpc.CallOption) (*RestoreKeyPairViaMnemonicResponse, error) DeleteKeyPair(ctx context.Context, in *DeleteKeyPairRequest, opts ...grpc.CallOption) (*DeleteKeyPairResponse, error) // Force Generation of KeyPair. This will override existing keys stored in daemon. GenerateKeyPairWithForce(ctx context.Context, in *GenerateKeyPairRequest, opts ...grpc.CallOption) (*GenerateKeyPairResponse, error) GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*GetPublicKeyResponse, error) // Subscribe to file events. This streams responses to the caller Subscribe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (SpaceApi_SubscribeClient, error) // Subscribe to textile events. This streams responses to the caller TxlSubscribe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (SpaceApi_TxlSubscribeClient, error) // Open a file in the daemon. 
// Daemon keeps track of all open files and closes them if no activity is noticed after a while OpenFile(ctx context.Context, in *OpenFileRequest, opts ...grpc.CallOption) (*OpenFileResponse, error) // Removes a file or dir from a bucket RemoveDirOrFile(ctx context.Context, in *RemoveDirOrFileRequest, opts ...grpc.CallOption) (*RemoveDirOrFileResponse, error) // Generates a copy of the file that's accessible through IPFS gateways GeneratePublicFileLink(ctx context.Context, in *GeneratePublicFileLinkRequest, opts ...grpc.CallOption) (*GeneratePublicFileLinkResponse, error) // Gets the files that are shared with this recipient GetSharedWithMeFiles(ctx context.Context, in *GetSharedWithMeFilesRequest, opts ...grpc.CallOption) (*GetSharedWithMeFilesResponse, error) // Gets the files that are shared by the sender GetSharedByMeFiles(ctx context.Context, in *GetSharedByMeFilesRequest, opts ...grpc.CallOption) (*GetSharedByMeFilesResponse, error) // Open an encrypted public shared file in the daemon. // This requires the decryption key and file hash/cid to work OpenPublicFile(ctx context.Context, in *OpenPublicFileRequest, opts ...grpc.CallOption) (*OpenPublicFileResponse, error) // Adds items (files/folders) to be uploaded to the bucket. AddItems(ctx context.Context, in *AddItemsRequest, opts ...grpc.CallOption) (SpaceApi_AddItemsClient, error) // Creates a folder/directory at the specified path CreateFolder(ctx context.Context, in *CreateFolderRequest, opts ...grpc.CallOption) (*CreateFolderResponse, error) // Toggle FUSE drive to be mounted or unmounted ToggleFuseDrive(ctx context.Context, in *ToggleFuseRequest, opts ...grpc.CallOption) (*FuseDriveResponse, error) // Get status of FUSE drive. 
If mounted or unmounted GetFuseDriveStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*FuseDriveResponse, error) // Create a new bucket owned by current user (aka keypair) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*CreateBucketResponse, error) // Backup Key by Passphrase BackupKeysByPassphrase(ctx context.Context, in *BackupKeysByPassphraseRequest, opts ...grpc.CallOption) (*BackupKeysByPassphraseResponse, error) // Recover Keys by Passphrase RecoverKeysByPassphrase(ctx context.Context, in *RecoverKeysByPassphraseRequest, opts ...grpc.CallOption) (*RecoverKeysByPassphraseResponse, error) // Tests a passphrase to see if it matches the one previously used TestKeysPassphrase(ctx context.Context, in *TestKeysPassphraseRequest, opts ...grpc.CallOption) (*TestKeysPassphraseResponse, error) CreateLocalKeysBackup(ctx context.Context, in *CreateLocalKeysBackupRequest, opts ...grpc.CallOption) (*CreateLocalKeysBackupResponse, error) RecoverKeysByLocalBackup(ctx context.Context, in *RecoverKeysByLocalBackupRequest, opts ...grpc.CallOption) (*RecoverKeysByLocalBackupResponse, error) // Share bucket ShareBucket(ctx context.Context, in *ShareBucketRequest, opts ...grpc.CallOption) (*ShareBucketResponse, error) // Join bucket JoinBucket(ctx context.Context, in *JoinBucketRequest, opts ...grpc.CallOption) (*JoinBucketResponse, error) // Share bucket via public key using Textile Hub inboxing ShareFilesViaPublicKey(ctx context.Context, in *ShareFilesViaPublicKeyRequest, opts ...grpc.CallOption) (*ShareFilesViaPublicKeyResponse, error) // Remove public keys for shared files in buckets UnshareFilesViaPublicKey(ctx context.Context, in *UnshareFilesViaPublicKeyRequest, opts ...grpc.CallOption) (*UnshareFilesViaPublicKeyResponse, error) HandleFilesInvitation(ctx context.Context, in *HandleFilesInvitationRequest, opts ...grpc.CallOption) (*HandleFilesInvitationResponse, error) NotificationSubscribe(ctx context.Context, in 
*empty.Empty, opts ...grpc.CallOption) (SpaceApi_NotificationSubscribeClient, error) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) GetNotifications(ctx context.Context, in *GetNotificationsRequest, opts ...grpc.CallOption) (*GetNotificationsResponse, error) ReadNotification(ctx context.Context, in *ReadNotificationRequest, opts ...grpc.CallOption) (*ReadNotificationResponse, error) DeleteAccount(ctx context.Context, in *DeleteAccountRequest, opts ...grpc.CallOption) (*DeleteAccountResponse, error) ToggleBucketBackup(ctx context.Context, in *ToggleBucketBackupRequest, opts ...grpc.CallOption) (*ToggleBucketBackupResponse, error) BucketBackupRestore(ctx context.Context, in *BucketBackupRestoreRequest, opts ...grpc.CallOption) (*BucketBackupRestoreResponse, error) GetUsageInfo(ctx context.Context, in *GetUsageInfoRequest, opts ...grpc.CallOption) (*GetUsageInfoResponse, error) GetAPISessionTokens(ctx context.Context, in *GetAPISessionTokensRequest, opts ...grpc.CallOption) (*GetAPISessionTokensResponse, error) // Returns a list of addresses / public keys of clients to which files where shared or received, ordered by date GetRecentlySharedWith(ctx context.Context, in *GetRecentlySharedWithRequest, opts ...grpc.CallOption) (*GetRecentlySharedWithResponse, error) // This will set the last read timestamp for the user so that the client // can check if newer notifications are present for UX SetNotificationsLastSeenAt(ctx context.Context, in *SetNotificationsLastSeenAtRequest, opts ...grpc.CallOption) (*SetNotificationsLastSeenAtResponse, error) // Search for files across all users bucket SearchFiles(ctx context.Context, in *SearchFilesRequest, opts ...grpc.CallOption) (*SearchFilesResponse, error) // Initialize master app token // App tokens are used to authorize scoped access to a range of methods // Master token can only be generated once and has access to all methods InitializeMasterAppToken(ctx 
context.Context, in *InitializeMasterAppTokenRequest, opts ...grpc.CallOption) (*InitializeMasterAppTokenResponse, error) // Generates an app token with scoped access. GenerateAppToken(ctx context.Context, in *GenerateAppTokenRequest, opts ...grpc.CallOption) (*GenerateAppTokenResponse, error) } type spaceApiClient struct { cc grpc.ClientConnInterface } func NewSpaceApiClient(cc grpc.ClientConnInterface) SpaceApiClient { return &spaceApiClient{cc} } func (c *spaceApiClient) ListDirectories(ctx context.Context, in *ListDirectoriesRequest, opts ...grpc.CallOption) (*ListDirectoriesResponse, error) { out := new(ListDirectoriesResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ListDirectories", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) ListDirectory(ctx context.Context, in *ListDirectoryRequest, opts ...grpc.CallOption) (*ListDirectoryResponse, error) { out := new(ListDirectoryResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ListDirectory", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GenerateKeyPair(ctx context.Context, in *GenerateKeyPairRequest, opts ...grpc.CallOption) (*GenerateKeyPairResponse, error) { out := new(GenerateKeyPairResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GenerateKeyPair", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetStoredMnemonic(ctx context.Context, in *GetStoredMnemonicRequest, opts ...grpc.CallOption) (*GetStoredMnemonicResponse, error) { out := new(GetStoredMnemonicResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetStoredMnemonic", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) RestoreKeyPairViaMnemonic(ctx context.Context, in *RestoreKeyPairViaMnemonicRequest, opts ...grpc.CallOption) (*RestoreKeyPairViaMnemonicResponse, error) { out := new(RestoreKeyPairViaMnemonicResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/RestoreKeyPairViaMnemonic", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) DeleteKeyPair(ctx context.Context, in *DeleteKeyPairRequest, opts ...grpc.CallOption) (*DeleteKeyPairResponse, error) { out := new(DeleteKeyPairResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/DeleteKeyPair", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GenerateKeyPairWithForce(ctx context.Context, in *GenerateKeyPairRequest, opts ...grpc.CallOption) (*GenerateKeyPairResponse, error) { out := new(GenerateKeyPairResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GenerateKeyPairWithForce", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*GetPublicKeyResponse, error) { out := new(GetPublicKeyResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetPublicKey", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) Subscribe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (SpaceApi_SubscribeClient, error) { stream, err := c.cc.NewStream(ctx, &_SpaceApi_serviceDesc.Streams[0], "/space.SpaceApi/Subscribe", opts...) 
if err != nil { return nil, err } x := &spaceApiSubscribeClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SpaceApi_SubscribeClient interface { Recv() (*FileEventResponse, error) grpc.ClientStream } type spaceApiSubscribeClient struct { grpc.ClientStream } func (x *spaceApiSubscribeClient) Recv() (*FileEventResponse, error) { m := new(FileEventResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *spaceApiClient) TxlSubscribe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (SpaceApi_TxlSubscribeClient, error) { stream, err := c.cc.NewStream(ctx, &_SpaceApi_serviceDesc.Streams[1], "/space.SpaceApi/TxlSubscribe", opts...) if err != nil { return nil, err } x := &spaceApiTxlSubscribeClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SpaceApi_TxlSubscribeClient interface { Recv() (*TextileEventResponse, error) grpc.ClientStream } type spaceApiTxlSubscribeClient struct { grpc.ClientStream } func (x *spaceApiTxlSubscribeClient) Recv() (*TextileEventResponse, error) { m := new(TextileEventResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *spaceApiClient) OpenFile(ctx context.Context, in *OpenFileRequest, opts ...grpc.CallOption) (*OpenFileResponse, error) { out := new(OpenFileResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/OpenFile", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) RemoveDirOrFile(ctx context.Context, in *RemoveDirOrFileRequest, opts ...grpc.CallOption) (*RemoveDirOrFileResponse, error) { out := new(RemoveDirOrFileResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/RemoveDirOrFile", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GeneratePublicFileLink(ctx context.Context, in *GeneratePublicFileLinkRequest, opts ...grpc.CallOption) (*GeneratePublicFileLinkResponse, error) { out := new(GeneratePublicFileLinkResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GeneratePublicFileLink", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetSharedWithMeFiles(ctx context.Context, in *GetSharedWithMeFilesRequest, opts ...grpc.CallOption) (*GetSharedWithMeFilesResponse, error) { out := new(GetSharedWithMeFilesResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetSharedWithMeFiles", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetSharedByMeFiles(ctx context.Context, in *GetSharedByMeFilesRequest, opts ...grpc.CallOption) (*GetSharedByMeFilesResponse, error) { out := new(GetSharedByMeFilesResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetSharedByMeFiles", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) OpenPublicFile(ctx context.Context, in *OpenPublicFileRequest, opts ...grpc.CallOption) (*OpenPublicFileResponse, error) { out := new(OpenPublicFileResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/OpenPublicFile", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) AddItems(ctx context.Context, in *AddItemsRequest, opts ...grpc.CallOption) (SpaceApi_AddItemsClient, error) { stream, err := c.cc.NewStream(ctx, &_SpaceApi_serviceDesc.Streams[2], "/space.SpaceApi/AddItems", opts...) 
if err != nil { return nil, err } x := &spaceApiAddItemsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SpaceApi_AddItemsClient interface { Recv() (*AddItemsResponse, error) grpc.ClientStream } type spaceApiAddItemsClient struct { grpc.ClientStream } func (x *spaceApiAddItemsClient) Recv() (*AddItemsResponse, error) { m := new(AddItemsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *spaceApiClient) CreateFolder(ctx context.Context, in *CreateFolderRequest, opts ...grpc.CallOption) (*CreateFolderResponse, error) { out := new(CreateFolderResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/CreateFolder", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) ToggleFuseDrive(ctx context.Context, in *ToggleFuseRequest, opts ...grpc.CallOption) (*FuseDriveResponse, error) { out := new(FuseDriveResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ToggleFuseDrive", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetFuseDriveStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*FuseDriveResponse, error) { out := new(FuseDriveResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetFuseDriveStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*CreateBucketResponse, error) { out := new(CreateBucketResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/CreateBucket", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) BackupKeysByPassphrase(ctx context.Context, in *BackupKeysByPassphraseRequest, opts ...grpc.CallOption) (*BackupKeysByPassphraseResponse, error) { out := new(BackupKeysByPassphraseResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/BackupKeysByPassphrase", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) RecoverKeysByPassphrase(ctx context.Context, in *RecoverKeysByPassphraseRequest, opts ...grpc.CallOption) (*RecoverKeysByPassphraseResponse, error) { out := new(RecoverKeysByPassphraseResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/RecoverKeysByPassphrase", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) TestKeysPassphrase(ctx context.Context, in *TestKeysPassphraseRequest, opts ...grpc.CallOption) (*TestKeysPassphraseResponse, error) { out := new(TestKeysPassphraseResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/TestKeysPassphrase", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) CreateLocalKeysBackup(ctx context.Context, in *CreateLocalKeysBackupRequest, opts ...grpc.CallOption) (*CreateLocalKeysBackupResponse, error) { out := new(CreateLocalKeysBackupResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/CreateLocalKeysBackup", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) RecoverKeysByLocalBackup(ctx context.Context, in *RecoverKeysByLocalBackupRequest, opts ...grpc.CallOption) (*RecoverKeysByLocalBackupResponse, error) { out := new(RecoverKeysByLocalBackupResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/RecoverKeysByLocalBackup", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) ShareBucket(ctx context.Context, in *ShareBucketRequest, opts ...grpc.CallOption) (*ShareBucketResponse, error) { out := new(ShareBucketResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ShareBucket", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) JoinBucket(ctx context.Context, in *JoinBucketRequest, opts ...grpc.CallOption) (*JoinBucketResponse, error) { out := new(JoinBucketResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/JoinBucket", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) ShareFilesViaPublicKey(ctx context.Context, in *ShareFilesViaPublicKeyRequest, opts ...grpc.CallOption) (*ShareFilesViaPublicKeyResponse, error) { out := new(ShareFilesViaPublicKeyResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ShareFilesViaPublicKey", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) UnshareFilesViaPublicKey(ctx context.Context, in *UnshareFilesViaPublicKeyRequest, opts ...grpc.CallOption) (*UnshareFilesViaPublicKeyResponse, error) { out := new(UnshareFilesViaPublicKeyResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/UnshareFilesViaPublicKey", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) HandleFilesInvitation(ctx context.Context, in *HandleFilesInvitationRequest, opts ...grpc.CallOption) (*HandleFilesInvitationResponse, error) { out := new(HandleFilesInvitationResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/HandleFilesInvitation", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) NotificationSubscribe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (SpaceApi_NotificationSubscribeClient, error) { stream, err := c.cc.NewStream(ctx, &_SpaceApi_serviceDesc.Streams[3], "/space.SpaceApi/NotificationSubscribe", opts...) 
if err != nil { return nil, err } x := &spaceApiNotificationSubscribeClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SpaceApi_NotificationSubscribeClient interface { Recv() (*NotificationEventResponse, error) grpc.ClientStream } type spaceApiNotificationSubscribeClient struct { grpc.ClientStream } func (x *spaceApiNotificationSubscribeClient) Recv() (*NotificationEventResponse, error) { m := new(NotificationEventResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *spaceApiClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { out := new(ListBucketsResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ListBuckets", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetNotifications(ctx context.Context, in *GetNotificationsRequest, opts ...grpc.CallOption) (*GetNotificationsResponse, error) { out := new(GetNotificationsResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetNotifications", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) ReadNotification(ctx context.Context, in *ReadNotificationRequest, opts ...grpc.CallOption) (*ReadNotificationResponse, error) { out := new(ReadNotificationResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ReadNotification", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) DeleteAccount(ctx context.Context, in *DeleteAccountRequest, opts ...grpc.CallOption) (*DeleteAccountResponse, error) { out := new(DeleteAccountResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/DeleteAccount", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) ToggleBucketBackup(ctx context.Context, in *ToggleBucketBackupRequest, opts ...grpc.CallOption) (*ToggleBucketBackupResponse, error) { out := new(ToggleBucketBackupResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/ToggleBucketBackup", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) BucketBackupRestore(ctx context.Context, in *BucketBackupRestoreRequest, opts ...grpc.CallOption) (*BucketBackupRestoreResponse, error) { out := new(BucketBackupRestoreResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/BucketBackupRestore", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetUsageInfo(ctx context.Context, in *GetUsageInfoRequest, opts ...grpc.CallOption) (*GetUsageInfoResponse, error) { out := new(GetUsageInfoResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetUsageInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetAPISessionTokens(ctx context.Context, in *GetAPISessionTokensRequest, opts ...grpc.CallOption) (*GetAPISessionTokensResponse, error) { out := new(GetAPISessionTokensResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetAPISessionTokens", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GetRecentlySharedWith(ctx context.Context, in *GetRecentlySharedWithRequest, opts ...grpc.CallOption) (*GetRecentlySharedWithResponse, error) { out := new(GetRecentlySharedWithResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GetRecentlySharedWith", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) SetNotificationsLastSeenAt(ctx context.Context, in *SetNotificationsLastSeenAtRequest, opts ...grpc.CallOption) (*SetNotificationsLastSeenAtResponse, error) { out := new(SetNotificationsLastSeenAtResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/SetNotificationsLastSeenAt", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) SearchFiles(ctx context.Context, in *SearchFilesRequest, opts ...grpc.CallOption) (*SearchFilesResponse, error) { out := new(SearchFilesResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/SearchFiles", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) InitializeMasterAppToken(ctx context.Context, in *InitializeMasterAppTokenRequest, opts ...grpc.CallOption) (*InitializeMasterAppTokenResponse, error) { out := new(InitializeMasterAppTokenResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/InitializeMasterAppToken", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *spaceApiClient) GenerateAppToken(ctx context.Context, in *GenerateAppTokenRequest, opts ...grpc.CallOption) (*GenerateAppTokenResponse, error) { out := new(GenerateAppTokenResponse) err := c.cc.Invoke(ctx, "/space.SpaceApi/GenerateAppToken", in, out, opts...) if err != nil { return nil, err } return out, nil } // SpaceApiServer is the server API for SpaceApi service. type SpaceApiServer interface { // Get all folder or files in the default bucket. It fetches all subdirectories too. ListDirectories(context.Context, *ListDirectoriesRequest) (*ListDirectoriesResponse, error) // Get the folder or files in the path directory. // Unlike ListDirectories, this only returns immediate children at path. ListDirectory(context.Context, *ListDirectoryRequest) (*ListDirectoryResponse, error) // Generate Key Pair for current account. 
// NOTE(review): continuation of the generated SpaceApiServer interface —
// one method per RPC declared in the .proto; regenerate rather than edit.
// This will return error if daemon account already has keypairs GenerateKeyPair(context.Context, *GenerateKeyPairRequest) (*GenerateKeyPairResponse, error) GetStoredMnemonic(context.Context, *GetStoredMnemonicRequest) (*GetStoredMnemonicResponse, error) // Restores a keypair given a mnemonic. // This will override any existing key pair RestoreKeyPairViaMnemonic(context.Context, *RestoreKeyPairViaMnemonicRequest) (*RestoreKeyPairViaMnemonicResponse, error) DeleteKeyPair(context.Context, *DeleteKeyPairRequest) (*DeleteKeyPairResponse, error) // Force Generation of KeyPair. This will override existing keys stored in daemon. GenerateKeyPairWithForce(context.Context, *GenerateKeyPairRequest) (*GenerateKeyPairResponse, error) GetPublicKey(context.Context, *GetPublicKeyRequest) (*GetPublicKeyResponse, error) // Subscribe to file events. This streams responses to the caller Subscribe(*empty.Empty, SpaceApi_SubscribeServer) error // Subscribe to textile events. This streams responses to the caller TxlSubscribe(*empty.Empty, SpaceApi_TxlSubscribeServer) error // Open a file in the daemon. // Daemon keeps track of all open files and closes them if no activity is noticed after a while OpenFile(context.Context, *OpenFileRequest) (*OpenFileResponse, error) // Removes a file or dir from a bucket RemoveDirOrFile(context.Context, *RemoveDirOrFileRequest) (*RemoveDirOrFileResponse, error) // Generates a copy of the file that's accessible through IPFS gateways GeneratePublicFileLink(context.Context, *GeneratePublicFileLinkRequest) (*GeneratePublicFileLinkResponse, error) // Gets the files that are shared with this recipient GetSharedWithMeFiles(context.Context, *GetSharedWithMeFilesRequest) (*GetSharedWithMeFilesResponse, error) // Gets the files that are shared by the sender GetSharedByMeFiles(context.Context, *GetSharedByMeFilesRequest) (*GetSharedByMeFilesResponse, error) // Open an encrypted public shared file in the daemon.
// This requires the decryption key and file hash/cid to work OpenPublicFile(context.Context, *OpenPublicFileRequest) (*OpenPublicFileResponse, error) // Adds items (files/folders) to be uploaded to the bucket. AddItems(*AddItemsRequest, SpaceApi_AddItemsServer) error // Creates a folder/directory at the specified path CreateFolder(context.Context, *CreateFolderRequest) (*CreateFolderResponse, error) // Toggle FUSE drive to be mounted or unmounted ToggleFuseDrive(context.Context, *ToggleFuseRequest) (*FuseDriveResponse, error) // Get status of FUSE drive. If mounted or unmounted GetFuseDriveStatus(context.Context, *empty.Empty) (*FuseDriveResponse, error) // Create a new bucket owned by current user (aka keypair) CreateBucket(context.Context, *CreateBucketRequest) (*CreateBucketResponse, error) // Backup Key by Passphrase BackupKeysByPassphrase(context.Context, *BackupKeysByPassphraseRequest) (*BackupKeysByPassphraseResponse, error) // Recover Keys by Passphrase RecoverKeysByPassphrase(context.Context, *RecoverKeysByPassphraseRequest) (*RecoverKeysByPassphraseResponse, error) // Tests a passphrase to see if it matches the one previously used TestKeysPassphrase(context.Context, *TestKeysPassphraseRequest) (*TestKeysPassphraseResponse, error) CreateLocalKeysBackup(context.Context, *CreateLocalKeysBackupRequest) (*CreateLocalKeysBackupResponse, error) RecoverKeysByLocalBackup(context.Context, *RecoverKeysByLocalBackupRequest) (*RecoverKeysByLocalBackupResponse, error) // Share bucket ShareBucket(context.Context, *ShareBucketRequest) (*ShareBucketResponse, error) // Join bucket JoinBucket(context.Context, *JoinBucketRequest) (*JoinBucketResponse, error) // Share bucket via public key using Textile Hub inboxing ShareFilesViaPublicKey(context.Context, *ShareFilesViaPublicKeyRequest) (*ShareFilesViaPublicKeyResponse, error) // Remove public keys for shared files in buckets UnshareFilesViaPublicKey(context.Context, *UnshareFilesViaPublicKeyRequest)
(*UnshareFilesViaPublicKeyResponse, error) HandleFilesInvitation(context.Context, *HandleFilesInvitationRequest) (*HandleFilesInvitationResponse, error) NotificationSubscribe(*empty.Empty, SpaceApi_NotificationSubscribeServer) error ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) GetNotifications(context.Context, *GetNotificationsRequest) (*GetNotificationsResponse, error) ReadNotification(context.Context, *ReadNotificationRequest) (*ReadNotificationResponse, error) DeleteAccount(context.Context, *DeleteAccountRequest) (*DeleteAccountResponse, error) ToggleBucketBackup(context.Context, *ToggleBucketBackupRequest) (*ToggleBucketBackupResponse, error) BucketBackupRestore(context.Context, *BucketBackupRestoreRequest) (*BucketBackupRestoreResponse, error) GetUsageInfo(context.Context, *GetUsageInfoRequest) (*GetUsageInfoResponse, error) GetAPISessionTokens(context.Context, *GetAPISessionTokensRequest) (*GetAPISessionTokensResponse, error) // Returns a list of addresses / public keys of clients to which files where shared or received, ordered by date GetRecentlySharedWith(context.Context, *GetRecentlySharedWithRequest) (*GetRecentlySharedWithResponse, error) // This will set the last read timestamp for the user so that the client // can check if newer notifications are present for UX SetNotificationsLastSeenAt(context.Context, *SetNotificationsLastSeenAtRequest) (*SetNotificationsLastSeenAtResponse, error) // Search for files across all users bucket SearchFiles(context.Context, *SearchFilesRequest) (*SearchFilesResponse, error) // Initialize master app token // App tokens are used to authorize scoped access to a range of methods // Master token can only be generated once and has access to all methods InitializeMasterAppToken(context.Context, *InitializeMasterAppTokenRequest) (*InitializeMasterAppTokenResponse, error) // Generates an app token with scoped access.
GenerateAppToken(context.Context, *GenerateAppTokenRequest) (*GenerateAppTokenResponse, error) }
// NOTE(review): generated forward-compatibility embed — servers embed
// UnimplementedSpaceApiServer so newly added RPCs fail with codes.Unimplemented
// instead of breaking compilation.
// UnimplementedSpaceApiServer can be embedded to have forward compatible implementations. type UnimplementedSpaceApiServer struct { } func (*UnimplementedSpaceApiServer) ListDirectories(context.Context, *ListDirectoriesRequest) (*ListDirectoriesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListDirectories not implemented") } func (*UnimplementedSpaceApiServer) ListDirectory(context.Context, *ListDirectoryRequest) (*ListDirectoryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListDirectory not implemented") } func (*UnimplementedSpaceApiServer) GenerateKeyPair(context.Context, *GenerateKeyPairRequest) (*GenerateKeyPairResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GenerateKeyPair not implemented") } func (*UnimplementedSpaceApiServer) GetStoredMnemonic(context.Context, *GetStoredMnemonicRequest) (*GetStoredMnemonicResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetStoredMnemonic not implemented") } func (*UnimplementedSpaceApiServer) RestoreKeyPairViaMnemonic(context.Context, *RestoreKeyPairViaMnemonicRequest) (*RestoreKeyPairViaMnemonicResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RestoreKeyPairViaMnemonic not implemented") } func (*UnimplementedSpaceApiServer) DeleteKeyPair(context.Context, *DeleteKeyPairRequest) (*DeleteKeyPairResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteKeyPair not implemented") } func (*UnimplementedSpaceApiServer) GenerateKeyPairWithForce(context.Context, *GenerateKeyPairRequest) (*GenerateKeyPairResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GenerateKeyPairWithForce not implemented") } func (*UnimplementedSpaceApiServer) GetPublicKey(context.Context, *GetPublicKeyRequest) (*GetPublicKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented,
// NOTE(review): generated UnimplementedSpaceApiServer method stubs — each
// returns a codes.Unimplemented status; streaming stubs return the error
// directly instead of a (nil, error) pair. Regenerate rather than edit.
"method GetPublicKey not implemented") } func (*UnimplementedSpaceApiServer) Subscribe(*empty.Empty, SpaceApi_SubscribeServer) error { return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") } func (*UnimplementedSpaceApiServer) TxlSubscribe(*empty.Empty, SpaceApi_TxlSubscribeServer) error { return status.Errorf(codes.Unimplemented, "method TxlSubscribe not implemented") } func (*UnimplementedSpaceApiServer) OpenFile(context.Context, *OpenFileRequest) (*OpenFileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OpenFile not implemented") } func (*UnimplementedSpaceApiServer) RemoveDirOrFile(context.Context, *RemoveDirOrFileRequest) (*RemoveDirOrFileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RemoveDirOrFile not implemented") } func (*UnimplementedSpaceApiServer) GeneratePublicFileLink(context.Context, *GeneratePublicFileLinkRequest) (*GeneratePublicFileLinkResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GeneratePublicFileLink not implemented") } func (*UnimplementedSpaceApiServer) GetSharedWithMeFiles(context.Context, *GetSharedWithMeFilesRequest) (*GetSharedWithMeFilesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSharedWithMeFiles not implemented") } func (*UnimplementedSpaceApiServer) GetSharedByMeFiles(context.Context, *GetSharedByMeFilesRequest) (*GetSharedByMeFilesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSharedByMeFiles not implemented") } func (*UnimplementedSpaceApiServer) OpenPublicFile(context.Context, *OpenPublicFileRequest) (*OpenPublicFileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OpenPublicFile not implemented") } func (*UnimplementedSpaceApiServer) AddItems(*AddItemsRequest, SpaceApi_AddItemsServer) error { return status.Errorf(codes.Unimplemented, "method AddItems not implemented") } func (*UnimplementedSpaceApiServer) CreateFolder(context.Context,
*CreateFolderRequest) (*CreateFolderResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateFolder not implemented") } func (*UnimplementedSpaceApiServer) ToggleFuseDrive(context.Context, *ToggleFuseRequest) (*FuseDriveResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ToggleFuseDrive not implemented") } func (*UnimplementedSpaceApiServer) GetFuseDriveStatus(context.Context, *empty.Empty) (*FuseDriveResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetFuseDriveStatus not implemented") } func (*UnimplementedSpaceApiServer) CreateBucket(context.Context, *CreateBucketRequest) (*CreateBucketResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") } func (*UnimplementedSpaceApiServer) BackupKeysByPassphrase(context.Context, *BackupKeysByPassphraseRequest) (*BackupKeysByPassphraseResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BackupKeysByPassphrase not implemented") } func (*UnimplementedSpaceApiServer) RecoverKeysByPassphrase(context.Context, *RecoverKeysByPassphraseRequest) (*RecoverKeysByPassphraseResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverKeysByPassphrase not implemented") } func (*UnimplementedSpaceApiServer) TestKeysPassphrase(context.Context, *TestKeysPassphraseRequest) (*TestKeysPassphraseResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TestKeysPassphrase not implemented") } func (*UnimplementedSpaceApiServer) CreateLocalKeysBackup(context.Context, *CreateLocalKeysBackupRequest) (*CreateLocalKeysBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateLocalKeysBackup not implemented") } func (*UnimplementedSpaceApiServer) RecoverKeysByLocalBackup(context.Context, *RecoverKeysByLocalBackupRequest) (*RecoverKeysByLocalBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverKeysByLocalBackup not
implemented") } func (*UnimplementedSpaceApiServer) ShareBucket(context.Context, *ShareBucketRequest) (*ShareBucketResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ShareBucket not implemented") } func (*UnimplementedSpaceApiServer) JoinBucket(context.Context, *JoinBucketRequest) (*JoinBucketResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method JoinBucket not implemented") } func (*UnimplementedSpaceApiServer) ShareFilesViaPublicKey(context.Context, *ShareFilesViaPublicKeyRequest) (*ShareFilesViaPublicKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ShareFilesViaPublicKey not implemented") } func (*UnimplementedSpaceApiServer) UnshareFilesViaPublicKey(context.Context, *UnshareFilesViaPublicKeyRequest) (*UnshareFilesViaPublicKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnshareFilesViaPublicKey not implemented") } func (*UnimplementedSpaceApiServer) HandleFilesInvitation(context.Context, *HandleFilesInvitationRequest) (*HandleFilesInvitationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method HandleFilesInvitation not implemented") } func (*UnimplementedSpaceApiServer) NotificationSubscribe(*empty.Empty, SpaceApi_NotificationSubscribeServer) error { return status.Errorf(codes.Unimplemented, "method NotificationSubscribe not implemented") } func (*UnimplementedSpaceApiServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") } func (*UnimplementedSpaceApiServer) GetNotifications(context.Context, *GetNotificationsRequest) (*GetNotificationsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetNotifications not implemented") } func (*UnimplementedSpaceApiServer) ReadNotification(context.Context, *ReadNotificationRequest) (*ReadNotificationResponse, error) { return nil, status.Errorf(codes.Unimplemented,
"method ReadNotification not implemented") } func (*UnimplementedSpaceApiServer) DeleteAccount(context.Context, *DeleteAccountRequest) (*DeleteAccountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteAccount not implemented") } func (*UnimplementedSpaceApiServer) ToggleBucketBackup(context.Context, *ToggleBucketBackupRequest) (*ToggleBucketBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ToggleBucketBackup not implemented") } func (*UnimplementedSpaceApiServer) BucketBackupRestore(context.Context, *BucketBackupRestoreRequest) (*BucketBackupRestoreResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BucketBackupRestore not implemented") } func (*UnimplementedSpaceApiServer) GetUsageInfo(context.Context, *GetUsageInfoRequest) (*GetUsageInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetUsageInfo not implemented") } func (*UnimplementedSpaceApiServer) GetAPISessionTokens(context.Context, *GetAPISessionTokensRequest) (*GetAPISessionTokensResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetAPISessionTokens not implemented") } func (*UnimplementedSpaceApiServer) GetRecentlySharedWith(context.Context, *GetRecentlySharedWithRequest) (*GetRecentlySharedWithResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetRecentlySharedWith not implemented") } func (*UnimplementedSpaceApiServer) SetNotificationsLastSeenAt(context.Context, *SetNotificationsLastSeenAtRequest) (*SetNotificationsLastSeenAtResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SetNotificationsLastSeenAt not implemented") } func (*UnimplementedSpaceApiServer) SearchFiles(context.Context, *SearchFilesRequest) (*SearchFilesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SearchFiles not implemented") } func (*UnimplementedSpaceApiServer) InitializeMasterAppToken(context.Context,
*InitializeMasterAppTokenRequest) (*InitializeMasterAppTokenResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method InitializeMasterAppToken not implemented") } func (*UnimplementedSpaceApiServer) GenerateAppToken(context.Context, *GenerateAppTokenRequest) (*GenerateAppTokenResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GenerateAppToken not implemented") }
// NOTE(review): generated service registration and per-RPC dispatch handlers.
// RegisterSpaceApiServer wires srv into the grpc.Server via the service
// descriptor; each _SpaceApi_*_Handler decodes the request, then either calls
// the server method directly or routes it through the unary interceptor chain.
func RegisterSpaceApiServer(s *grpc.Server, srv SpaceApiServer) { s.RegisterService(&_SpaceApi_serviceDesc, srv) } func _SpaceApi_ListDirectories_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListDirectoriesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ListDirectories(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ListDirectories", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ListDirectories(ctx, req.(*ListDirectoriesRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_ListDirectory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListDirectoryRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ListDirectory(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ListDirectory", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ListDirectory(ctx, req.(*ListDirectoryRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GenerateKeyPair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GenerateKeyPairRequest) if err := dec(in); err !=
nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GenerateKeyPair(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GenerateKeyPair", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GenerateKeyPair(ctx, req.(*GenerateKeyPairRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetStoredMnemonic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetStoredMnemonicRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetStoredMnemonic(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetStoredMnemonic", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetStoredMnemonic(ctx, req.(*GetStoredMnemonicRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_RestoreKeyPairViaMnemonic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RestoreKeyPairViaMnemonicRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).RestoreKeyPairViaMnemonic(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/RestoreKeyPairViaMnemonic", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).RestoreKeyPairViaMnemonic(ctx, req.(*RestoreKeyPairViaMnemonicRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_DeleteKeyPair_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteKeyPairRequest) if err := dec(in); err != nil { return nil, err } if interceptor == 
nil { return srv.(SpaceApiServer).DeleteKeyPair(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/DeleteKeyPair", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).DeleteKeyPair(ctx, req.(*DeleteKeyPairRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GenerateKeyPairWithForce_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GenerateKeyPairRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GenerateKeyPairWithForce(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GenerateKeyPairWithForce", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GenerateKeyPairWithForce(ctx, req.(*GenerateKeyPairRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetPublicKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetPublicKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetPublicKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetPublicKey(ctx, req.(*GetPublicKeyRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SpaceApiServer).Subscribe(m, &spaceApiSubscribeServer{stream}) } type SpaceApi_SubscribeServer interface { Send(*FileEventResponse) error grpc.ServerStream } type spaceApiSubscribeServer struct { 
grpc.ServerStream } func (x *spaceApiSubscribeServer) Send(m *FileEventResponse) error { return x.ServerStream.SendMsg(m) } func _SpaceApi_TxlSubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SpaceApiServer).TxlSubscribe(m, &spaceApiTxlSubscribeServer{stream}) } type SpaceApi_TxlSubscribeServer interface { Send(*TextileEventResponse) error grpc.ServerStream } type spaceApiTxlSubscribeServer struct { grpc.ServerStream } func (x *spaceApiTxlSubscribeServer) Send(m *TextileEventResponse) error { return x.ServerStream.SendMsg(m) } func _SpaceApi_OpenFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OpenFileRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).OpenFile(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/OpenFile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).OpenFile(ctx, req.(*OpenFileRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_RemoveDirOrFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemoveDirOrFileRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).RemoveDirOrFile(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/RemoveDirOrFile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).RemoveDirOrFile(ctx, req.(*RemoveDirOrFileRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GeneratePublicFileLink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GeneratePublicFileLinkRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GeneratePublicFileLink(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GeneratePublicFileLink", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GeneratePublicFileLink(ctx, req.(*GeneratePublicFileLinkRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetSharedWithMeFiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetSharedWithMeFilesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetSharedWithMeFiles(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetSharedWithMeFiles", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetSharedWithMeFiles(ctx, req.(*GetSharedWithMeFilesRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetSharedByMeFiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetSharedByMeFilesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetSharedByMeFiles(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetSharedByMeFiles", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetSharedByMeFiles(ctx, req.(*GetSharedByMeFilesRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_OpenPublicFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OpenPublicFileRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).OpenPublicFile(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/OpenPublicFile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).OpenPublicFile(ctx, req.(*OpenPublicFileRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_AddItems_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(AddItemsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SpaceApiServer).AddItems(m, &spaceApiAddItemsServer{stream}) } type SpaceApi_AddItemsServer interface { Send(*AddItemsResponse) error grpc.ServerStream } type spaceApiAddItemsServer struct { grpc.ServerStream } func (x *spaceApiAddItemsServer) Send(m *AddItemsResponse) error { return x.ServerStream.SendMsg(m) } func _SpaceApi_CreateFolder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateFolderRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).CreateFolder(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/CreateFolder", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).CreateFolder(ctx, req.(*CreateFolderRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_ToggleFuseDrive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ToggleFuseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ToggleFuseDrive(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: 
"/space.SpaceApi/ToggleFuseDrive", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ToggleFuseDrive(ctx, req.(*ToggleFuseRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetFuseDriveStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetFuseDriveStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetFuseDriveStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetFuseDriveStatus(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateBucketRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).CreateBucket(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/CreateBucket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).CreateBucket(ctx, req.(*CreateBucketRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_BackupKeysByPassphrase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BackupKeysByPassphraseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).BackupKeysByPassphrase(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/BackupKeysByPassphrase", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { 
return srv.(SpaceApiServer).BackupKeysByPassphrase(ctx, req.(*BackupKeysByPassphraseRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_RecoverKeysByPassphrase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RecoverKeysByPassphraseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).RecoverKeysByPassphrase(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/RecoverKeysByPassphrase", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).RecoverKeysByPassphrase(ctx, req.(*RecoverKeysByPassphraseRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_TestKeysPassphrase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TestKeysPassphraseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).TestKeysPassphrase(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/TestKeysPassphrase", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).TestKeysPassphrase(ctx, req.(*TestKeysPassphraseRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_CreateLocalKeysBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateLocalKeysBackupRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).CreateLocalKeysBackup(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/CreateLocalKeysBackup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { 
return srv.(SpaceApiServer).CreateLocalKeysBackup(ctx, req.(*CreateLocalKeysBackupRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_RecoverKeysByLocalBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RecoverKeysByLocalBackupRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).RecoverKeysByLocalBackup(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/RecoverKeysByLocalBackup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).RecoverKeysByLocalBackup(ctx, req.(*RecoverKeysByLocalBackupRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_ShareBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ShareBucketRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ShareBucket(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ShareBucket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ShareBucket(ctx, req.(*ShareBucketRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_JoinBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(JoinBucketRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).JoinBucket(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/JoinBucket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).JoinBucket(ctx, req.(*JoinBucketRequest)) } return 
interceptor(ctx, in, info, handler) } func _SpaceApi_ShareFilesViaPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ShareFilesViaPublicKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ShareFilesViaPublicKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ShareFilesViaPublicKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ShareFilesViaPublicKey(ctx, req.(*ShareFilesViaPublicKeyRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_UnshareFilesViaPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnshareFilesViaPublicKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).UnshareFilesViaPublicKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/UnshareFilesViaPublicKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).UnshareFilesViaPublicKey(ctx, req.(*UnshareFilesViaPublicKeyRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_HandleFilesInvitation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HandleFilesInvitationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).HandleFilesInvitation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/HandleFilesInvitation", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).HandleFilesInvitation(ctx, 
req.(*HandleFilesInvitationRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_NotificationSubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SpaceApiServer).NotificationSubscribe(m, &spaceApiNotificationSubscribeServer{stream}) } type SpaceApi_NotificationSubscribeServer interface { Send(*NotificationEventResponse) error grpc.ServerStream } type spaceApiNotificationSubscribeServer struct { grpc.ServerStream } func (x *spaceApiNotificationSubscribeServer) Send(m *NotificationEventResponse) error { return x.ServerStream.SendMsg(m) } func _SpaceApi_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListBucketsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ListBuckets(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ListBuckets", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ListBuckets(ctx, req.(*ListBucketsRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetNotifications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetNotificationsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetNotifications(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetNotifications", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetNotifications(ctx, req.(*GetNotificationsRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_ReadNotification_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadNotificationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ReadNotification(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ReadNotification", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ReadNotification(ctx, req.(*ReadNotificationRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_DeleteAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteAccountRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).DeleteAccount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/DeleteAccount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).DeleteAccount(ctx, req.(*DeleteAccountRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_ToggleBucketBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ToggleBucketBackupRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).ToggleBucketBackup(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/ToggleBucketBackup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).ToggleBucketBackup(ctx, req.(*ToggleBucketBackupRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_BucketBackupRestore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := 
new(BucketBackupRestoreRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).BucketBackupRestore(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/BucketBackupRestore", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).BucketBackupRestore(ctx, req.(*BucketBackupRestoreRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetUsageInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetUsageInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetUsageInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetUsageInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetUsageInfo(ctx, req.(*GetUsageInfoRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetAPISessionTokens_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetAPISessionTokensRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GetAPISessionTokens(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetAPISessionTokens", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetAPISessionTokens(ctx, req.(*GetAPISessionTokensRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GetRecentlySharedWith_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetRecentlySharedWithRequest) if err := dec(in); err != nil { return nil, 
err } if interceptor == nil { return srv.(SpaceApiServer).GetRecentlySharedWith(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GetRecentlySharedWith", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GetRecentlySharedWith(ctx, req.(*GetRecentlySharedWithRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_SetNotificationsLastSeenAt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SetNotificationsLastSeenAtRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).SetNotificationsLastSeenAt(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/SetNotificationsLastSeenAt", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).SetNotificationsLastSeenAt(ctx, req.(*SetNotificationsLastSeenAtRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_SearchFiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SearchFilesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).SearchFiles(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/SearchFiles", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).SearchFiles(ctx, req.(*SearchFilesRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_InitializeMasterAppToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(InitializeMasterAppTokenRequest) if err := dec(in); err != nil { return nil, err } if interceptor == 
nil { return srv.(SpaceApiServer).InitializeMasterAppToken(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/InitializeMasterAppToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).InitializeMasterAppToken(ctx, req.(*InitializeMasterAppTokenRequest)) } return interceptor(ctx, in, info, handler) } func _SpaceApi_GenerateAppToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GenerateAppTokenRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SpaceApiServer).GenerateAppToken(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/space.SpaceApi/GenerateAppToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SpaceApiServer).GenerateAppToken(ctx, req.(*GenerateAppTokenRequest)) } return interceptor(ctx, in, info, handler) } var _SpaceApi_serviceDesc = grpc.ServiceDesc{ ServiceName: "space.SpaceApi", HandlerType: (*SpaceApiServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "ListDirectories", Handler: _SpaceApi_ListDirectories_Handler, }, { MethodName: "ListDirectory", Handler: _SpaceApi_ListDirectory_Handler, }, { MethodName: "GenerateKeyPair", Handler: _SpaceApi_GenerateKeyPair_Handler, }, { MethodName: "GetStoredMnemonic", Handler: _SpaceApi_GetStoredMnemonic_Handler, }, { MethodName: "RestoreKeyPairViaMnemonic", Handler: _SpaceApi_RestoreKeyPairViaMnemonic_Handler, }, { MethodName: "DeleteKeyPair", Handler: _SpaceApi_DeleteKeyPair_Handler, }, { MethodName: "GenerateKeyPairWithForce", Handler: _SpaceApi_GenerateKeyPairWithForce_Handler, }, { MethodName: "GetPublicKey", Handler: _SpaceApi_GetPublicKey_Handler, }, { MethodName: "OpenFile", Handler: _SpaceApi_OpenFile_Handler, }, { MethodName: "RemoveDirOrFile", Handler: _SpaceApi_RemoveDirOrFile_Handler, }, { MethodName: 
"GeneratePublicFileLink", Handler: _SpaceApi_GeneratePublicFileLink_Handler, }, { MethodName: "GetSharedWithMeFiles", Handler: _SpaceApi_GetSharedWithMeFiles_Handler, }, { MethodName: "GetSharedByMeFiles", Handler: _SpaceApi_GetSharedByMeFiles_Handler, }, { MethodName: "OpenPublicFile", Handler: _SpaceApi_OpenPublicFile_Handler, }, { MethodName: "CreateFolder", Handler: _SpaceApi_CreateFolder_Handler, }, { MethodName: "ToggleFuseDrive", Handler: _SpaceApi_ToggleFuseDrive_Handler, }, { MethodName: "GetFuseDriveStatus", Handler: _SpaceApi_GetFuseDriveStatus_Handler, }, { MethodName: "CreateBucket", Handler: _SpaceApi_CreateBucket_Handler, }, { MethodName: "BackupKeysByPassphrase", Handler: _SpaceApi_BackupKeysByPassphrase_Handler, }, { MethodName: "RecoverKeysByPassphrase", Handler: _SpaceApi_RecoverKeysByPassphrase_Handler, }, { MethodName: "TestKeysPassphrase", Handler: _SpaceApi_TestKeysPassphrase_Handler, }, { MethodName: "CreateLocalKeysBackup", Handler: _SpaceApi_CreateLocalKeysBackup_Handler, }, { MethodName: "RecoverKeysByLocalBackup", Handler: _SpaceApi_RecoverKeysByLocalBackup_Handler, }, { MethodName: "ShareBucket", Handler: _SpaceApi_ShareBucket_Handler, }, { MethodName: "JoinBucket", Handler: _SpaceApi_JoinBucket_Handler, }, { MethodName: "ShareFilesViaPublicKey", Handler: _SpaceApi_ShareFilesViaPublicKey_Handler, }, { MethodName: "UnshareFilesViaPublicKey", Handler: _SpaceApi_UnshareFilesViaPublicKey_Handler, }, { MethodName: "HandleFilesInvitation", Handler: _SpaceApi_HandleFilesInvitation_Handler, }, { MethodName: "ListBuckets", Handler: _SpaceApi_ListBuckets_Handler, }, { MethodName: "GetNotifications", Handler: _SpaceApi_GetNotifications_Handler, }, { MethodName: "ReadNotification", Handler: _SpaceApi_ReadNotification_Handler, }, { MethodName: "DeleteAccount", Handler: _SpaceApi_DeleteAccount_Handler, }, { MethodName: "ToggleBucketBackup", Handler: _SpaceApi_ToggleBucketBackup_Handler, }, { MethodName: "BucketBackupRestore", Handler: 
_SpaceApi_BucketBackupRestore_Handler, }, { MethodName: "GetUsageInfo", Handler: _SpaceApi_GetUsageInfo_Handler, }, { MethodName: "GetAPISessionTokens", Handler: _SpaceApi_GetAPISessionTokens_Handler, }, { MethodName: "GetRecentlySharedWith", Handler: _SpaceApi_GetRecentlySharedWith_Handler, }, { MethodName: "SetNotificationsLastSeenAt", Handler: _SpaceApi_SetNotificationsLastSeenAt_Handler, }, { MethodName: "SearchFiles", Handler: _SpaceApi_SearchFiles_Handler, }, { MethodName: "InitializeMasterAppToken", Handler: _SpaceApi_InitializeMasterAppToken_Handler, }, { MethodName: "GenerateAppToken", Handler: _SpaceApi_GenerateAppToken_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Subscribe", Handler: _SpaceApi_Subscribe_Handler, ServerStreams: true, }, { StreamName: "TxlSubscribe", Handler: _SpaceApi_TxlSubscribe_Handler, ServerStreams: true, }, { StreamName: "AddItems", Handler: _SpaceApi_AddItems_Handler, ServerStreams: true, }, { StreamName: "NotificationSubscribe", Handler: _SpaceApi_NotificationSubscribe_Handler, ServerStreams: true, }, }, Metadata: "space.proto", } ================================================ FILE: grpc/pb/space.pb.gw.go ================================================ // Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: space.proto /* Package pb is a reverse proxy. It translates gRPC into RESTful JSON APIs. 
*/ package pb import ( "context" "io" "net/http" "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/empty" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/grpc-ecosystem/grpc-gateway/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) // Suppress "imported and not used" errors var _ codes.Code var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray var _ = descriptor.ForMessage var ( filter_SpaceApi_ListDirectories_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_ListDirectories_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListDirectoriesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_ListDirectories_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.ListDirectories(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ListDirectories_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListDirectoriesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_ListDirectories_0); err != nil { return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.ListDirectories(ctx, &protoReq) return msg, metadata, err } var ( filter_SpaceApi_ListDirectory_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_ListDirectory_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListDirectoryRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_ListDirectory_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.ListDirectory(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ListDirectory_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListDirectoryRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_ListDirectory_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.ListDirectory(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GenerateKeyPair_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GenerateKeyPairRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GenerateKeyPair(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GenerateKeyPair_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GenerateKeyPairRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GenerateKeyPair(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GetStoredMnemonic_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetStoredMnemonicRequest var metadata runtime.ServerMetadata msg, err := client.GetStoredMnemonic(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetStoredMnemonic_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetStoredMnemonicRequest var metadata runtime.ServerMetadata msg, err := server.GetStoredMnemonic(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_RestoreKeyPairViaMnemonic_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RestoreKeyPairViaMnemonicRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RestoreKeyPairViaMnemonic(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_RestoreKeyPairViaMnemonic_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RestoreKeyPairViaMnemonicRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RestoreKeyPairViaMnemonic(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_DeleteKeyPair_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq DeleteKeyPairRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteKeyPair(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_DeleteKeyPair_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq DeleteKeyPairRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.DeleteKeyPair(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GenerateKeyPairWithForce_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GenerateKeyPairRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GenerateKeyPairWithForce(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GenerateKeyPairWithForce_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GenerateKeyPairRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GenerateKeyPairWithForce(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GetPublicKey_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetPublicKeyRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GetPublicKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetPublicKey_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetPublicKeyRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GetPublicKey(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_Subscribe_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (SpaceApi_SubscribeClient, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata stream, err := client.Subscribe(ctx, &protoReq) if err != nil { return nil, 
metadata, err } header, err := stream.Header() if err != nil { return nil, metadata, err } metadata.HeaderMD = header return stream, metadata, nil } func request_SpaceApi_TxlSubscribe_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (SpaceApi_TxlSubscribeClient, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata stream, err := client.TxlSubscribe(ctx, &protoReq) if err != nil { return nil, metadata, err } header, err := stream.Header() if err != nil { return nil, metadata, err } metadata.HeaderMD = header return stream, metadata, nil } func request_SpaceApi_OpenFile_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq OpenFileRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.OpenFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_OpenFile_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq OpenFileRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.OpenFile(ctx, 
&protoReq) return msg, metadata, err } var ( filter_SpaceApi_RemoveDirOrFile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_RemoveDirOrFile_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RemoveDirOrFileRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_RemoveDirOrFile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RemoveDirOrFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_RemoveDirOrFile_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RemoveDirOrFileRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_RemoveDirOrFile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RemoveDirOrFile(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GeneratePublicFileLink_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GeneratePublicFileLinkRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["bucket"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bucket") } protoReq.Bucket, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bucket", err) } msg, err := client.GeneratePublicFileLink(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GeneratePublicFileLink_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GeneratePublicFileLinkRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["bucket"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bucket") } protoReq.Bucket, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bucket", err) } msg, err := server.GeneratePublicFileLink(ctx, &protoReq) return msg, metadata, err } var ( filter_SpaceApi_GetSharedWithMeFiles_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_GetSharedWithMeFiles_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, 
req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetSharedWithMeFilesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_GetSharedWithMeFiles_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GetSharedWithMeFiles(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetSharedWithMeFiles_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetSharedWithMeFilesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_GetSharedWithMeFiles_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GetSharedWithMeFiles(ctx, &protoReq) return msg, metadata, err } var ( filter_SpaceApi_GetSharedByMeFiles_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_GetSharedByMeFiles_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetSharedByMeFilesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_GetSharedByMeFiles_0); err != nil { return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GetSharedByMeFiles(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetSharedByMeFiles_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetSharedByMeFilesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_GetSharedByMeFiles_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GetSharedByMeFiles(ctx, &protoReq) return msg, metadata, err } var ( filter_SpaceApi_OpenPublicFile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_OpenPublicFile_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq OpenPublicFileRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_OpenPublicFile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.OpenPublicFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_OpenPublicFile_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq OpenPublicFileRequest var metadata 
runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_OpenPublicFile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.OpenPublicFile(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_AddItems_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (SpaceApi_AddItemsClient, runtime.ServerMetadata, error) { var protoReq AddItemsRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.AddItems(ctx, &protoReq) if err != nil { return nil, metadata, err } header, err := stream.Header() if err != nil { return nil, metadata, err } metadata.HeaderMD = header return stream, metadata, nil } func request_SpaceApi_CreateFolder_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq CreateFolderRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.CreateFolder(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_CreateFolder_0(ctx 
context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq CreateFolderRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.CreateFolder(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_ToggleFuseDrive_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ToggleFuseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.ToggleFuseDrive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ToggleFuseDrive_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ToggleFuseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.ToggleFuseDrive(ctx, 
&protoReq) return msg, metadata, err } func request_SpaceApi_GetFuseDriveStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata msg, err := client.GetFuseDriveStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetFuseDriveStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata msg, err := server.GetFuseDriveStatus(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_CreateBucket_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq CreateBucketRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.CreateBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_CreateBucket_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq CreateBucketRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.CreateBucket(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_BackupKeysByPassphrase_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq BackupKeysByPassphraseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.BackupKeysByPassphrase(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_BackupKeysByPassphrase_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq BackupKeysByPassphraseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.BackupKeysByPassphrase(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_RecoverKeysByPassphrase_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RecoverKeysByPassphraseRequest var metadata runtime.ServerMetadata 
newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RecoverKeysByPassphrase(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_RecoverKeysByPassphrase_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RecoverKeysByPassphraseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RecoverKeysByPassphrase(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_TestKeysPassphrase_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq TestKeysPassphraseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.TestKeysPassphrase(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_TestKeysPassphrase_0(ctx context.Context, marshaler 
runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq TestKeysPassphraseRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.TestKeysPassphrase(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_CreateLocalKeysBackup_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq CreateLocalKeysBackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.CreateLocalKeysBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_CreateLocalKeysBackup_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq CreateLocalKeysBackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := 
server.CreateLocalKeysBackup(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_RecoverKeysByLocalBackup_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RecoverKeysByLocalBackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RecoverKeysByLocalBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_RecoverKeysByLocalBackup_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq RecoverKeysByLocalBackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RecoverKeysByLocalBackup(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_ShareBucket_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ShareBucketRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["bucket"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bucket") } protoReq.Bucket, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bucket", err) } msg, err := client.ShareBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ShareBucket_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ShareBucketRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["bucket"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bucket") } protoReq.Bucket, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bucket", err) } msg, err := server.ShareBucket(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_JoinBucket_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq JoinBucketRequest var metadata runtime.ServerMetadata newReader, berr := 
utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["bucket"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bucket") } protoReq.Bucket, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bucket", err) } msg, err := client.JoinBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_JoinBucket_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq JoinBucketRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["bucket"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bucket") } protoReq.Bucket, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bucket", err) } msg, err := server.JoinBucket(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_ShareFilesViaPublicKey_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { var protoReq ShareFilesViaPublicKeyRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.ShareFilesViaPublicKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ShareFilesViaPublicKey_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ShareFilesViaPublicKeyRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.ShareFilesViaPublicKey(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_UnshareFilesViaPublicKey_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq UnshareFilesViaPublicKeyRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UnshareFilesViaPublicKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_UnshareFilesViaPublicKey_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq UnshareFilesViaPublicKeyRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UnshareFilesViaPublicKey(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_HandleFilesInvitation_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq HandleFilesInvitationRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["invitationID"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "invitationID") } protoReq.InvitationID, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "invitationID", err) } msg, err := client.HandleFilesInvitation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_HandleFilesInvitation_0(ctx context.Context, marshaler 
runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq HandleFilesInvitationRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["invitationID"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "invitationID") } protoReq.InvitationID, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "invitationID", err) } msg, err := server.HandleFilesInvitation(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_NotificationSubscribe_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (SpaceApi_NotificationSubscribeClient, runtime.ServerMetadata, error) { var protoReq empty.Empty var metadata runtime.ServerMetadata stream, err := client.NotificationSubscribe(ctx, &protoReq) if err != nil { return nil, metadata, err } header, err := stream.Header() if err != nil { return nil, metadata, err } metadata.HeaderMD = header return stream, metadata, nil } func request_SpaceApi_ListBuckets_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListBucketsRequest var metadata runtime.ServerMetadata msg, err := client.ListBuckets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ListBuckets_0(ctx 
context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListBucketsRequest var metadata runtime.ServerMetadata msg, err := server.ListBuckets(ctx, &protoReq) return msg, metadata, err } var ( filter_SpaceApi_GetNotifications_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_GetNotifications_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetNotificationsRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_GetNotifications_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GetNotifications(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetNotifications_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetNotificationsRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_GetNotifications_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GetNotifications(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_ReadNotification_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ReadNotificationRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["ID"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "ID") } protoReq.ID, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ID", err) } msg, err := client.ReadNotification(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ReadNotification_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ReadNotificationRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( val string ok bool err error _ = err ) val, ok = pathParams["ID"] if !ok { return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "ID") } protoReq.ID, err = runtime.String(val) if err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ID", err) } msg, err := server.ReadNotification(ctx, &protoReq) return msg, metadata, err } func 
request_SpaceApi_DeleteAccount_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq DeleteAccountRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteAccount(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_DeleteAccount_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq DeleteAccountRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.DeleteAccount(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_ToggleBucketBackup_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ToggleBucketBackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } 
// ToggleBucketBackup (local variant) and BucketBackupRestore adapters — same generated pattern as above in this file:
// body is re-readable via utilities.IOReaderFactory, JSON-decoded into the proto request (io.EOF tolerated for an
// empty body), then dispatched either over the gRPC client (with header/trailer metadata capture) or straight to
// the in-process SpaceApiServer. Generated code; do not hand-edit.
msg, err := client.ToggleBucketBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_ToggleBucketBackup_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ToggleBucketBackupRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.ToggleBucketBackup(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_BucketBackupRestore_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq BucketBackupRestoreRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.BucketBackupRestore(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_BucketBackupRestore_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq BucketBackupRestoreRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, 
// GetUsageInfo / GetAPISessionTokens / GetRecentlySharedWith adapters. These request messages carry no fields read
// from the HTTP request, so no body decoding or query-parameter population happens here — a zero-value proto request
// is forwarded as-is. The client variants still capture gRPC header/trailer metadata; the local_request_* variants
// call the in-process SpaceApiServer. Generated code; do not hand-edit.
status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.BucketBackupRestore(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GetUsageInfo_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetUsageInfoRequest var metadata runtime.ServerMetadata msg, err := client.GetUsageInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetUsageInfo_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetUsageInfoRequest var metadata runtime.ServerMetadata msg, err := server.GetUsageInfo(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GetAPISessionTokens_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetAPISessionTokensRequest var metadata runtime.ServerMetadata msg, err := client.GetAPISessionTokens(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetAPISessionTokens_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetAPISessionTokensRequest var metadata runtime.ServerMetadata msg, err := server.GetAPISessionTokens(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GetRecentlySharedWith_0(ctx context.Context, marshaler runtime.Marshaler, 
// GetRecentlySharedWith (no request fields read from HTTP) and SetNotificationsLastSeenAt (JSON body decoded,
// io.EOF tolerated for an empty body) adapters — client variants forward over gRPC with header/trailer metadata
// capture, local_request_* variants call the in-process SpaceApiServer. Generated code; do not hand-edit.
client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetRecentlySharedWithRequest var metadata runtime.ServerMetadata msg, err := client.GetRecentlySharedWith(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GetRecentlySharedWith_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetRecentlySharedWithRequest var metadata runtime.ServerMetadata msg, err := server.GetRecentlySharedWith(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_SetNotificationsLastSeenAt_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq SetNotificationsLastSeenAtRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.SetNotificationsLastSeenAt(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_SetNotificationsLastSeenAt_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq SetNotificationsLastSeenAtRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := 
// SearchFiles adapters: unlike the body-decoding handlers in this file, SearchFiles is a GET-style call — parameters
// come from the URL query string via req.ParseForm + runtime.PopulateQueryParameters, using the (empty) DoubleArray
// filter declared just above to control which fields may be populated. Any parse/populate failure maps to
// codes.InvalidArgument. Also begins InitializeMasterAppToken (body-decoding pattern). Generated code; do not hand-edit.
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.SetNotificationsLastSeenAt(ctx, &protoReq) return msg, metadata, err } var ( filter_SpaceApi_SearchFiles_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) func request_SpaceApi_SearchFiles_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq SearchFilesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_SearchFiles_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.SearchFiles(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_SearchFiles_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq SearchFilesRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SpaceApi_SearchFiles_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.SearchFiles(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_InitializeMasterAppToken_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq InitializeMasterAppTokenRequest var 
// InitializeMasterAppToken and GenerateAppToken adapters — standard body-decoding pattern used throughout this file:
// JSON body decoded into the proto request (io.EOF tolerated so an empty body is accepted), decode failures mapped to
// codes.InvalidArgument; client variants capture gRPC header/trailer metadata, local_request_* variants call the
// in-process SpaceApiServer. Generated code; do not hand-edit.
metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.InitializeMasterAppToken(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_InitializeMasterAppToken_0(ctx context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq InitializeMasterAppTokenRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.InitializeMasterAppToken(ctx, &protoReq) return msg, metadata, err } func request_SpaceApi_GenerateAppToken_0(ctx context.Context, marshaler runtime.Marshaler, client SpaceApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GenerateAppTokenRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GenerateAppToken(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func local_request_SpaceApi_GenerateAppToken_0(ctx 
// End of local_request_SpaceApi_GenerateAppToken_0, then the opening of RegisterSpaceApiHandlerServer, which wires
// each route pattern to its local_request_* adapter via mux.Handle: per request it derives a cancellable context,
// annotates the incoming context, invokes the in-process handler, stores returned metadata on the context, and
// forwards the response (errors go through runtime.HTTPError). Generated code; do not hand-edit.
context.Context, marshaler runtime.Marshaler, server SpaceApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GenerateAppTokenRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.GenerateAppToken(ctx, &protoReq) return msg, metadata, err } // RegisterSpaceApiHandlerServer registers the http handlers for service SpaceApi to "mux". // UnaryRPC :call SpaceApiServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterSpaceApiHandlerFromEndpoint instead. func RegisterSpaceApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SpaceApiServer) error { mux.Handle("GET", pattern_SpaceApi_ListDirectories_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ListDirectories_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ListDirectories_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_ListDirectory_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ListDirectory_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ListDirectory_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_GenerateKeyPair_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GenerateKeyPair_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GenerateKeyPair_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetStoredMnemonic_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetStoredMnemonic_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetStoredMnemonic_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_RestoreKeyPairViaMnemonic_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_RestoreKeyPairViaMnemonic_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RestoreKeyPairViaMnemonic_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_DeleteKeyPair_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_DeleteKeyPair_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_DeleteKeyPair_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_GenerateKeyPairWithForce_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GenerateKeyPairWithForce_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GenerateKeyPairWithForce_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_GetPublicKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetPublicKey_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetPublicKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_Subscribe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) mux.Handle("GET", pattern_SpaceApi_TxlSubscribe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) mux.Handle("POST", pattern_SpaceApi_OpenFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_OpenFile_0(rctx, inboundMarshaler, 
server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_OpenFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("DELETE", pattern_SpaceApi_RemoveDirOrFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_RemoveDirOrFile_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RemoveDirOrFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_GeneratePublicFileLink_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GeneratePublicFileLink_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GeneratePublicFileLink_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetSharedWithMeFiles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetSharedWithMeFiles_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetSharedWithMeFiles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetSharedByMeFiles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetSharedByMeFiles_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetSharedByMeFiles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_OpenPublicFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_OpenPublicFile_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_OpenPublicFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_AddItems_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) mux.Handle("POST", pattern_SpaceApi_CreateFolder_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_CreateFolder_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_CreateFolder_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ToggleFuseDrive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ToggleFuseDrive_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ToggleFuseDrive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetFuseDriveStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetFuseDriveStatus_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetFuseDriveStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_CreateBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_CreateBucket_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_CreateBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_BackupKeysByPassphrase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_BackupKeysByPassphrase_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_BackupKeysByPassphrase_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_RecoverKeysByPassphrase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_RecoverKeysByPassphrase_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RecoverKeysByPassphrase_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_TestKeysPassphrase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_TestKeysPassphrase_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_TestKeysPassphrase_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_CreateLocalKeysBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_CreateLocalKeysBackup_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_CreateLocalKeysBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_RecoverKeysByLocalBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_RecoverKeysByLocalBackup_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RecoverKeysByLocalBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ShareBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ShareBucket_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ShareBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_JoinBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_JoinBucket_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_JoinBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ShareFilesViaPublicKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ShareFilesViaPublicKey_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ShareFilesViaPublicKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_UnshareFilesViaPublicKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_UnshareFilesViaPublicKey_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_UnshareFilesViaPublicKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_HandleFilesInvitation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_HandleFilesInvitation_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_HandleFilesInvitation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_NotificationSubscribe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) mux.Handle("GET", pattern_SpaceApi_ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ListBuckets_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ListBuckets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetNotifications_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetNotifications_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetNotifications_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_ReadNotification_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ReadNotification_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ReadNotification_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_DeleteAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_DeleteAccount_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_DeleteAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_ToggleBucketBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_ToggleBucketBackup_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ToggleBucketBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_BucketBackupRestore_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_BucketBackupRestore_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_BucketBackupRestore_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetUsageInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetUsageInfo_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetUsageInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetAPISessionTokens_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetAPISessionTokens_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetAPISessionTokens_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetRecentlySharedWith_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_GetRecentlySharedWith_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetRecentlySharedWith_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_SetNotificationsLastSeenAt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_SetNotificationsLastSeenAt_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_SetNotificationsLastSeenAt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_SearchFiles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := local_request_SpaceApi_SearchFiles_0(rctx, inboundMarshaler, server, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_SearchFiles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
	})

	mux.Handle("POST", pattern_SpaceApi_InitializeMasterAppToken_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		// Cancel the request-scoped context as soon as this handler returns.
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		// In-process call: invokes the server implementation directly, no gRPC hop.
		resp, md, err := local_request_SpaceApi_InitializeMasterAppToken_0(rctx, inboundMarshaler, server, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_SpaceApi_InitializeMasterAppToken_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	mux.Handle("POST", pattern_SpaceApi_GenerateAppToken_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		// Cancel the request-scoped context as soon as this handler returns.
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		// In-process call: invokes the server implementation directly, no gRPC hop.
		resp, md, err := local_request_SpaceApi_GenerateAppToken_0(rctx, inboundMarshaler, server, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_SpaceApi_GenerateAppToken_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	return nil
}

// RegisterSpaceApiHandlerFromEndpoint is same as RegisterSpaceApiHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterSpaceApiHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
	conn, err := grpc.Dial(endpoint, opts...)
	if err != nil {
		return err
	}
	// The deferred closure reads the named return "err": if registration below
	// fails, the connection is closed immediately; on success, a goroutine is
	// started that closes the connection once "ctx" is done.
	defer func() {
		if err != nil {
			if cerr := conn.Close(); cerr != nil {
				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
			}
			return
		}
		go func() {
			<-ctx.Done()
			if cerr := conn.Close(); cerr != nil {
				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
			}
		}()
	}()

	return RegisterSpaceApiHandler(ctx, mux, conn)
}

// RegisterSpaceApiHandler registers the http handlers for service SpaceApi to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterSpaceApiHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterSpaceApiHandlerClient(ctx, mux, NewSpaceApiClient(conn))
}

// RegisterSpaceApiHandlerClient registers the http handlers for service SpaceApi
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SpaceApiClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SpaceApiClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "SpaceApiClient" to call the correct interceptors.
func RegisterSpaceApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SpaceApiClient) error { mux.Handle("GET", pattern_SpaceApi_ListDirectories_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ListDirectories_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ListDirectories_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_ListDirectory_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ListDirectory_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ListDirectory_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_GenerateKeyPair_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GenerateKeyPair_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GenerateKeyPair_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetStoredMnemonic_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetStoredMnemonic_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetStoredMnemonic_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_RestoreKeyPairViaMnemonic_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_RestoreKeyPairViaMnemonic_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RestoreKeyPairViaMnemonic_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_DeleteKeyPair_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_DeleteKeyPair_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_DeleteKeyPair_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_GenerateKeyPairWithForce_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GenerateKeyPairWithForce_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GenerateKeyPairWithForce_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_GetPublicKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetPublicKey_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetPublicKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_Subscribe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_Subscribe_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_Subscribe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_TxlSubscribe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_TxlSubscribe_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_TxlSubscribe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_OpenFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_OpenFile_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_OpenFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("DELETE", pattern_SpaceApi_RemoveDirOrFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_RemoveDirOrFile_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RemoveDirOrFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_GeneratePublicFileLink_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GeneratePublicFileLink_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GeneratePublicFileLink_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetSharedWithMeFiles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetSharedWithMeFiles_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetSharedWithMeFiles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetSharedByMeFiles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetSharedByMeFiles_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetSharedByMeFiles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_OpenPublicFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_OpenPublicFile_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_OpenPublicFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_AddItems_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_AddItems_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_AddItems_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_CreateFolder_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_CreateFolder_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_CreateFolder_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ToggleFuseDrive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ToggleFuseDrive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ToggleFuseDrive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetFuseDriveStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetFuseDriveStatus_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetFuseDriveStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_CreateBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_CreateBucket_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_CreateBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_BackupKeysByPassphrase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_BackupKeysByPassphrase_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_BackupKeysByPassphrase_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_RecoverKeysByPassphrase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_RecoverKeysByPassphrase_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RecoverKeysByPassphrase_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_TestKeysPassphrase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_TestKeysPassphrase_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_TestKeysPassphrase_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_CreateLocalKeysBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_CreateLocalKeysBackup_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_CreateLocalKeysBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_RecoverKeysByLocalBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_RecoverKeysByLocalBackup_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_RecoverKeysByLocalBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ShareBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ShareBucket_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ShareBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_JoinBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_JoinBucket_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_JoinBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ShareFilesViaPublicKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ShareFilesViaPublicKey_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ShareFilesViaPublicKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_UnshareFilesViaPublicKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_UnshareFilesViaPublicKey_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_UnshareFilesViaPublicKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_HandleFilesInvitation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_HandleFilesInvitation_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_HandleFilesInvitation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_NotificationSubscribe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_NotificationSubscribe_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_NotificationSubscribe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ListBuckets_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ListBuckets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetNotifications_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetNotifications_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetNotifications_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ReadNotification_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ReadNotification_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ReadNotification_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_DeleteAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_DeleteAccount_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_DeleteAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_ToggleBucketBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_ToggleBucketBackup_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_ToggleBucketBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_BucketBackupRestore_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_BucketBackupRestore_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_BucketBackupRestore_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetUsageInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetUsageInfo_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetUsageInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("GET", pattern_SpaceApi_GetAPISessionTokens_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetAPISessionTokens_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetAPISessionTokens_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_GetRecentlySharedWith_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GetRecentlySharedWith_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GetRecentlySharedWith_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_SetNotificationsLastSeenAt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_SetNotificationsLastSeenAt_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_SetNotificationsLastSeenAt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("GET", pattern_SpaceApi_SearchFiles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_SearchFiles_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_SearchFiles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_SpaceApi_InitializeMasterAppToken_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_InitializeMasterAppToken_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_InitializeMasterAppToken_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_SpaceApi_GenerateAppToken_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_SpaceApi_GenerateAppToken_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_SpaceApi_GenerateAppToken_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) return nil } var ( pattern_SpaceApi_ListDirectories_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "directories", "all"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ListDirectory_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "directories"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GenerateKeyPair_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "keypairs", "generate"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetStoredMnemonic_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "keypairs", "mnemonic"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_RestoreKeyPairViaMnemonic_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "keypairs", "restoreWithMnemonic"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_DeleteKeyPair_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "keypairs", "delete"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GenerateKeyPairWithForce_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, 
[]string{"v1", "keypairs", "forceGenerate"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetPublicKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "publicKey"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_Subscribe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "subscriptions", "file"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_TxlSubscribe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "subscriptions", "textile"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_OpenFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "files", "open"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_RemoveDirOrFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "files"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GeneratePublicFileLink_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "buckets", "bucket", "generatePublicFileLink"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetSharedWithMeFiles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "files", "sharedWithMe"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetSharedByMeFiles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "files", "sharedByMe"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_OpenPublicFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "files", "openPublic"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_AddItems_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "files"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_CreateFolder_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", 
"directories"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ToggleFuseDrive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "toggleFuse"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetFuseDriveStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "fuse"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_CreateBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "buckets"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_BackupKeysByPassphrase_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "passphrases", "backup"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_RecoverKeysByPassphrase_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "passphrases", "recover"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_TestKeysPassphrase_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "passphrases", "test"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_CreateLocalKeysBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "localBackups", "backup"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_RecoverKeysByLocalBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "localBackups", "recover"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ShareBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "buckets", "bucket", "share"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_JoinBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "buckets", "bucket", "join"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ShareFilesViaPublicKey_0 = runtime.MustPattern(runtime.NewPattern(1, 
[]int{2, 0, 2, 1}, []string{"v1", "shareFilesViaPublicKey"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_UnshareFilesViaPublicKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "unshareFilesViaPublicKey"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_HandleFilesInvitation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "filesinvitation", "invitationID"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_NotificationSubscribe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "subscriptions", "notification"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ListBuckets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "buckets"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetNotifications_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "notifications"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ReadNotification_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "notifications", "ID", "read"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_DeleteAccount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "deleteAccount"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_ToggleBucketBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backup"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_BucketBackupRestore_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backup", "restore"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetUsageInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "usage"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetAPISessionTokens_0 = runtime.MustPattern(runtime.NewPattern(1, 
[]int{2, 0, 2, 1}, []string{"v1", "apiSessionTokens"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GetRecentlySharedWith_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "sharedWithList"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_SetNotificationsLastSeenAt_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "notifications", "lastSeenAt"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_SearchFiles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "search", "files"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_InitializeMasterAppToken_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "appTokens", "master"}, "", runtime.AssumeColonVerbOpt(true))) pattern_SpaceApi_GenerateAppToken_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "appTokens"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( forward_SpaceApi_ListDirectories_0 = runtime.ForwardResponseMessage forward_SpaceApi_ListDirectory_0 = runtime.ForwardResponseMessage forward_SpaceApi_GenerateKeyPair_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetStoredMnemonic_0 = runtime.ForwardResponseMessage forward_SpaceApi_RestoreKeyPairViaMnemonic_0 = runtime.ForwardResponseMessage forward_SpaceApi_DeleteKeyPair_0 = runtime.ForwardResponseMessage forward_SpaceApi_GenerateKeyPairWithForce_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetPublicKey_0 = runtime.ForwardResponseMessage forward_SpaceApi_Subscribe_0 = runtime.ForwardResponseStream forward_SpaceApi_TxlSubscribe_0 = runtime.ForwardResponseStream forward_SpaceApi_OpenFile_0 = runtime.ForwardResponseMessage forward_SpaceApi_RemoveDirOrFile_0 = runtime.ForwardResponseMessage forward_SpaceApi_GeneratePublicFileLink_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetSharedWithMeFiles_0 = runtime.ForwardResponseMessage 
/* Generated response forwarders (interior of the `var ( forward_SpaceApi_... )` block).
   Unary RPCs are bound to runtime.ForwardResponseMessage; server-streaming RPCs (AddItems,
   NotificationSubscribe — and, visible on the preceding line, Subscribe/TxlSubscribe) are bound
   to runtime.ForwardResponseStream, matching the `stream` responses declared in space.proto.
   NOTE(review): generated by protoc-gen-grpc-gateway — do not edit by hand; regenerate instead. */
forward_SpaceApi_GetSharedByMeFiles_0 = runtime.ForwardResponseMessage forward_SpaceApi_OpenPublicFile_0 = runtime.ForwardResponseMessage forward_SpaceApi_AddItems_0 = runtime.ForwardResponseStream forward_SpaceApi_CreateFolder_0 = runtime.ForwardResponseMessage forward_SpaceApi_ToggleFuseDrive_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetFuseDriveStatus_0 = runtime.ForwardResponseMessage forward_SpaceApi_CreateBucket_0 = runtime.ForwardResponseMessage forward_SpaceApi_BackupKeysByPassphrase_0 = runtime.ForwardResponseMessage forward_SpaceApi_RecoverKeysByPassphrase_0 = runtime.ForwardResponseMessage forward_SpaceApi_TestKeysPassphrase_0 = runtime.ForwardResponseMessage forward_SpaceApi_CreateLocalKeysBackup_0 = runtime.ForwardResponseMessage forward_SpaceApi_RecoverKeysByLocalBackup_0 = runtime.ForwardResponseMessage forward_SpaceApi_ShareBucket_0 = runtime.ForwardResponseMessage forward_SpaceApi_JoinBucket_0 = runtime.ForwardResponseMessage forward_SpaceApi_ShareFilesViaPublicKey_0 = runtime.ForwardResponseMessage forward_SpaceApi_UnshareFilesViaPublicKey_0 = runtime.ForwardResponseMessage forward_SpaceApi_HandleFilesInvitation_0 = runtime.ForwardResponseMessage forward_SpaceApi_NotificationSubscribe_0 = runtime.ForwardResponseStream forward_SpaceApi_ListBuckets_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetNotifications_0 = runtime.ForwardResponseMessage forward_SpaceApi_ReadNotification_0 = runtime.ForwardResponseMessage forward_SpaceApi_DeleteAccount_0 = runtime.ForwardResponseMessage forward_SpaceApi_ToggleBucketBackup_0 = runtime.ForwardResponseMessage forward_SpaceApi_BucketBackupRestore_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetUsageInfo_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetAPISessionTokens_0 = runtime.ForwardResponseMessage forward_SpaceApi_GetRecentlySharedWith_0 = runtime.ForwardResponseMessage forward_SpaceApi_SetNotificationsLastSeenAt_0 = runtime.ForwardResponseMessage forward_SpaceApi_SearchFiles_0 =
runtime.ForwardResponseMessage forward_SpaceApi_InitializeMasterAppToken_0 = runtime.ForwardResponseMessage forward_SpaceApi_GenerateAppToken_0 = runtime.ForwardResponseMessage ) ================================================ FILE: grpc/proto/space.proto ================================================ syntax = "proto3"; import "google/protobuf/empty.proto"; import "google/api/annotations.proto"; package space; option go_package = ".;pb"; //See here for more info about the google.api.http spec: https://github.com/googleapis/googleapis/blob/master/google/api/http.proto#L46 // SpaceApi service service SpaceApi { // Get all folder or files in the default bucket. It fetches all subdirectories too. rpc ListDirectories(ListDirectoriesRequest) returns (ListDirectoriesResponse) { option (google.api.http) = { get: "/v1/directories/all" }; } // Get the folder or files in the path directory. // Unlike ListDirectories, this only returns immediate children at path. rpc ListDirectory(ListDirectoryRequest) returns (ListDirectoryResponse) { option (google.api.http) = { get: "/v1/directories" }; } // Generate Key Pair for current account. // This will return error if daemon account already has keypairs rpc GenerateKeyPair(GenerateKeyPairRequest) returns (GenerateKeyPairResponse) { option (google.api.http) = { post: "/v1/keypairs/generate" body: "*" }; } rpc GetStoredMnemonic(GetStoredMnemonicRequest) returns (GetStoredMnemonicResponse) { option (google.api.http) = { get: "/v1/keypairs/mnemonic" }; } // Restores a keypair given a mnemonic. // This will override any existing key pair rpc RestoreKeyPairViaMnemonic(RestoreKeyPairViaMnemonicRequest) returns (RestoreKeyPairViaMnemonicResponse) { option (google.api.http) = { post: "/v1/keypairs/restoreWithMnemonic" body: "*" }; } rpc DeleteKeyPair(DeleteKeyPairRequest) returns (DeleteKeyPairResponse) { option (google.api.http) = { post: "/v1/keypairs/delete" body: "*" }; } // Force Generation of KeyPair. 
This will override existing keys stored in daemon. rpc GenerateKeyPairWithForce(GenerateKeyPairRequest) returns (GenerateKeyPairResponse) { option (google.api.http) = { post: "/v1/keypairs/forceGenerate" body: "*" }; } rpc GetPublicKey(GetPublicKeyRequest) returns (GetPublicKeyResponse) { option (google.api.http) = { post: "/v1/publicKey" body: "*" }; } // Subscribe to file events. This streams responses to the caller rpc Subscribe(google.protobuf.Empty) returns (stream FileEventResponse) { option (google.api.http) = { get: "/v1/subscriptions/file" }; } // Subscribe to textile events. This streams responses to the caller rpc TxlSubscribe(google.protobuf.Empty) returns (stream TextileEventResponse) { option (google.api.http) = { get: "/v1/subscriptions/textile" }; } // Open a file in the daemon. // Daemon keeps track of all open files and closes them if no activity is noticed after a while rpc OpenFile(OpenFileRequest) returns (OpenFileResponse) { option (google.api.http) = { post: "/v1/files/open" body: "*" }; } // Removes a file or dir from a bucket rpc RemoveDirOrFile(RemoveDirOrFileRequest) returns (RemoveDirOrFileResponse) { option (google.api.http) = { delete: "/v1/files" }; } // Generates a copy of the file that's accessible through IPFS gateways rpc GeneratePublicFileLink(GeneratePublicFileLinkRequest) returns (GeneratePublicFileLinkResponse) { option (google.api.http) = { post: "/v1/buckets/{bucket}/generatePublicFileLink" body: "*" }; } // Gets the files that are shared with this recipient rpc GetSharedWithMeFiles(GetSharedWithMeFilesRequest) returns (GetSharedWithMeFilesResponse) { option (google.api.http) = { get: "/v1/files/sharedWithMe" }; } // Gets the files that are shared by the sender rpc GetSharedByMeFiles(GetSharedByMeFilesRequest) returns (GetSharedByMeFilesResponse) { option (google.api.http) = { get: "/v1/files/sharedByMe" }; } // Open an encrypted public shared file in the daemon. 
// This requires the decryption key and file hash/cid to work rpc OpenPublicFile(OpenPublicFileRequest) returns (OpenPublicFileResponse) { option (google.api.http) = { get: "/v1/files/openPublic" }; } // Adds items (files/folders) to be uploaded to the bucket. rpc AddItems(AddItemsRequest) returns (stream AddItemsResponse) { option (google.api.http) = { post: "/v1/files" body: "*" }; } // Creates a folder/directory at the specified path rpc CreateFolder(CreateFolderRequest) returns (CreateFolderResponse) { option (google.api.http) = { post: "/v1/directories" body: "*" }; } // Toggle FUSE drive to be mounted or unmounted rpc ToggleFuseDrive(ToggleFuseRequest) returns (FuseDriveResponse) { option (google.api.http) = { post: "/v1/toggleFuse" body: "*" }; } // Get status of FUSE drive. If mounted or unmounted rpc GetFuseDriveStatus(google.protobuf.Empty) returns (FuseDriveResponse) { option (google.api.http) = { get: "/v1/fuse" }; } // Create a new bucket owned by current user (aka keypair) rpc CreateBucket(CreateBucketRequest) returns (CreateBucketResponse) { option (google.api.http) = { post: "/v1/buckets" body: "*" }; } // Backup Key by Passphrase rpc BackupKeysByPassphrase(BackupKeysByPassphraseRequest) returns (BackupKeysByPassphraseResponse) { option (google.api.http) = { post: "/v1/passphrases/backup" body: "*" }; } // Recover Keys by Passphrase rpc RecoverKeysByPassphrase(RecoverKeysByPassphraseRequest) returns (RecoverKeysByPassphraseResponse) { option (google.api.http) = { post: "/v1/passphrases/recover" body: "*" }; } // Tests a passphrase to see if it matches the one previously used rpc TestKeysPassphrase(TestKeysPassphraseRequest) returns (TestKeysPassphraseResponse) { option (google.api.http) = { post: "/v1/passphrases/test" body: "*" }; } rpc CreateLocalKeysBackup(CreateLocalKeysBackupRequest) returns (CreateLocalKeysBackupResponse) { option (google.api.http) = { post: "/v1/localBackups/backup" body: "*" }; } rpc 
RecoverKeysByLocalBackup(RecoverKeysByLocalBackupRequest) returns (RecoverKeysByLocalBackupResponse) { option (google.api.http) = { post: "/v1/localBackups/recover" body: "*" }; } // Share bucket rpc ShareBucket(ShareBucketRequest) returns (ShareBucketResponse) { option (google.api.http) = { post: "/v1/buckets/{bucket}/share" body: "*" }; } // Join bucket rpc JoinBucket(JoinBucketRequest) returns (JoinBucketResponse) { option (google.api.http) = { post: "/v1/buckets/{bucket}/join" body: "*" }; } // Share bucket via public key using Textile Hub inboxing rpc ShareFilesViaPublicKey(ShareFilesViaPublicKeyRequest) returns (ShareFilesViaPublicKeyResponse) { option (google.api.http) = { post: "/v1/shareFilesViaPublicKey" body: "*" }; } // Remove public keys for shared files in buckets rpc UnshareFilesViaPublicKey(UnshareFilesViaPublicKeyRequest) returns (UnshareFilesViaPublicKeyResponse) { option (google.api.http) = { post: "/v1/unshareFilesViaPublicKey" body: "*" }; } rpc HandleFilesInvitation(HandleFilesInvitationRequest) returns (HandleFilesInvitationResponse) { option (google.api.http) = { post: "/v1/filesinvitation/{invitationID}" body: "*" }; } rpc NotificationSubscribe(google.protobuf.Empty) returns (stream NotificationEventResponse) { option (google.api.http) = { get: "/v1/subscriptions/notification" }; } rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { option (google.api.http) = { get: "/v1/buckets" }; } rpc GetNotifications(GetNotificationsRequest) returns (GetNotificationsResponse) { option (google.api.http) = { get: "/v1/notifications" }; } rpc ReadNotification(ReadNotificationRequest) returns (ReadNotificationResponse) { option (google.api.http) = { post: "/v1/notifications/{ID}/read" body: "*" }; } rpc DeleteAccount(DeleteAccountRequest) returns (DeleteAccountResponse) { option (google.api.http) = { post: "/v1/deleteAccount" body: "*" }; } rpc ToggleBucketBackup(ToggleBucketBackupRequest) returns (ToggleBucketBackupResponse) { option 
(google.api.http) = { post: "/v1/backup" body: "*" }; } rpc BucketBackupRestore(BucketBackupRestoreRequest) returns (BucketBackupRestoreResponse) { option (google.api.http) = { post: "/v1/backup/restore" body: "*" }; } rpc GetUsageInfo(GetUsageInfoRequest) returns (GetUsageInfoResponse) { option (google.api.http) = { get: "/v1/usage" }; } rpc GetAPISessionTokens(GetAPISessionTokensRequest) returns (GetAPISessionTokensResponse) { option (google.api.http) = { get: "/v1/apiSessionTokens" }; } // Returns a list of addresses / public keys of clients to which files where shared or received, ordered by date rpc GetRecentlySharedWith(GetRecentlySharedWithRequest) returns (GetRecentlySharedWithResponse) { option (google.api.http) = { get: "/v1/sharedWithList" }; } // This will set the last read timestamp for the user so that the client // can check if newer notifications are present for UX rpc SetNotificationsLastSeenAt(SetNotificationsLastSeenAtRequest) returns (SetNotificationsLastSeenAtResponse) { option (google.api.http) = { post: "/v1/notifications/lastSeenAt" body: "*" }; } // Search for files across all users bucket rpc SearchFiles(SearchFilesRequest) returns (SearchFilesResponse) { option (google.api.http) = { get: "/v1/search/files" }; } // Initialize master app token // App tokens are used to authorize scoped access to a range of methods // Master token can only be generated once and has access to all methods rpc InitializeMasterAppToken(InitializeMasterAppTokenRequest) returns (InitializeMasterAppTokenResponse) { option (google.api.http) = { post: "/v1/appTokens/master" body: "*" }; } // Generates an app token with scoped access. 
rpc GenerateAppToken(GenerateAppTokenRequest) returns (GenerateAppTokenResponse) { option (google.api.http) = { post: "/v1/appTokens" body: "*" }; } } message SearchFilesRequest { string query = 1; } message SearchFilesResponse { repeated SearchFilesDirectoryEntry entries = 1; string query = 2; } message SearchFilesDirectoryEntry { ListDirectoryEntry entry = 1; string dbId = 2; string bucket = 3; } message SetNotificationsLastSeenAtRequest { int64 timestamp = 1; } message SetNotificationsLastSeenAtResponse {} message GetSharedWithMeFilesRequest { string seek = 1; int64 limit = 2; } message GetSharedWithMeFilesResponse { repeated SharedListDirectoryEntry items = 1; string nextOffset = 2; } message GetSharedByMeFilesRequest { string seek = 1; int64 limit = 2; } message GetSharedByMeFilesResponse { repeated SharedListDirectoryEntry items = 1; string nextOffset = 2; } message GetUsageInfoRequest {} message GetUsageInfoResponse { uint64 localStarogeUsed = 1; uint64 localBandwidthUsed = 2; uint64 spaceStorageUsed = 3; uint64 spaceBandwidthUsed = 4; uint64 usageQuota = 5; } message ToggleBucketBackupRequest { string bucket = 1; bool backup = 2; } message ToggleBucketBackupResponse {} message BucketBackupRestoreRequest { string bucket = 1; } message BucketBackupRestoreResponse {} message ListDirectoriesRequest { string bucket = 1; bool omitMembers = 2; } message FileMember { string publicKey = 1; string address = 2; } message ListDirectoryEntry { string path = 1; bool isDir = 2; string name = 3; string sizeInBytes = 4; string created = 5; string updated = 6; string fileExtension = 7; string ipfsHash = 8; bool isLocallyAvailable = 9; int64 backupCount = 10; repeated FileMember members = 11; bool isBackupInProgress = 12; bool isRestoreInProgress = 13; } message SharedListDirectoryEntry { ListDirectoryEntry entry = 1; string dbId = 2; string bucket = 3; bool isPublicLink = 4; string sharedBy = 5; } message ListDirectoriesResponse { repeated ListDirectoryEntry entries = 1; } 
message ListDirectoryRequest { string path = 1; string bucket = 2; bool omitMembers = 3; } message ListDirectoryResponse { repeated ListDirectoryEntry entries = 1; } message CreateBucketRequest { string slug = 1; } message BucketMember { string address = 1; string publicKey = 2; bool isOwner = 3; bool hasJoined = 4; } message Bucket { string key = 1; string name = 2; string path = 3; int64 createdAt = 4; int64 updatedAt = 5; repeated BucketMember members = 6; bool isPersonalBucket = 7; bool isBackupEnabled = 8; int32 itemsCount = 9; } message CreateBucketResponse { Bucket bucket = 1; } message GenerateKeyPairRequest {} message GenerateKeyPairResponse { string mnemonic = 1; } message GetStoredMnemonicRequest {} message GetStoredMnemonicResponse { string mnemonic = 1; } message RestoreKeyPairViaMnemonicRequest { string mnemonic = 1; } message RestoreKeyPairViaMnemonicResponse {} enum EventType { ENTRY_ADDED = 0; ENTRY_DELETED = 1; ENTRY_UPDATED = 2; ENTRY_BACKUP_IN_PROGRESS = 3; ENTRY_BACKUP_READY = 4; ENTRY_RESTORE_IN_PROGRESS = 5; ENTRY_RESTORE_READY = 6; FOLDER_ADDED = 7; FOLDER_DELETED = 8; FOLDER_UPDATED = 9; } message FileEventResponse { EventType type = 1; ListDirectoryEntry entry = 2; string bucket = 3; string dbId = 4; } message TextileEventResponse { string bucket = 1; } message OpenFileRequest { string path = 1; string bucket = 2; string dbId = 3; // optional field to specify shared with me file } message OpenFileResponse { string location = 1; } message OpenPublicFileRequest { string fileCid = 1; string password = 2; string filename = 3; } message OpenPublicFileResponse { string location = 1; } message AddItemsRequest { // full paths to file or Folder on FS. Needs to be a location available to the daemon repeated string sourcePaths = 1; // target path in bucket. 
string targetPath = 2; // The bucket in which to save the item string bucket = 3; } message AddItemResult { string sourcePath= 1; string bucketPath = 2; string error = 3; } message AddItemsResponse { AddItemResult result = 1; int64 totalFiles = 2; int64 totalBytes = 3; int64 completedFiles = 4; int64 completedBytes = 5; } message CreateFolderRequest { // target path in bucket to add new empty folder string path = 1; // The bucket in which to add the folder string bucket = 2; } // not sure we need to return anything other than an error if we failed message CreateFolderResponse { } enum KeyBackupType { PASSWORD = 0; GOOGLE = 1; TWITTER = 2; EMAIL = 3; } message BackupKeysByPassphraseRequest { string uuid = 1; string passphrase = 2; KeyBackupType type = 3; } message BackupKeysByPassphraseResponse {} message RecoverKeysByPassphraseRequest { string uuid = 1; string passphrase = 2; KeyBackupType type = 3; } message RecoverKeysByPassphraseResponse {} message TestKeysPassphraseRequest { string uuid = 1; string passphrase = 2; } message TestKeysPassphraseResponse {} message ThreadInfo { repeated string addresses = 1; string key = 2; } message ShareBucketRequest { string bucket = 1; } message ShareBucketResponse { ThreadInfo threadinfo = 1; } message JoinBucketRequest { ThreadInfo threadinfo = 1; string bucket = 2; } message JoinBucketResponse { bool result = 1; } message ShareFilesViaPublicKeyRequest { repeated string publicKeys = 1; repeated FullPath paths = 2; } message FullPath { string dbId = 1; // optional field to specify shared with me file string bucket = 2; string path = 3; } message ShareFilesViaPublicKeyResponse {} message UnshareFilesViaPublicKeyRequest { repeated string publicKeys = 1; repeated FullPath paths = 2; } message UnshareFilesViaPublicKeyResponse {} message GeneratePublicFileLinkRequest { string bucket = 1; repeated string itemPaths = 2; string password = 3; // optional field to specify db id // for shared with me files string dbId = 4; } message 
GeneratePublicFileLinkResponse { string link = 1; string fileCid = 2; } message ToggleFuseRequest { bool mountDrive = 1; } enum FuseState { UNSUPPORTED = 0; NOT_INSTALLED = 1; UNMOUNTED = 2; MOUNTED = 3; } message FuseDriveResponse { FuseState state = 1; string mountPath = 2; } message ListBucketsRequest {} message ListBucketsResponse { repeated Bucket buckets = 1; } enum NotificationType { UNKNOWN = 0; INVITATION = 1; USAGEALERT = 2; INVITATION_REPLY = 3; REVOKED_INVITATION = 4; } enum InvitationStatus { PENDING = 0; ACCEPTED = 1; REJECTED = 2; } message Invitation { string inviterPublicKey = 1; string invitationID = 2; InvitationStatus status = 4; repeated FullPath itemPaths = 5; } message UsageAlert { int64 used = 1; int64 limit = 2; string message = 3; } message InvitationAccept { string invitationID = 2; } message RevokedInvitation { string inviterPublicKey = 1; repeated FullPath itemPaths = 5; } message Notification { string ID = 1; // underlying message id from textile string subject = 2; string body = 3; oneof relatedObject { Invitation invitationValue = 4; UsageAlert usageAlert = 5; InvitationAccept invitationAccept = 6; RevokedInvitation revokedInvitation = 7; } NotificationType type = 8; int64 createdAt = 9; int64 readAt = 10; } message HandleFilesInvitationRequest { string invitationID = 1; bool accept = 2; } message HandleFilesInvitationResponse {} message NotificationEventResponse { Notification notification = 1; } message GetNotificationsRequest { string seek = 1; int64 limit = 2; } message GetNotificationsResponse { repeated Notification notifications= 1; string nextOffset = 2; int64 lastSeenAt = 3; } message ReadNotificationRequest { string ID = 1; } message ReadNotificationResponse { } message GetPublicKeyRequest {} message GetPublicKeyResponse { // Public key encoded in hex string publicKey = 1; } message RecoverKeysByLocalBackupRequest { string pathToKeyBackup = 1; } message RecoverKeysByLocalBackupResponse {} message 
CreateLocalKeysBackupRequest { // The path in which to save the backup string pathToKeyBackup = 1; } message CreateLocalKeysBackupResponse {} message DeleteAccountRequest {} message DeleteAccountResponse {} message DeleteKeyPairRequest {} message DeleteKeyPairResponse {} message GetAPISessionTokensRequest {} message GetAPISessionTokensResponse { string hubToken = 1; string servicesToken = 2; } message GetRecentlySharedWithRequest {} message GetRecentlySharedWithResponse { repeated FileMember members = 1; } message InitializeMasterAppTokenRequest {} message InitializeMasterAppTokenResponse { string appToken = 1; } message AllowedMethod { string methodName = 1; } message GenerateAppTokenRequest { repeated AllowedMethod allowedMethods = 1; } message GenerateAppTokenResponse { string appToken = 1; } message RemoveDirOrFileRequest { string path = 1; string bucket = 2; } message RemoveDirOrFileResponse {} ================================================ FILE: integration_tests/README.md ================================================ ### Integration Testing Guide #### Fixtures Fixtures module contains test fixtures to setup and teardown tests and perform some actions #### Helpers Helpers modules contains helper function for performing actions and also some assertions. Custom assertions should be created to make tests more readable and also prevent duplication of assertion logic. ================================================ FILE: integration_tests/fixtures/app.go ================================================ package fixtures import ( "os" "path" "strings" "github.com/FleekHQ/space-daemon/log" "github.com/FleekHQ/space-daemon/core/keychain" "github.com/99designs/keyring" "github.com/FleekHQ/space-daemon/app" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/grpc/pb" . 
"github.com/onsi/gomega" ) type RunAppCtx struct { App *app.App cfg config.Config client pb.SpaceApiClient ClientAppToken string ClientMnemonic string } func RunApp() *RunAppCtx { _, cfg, env := GetTestConfig() spaceApp := app.New(cfg, env) err := spaceApp.Start() ExpectWithOffset(1, err).NotTo(HaveOccurred(), "space app failed to start") ExpectWithOffset(1, spaceApp.IsRunning).To(Equal(true), "spaceApp.IsRunning should be true") return &RunAppCtx{ App: spaceApp, cfg: cfg, client: nil, } } // RunAppWithClientAppToken creates an instance of RunAppCtx for test but with the // ClientAppToken already set func RunAppWithClientAppToken(appToken string) *RunAppCtx { newApp := RunApp() newApp.ClientAppToken = appToken return newApp } func (a *RunAppCtx) Shutdown() { if a.App != nil { // shutdown app err := a.App.Shutdown() if err != nil { log.Error("Failed to shutdown app in test", err) } spaceStorePath := a.cfg.GetString(config.SpaceStorePath, "") buckdPath := a.cfg.GetString(config.BuckdPath, "") // delete app dir _ = os.RemoveAll(spaceStorePath) _ = os.RemoveAll(buckdPath) } } func (a *RunAppCtx) ClearMasterAppToken() { spaceStorePath := a.cfg.GetString(config.SpaceStorePath, "") // clear master token from keystore ucd, _ := os.UserConfigDir() ring, err := keyring.Open(keyring.Config{ ServiceName: "space", KeychainTrustApplication: true, KeychainAccessibleWhenUnlocked: true, KWalletAppID: "space", KWalletFolder: "space", WinCredPrefix: "space", LibSecretCollectionName: "space", PassPrefix: "space", PassDir: spaceStorePath + "/kcpw", FileDir: path.Join(ucd, "space", "keyring"), }) if err == nil { _ = ring.Remove(keychain.AppTokenStoreKey + "_" + keychain.MasterAppTokenStoreKey) if a.ClientAppToken != "" { parts := strings.Split(a.ClientAppToken, ".") _ = ring.Remove(keychain.AppTokenStoreKey + "_" + parts[0]) } } } ================================================ FILE: integration_tests/fixtures/client.go ================================================ package fixtures 
import ( "context" "fmt" "google.golang.org/grpc/metadata" . "github.com/onsi/gomega" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/grpc/pb" "google.golang.org/grpc" ) var DefaultBucket = "personal" var MirrorBucket = "personal_mirror" func (a *RunAppCtx) Client() pb.SpaceApiClient { if a.client != nil { return a.client } conn, err := DialGrpcClient(fmt.Sprintf(":%d", a.cfg.GetInt(config.SpaceServerPort, 9999)), a) Expect(err).NotTo(HaveOccurred()) a.client = pb.NewSpaceApiClient(conn) return a.client } func DialGrpcClient(targetAddr string, a *RunAppCtx) (*grpc.ClientConn, error) { return grpc.Dial( targetAddr, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithUnaryInterceptor(func( ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption, ) error { if a.ClientAppToken != "" { md := metadata.New(map[string]string{"authorization": "AppToken " + a.ClientAppToken}) ctx = metadata.NewOutgoingContext(ctx, md) } return invoker(ctx, method, req, reply, cc, opts...) }), grpc.WithStreamInterceptor(func( ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption, ) (grpc.ClientStream, error) { if a.ClientAppToken != "" { md := metadata.New(map[string]string{"authorization": "AppToken " + a.ClientAppToken}) ctx = metadata.NewOutgoingContext(ctx, md) } return streamer(ctx, desc, cc, method, opts...) }), ) } ================================================ FILE: integration_tests/fixtures/configs.go ================================================ package fixtures import ( "fmt" "os" "path/filepath" "github.com/FleekHQ/space-daemon/config" "github.com/FleekHQ/space-daemon/core/env" . 
"github.com/onsi/gomega" "github.com/phayes/freeport" ) // GetTestConfig returns a ConfigMap instance instantiated using the env variables func GetTestConfig() (*config.Flags, config.Config, env.SpaceEnv) { homeDir, err := os.UserHomeDir() Expect(err).NotTo(HaveOccurred()) freePorts, err := freeport.GetFreePorts(8) Expect(err).NotTo(HaveOccurred()) flags := config.Flags{ Ipfsaddr: "/ip4/127.0.0.1/tcp/5001", Ipfsnode: false, // use external ipfs node Ipfsnodeaddr: "/ip4/127.0.0.1/tcp/5001", Ipfsnodepath: filepath.Join(homeDir, ".fleek-space-ipfs-node-test"), DevMode: false, ServicesAPIURL: os.Getenv("SERVICES_API_URL"), SpaceStorageSiteUrl: os.Getenv("SPACE_STORAGE_SITE_URL"), VaultAPIURL: os.Getenv("VAULT_API_URL"), VaultSaltSecret: os.Getenv("VAULT_SALT_SECRET"), ServicesHubAuthURL: os.Getenv("SERVICES_HUB_AUTH_URL"), TextileHubTarget: os.Getenv("TXL_HUB_TARGET"), TextileHubMa: os.Getenv("TXL_HUB_MA"), TextileThreadsTarget: os.Getenv("TXL_THREADS_TARGET"), TextileHubGatewayUrl: os.Getenv("TXL_HUB_GATEWAY_URL"), TextileUserKey: os.Getenv("TXL_USER_KEY"), TextileUserSecret: os.Getenv("TXL_USER_SECRET"), SpaceStorePath: filepath.Join(homeDir, ".fleek-space-"+RandomPathName()), RpcServerPort: freePorts[1], RpcProxyServerPort: freePorts[2], RestProxyServerPort: freePorts[3], BuckdPath: filepath.Join(homeDir, ".fleek-space-buckd-"+RandomPathName()), BuckdApiMaAddr: fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", freePorts[4]), BuckdApiProxyMaAddr: fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", freePorts[5]), BuckdThreadsHostMaAddr: fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", freePorts[6]), BuckdGatewayPort: freePorts[7], LogLevel: "info", } // env spaceEnv := env.New() // load configs return &flags, config.NewMap(&flags), spaceEnv } ================================================ FILE: integration_tests/fixtures/directories.go ================================================ package fixtures import "math/rand" const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ " // 
RandomPathName create a valid path segment name. Usually 10 characters in length func RandomPathName() string { b := make([]byte, 10) for i := range b { b[i] = letterBytes[rand.Intn(len(letterBytes))] } return string(b) } ================================================ FILE: integration_tests/helpers/assertions.go ================================================ package helpers import ( "context" "errors" "fmt" "io/ioutil" "github.com/FleekHQ/space-daemon/integration_tests/fixtures" "github.com/FleekHQ/space-daemon/grpc/pb" . "github.com/onsi/gomega" ) func ExpectFileExists(ctx context.Context, client pb.SpaceApiClient, remotePath string, remoteFileName string) *pb.ListDirectoryEntry { res, err := client.ListDirectory(ctx, &pb.ListDirectoryRequest{ Path: remotePath, Bucket: fixtures.DefaultBucket, }) ExpectWithOffset(1, err).NotTo(HaveOccurred()) ExpectWithOffset(1, res.Entries).NotTo(BeEmpty(), "file at remote path not found") for _, item := range res.Entries { if item.Name == remoteFileName { return item } } // Not Found ExpectWithOffset(1, errors.New("file at remote path not found")).NotTo(HaveOccurred()) return nil } func ExpectFileContentEquals(filePath string, expectedContent []byte) { actualContent, err := ioutil.ReadFile(filePath) ExpectWithOffset(1, err).NotTo(HaveOccurred()) ExpectWithOffset(1, actualContent).To(Equal(expectedContent)) } func ExpectFileToBeSharedWithMe( ctx context.Context, client pb.SpaceApiClient, fileName, bucket, dbId string, isPublicShared bool, ) *pb.SharedListDirectoryEntry { // assert file is visible in recently shared list sharedWithMeRes, err := client.GetSharedWithMeFiles(ctx, &pb.GetSharedWithMeFilesRequest{Limit: 10}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) ExpectWithOffset(1, sharedWithMeRes.Items).NotTo(BeEmpty()) for _, item := range sharedWithMeRes.Items { if item.Entry.Name == fileName { ExpectWithOffset(1, item.IsPublicLink).To(Equal(isPublicShared), "shared files is publicly shared not expected value") 
ExpectWithOffset(1, item.Bucket).To(Equal(bucket), "shared files bucket slug not expected value") if dbId != "" { // conditionally skip this for some checks ExpectWithOffset(1, item.DbId).To(Equal(dbId), "shared files dbId not expected value") } return item } } // Not Found ExpectWithOffset(1, errors.New(fmt.Sprintf("shared with me file not found. filename=%s, isPublicShared=%v", fileName, isPublicShared))) return nil } ================================================ FILE: integration_tests/helpers/directories.go ================================================ package helpers import ( "context" "io" "io/ioutil" "os" "time" "github.com/onsi/ginkgo" "github.com/golang/protobuf/ptypes/empty" "github.com/FleekHQ/space-daemon/integration_tests/fixtures" "github.com/FleekHQ/space-daemon/grpc/pb" . "github.com/onsi/gomega" ) func CreateEmptyFolder(ctx context.Context, client pb.SpaceApiClient, path string) { _, err := client.CreateFolder(ctx, &pb.CreateFolderRequest{ Path: path, Bucket: fixtures.DefaultBucket, }) ExpectWithOffset(1, err).NotTo(HaveOccurred()) <-time.After(4 * time.Second) // required currently for textile to perform sync properly. 
TODO: Fix this
}

// CreateLocalStringFile writes strContent to a fresh temp file and returns the
// still-open *os.File; callers typically only use Name(). The handle is left
// open on purpose so the returned file remains usable by the caller.
func CreateLocalStringFile(strContent string) *os.File {
	content := []byte(strContent)
	tmpfile, err := ioutil.TempFile("", "*-localStringFile.txt")
	// BUG FIX: the previous code ran `if err != nil { defer tmpfile.Close() }`,
	// which is inverted and would nil-panic (tmpfile is nil on error). The
	// gomega assertion below already aborts the test on error, so the branch
	// is removed entirely.
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to create local string file")
	_, err = tmpfile.Write(content)
	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to write string content")
	return tmpfile
}

// UploadFilesToTargetPath uploads sourcePaths into targetPath of the default
// bucket, waits for the first stream response, then pauses briefly so textile
// sync can settle.
func UploadFilesToTargetPath(
	ctx context.Context,
	client pb.SpaceApiClient,
	targetPath string,
	sourcePaths []string,
) {
	streamResponse, err := client.AddItems(ctx, &pb.AddItemsRequest{
		SourcePaths: sourcePaths,
		TargetPath:  targetPath,
		Bucket:      fixtures.DefaultBucket,
	})
	ExpectWithOffset(1, err).NotTo(HaveOccurred())
	_, err = streamResponse.Recv()
	ExpectWithOffset(1, err).NotTo(HaveOccurred())
	<-time.After(4 * time.Second) // required currently for textile to perform sync properly. TODO: Fix this
}

// StartWatchingForRemoteBackup kicks off a goroutine that subscribes to Subscribe rpc for the client
// and watches for the specified files and buckets to be uploaded
// The first channel returns true when all files have been found or false after a 5 minutes timeout of not finding them all
// the second function returned must always be called at the end of a test to ensure the goroutine is stopped.
func StartWatchingForRemoteBackup( ctx context.Context, client pb.SpaceApiClient, filePathsToWatch []string, ) (<-chan bool, func()) { ExpectWithOffset(1, filePathsToWatch).NotTo(BeEmpty()) streamCtx, cancelStreamCtx := context.WithCancel(context.Background()) streamResponse, err := client.Subscribe(streamCtx, &empty.Empty{}) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Subscribe failed to connect") foundFiles := make(map[string]bool) totalFound := 0 foundAllChan := make(chan bool, 1) closeAChan := make(chan bool, 1) itemsChan := make(chan *pb.FileEventResponse) go func() { defer ginkgo.GinkgoRecover() timeoutChan := time.After(5 * time.Minute) for { select { case <-closeAChan: return case <-timeoutChan: foundAllChan <- totalFound == len(filePathsToWatch) return case item := <-itemsChan: if !foundFiles[item.Entry.Path] && item.Bucket == fixtures.DefaultBucket && item.Type == pb.EventType_ENTRY_BACKUP_READY { for _, path := range filePathsToWatch { if path == item.Entry.Path { foundFiles[item.Entry.Path] = true totalFound++ } } } if totalFound == len(filePathsToWatch) { // wait for completion foundAllChan <- true // all file have been found return } } } }() go func() { defer ginkgo.GinkgoRecover() for { select { default: item, err := streamResponse.Recv() if err == io.EOF { // stream closed return } ExpectWithOffset(2, err).NotTo(HaveOccurred(), "failed while receiving data from stream") if err == nil { itemsChan <- item } } } }() return foundAllChan, func() { closeAChan <- true cancelStreamCtx() } } // i need to be receiving and in parallel ================================================ FILE: integration_tests/helpers/initialize.go ================================================ package helpers import ( "context" "github.com/FleekHQ/space-daemon/grpc/pb" "github.com/FleekHQ/space-daemon/integration_tests/fixtures" . 
"github.com/onsi/gomega" ) func InitializeApp(app *fixtures.RunAppCtx) { if app.ClientAppToken == "" { res, err := app.Client().InitializeMasterAppToken( context.Background(), &pb.InitializeMasterAppTokenRequest{}, ) ExpectWithOffset(1, err).NotTo(HaveOccurred()) ExpectWithOffset(1, res.AppToken).NotTo(BeEmpty()) app.ClientAppToken = res.AppToken } if app.ClientMnemonic == "" { kpRes, err := app.Client().GenerateKeyPair(context.Background(), &pb.GenerateKeyPairRequest{}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) app.ClientMnemonic = kpRes.Mnemonic } } ================================================ FILE: integration_tests/integration_tests_suite_test.go ================================================ package integration_tests import ( "context" "os" "testing" "github.com/FleekHQ/space-daemon/config" ipfs "github.com/FleekHQ/space-daemon/core/ipfs/node" . "github.com/FleekHQ/space-daemon/integration_tests/helpers" "github.com/FleekHQ/space-daemon/integration_tests/fixtures" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var ( app *fixtures.RunAppCtx ipfsNode *ipfs.IpfsNode ipfsCfg config.Config ) // TestIntegrationTests registers the integration test suite with ginkgo. 
func TestIntegrationTests(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "IntegrationTests Suite") } var _ = BeforeSuite(func() { // start ipfs node _, ipfsCfg, _ = fixtures.GetTestConfig() ipfsNode = ipfs.NewIpsNode(ipfsCfg) ipfsErrChan := make(chan error) go func() { ipfsErrChan <- ipfsNode.Start(context.TODO()) }() select { case err := <-ipfsErrChan: Expect(err).NotTo(HaveOccurred(), "Error starting ipfs node for integration tests") case <-ipfsNode.WaitForReady(): // ipfs node ready } app = fixtures.RunApp() InitializeApp(app) }) var _ = AfterSuite(func() { app.Shutdown() app.ClearMasterAppToken() // shutdown ipfs _ = ipfsNode.Shutdown() _ = os.RemoveAll(ipfsCfg.GetString(config.Ipfsnodepath, "")) }) ================================================ FILE: integration_tests/sharing_test.go ================================================ package integration_tests import ( "context" "path/filepath" "github.com/FleekHQ/space-daemon/integration_tests/fixtures" "github.com/FleekHQ/space-daemon/grpc/pb" . "github.com/FleekHQ/space-daemon/integration_tests/helpers" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) var _ = Describe("Sharing Files", func() { Context("when sharing publicly", func() { It("should work", func() { ctx := context.Background() password := "random-strong-password" sharedFileContent := "Perhaps really long text" file := CreateLocalStringFile(sharedFileContent) fileName := filepath.Base(file.Name()) // share file UploadFilesToTargetPath(ctx, app.Client(), "", []string{file.Name()}) publicLinkRes, err := app.Client().GeneratePublicFileLink(ctx, &pb.GeneratePublicFileLinkRequest{ Bucket: fixtures.DefaultBucket, ItemPaths: []string{fileName}, Password: password, }) Expect(err).NotTo(HaveOccurred()) // fetch shared file openLinkRes, err := app.Client().OpenPublicFile(ctx, &pb.OpenPublicFileRequest{ FileCid: publicLinkRes.FileCid, Password: password, Filename: fileName, }) Expect(err).NotTo(HaveOccurred()) ExpectFileContentEquals(openLinkRes.Location, []byte(sharedFileContent)) // assert file is visible in recently shared list ExpectFileToBeSharedWithMe(ctx, app.Client(), fileName, "", "", true) }) }) Context("when sharing privately", func() { var app2 *fixtures.RunAppCtx BeforeEach(func() { app2 = fixtures.RunAppWithClientAppToken(app.ClientAppToken) InitializeApp(app2) }) AfterEach(func() { app2.Shutdown() }) It("should work", func() { ctx := context.Background() sharedFileContent := "A really really really long text or possibly binary data" file := CreateLocalStringFile(sharedFileContent) fileName := filepath.Base(file.Name()) pk1Res, err := app.Client().GetPublicKey(ctx, &pb.GetPublicKeyRequest{}) Expect(err).NotTo(HaveOccurred()) // upload file to second users directory remoteBackupWait, cleanupWait := StartWatchingForRemoteBackup(ctx, app2.Client(), []string{fileName}) defer cleanupWait() UploadFilesToTargetPath(ctx, app2.Client(), "", []string{file.Name()}) // Wait for file to be replicated to the hub backupSuccess := <-remoteBackupWait Expect(backupSuccess).To(BeTrue(), "failed to complete remote backup of watched files") 
// share file with the first user _, err = app2.Client().ShareFilesViaPublicKey(ctx, &pb.ShareFilesViaPublicKeyRequest{ PublicKeys: []string{pk1Res.PublicKey}, Paths: []*pb.FullPath{{ Bucket: fixtures.DefaultBucket, Path: fileName, }}, }) Expect(err).NotTo(HaveOccurred()) // Fetch and verify invite notification. notifRes, err := app.Client().GetNotifications(ctx, &pb.GetNotificationsRequest{Limit: 10}) Expect(err).NotTo(HaveOccurred()) Expect(notifRes.Notifications).NotTo(BeEmpty(), "no invite notification provided") Expect(notifRes.Notifications[0].Type).To(Equal(pb.NotificationType_INVITATION)) // Accept invite _, err = app.Client().HandleFilesInvitation(ctx, &pb.HandleFilesInvitationRequest{ InvitationID: notifRes.Notifications[0].ID, Accept: true, }) Expect(err).NotTo(HaveOccurred()) // verify file is in shared with first user sharedItem := ExpectFileToBeSharedWithMe(ctx, app.Client(), fileName, fixtures.MirrorBucket, "", false) // confirm first user can see file openFileResult, err := app.Client().OpenFile(ctx, &pb.OpenFileRequest{ Path: sharedItem.Entry.Path, Bucket: sharedItem.Bucket, DbId: sharedItem.DbId, }) Expect(err).NotTo(HaveOccurred()) ExpectFileContentEquals(openFileResult.Location, []byte(sharedFileContent)) }) }) }) ================================================ FILE: integration_tests/uploads_test.go ================================================ package integration_tests import ( "context" "path/filepath" "github.com/FleekHQ/space-daemon/integration_tests/fixtures" "github.com/FleekHQ/space-daemon/grpc/pb" . "github.com/FleekHQ/space-daemon/integration_tests/helpers" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) var _ = Describe("App Uploads", func() { It("should create empty folder successfully", func() { ctx := context.Background() folderName := fixtures.RandomPathName() CreateEmptyFolder(ctx, app.Client(), folderName) _, err := app.Client().ListDirectory(ctx, &pb.ListDirectoryRequest{ Path: "", Bucket: fixtures.DefaultBucket, }) Expect(err).NotTo(HaveOccurred()) ExpectFileExists(ctx, app.Client(), "", folderName) }) It("should upload and download files successfully", func() { ctx := context.Background() file := CreateLocalStringFile("random file content") fileName := filepath.Base(file.Name()) UploadFilesToTargetPath(ctx, app.Client(), "", []string{file.Name()}) ExpectFileExists(ctx, app.Client(), "", fileName) // try uploading to a folder topFolderPath := fixtures.RandomPathName() CreateEmptyFolder(ctx, app.Client(), topFolderPath) UploadFilesToTargetPath(ctx, app.Client(), topFolderPath, []string{file.Name()}) ExpectFileExists(ctx, app.Client(), topFolderPath, fileName) }) }) ================================================ FILE: log/logger.go ================================================ package log import ( "fmt" "os" "strings" "github.com/FleekHQ/space-daemon/core/env" "github.com/sirupsen/logrus" ) var ( log *logger ) type logger struct { log *logrus.Logger } func init() { log = new("") } func New(env env.SpaceEnv) *logger { // TODO: check for log level in config and pass it to new return new("") } func new(logLevel string) *logger { logLevelConf := "Debug" level, err := logrus.ParseLevel(logLevelConf) if err != nil { level = logrus.DebugLevel } log = &logger{ log: &logrus.Logger{ Level: level, Out: os.Stdout, Formatter: &logrus.TextFormatter{}, }} return log } // METHODS func (l *logger) Info(msg string, tags ...string) { if l.log.Level < logrus.InfoLevel { return } l.log.WithFields(parseFields(tags...)).Info(msg) } func (l *logger) Printf(msg string, args ...interface{}) { if l.log.Level < logrus.InfoLevel { return } 
l.log.Printf(msg, args...) } func (l *logger) Debug(msg string, tags ...string) { if l.log.Level < logrus.DebugLevel { return } l.log.WithFields(parseFields(tags...)).Debug(msg) } func (l *logger) Warn(msg string, tags ...string) { if l.log.Level < logrus.WarnLevel { return } l.log.WithFields(parseFields(tags...)).Warn(msg) } func (l *logger) Error(msg string, err error, tags ...string) { if l.log.Level < logrus.ErrorLevel { return } msg = fmt.Sprintf("%s -- ERROR -- %v", msg, err) // l.log.WithFields(parseFields(tags...)).Error(msg) l.log.Error(msg, tags) } func (l *logger) Fatal(err error) { l.Error(err.Error(), err) l.log.Exit(1) } // Functions func Info(msg string, tags ...string) { log.Info(msg, tags...) } func Printf(msg string, args ...interface{}) { log.Printf(msg, args...) } func Warn(msg string, tags ...string) { log.Warn(msg, tags...) } func Debug(msg string, tags ...string) { log.Debug(msg, tags...) } func Error(msg string, err error, tags ...string) { log.Error(msg, err, tags...) } func Fatal(err error) { log.Fatal(err) } func SetLogLevel(logLevel string) { level, err := logrus.ParseLevel(logLevel) if err != nil { level = logrus.DebugLevel } log.log.SetLevel(level) } func parseFields(tags ...string) logrus.Fields { result := make(logrus.Fields, len(tags)) for _, tag := range tags { els := strings.Split(tag, ":") if len(els) > 1 { result[strings.TrimSpace(els[0])] = strings.TrimSpace(els[1]) } } return result } ================================================ FILE: mocks/Bucket.go ================================================ // Code generated by mockery v2.3.0. DO NOT EDIT. 
package mocks import ( context "context" bucket "github.com/FleekHQ/space-daemon/core/textile/bucket" io "io" mock "github.com/stretchr/testify/mock" path "github.com/ipfs/interface-go-ipfs-core/path" thread "github.com/textileio/go-threads/core/thread" ) // Bucket is an autogenerated mock type for the Bucket type type Bucket struct { mock.Mock } // CreateDirectory provides a mock function with given fields: ctx, _a1 func (_m *Bucket) CreateDirectory(ctx context.Context, _a1 string) (path.Resolved, path.Path, error) { ret := _m.Called(ctx, _a1) var r0 path.Resolved if rf, ok := ret.Get(0).(func(context.Context, string) path.Resolved); ok { r0 = rf(ctx, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(path.Resolved) } } var r1 path.Path if rf, ok := ret.Get(1).(func(context.Context, string) path.Path); ok { r1 = rf(ctx, _a1) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(path.Path) } } var r2 error if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { r2 = rf(ctx, _a1) } else { r2 = ret.Error(2) } return r0, r1, r2 } // DeleteDirOrFile provides a mock function with given fields: ctx, _a1 func (_m *Bucket) DeleteDirOrFile(ctx context.Context, _a1 string) (path.Resolved, error) { ret := _m.Called(ctx, _a1) var r0 path.Resolved if rf, ok := ret.Get(0).(func(context.Context, string) path.Resolved); ok { r0 = rf(ctx, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(path.Resolved) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) } return r0, r1 } // DirExists provides a mock function with given fields: ctx, _a1 func (_m *Bucket) DirExists(ctx context.Context, _a1 string) (bool, error) { ret := _m.Called(ctx, _a1) var r0 bool if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok { r0 = rf(ctx, _a1) } else { r0 = ret.Get(0).(bool) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) } 
return r0, r1 } // DownloadFile provides a mock function with given fields: ctx, _a1, reader func (_m *Bucket) DownloadFile(ctx context.Context, _a1 string, reader io.Reader) (path.Resolved, path.Path, error) { ret := _m.Called(ctx, _a1, reader) var r0 path.Resolved if rf, ok := ret.Get(0).(func(context.Context, string, io.Reader) path.Resolved); ok { r0 = rf(ctx, _a1, reader) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(path.Resolved) } } var r1 path.Path if rf, ok := ret.Get(1).(func(context.Context, string, io.Reader) path.Path); ok { r1 = rf(ctx, _a1, reader) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(path.Path) } } var r2 error if rf, ok := ret.Get(2).(func(context.Context, string, io.Reader) error); ok { r2 = rf(ctx, _a1, reader) } else { r2 = ret.Error(2) } return r0, r1, r2 } // Each provides a mock function with given fields: ctx, _a1, iterator, withRecursive func (_m *Bucket) Each(ctx context.Context, _a1 string, iterator func(context.Context, *bucket.Bucket, string) error, withRecursive bool) (int, error) { ret := _m.Called(ctx, _a1, iterator, withRecursive) var r0 int if rf, ok := ret.Get(0).(func(context.Context, string, func(context.Context, *bucket.Bucket, string) error, bool) int); ok { r0 = rf(ctx, _a1, iterator, withRecursive) } else { r0 = ret.Get(0).(int) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, func(context.Context, *bucket.Bucket, string) error, bool) error); ok { r1 = rf(ctx, _a1, iterator, withRecursive) } else { r1 = ret.Error(1) } return r0, r1 } // FileExists provides a mock function with given fields: ctx, _a1 func (_m *Bucket) FileExists(ctx context.Context, _a1 string) (bool, error) { ret := _m.Called(ctx, _a1) var r0 bool if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok { r0 = rf(ctx, _a1) } else { r0 = ret.Get(0).(bool) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) } return r0, r1 } // GetClient 
provides a mock function with given fields: func (_m *Bucket) GetClient() bucket.BucketsClient { ret := _m.Called() var r0 bucket.BucketsClient if rf, ok := ret.Get(0).(func() bucket.BucketsClient); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(bucket.BucketsClient) } } return r0 } // GetContext provides a mock function with given fields: ctx func (_m *Bucket) GetContext(ctx context.Context) (context.Context, *thread.ID, error) { ret := _m.Called(ctx) var r0 context.Context if rf, ok := ret.Get(0).(func(context.Context) context.Context); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(context.Context) } } var r1 *thread.ID if rf, ok := ret.Get(1).(func(context.Context) *thread.ID); ok { r1 = rf(ctx) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*thread.ID) } } var r2 error if rf, ok := ret.Get(2).(func(context.Context) error); ok { r2 = rf(ctx) } else { r2 = ret.Error(2) } return r0, r1, r2 } // GetData provides a mock function with given fields: func (_m *Bucket) GetData() bucket.BucketData { ret := _m.Called() var r0 bucket.BucketData if rf, ok := ret.Get(0).(func() bucket.BucketData); ok { r0 = rf() } else { r0 = ret.Get(0).(bucket.BucketData) } return r0 } // GetFile provides a mock function with given fields: ctx, _a1, w func (_m *Bucket) GetFile(ctx context.Context, _a1 string, w io.Writer) error { ret := _m.Called(ctx, _a1, w) var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, io.Writer) error); ok { r0 = rf(ctx, _a1, w) } else { r0 = ret.Error(0) } return r0 } // GetThreadID provides a mock function with given fields: ctx func (_m *Bucket) GetThreadID(ctx context.Context) (*thread.ID, error) { ret := _m.Called(ctx) var r0 *thread.ID if rf, ok := ret.Get(0).(func(context.Context) *thread.ID); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*thread.ID) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } 
// ItemsCount provides a mock function with given fields: ctx, _a1, withRecursive func (_m *Bucket) ItemsCount(ctx context.Context, _a1 string, withRecursive bool) (int32, error) { ret := _m.Called(ctx, _a1, withRecursive) var r0 int32 if rf, ok := ret.Get(0).(func(context.Context, string, bool) int32); ok { r0 = rf(ctx, _a1, withRecursive) } else { r0 = ret.Get(0).(int32) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { r1 = rf(ctx, _a1, withRecursive) } else { r1 = ret.Error(1) } return r0, r1 } // Key provides a mock function with given fields: func (_m *Bucket) Key() string { ret := _m.Called() var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() } else { r0 = ret.Get(0).(string) } return r0 } // ListDirectory provides a mock function with given fields: ctx, _a1 func (_m *Bucket) ListDirectory(ctx context.Context, _a1 string) (*bucket.DirEntries, error) { ret := _m.Called(ctx, _a1) var r0 *bucket.DirEntries if rf, ok := ret.Get(0).(func(context.Context, string) *bucket.DirEntries); ok { r0 = rf(ctx, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*bucket.DirEntries) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) } return r0, r1 } // Slug provides a mock function with given fields: func (_m *Bucket) Slug() string { ret := _m.Called() var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() } else { r0 = ret.Get(0).(string) } return r0 } // UpdatedAt provides a mock function with given fields: ctx, _a1 func (_m *Bucket) UpdatedAt(ctx context.Context, _a1 string) (int64, error) { ret := _m.Called(ctx, _a1) var r0 int64 if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok { r0 = rf(ctx, _a1) } else { r0 = ret.Get(0).(int64) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) } return r0, r1 } // UploadFile provides a mock 
function with given fields: ctx, _a1, reader func (_m *Bucket) UploadFile(ctx context.Context, _a1 string, reader io.Reader) (path.Resolved, path.Path, error) { ret := _m.Called(ctx, _a1, reader) var r0 path.Resolved if rf, ok := ret.Get(0).(func(context.Context, string, io.Reader) path.Resolved); ok { r0 = rf(ctx, _a1, reader) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(path.Resolved) } } var r1 path.Path if rf, ok := ret.Get(1).(func(context.Context, string, io.Reader) path.Path); ok { r1 = rf(ctx, _a1, reader) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(path.Path) } } var r2 error if rf, ok := ret.Get(2).(func(context.Context, string, io.Reader) error); ok { r2 = rf(ctx, _a1, reader) } else { r2 = ret.Error(2) } return r0, r1, r2 } ================================================ FILE: mocks/Client.go ================================================ // Code generated by mockery v2.4.0. DO NOT EDIT. package mocks import ( config "github.com/FleekHQ/space-daemon/config" cid "github.com/ipfs/go-cid" client "github.com/textileio/go-threads/api/client" context "context" crypto "github.com/libp2p/go-libp2p-core/crypto" db "github.com/textileio/go-threads/db" domain "github.com/FleekHQ/space-daemon/core/space/domain" io "io" mock "github.com/stretchr/testify/mock" model "github.com/FleekHQ/space-daemon/core/textile/model" sync "github.com/FleekHQ/space-daemon/core/textile/sync" textile "github.com/FleekHQ/space-daemon/core/textile" usersdclient "github.com/textileio/textile/v2/api/usersd/client" ) // Client is an autogenerated mock type for the Client type type Client struct { mock.Mock } // AcceptSharedFileLink provides a mock function with given fields: ctx, cidHash, password, filename, fileSize func (_m *Client) AcceptSharedFileLink(ctx context.Context, cidHash string, password string, filename string, fileSize string) (*domain.SharedDirEntry, error) { ret := _m.Called(ctx, cidHash, password, filename, fileSize) var r0 *domain.SharedDirEntry if rf, ok := 
ret.Get(0).(func(context.Context, string, string, string, string) *domain.SharedDirEntry); ok { r0 = rf(ctx, cidHash, password, filename, fileSize) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*domain.SharedDirEntry) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { r1 = rf(ctx, cidHash, password, filename, fileSize) } else { r1 = ret.Error(1) } return r0, r1 } // AcceptSharedFilesInvitation provides a mock function with given fields: ctx, invitation func (_m *Client) AcceptSharedFilesInvitation(ctx context.Context, invitation domain.Invitation) (domain.Invitation, error) { ret := _m.Called(ctx, invitation) var r0 domain.Invitation if rf, ok := ret.Get(0).(func(context.Context, domain.Invitation) domain.Invitation); ok { r0 = rf(ctx, invitation) } else { r0 = ret.Get(0).(domain.Invitation) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, domain.Invitation) error); ok { r1 = rf(ctx, invitation) } else { r1 = ret.Error(1) } return r0, r1 } // AttachMailboxNotifier provides a mock function with given fields: notif func (_m *Client) AttachMailboxNotifier(notif textile.GrpcMailboxNotifier) { _m.Called(notif) } // AttachSynchronizerNotifier provides a mock function with given fields: notif func (_m *Client) AttachSynchronizerNotifier(notif sync.EventNotifier) { _m.Called(notif) } // BucketBackupRestore provides a mock function with given fields: ctx, bucketSlug func (_m *Client) BucketBackupRestore(ctx context.Context, bucketSlug string) error { ret := _m.Called(ctx, bucketSlug) var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, bucketSlug) } else { r0 = ret.Error(0) } return r0 } // CreateBucket provides a mock function with given fields: ctx, bucketSlug func (_m *Client) CreateBucket(ctx context.Context, bucketSlug string) (textile.Bucket, error) { ret := _m.Called(ctx, bucketSlug) var r0 textile.Bucket if rf, ok := ret.Get(0).(func(context.Context, 
string) textile.Bucket); ok { r0 = rf(ctx, bucketSlug) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(textile.Bucket) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, bucketSlug) } else { r1 = ret.Error(1) } return r0, r1 } // DeleteAccount provides a mock function with given fields: ctx func (_m *Client) DeleteAccount(ctx context.Context) error { ret := _m.Called(ctx) var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) } else { r0 = ret.Error(0) } return r0 } // DisableSync provides a mock function with given fields: func (_m *Client) DisableSync() { _m.Called() } // DownloadPublicItem provides a mock function with given fields: ctx, _a1 func (_m *Client) DownloadPublicItem(ctx context.Context, _a1 cid.Cid) (io.ReadCloser, error) { ret := _m.Called(ctx, _a1) var r0 io.ReadCloser if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) io.ReadCloser); ok { r0 = rf(ctx, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(io.ReadCloser) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { r1 = rf(ctx, _a1) } else { r1 = ret.Error(1) } return r0, r1 } // GetBucket provides a mock function with given fields: ctx, slug, remoteFile func (_m *Client) GetBucket(ctx context.Context, slug string, remoteFile *textile.GetBucketForRemoteFileInput) (textile.Bucket, error) { ret := _m.Called(ctx, slug, remoteFile) var r0 textile.Bucket if rf, ok := ret.Get(0).(func(context.Context, string, *textile.GetBucketForRemoteFileInput) textile.Bucket); ok { r0 = rf(ctx, slug, remoteFile) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(textile.Bucket) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, *textile.GetBucketForRemoteFileInput) error); ok { r1 = rf(ctx, slug, remoteFile) } else { r1 = ret.Error(1) } return r0, r1 } // GetDefaultBucket provides a mock function with given fields: ctx func (_m *Client) GetDefaultBucket(ctx context.Context) 
(textile.Bucket, error) { ret := _m.Called(ctx) var r0 textile.Bucket if rf, ok := ret.Get(0).(func(context.Context) textile.Bucket); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(textile.Bucket) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } // GetFailedHealthchecks provides a mock function with given fields: func (_m *Client) GetFailedHealthchecks() int { ret := _m.Called() var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() } else { r0 = ret.Get(0).(int) } return r0 } // GetMailAsNotifications provides a mock function with given fields: ctx, seek, limit func (_m *Client) GetMailAsNotifications(ctx context.Context, seek string, limit int) ([]*domain.Notification, error) { ret := _m.Called(ctx, seek, limit) var r0 []*domain.Notification if rf, ok := ret.Get(0).(func(context.Context, string, int) []*domain.Notification); ok { r0 = rf(ctx, seek, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*domain.Notification) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, int) error); ok { r1 = rf(ctx, seek, limit) } else { r1 = ret.Error(1) } return r0, r1 } // GetModel provides a mock function with given fields: func (_m *Client) GetModel() model.Model { ret := _m.Called() var r0 model.Model if rf, ok := ret.Get(0).(func() model.Model); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(model.Model) } } return r0 } // GetPathAccessRoles provides a mock function with given fields: ctx, b, path func (_m *Client) GetPathAccessRoles(ctx context.Context, b textile.Bucket, path string) ([]domain.Member, error) { ret := _m.Called(ctx, b, path) var r0 []domain.Member if rf, ok := ret.Get(0).(func(context.Context, textile.Bucket, string) []domain.Member); ok { r0 = rf(ctx, b, path) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]domain.Member) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, 
textile.Bucket, string) error); ok { r1 = rf(ctx, b, path) } else { r1 = ret.Error(1) } return r0, r1 } // GetPublicReceivedFile provides a mock function with given fields: ctx, cidHash, accepted func (_m *Client) GetPublicReceivedFile(ctx context.Context, cidHash string, accepted bool) (*domain.SharedDirEntry, string, error) { ret := _m.Called(ctx, cidHash, accepted) var r0 *domain.SharedDirEntry if rf, ok := ret.Get(0).(func(context.Context, string, bool) *domain.SharedDirEntry); ok { r0 = rf(ctx, cidHash, accepted) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*domain.SharedDirEntry) } } var r1 string if rf, ok := ret.Get(1).(func(context.Context, string, bool) string); ok { r1 = rf(ctx, cidHash, accepted) } else { r1 = ret.Get(1).(string) } var r2 error if rf, ok := ret.Get(2).(func(context.Context, string, bool) error); ok { r2 = rf(ctx, cidHash, accepted) } else { r2 = ret.Error(2) } return r0, r1, r2 } // GetPublicShareBucket provides a mock function with given fields: ctx func (_m *Client) GetPublicShareBucket(ctx context.Context) (textile.Bucket, error) { ret := _m.Called(ctx) var r0 textile.Bucket if rf, ok := ret.Get(0).(func(context.Context) textile.Bucket); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(textile.Bucket) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } // GetReceivedFiles provides a mock function with given fields: ctx, accepted, seek, limit func (_m *Client) GetReceivedFiles(ctx context.Context, accepted bool, seek string, limit int) ([]*domain.SharedDirEntry, string, error) { ret := _m.Called(ctx, accepted, seek, limit) var r0 []*domain.SharedDirEntry if rf, ok := ret.Get(0).(func(context.Context, bool, string, int) []*domain.SharedDirEntry); ok { r0 = rf(ctx, accepted, seek, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*domain.SharedDirEntry) } } var r1 string if rf, ok := ret.Get(1).(func(context.Context, bool, 
string, int) string); ok { r1 = rf(ctx, accepted, seek, limit) } else { r1 = ret.Get(1).(string) } var r2 error if rf, ok := ret.Get(2).(func(context.Context, bool, string, int) error); ok { r2 = rf(ctx, accepted, seek, limit) } else { r2 = ret.Error(2) } return r0, r1, r2 } // GetSentFiles provides a mock function with given fields: ctx, seek, limit func (_m *Client) GetSentFiles(ctx context.Context, seek string, limit int) ([]*domain.SharedDirEntry, string, error) { ret := _m.Called(ctx, seek, limit) var r0 []*domain.SharedDirEntry if rf, ok := ret.Get(0).(func(context.Context, string, int) []*domain.SharedDirEntry); ok { r0 = rf(ctx, seek, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*domain.SharedDirEntry) } } var r1 string if rf, ok := ret.Get(1).(func(context.Context, string, int) string); ok { r1 = rf(ctx, seek, limit) } else { r1 = ret.Get(1).(string) } var r2 error if rf, ok := ret.Get(2).(func(context.Context, string, int) error); ok { r2 = rf(ctx, seek, limit) } else { r2 = ret.Error(2) } return r0, r1, r2 } // GetThreadsConnection provides a mock function with given fields: func (_m *Client) GetThreadsConnection() (*client.Client, error) { ret := _m.Called() var r0 *client.Client if rf, ok := ret.Get(0).(func() *client.Client); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*client.Client) } } var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { r1 = ret.Error(1) } return r0, r1 } // IsHealthy provides a mock function with given fields: func (_m *Client) IsHealthy() bool { ret := _m.Called() var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() } else { r0 = ret.Get(0).(bool) } return r0 } // IsInitialized provides a mock function with given fields: func (_m *Client) IsInitialized() bool { ret := _m.Called() var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() } else { r0 = ret.Get(0).(bool) } return r0 } // IsRunning provides a mock function with given fields: func (_m 
*Client) IsRunning() bool { ret := _m.Called() var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() } else { r0 = ret.Get(0).(bool) } return r0 } // JoinBucket provides a mock function with given fields: ctx, slug, ti func (_m *Client) JoinBucket(ctx context.Context, slug string, ti *domain.ThreadInfo) (bool, error) { ret := _m.Called(ctx, slug, ti) var r0 bool if rf, ok := ret.Get(0).(func(context.Context, string, *domain.ThreadInfo) bool); ok { r0 = rf(ctx, slug, ti) } else { r0 = ret.Get(0).(bool) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, *domain.ThreadInfo) error); ok { r1 = rf(ctx, slug, ti) } else { r1 = ret.Error(1) } return r0, r1 } // ListBuckets provides a mock function with given fields: ctx func (_m *Client) ListBuckets(ctx context.Context) ([]textile.Bucket, error) { ret := _m.Called(ctx) var r0 []textile.Bucket if rf, ok := ret.Get(0).(func(context.Context) []textile.Bucket); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]textile.Bucket) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } // Listen provides a mock function with given fields: ctx, dbID, threadName func (_m *Client) Listen(ctx context.Context, dbID string, threadName string) (<-chan client.ListenEvent, error) { ret := _m.Called(ctx, dbID, threadName) var r0 <-chan client.ListenEvent if rf, ok := ret.Get(0).(func(context.Context, string, string) <-chan client.ListenEvent); ok { r0 = rf(ctx, dbID, threadName) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(<-chan client.ListenEvent) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { r1 = rf(ctx, dbID, threadName) } else { r1 = ret.Error(1) } return r0, r1 } // ManageShareFilesViaPublicKey provides a mock function with given fields: ctx, paths, pubkeys, keys, role func (_m *Client) ManageShareFilesViaPublicKey(ctx context.Context, paths 
[]domain.FullPath, pubkeys []crypto.PubKey, keys [][]byte, role domain.SharedFilesRoleAction) error { ret := _m.Called(ctx, paths, pubkeys, keys, role) var r0 error if rf, ok := ret.Get(0).(func(context.Context, []domain.FullPath, []crypto.PubKey, [][]byte, domain.SharedFilesRoleAction) error); ok { r0 = rf(ctx, paths, pubkeys, keys, role) } else { r0 = ret.Error(0) } return r0 } // RejectSharedFilesInvitation provides a mock function with given fields: ctx, invitation func (_m *Client) RejectSharedFilesInvitation(ctx context.Context, invitation domain.Invitation) (domain.Invitation, error) { ret := _m.Called(ctx, invitation) var r0 domain.Invitation if rf, ok := ret.Get(0).(func(context.Context, domain.Invitation) domain.Invitation); ok { r0 = rf(ctx, invitation) } else { r0 = ret.Get(0).(domain.Invitation) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, domain.Invitation) error); ok { r1 = rf(ctx, invitation) } else { r1 = ret.Error(1) } return r0, r1 } // RemoveKeys provides a mock function with given fields: ctx func (_m *Client) RemoveKeys(ctx context.Context) error { ret := _m.Called(ctx) var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) } else { r0 = ret.Error(0) } return r0 } // RestoreDB provides a mock function with given fields: ctx func (_m *Client) RestoreDB(ctx context.Context) error { ret := _m.Called(ctx) var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) } else { r0 = ret.Error(0) } return r0 } // SendMessage provides a mock function with given fields: ctx, recipient, body func (_m *Client) SendMessage(ctx context.Context, recipient crypto.PubKey, body []byte) (*usersdclient.Message, error) { ret := _m.Called(ctx, recipient, body) var r0 *usersdclient.Message if rf, ok := ret.Get(0).(func(context.Context, crypto.PubKey, []byte) *usersdclient.Message); ok { r0 = rf(ctx, recipient, body) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*usersdclient.Message) } } var 
r1 error if rf, ok := ret.Get(1).(func(context.Context, crypto.PubKey, []byte) error); ok { r1 = rf(ctx, recipient, body) } else { r1 = ret.Error(1) } return r0, r1 } // ShareBucket provides a mock function with given fields: ctx, bucketSlug func (_m *Client) ShareBucket(ctx context.Context, bucketSlug string) (*db.Info, error) { ret := _m.Called(ctx, bucketSlug) var r0 *db.Info if rf, ok := ret.Get(0).(func(context.Context, string) *db.Info); ok { r0 = rf(ctx, bucketSlug) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*db.Info) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, bucketSlug) } else { r1 = ret.Error(1) } return r0, r1 } // Shutdown provides a mock function with given fields: func (_m *Client) Shutdown() error { ret := _m.Called() var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() } else { r0 = ret.Error(0) } return r0 } // Start provides a mock function with given fields: ctx, cfg func (_m *Client) Start(ctx context.Context, cfg config.Config) error { ret := _m.Called(ctx, cfg) var r0 error if rf, ok := ret.Get(0).(func(context.Context, config.Config) error); ok { r0 = rf(ctx, cfg) } else { r0 = ret.Error(0) } return r0 } // ToggleBucketBackup provides a mock function with given fields: ctx, bucketSlug, bucketBackup func (_m *Client) ToggleBucketBackup(ctx context.Context, bucketSlug string, bucketBackup bool) (bool, error) { ret := _m.Called(ctx, bucketSlug, bucketBackup) var r0 bool if rf, ok := ret.Get(0).(func(context.Context, string, bool) bool); ok { r0 = rf(ctx, bucketSlug, bucketBackup) } else { r0 = ret.Get(0).(bool) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { r1 = rf(ctx, bucketSlug, bucketBackup) } else { r1 = ret.Error(1) } return r0, r1 } // WaitForHealthy provides a mock function with given fields: func (_m *Client) WaitForHealthy() chan error { ret := _m.Called() var r0 chan error if rf, ok := ret.Get(0).(func() chan error); 
ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(chan error) } } return r0 } // WaitForInitialized provides a mock function with given fields: func (_m *Client) WaitForInitialized() chan bool { ret := _m.Called() var r0 chan bool if rf, ok := ret.Get(0).(func() chan bool); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(chan bool) } } return r0 } // WaitForReady provides a mock function with given fields: func (_m *Client) WaitForReady() chan bool { ret := _m.Called() var r0 chan bool if rf, ok := ret.Get(0).(func() chan bool); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(chan bool) } } return r0 } ================================================ FILE: mocks/FilesSearchEngine.go ================================================ // Code generated by mockery v2.0.3. DO NOT EDIT. package mocks import ( context "context" search "github.com/FleekHQ/space-daemon/core/search" mock "github.com/stretchr/testify/mock" ) // FilesSearchEngine is an autogenerated mock type for the FilesSearchEngine type type FilesSearchEngine struct { mock.Mock } // DeleteFileData provides a mock function with given fields: ctx, data func (_m *FilesSearchEngine) DeleteFileData(ctx context.Context, data *search.DeleteIndexRecord) error { ret := _m.Called(ctx, data) var r0 error if rf, ok := ret.Get(0).(func(context.Context, *search.DeleteIndexRecord) error); ok { r0 = rf(ctx, data) } else { r0 = ret.Error(0) } return r0 } // InsertFileData provides a mock function with given fields: ctx, data func (_m *FilesSearchEngine) InsertFileData(ctx context.Context, data *search.InsertIndexRecord) (*search.IndexRecord, error) { ret := _m.Called(ctx, data) var r0 *search.IndexRecord if rf, ok := ret.Get(0).(func(context.Context, *search.InsertIndexRecord) *search.IndexRecord); ok { r0 = rf(ctx, data) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*search.IndexRecord) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, *search.InsertIndexRecord) 
error); ok { r1 = rf(ctx, data) } else { r1 = ret.Error(1) } return r0, r1 } // QueryFileData provides a mock function with given fields: ctx, query, limit func (_m *FilesSearchEngine) QueryFileData(ctx context.Context, query string, limit int) ([]*search.IndexRecord, error) { ret := _m.Called(ctx, query, limit) var r0 []*search.IndexRecord if rf, ok := ret.Get(0).(func(context.Context, string, int) []*search.IndexRecord); ok { r0 = rf(ctx, query, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*search.IndexRecord) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, int) error); ok { r1 = rf(ctx, query, limit) } else { r1 = ret.Error(1) } return r0, r1 } // Start provides a mock function with given fields: func (_m *FilesSearchEngine) Start() error { ret := _m.Called() var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() } else { r0 = ret.Error(0) } return r0 } ================================================ FILE: mocks/HubAuth.go ================================================ // Code generated by mockery v2.0.0. DO NOT EDIT. 
package mocks import ( context "context" hub "github.com/FleekHQ/space-daemon/core/textile/hub" mock "github.com/stretchr/testify/mock" ) // HubAuth is an autogenerated mock type for the HubAuth type type HubAuth struct { mock.Mock } // ClearCache provides a mock function with given fields: func (_m *HubAuth) ClearCache() error { ret := _m.Called() var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() } else { r0 = ret.Error(0) } return r0 } // GetHubContext provides a mock function with given fields: ctx func (_m *HubAuth) GetHubContext(ctx context.Context) (context.Context, error) { ret := _m.Called(ctx) var r0 context.Context if rf, ok := ret.Get(0).(func(context.Context) context.Context); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(context.Context) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } // GetTokensWithCache provides a mock function with given fields: ctx func (_m *HubAuth) GetTokensWithCache(ctx context.Context) (*hub.AuthTokens, error) { ret := _m.Called(ctx) var r0 *hub.AuthTokens if rf, ok := ret.Get(0).(func(context.Context) *hub.AuthTokens); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*hub.AuthTokens) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } ================================================ FILE: mocks/Keychain.go ================================================ // Code generated by mockery v2.0.0. DO NOT EDIT. 
// NOTE(review): mockery-generated mocks of the Keychain, Keyring, and Mailbox interfaces — DO NOT
// hand-edit; regenerate with `mockery`. Pattern: _m.Called(...) records the call; returns come from a
// registered stub func (ret.Get(i).(func(...) T)) or from the .Return(...) value (ret.Get(i)/ret.Error(i),
// nil-checked before asserting reference types). Variadic methods (GenerateKeyFromMnemonic,
// ListInboxMessages) expand their variadic args into the _m.Called(...) slice, per mockery convention.
// NOTE(review): mockery version skew across these generated files (v1.0.0 / v2.0.0 / v2.0.3) —
// consider regenerating all mocks with a single pinned mockery version.
package mocks import ( keychain "github.com/FleekHQ/space-daemon/core/keychain" crypto "github.com/libp2p/go-libp2p-core/crypto" mock "github.com/stretchr/testify/mock" permissions "github.com/FleekHQ/space-daemon/core/permissions" thread "github.com/textileio/go-threads/core/thread" ) // Keychain is an autogenerated mock type for the Keychain type type Keychain struct { mock.Mock } // DeleteKeypair provides a mock function with given fields: func (_m *Keychain) DeleteKeypair() error { ret := _m.Called() var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() } else { r0 = ret.Error(0) } return r0 } // GenerateKeyFromMnemonic provides a mock function with given fields: _a0 func (_m *Keychain) GenerateKeyFromMnemonic(_a0 ...keychain.GenerateKeyFromMnemonicOpts) (string, error) { _va := make([]interface{}, len(_a0)) for _i := range _a0 { _va[_i] = _a0[_i] } var _ca []interface{} _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 string if rf, ok := ret.Get(0).(func(...keychain.GenerateKeyFromMnemonicOpts) string); ok { r0 = rf(_a0...) } else { r0 = ret.Get(0).(string) } var r1 error if rf, ok := ret.Get(1).(func(...keychain.GenerateKeyFromMnemonicOpts) error); ok { r1 = rf(_a0...) 
} else { r1 = ret.Error(1) } return r0, r1 } // GenerateKeyPair provides a mock function with given fields: func (_m *Keychain) GenerateKeyPair() ([]byte, []byte, error) { ret := _m.Called() var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } var r1 []byte if rf, ok := ret.Get(1).(func() []byte); ok { r1 = rf() } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]byte) } } var r2 error if rf, ok := ret.Get(2).(func() error); ok { r2 = rf() } else { r2 = ret.Error(2) } return r0, r1, r2 } // GenerateKeyPairWithForce provides a mock function with given fields: func (_m *Keychain) GenerateKeyPairWithForce() ([]byte, []byte, error) { ret := _m.Called() var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } var r1 []byte if rf, ok := ret.Get(1).(func() []byte); ok { r1 = rf() } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]byte) } } var r2 error if rf, ok := ret.Get(2).(func() error); ok { r2 = rf() } else { r2 = ret.Error(2) } return r0, r1, r2 } // GetAppToken provides a mock function with given fields: key func (_m *Keychain) GetAppToken(key string) (*permissions.AppToken, error) { ret := _m.Called(key) var r0 *permissions.AppToken if rf, ok := ret.Get(0).(func(string) *permissions.AppToken); ok { r0 = rf(key) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*permissions.AppToken) } } var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(key) } else { r1 = ret.Error(1) } return r0, r1 } // GetManagedThreadKey provides a mock function with given fields: threadKeyName func (_m *Keychain) GetManagedThreadKey(threadKeyName string) (thread.Key, error) { ret := _m.Called(threadKeyName) var r0 thread.Key if rf, ok := ret.Get(0).(func(string) thread.Key); ok { r0 = rf(threadKeyName) } else { r0 = ret.Get(0).(thread.Key) } var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = 
rf(threadKeyName) } else { r1 = ret.Error(1) } return r0, r1 } // GetStoredKeyPairInLibP2PFormat provides a mock function with given fields: func (_m *Keychain) GetStoredKeyPairInLibP2PFormat() (crypto.PrivKey, crypto.PubKey, error) { ret := _m.Called() var r0 crypto.PrivKey if rf, ok := ret.Get(0).(func() crypto.PrivKey); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(crypto.PrivKey) } } var r1 crypto.PubKey if rf, ok := ret.Get(1).(func() crypto.PubKey); ok { r1 = rf() } else { if ret.Get(1) != nil { r1 = ret.Get(1).(crypto.PubKey) } } var r2 error if rf, ok := ret.Get(2).(func() error); ok { r2 = rf() } else { r2 = ret.Error(2) } return r0, r1, r2 } // GetStoredMnemonic provides a mock function with given fields: func (_m *Keychain) GetStoredMnemonic() (string, error) { ret := _m.Called() var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() } else { r0 = ret.Get(0).(string) } var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { r1 = ret.Error(1) } return r0, r1 } // GetStoredPublicKey provides a mock function with given fields: func (_m *Keychain) GetStoredPublicKey() (crypto.PubKey, error) { ret := _m.Called() var r0 crypto.PubKey if rf, ok := ret.Get(0).(func() crypto.PubKey); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(crypto.PubKey) } } var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { r1 = ret.Error(1) } return r0, r1 } // ImportExistingKeyPair provides a mock function with given fields: priv, mnemonic func (_m *Keychain) ImportExistingKeyPair(priv crypto.PrivKey, mnemonic string) error { ret := _m.Called(priv, mnemonic) var r0 error if rf, ok := ret.Get(0).(func(crypto.PrivKey, string) error); ok { r0 = rf(priv, mnemonic) } else { r0 = ret.Error(0) } return r0 } // Sign provides a mock function with given fields: _a0 func (_m *Keychain) Sign(_a0 []byte) ([]byte, error) { ret := _m.Called(_a0) var r0 []byte if rf, ok := ret.Get(0).(func([]byte) []byte); 
ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) } } var r1 error if rf, ok := ret.Get(1).(func([]byte) error); ok { r1 = rf(_a0) } else { r1 = ret.Error(1) } return r0, r1 } // StoreAppToken provides a mock function with given fields: tok func (_m *Keychain) StoreAppToken(tok *permissions.AppToken) error { ret := _m.Called(tok) var r0 error if rf, ok := ret.Get(0).(func(*permissions.AppToken) error); ok { r0 = rf(tok) } else { r0 = ret.Error(0) } return r0 } ================================================ FILE: mocks/Keyring.go ================================================ // Code generated by mockery v2.0.0. DO NOT EDIT. package mocks import ( keyring "github.com/99designs/keyring" mock "github.com/stretchr/testify/mock" ) // Keyring is an autogenerated mock type for the Keyring type type Keyring struct { mock.Mock } // Get provides a mock function with given fields: _a0 func (_m *Keyring) Get(_a0 string) (keyring.Item, error) { ret := _m.Called(_a0) var r0 keyring.Item if rf, ok := ret.Get(0).(func(string) keyring.Item); ok { r0 = rf(_a0) } else { r0 = ret.Get(0).(keyring.Item) } var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(_a0) } else { r1 = ret.Error(1) } return r0, r1 } // GetMetadata provides a mock function with given fields: _a0 func (_m *Keyring) GetMetadata(_a0 string) (keyring.Metadata, error) { ret := _m.Called(_a0) var r0 keyring.Metadata if rf, ok := ret.Get(0).(func(string) keyring.Metadata); ok { r0 = rf(_a0) } else { r0 = ret.Get(0).(keyring.Metadata) } var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(_a0) } else { r1 = ret.Error(1) } return r0, r1 } // Remove provides a mock function with given fields: _a0 func (_m *Keyring) Remove(_a0 string) error { ret := _m.Called(_a0) var r0 error if rf, ok := ret.Get(0).(func(string) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) } return r0 } // Set provides a mock function with given fields: _a0 func (_m *Keyring) 
Set(_a0 keyring.Item) error { ret := _m.Called(_a0) var r0 error if rf, ok := ret.Get(0).(func(keyring.Item) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) } return r0 } ================================================ FILE: mocks/Mailbox.go ================================================ // Code generated by mockery v1.0.0. DO NOT EDIT. package mocks import ( client "github.com/textileio/textile/v2/api/usersd/client" cmd "github.com/textileio/textile/v2/cmd" context "context" local "github.com/textileio/textile/v2/mail/local" mock "github.com/stretchr/testify/mock" thread "github.com/textileio/go-threads/core/thread" ) // Mailbox is an autogenerated mock type for the Mailbox type type Mailbox struct { mock.Mock } // Identity provides a mock function with given fields: func (_m *Mailbox) Identity() thread.Identity { ret := _m.Called() var r0 thread.Identity if rf, ok := ret.Get(0).(func() thread.Identity); ok { r0 = rf() } else { if ret.Get(0) != nil { r0 = ret.Get(0).(thread.Identity) } } return r0 } // ListInboxMessages provides a mock function with given fields: ctx, opts func (_m *Mailbox) ListInboxMessages(ctx context.Context, opts ...client.ListOption) ([]client.Message, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] } var _ca []interface{} _ca = append(_ca, ctx) _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 []client.Message if rf, ok := ret.Get(0).(func(context.Context, ...client.ListOption) []client.Message); ok { r0 = rf(ctx, opts...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]client.Message) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, ...client.ListOption) error); ok { r1 = rf(ctx, opts...) 
} else { r1 = ret.Error(1) } return r0, r1 } // SendMessage provides a mock function with given fields: ctx, to, body func (_m *Mailbox) SendMessage(ctx context.Context, to thread.PubKey, body []byte) (client.Message, error) { ret := _m.Called(ctx, to, body) var r0 client.Message if rf, ok := ret.Get(0).(func(context.Context, thread.PubKey, []byte) client.Message); ok { r0 = rf(ctx, to, body) } else { r0 = ret.Get(0).(client.Message) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, thread.PubKey, []byte) error); ok { r1 = rf(ctx, to, body) } else { r1 = ret.Error(1) } return r0, r1 } // WatchInbox provides a mock function with given fields: ctx, mevents, offline func (_m *Mailbox) WatchInbox(ctx context.Context, mevents chan<- local.MailboxEvent, offline bool) (<-chan cmd.WatchState, error) { ret := _m.Called(ctx, mevents, offline) var r0 <-chan cmd.WatchState if rf, ok := ret.Get(0).(func(context.Context, chan<- local.MailboxEvent, bool) <-chan cmd.WatchState); ok { r0 = rf(ctx, mevents, offline) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(<-chan cmd.WatchState) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, chan<- local.MailboxEvent, bool) error); ok { r1 = rf(ctx, mevents, offline) } else { r1 = ret.Error(1) } return r0, r1 } ================================================ FILE: mocks/Model.go ================================================ // Code generated by mockery v1.0.0. DO NOT EDIT. 
// NOTE(review): mockery-generated mock of the core/textile/model Model interface — DO NOT hand-edit;
// regenerate with `mockery`. Every method follows the same generated pattern: _m.Called(args...)
// records the invocation, then each return value comes either from a registered stub function
// (ret.Get(i).(func(...) T)) or from the value queued via .Return(...) (ret.Get(i)/ret.Error(i),
// nil-checked before the type assertion for pointer/slice/map results).
package mocks import ( context "context" domain "github.com/FleekHQ/space-daemon/core/space/domain" mock "github.com/stretchr/testify/mock" model "github.com/FleekHQ/space-daemon/core/textile/model" ) // Model is an autogenerated mock type for the Model type type Model struct { mock.Mock } // BucketBackupToggle provides a mock function with given fields: ctx, bucketSlug, backup func (_m *Model) BucketBackupToggle(ctx context.Context, bucketSlug string, backup bool) (*model.BucketSchema, error) { ret := _m.Called(ctx, bucketSlug, backup) var r0 *model.BucketSchema if rf, ok := ret.Get(0).(func(context.Context, string, bool) *model.BucketSchema); ok { r0 = rf(ctx, bucketSlug, backup) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.BucketSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { r1 = rf(ctx, bucketSlug, backup) } else { r1 = ret.Error(1) } return r0, r1 } // CreateBucket provides a mock function with given fields: ctx, bucketSlug, dbID func (_m *Model) CreateBucket(ctx context.Context, bucketSlug string, dbID string) (*model.BucketSchema, error) { ret := _m.Called(ctx, bucketSlug, dbID) var r0 *model.BucketSchema if rf, ok := ret.Get(0).(func(context.Context, string, string) *model.BucketSchema); ok { r0 = rf(ctx, bucketSlug, dbID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.BucketSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { r1 = rf(ctx, bucketSlug, dbID) } else { r1 = ret.Error(1) } return r0, r1 } // CreateMirrorBucket provides a mock function with given fields: ctx, bucketSlug, mirrorBucket func (_m *Model) CreateMirrorBucket(ctx context.Context, bucketSlug string, mirrorBucket *model.MirrorBucketSchema) (*model.BucketSchema, error) { ret := _m.Called(ctx, bucketSlug, mirrorBucket) var r0 *model.BucketSchema if rf, ok := ret.Get(0).(func(context.Context, string, *model.MirrorBucketSchema) *model.BucketSchema); ok { r0 = rf(ctx, 
bucketSlug, mirrorBucket) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.BucketSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, *model.MirrorBucketSchema) error); ok { r1 = rf(ctx, bucketSlug, mirrorBucket) } else { r1 = ret.Error(1) } return r0, r1 } // CreateMirrorFile provides a mock function with given fields: ctx, mirrorFile func (_m *Model) CreateMirrorFile(ctx context.Context, mirrorFile *domain.MirrorFile) (*model.MirrorFileSchema, error) { ret := _m.Called(ctx, mirrorFile) var r0 *model.MirrorFileSchema if rf, ok := ret.Get(0).(func(context.Context, *domain.MirrorFile) *model.MirrorFileSchema); ok { r0 = rf(ctx, mirrorFile) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.MirrorFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, *domain.MirrorFile) error); ok { r1 = rf(ctx, mirrorFile) } else { r1 = ret.Error(1) } return r0, r1 } // CreateReceivedFileViaInvitation provides a mock function with given fields: ctx, file, invitationId, accepted, key, sharedBy func (_m *Model) CreateReceivedFileViaInvitation(ctx context.Context, file domain.FullPath, invitationId string, accepted bool, key []byte, sharedBy string) (*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, file, invitationId, accepted, key, sharedBy) var r0 *model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, domain.FullPath, string, bool, []byte, string) *model.ReceivedFileSchema); ok { r0 = rf(ctx, file, invitationId, accepted, key, sharedBy) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, domain.FullPath, string, bool, []byte, string) error); ok { r1 = rf(ctx, file, invitationId, accepted, key, sharedBy) } else { r1 = ret.Error(1) } return r0, r1 } // CreateReceivedFileViaPublicLink provides a mock function with given fields: ctx, ipfsHash, password, filename, filesize, accepted func (_m *Model) 
CreateReceivedFileViaPublicLink(ctx context.Context, ipfsHash string, password string, filename string, filesize string, accepted bool) (*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, ipfsHash, password, filename, filesize, accepted) var r0 *model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, bool) *model.ReceivedFileSchema); ok { r0 = rf(ctx, ipfsHash, password, filename, filesize, accepted) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, bool) error); ok { r1 = rf(ctx, ipfsHash, password, filename, filesize, accepted) } else { r1 = ret.Error(1) } return r0, r1 } // CreateSentFileViaInvitation provides a mock function with given fields: ctx, file, invitationId, key func (_m *Model) CreateSentFileViaInvitation(ctx context.Context, file domain.FullPath, invitationId string, key []byte) (*model.SentFileSchema, error) { ret := _m.Called(ctx, file, invitationId, key) var r0 *model.SentFileSchema if rf, ok := ret.Get(0).(func(context.Context, domain.FullPath, string, []byte) *model.SentFileSchema); ok { r0 = rf(ctx, file, invitationId, key) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.SentFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, domain.FullPath, string, []byte) error); ok { r1 = rf(ctx, file, invitationId, key) } else { r1 = ret.Error(1) } return r0, r1 } // CreateSharedPublicKey provides a mock function with given fields: ctx, pubKey func (_m *Model) CreateSharedPublicKey(ctx context.Context, pubKey string) (*model.SharedPublicKeySchema, error) { ret := _m.Called(ctx, pubKey) var r0 *model.SharedPublicKeySchema if rf, ok := ret.Get(0).(func(context.Context, string) *model.SharedPublicKeySchema); ok { r0 = rf(ctx, pubKey) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.SharedPublicKeySchema) } } var r1 error if rf, ok := 
ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, pubKey) } else { r1 = ret.Error(1) } return r0, r1 } // DeleteReceivedFiles provides a mock function with given fields: ctx, paths, keys func (_m *Model) DeleteReceivedFiles(ctx context.Context, paths []domain.FullPath, keys [][]byte) error { ret := _m.Called(ctx, paths, keys) var r0 error if rf, ok := ret.Get(0).(func(context.Context, []domain.FullPath, [][]byte) error); ok { r0 = rf(ctx, paths, keys) } else { r0 = ret.Error(0) } return r0 } // DeleteSearchIndexRecord provides a mock function with given fields: ctx, name, path, bucketSlug, dbId func (_m *Model) DeleteSearchIndexRecord(ctx context.Context, name string, path string, bucketSlug string, dbId string) error { ret := _m.Called(ctx, name, path, bucketSlug, dbId) var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) error); ok { r0 = rf(ctx, name, path, bucketSlug, dbId) } else { r0 = ret.Error(0) } return r0 } // FindBucket provides a mock function with given fields: ctx, bucketSlug func (_m *Model) FindBucket(ctx context.Context, bucketSlug string) (*model.BucketSchema, error) { ret := _m.Called(ctx, bucketSlug) var r0 *model.BucketSchema if rf, ok := ret.Get(0).(func(context.Context, string) *model.BucketSchema); ok { r0 = rf(ctx, bucketSlug) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.BucketSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, bucketSlug) } else { r1 = ret.Error(1) } return r0, r1 } // FindMirrorFileByPathAndBucketSlug provides a mock function with given fields: ctx, path, bucketSlug func (_m *Model) FindMirrorFileByPathAndBucketSlug(ctx context.Context, path string, bucketSlug string) (*model.MirrorFileSchema, error) { ret := _m.Called(ctx, path, bucketSlug) var r0 *model.MirrorFileSchema if rf, ok := ret.Get(0).(func(context.Context, string, string) *model.MirrorFileSchema); ok { r0 = rf(ctx, path, bucketSlug) } else 
{ if ret.Get(0) != nil { r0 = ret.Get(0).(*model.MirrorFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { r1 = rf(ctx, path, bucketSlug) } else { r1 = ret.Error(1) } return r0, r1 } // FindMirrorFileByPaths provides a mock function with given fields: ctx, paths func (_m *Model) FindMirrorFileByPaths(ctx context.Context, paths []string) (map[string]*model.MirrorFileSchema, error) { ret := _m.Called(ctx, paths) var r0 map[string]*model.MirrorFileSchema if rf, ok := ret.Get(0).(func(context.Context, []string) map[string]*model.MirrorFileSchema); ok { r0 = rf(ctx, paths) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(map[string]*model.MirrorFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { r1 = rf(ctx, paths) } else { r1 = ret.Error(1) } return r0, r1 } // FindPublicLinkReceivedFile provides a mock function with given fields: ctx, ipfsHash func (_m *Model) FindPublicLinkReceivedFile(ctx context.Context, ipfsHash string) (*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, ipfsHash) var r0 *model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, string) *model.ReceivedFileSchema); ok { r0 = rf(ctx, ipfsHash) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, ipfsHash) } else { r1 = ret.Error(1) } return r0, r1 } // FindReceivedFile provides a mock function with given fields: ctx, remoteDbID, bucket, path func (_m *Model) FindReceivedFile(ctx context.Context, remoteDbID string, bucket string, path string) (*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, remoteDbID, bucket, path) var r0 *model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *model.ReceivedFileSchema); ok { r0 = rf(ctx, remoteDbID, bucket, path) } else { if ret.Get(0) != nil { r0 = 
ret.Get(0).(*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { r1 = rf(ctx, remoteDbID, bucket, path) } else { r1 = ret.Error(1) } return r0, r1 } // FindReceivedFilesByIds provides a mock function with given fields: ctx, ids func (_m *Model) FindReceivedFilesByIds(ctx context.Context, ids []string) ([]*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, ids) var r0 []*model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, []string) []*model.ReceivedFileSchema); ok { r0 = rf(ctx, ids) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { r1 = rf(ctx, ids) } else { r1 = ret.Error(1) } return r0, r1 } // FindSentFile provides a mock function with given fields: ctx, remoteDbID, bucket, path func (_m *Model) FindSentFile(ctx context.Context, remoteDbID string, bucket string, path string) (*model.SentFileSchema, error) { ret := _m.Called(ctx, remoteDbID, bucket, path) var r0 *model.SentFileSchema if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *model.SentFileSchema); ok { r0 = rf(ctx, remoteDbID, bucket, path) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.SentFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { r1 = rf(ctx, remoteDbID, bucket, path) } else { r1 = ret.Error(1) } return r0, r1 } // InitSearchIndexCollection provides a mock function with given fields: ctx func (_m *Model) InitSearchIndexCollection(ctx context.Context) error { ret := _m.Called(ctx) var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) } else { r0 = ret.Error(0) } return r0 } // ListBuckets provides a mock function with given fields: ctx func (_m *Model) ListBuckets(ctx context.Context) ([]*model.BucketSchema, error) { ret := _m.Called(ctx) var r0 
[]*model.BucketSchema if rf, ok := ret.Get(0).(func(context.Context) []*model.BucketSchema); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.BucketSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } // ListReceivedFiles provides a mock function with given fields: ctx, accepted, seek, limit func (_m *Model) ListReceivedFiles(ctx context.Context, accepted bool, seek string, limit int) ([]*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, accepted, seek, limit) var r0 []*model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, bool, string, int) []*model.ReceivedFileSchema); ok { r0 = rf(ctx, accepted, seek, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, bool, string, int) error); ok { r1 = rf(ctx, accepted, seek, limit) } else { r1 = ret.Error(1) } return r0, r1 } // ListReceivedPublicFiles provides a mock function with given fields: ctx, cidHash, accepted func (_m *Model) ListReceivedPublicFiles(ctx context.Context, cidHash string, accepted bool) ([]*model.ReceivedFileSchema, error) { ret := _m.Called(ctx, cidHash, accepted) var r0 []*model.ReceivedFileSchema if rf, ok := ret.Get(0).(func(context.Context, string, bool) []*model.ReceivedFileSchema); ok { r0 = rf(ctx, cidHash, accepted) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.ReceivedFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { r1 = rf(ctx, cidHash, accepted) } else { r1 = ret.Error(1) } return r0, r1 } // ListSentFiles provides a mock function with given fields: ctx, seek, limit func (_m *Model) ListSentFiles(ctx context.Context, seek string, limit int) ([]*model.SentFileSchema, error) { ret := _m.Called(ctx, seek, limit) var r0 []*model.SentFileSchema if rf, ok := ret.Get(0).(func(context.Context, 
string, int) []*model.SentFileSchema); ok { r0 = rf(ctx, seek, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.SentFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, int) error); ok { r1 = rf(ctx, seek, limit) } else { r1 = ret.Error(1) } return r0, r1 } // ListSharedPublicKeys provides a mock function with given fields: ctx func (_m *Model) ListSharedPublicKeys(ctx context.Context) ([]*model.SharedPublicKeySchema, error) { ret := _m.Called(ctx) var r0 []*model.SharedPublicKeySchema if rf, ok := ret.Get(0).(func(context.Context) []*model.SharedPublicKeySchema); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.SharedPublicKeySchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } // QuerySearchIndex provides a mock function with given fields: ctx, query func (_m *Model) QuerySearchIndex(ctx context.Context, query string) ([]*model.SearchIndexRecord, error) { ret := _m.Called(ctx, query) var r0 []*model.SearchIndexRecord if rf, ok := ret.Get(0).(func(context.Context, string) []*model.SearchIndexRecord); ok { r0 = rf(ctx, query) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.SearchIndexRecord) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, query) } else { r1 = ret.Error(1) } return r0, r1 } // UpdateMirrorFile provides a mock function with given fields: ctx, mirrorFile func (_m *Model) UpdateMirrorFile(ctx context.Context, mirrorFile *model.MirrorFileSchema) (*model.MirrorFileSchema, error) { ret := _m.Called(ctx, mirrorFile) var r0 *model.MirrorFileSchema if rf, ok := ret.Get(0).(func(context.Context, *model.MirrorFileSchema) *model.MirrorFileSchema); ok { r0 = rf(ctx, mirrorFile) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.MirrorFileSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, *model.MirrorFileSchema) 
error); ok { r1 = rf(ctx, mirrorFile) } else { r1 = ret.Error(1) } return r0, r1 } // UpdateSearchIndexRecord provides a mock function with given fields: ctx, name, path, itemType, bucketSlug, dbId func (_m *Model) UpdateSearchIndexRecord(ctx context.Context, name string, path string, itemType model.SearchItemType, bucketSlug string, dbId string) (*model.SearchIndexRecord, error) { ret := _m.Called(ctx, name, path, itemType, bucketSlug, dbId) var r0 *model.SearchIndexRecord if rf, ok := ret.Get(0).(func(context.Context, string, string, model.SearchItemType, string, string) *model.SearchIndexRecord); ok { r0 = rf(ctx, name, path, itemType, bucketSlug, dbId) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.SearchIndexRecord) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string, model.SearchItemType, string, string) error); ok { r1 = rf(ctx, name, path, itemType, bucketSlug, dbId) } else { r1 = ret.Error(1) } return r0, r1 } // UpsertBucket provides a mock function with given fields: ctx, bucketSlug, dbID func (_m *Model) UpsertBucket(ctx context.Context, bucketSlug string, dbID string) (*model.BucketSchema, error) { ret := _m.Called(ctx, bucketSlug, dbID) var r0 *model.BucketSchema if rf, ok := ret.Get(0).(func(context.Context, string, string) *model.BucketSchema); ok { r0 = rf(ctx, bucketSlug, dbID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.BucketSchema) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { r1 = rf(ctx, bucketSlug, dbID) } else { r1 = ret.Error(1) } return r0, r1 } ================================================ FILE: mocks/Store.go ================================================ // Code generated by mockery v2.0.0. DO NOT EDIT. 
package mocks

import mock "github.com/stretchr/testify/mock"

// Store is an autogenerated mock type for the Store type.
// Call expectations and canned return values are managed by the
// embedded mock.Mock (testify); each method forwards to _m.Called.
type Store struct {
	mock.Mock
}

// Close provides a mock function with given fields:
func (_m *Store) Close() error {
	ret := _m.Called()

	var r0 error
	// If the expectation supplied a function, invoke it; otherwise
	// return the canned error value.
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DropAll provides a mock function with given fields:
func (_m *Store) DropAll() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Get provides a mock function with given fields: key
func (_m *Store) Get(key []byte) ([]byte, error) {
	ret := _m.Called(key)

	var r0 []byte
	if rf, ok := ret.Get(0).(func([]byte) []byte); ok {
		r0 = rf(key)
	} else {
		// nil check avoids a failed type assertion when the
		// expectation returned an untyped nil.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func([]byte) error); ok {
		r1 = rf(key)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// IsOpen provides a mock function with given fields:
func (_m *Store) IsOpen() bool {
	ret := _m.Called()

	var r0 bool
	if rf, ok := ret.Get(0).(func() bool); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// KeysWithPrefix provides a mock function with given fields: prefix
func (_m *Store) KeysWithPrefix(prefix string) ([]string, error) {
	ret := _m.Called(prefix)

	var r0 []string
	if rf, ok := ret.Get(0).(func(string) []string); ok {
		r0 = rf(prefix)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]string)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(prefix)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Open provides a mock function with given fields:
func (_m *Store) Open() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Remove provides a mock function with given fields: key
func (_m *Store) Remove(key []byte) error {
	ret := _m.Called(key)

	var r0 error
	if rf, ok := ret.Get(0).(func([]byte) error); ok {
		r0 = rf(key)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Set provides a mock function with given fields: key, value
func (_m *Store) Set(key []byte, value []byte) error {
	ret := _m.Called(key, value)

	var r0 error
	if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok {
		r0 = rf(key, value)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SetString provides a mock function with given fields: key, value
func (_m *Store) SetString(key string, value string) error {
	ret := _m.Called(key, value)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(key, value)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

================================================
FILE: mocks/Syncer.go
================================================
// Code generated by mockery v2.0.0. DO NOT EDIT.

package mocks

import (
	domain "github.com/FleekHQ/space-daemon/core/space/domain"
	mock "github.com/stretchr/testify/mock"
)

// Syncer is an autogenerated mock type for the Syncer type.
// Expectations are recorded via the embedded mock.Mock.
type Syncer struct {
	mock.Mock
}

// AddFileWatch provides a mock function with given fields: addFileInfo
func (_m *Syncer) AddFileWatch(addFileInfo domain.AddWatchFile) error {
	ret := _m.Called(addFileInfo)

	var r0 error
	if rf, ok := ret.Get(0).(func(domain.AddWatchFile) error); ok {
		r0 = rf(addFileInfo)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetOpenFilePath provides a mock function with given fields: bucketSlug, bucketPath, dbID, cid
func (_m *Syncer) GetOpenFilePath(bucketSlug string, bucketPath string, dbID string, cid string) (string, bool) {
	ret := _m.Called(bucketSlug, bucketPath, dbID, cid)

	var r0 string
	if rf, ok := ret.Get(0).(func(string, string, string, string) string); ok {
		r0 = rf(bucketSlug, bucketPath, dbID, cid)
	} else {
		r0 = ret.Get(0).(string)
	}

	var r1 bool
	if rf, ok := ret.Get(1).(func(string, string, string, string) bool); ok {
		r1 = rf(bucketSlug, bucketPath, dbID, cid)
	} else {
		r1 = ret.Get(1).(bool)
	}

	return r0, r1
}
================================================
FILE: mocks/Vault.go
================================================
// Code generated by mockery v1.0.0. DO NOT EDIT.

package mocks

import (
	domain "github.com/FleekHQ/space-daemon/core/space/domain"
	mock "github.com/stretchr/testify/mock"
	vault "github.com/FleekHQ/space-daemon/core/vault"
)

// Vault is an autogenerated mock type for the Vault type.
// Expectations and canned returns are handled by the embedded
// mock.Mock (testify).
type Vault struct {
	mock.Mock
}

// Retrieve provides a mock function with given fields: uuid, passphrase, backupType
func (_m *Vault) Retrieve(uuid string, passphrase string, backupType domain.KeyBackupType) ([]vault.VaultItem, error) {
	ret := _m.Called(uuid, passphrase, backupType)

	var r0 []vault.VaultItem
	// Prefer a caller-supplied function over a fixed return value.
	if rf, ok := ret.Get(0).(func(string, string, domain.KeyBackupType) []vault.VaultItem); ok {
		r0 = rf(uuid, passphrase, backupType)
	} else {
		// nil check avoids a failed type assertion on untyped nil.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]vault.VaultItem)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, string, domain.KeyBackupType) error); ok {
		r1 = rf(uuid, passphrase, backupType)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Store provides a mock function with given fields: uuid, passphrase, backupType, apiToken, items
func (_m *Vault) Store(uuid string, passphrase string, backupType domain.KeyBackupType, apiToken string, items []vault.VaultItem) (*vault.StoredVault, error) {
	ret := _m.Called(uuid, passphrase, backupType, apiToken, items)

	var r0 *vault.StoredVault
	if rf, ok := ret.Get(0).(func(string, string, domain.KeyBackupType, string, []vault.VaultItem) *vault.StoredVault); ok {
		r0 = rf(uuid, passphrase, backupType, apiToken, items)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*vault.StoredVault)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, string, domain.KeyBackupType, string, []vault.VaultItem) error); ok {
		r1 = rf(uuid, passphrase, backupType, apiToken, items)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

================================================
FILE: mocks/fuse/FSDataSource.go
================================================
// Code generated by mockery v2.0.3. DO NOT EDIT.

package fuse

import (
	context "context"
	fsds "github.com/FleekHQ/space-daemon/core/fsds"
	mock "github.com/stretchr/testify/mock"
	os "os"
)

// FSDataSource is an autogenerated mock type for the FSDataSource type.
// Expectations and canned returns are handled by the embedded
// mock.Mock (testify).
type FSDataSource struct {
	mock.Mock
}

// CreateEntry provides a mock function with given fields: ctx, path, mode
func (_m *FSDataSource) CreateEntry(ctx context.Context, path string, mode os.FileMode) (*fsds.DirEntry, error) {
	ret := _m.Called(ctx, path, mode)

	var r0 *fsds.DirEntry
	// Prefer a caller-supplied function over a fixed return value.
	if rf, ok := ret.Get(0).(func(context.Context, string, os.FileMode) *fsds.DirEntry); ok {
		r0 = rf(ctx, path, mode)
	} else {
		// nil check avoids a failed type assertion on untyped nil.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*fsds.DirEntry)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string, os.FileMode) error); ok {
		r1 = rf(ctx, path, mode)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeleteEntry provides a mock function with given fields: ctx, path
func (_m *FSDataSource) DeleteEntry(ctx context.Context, path string) error {
	ret := _m.Called(ctx, path)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
		r0 = rf(ctx, path)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Get provides a mock function with given fields: ctx, path
func (_m *FSDataSource) Get(ctx context.Context, path string) (*fsds.DirEntry, error) {
	ret := _m.Called(ctx, path)

	var r0 *fsds.DirEntry
	if rf, ok := ret.Get(0).(func(context.Context, string) *fsds.DirEntry); ok {
		r0 = rf(ctx, path)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*fsds.DirEntry)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(ctx, path)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetChildren provides a mock function with given fields: ctx, path
func (_m *FSDataSource) GetChildren(ctx context.Context, path string) ([]*fsds.DirEntry, error) {
	ret := _m.Called(ctx, path)

	var r0 []*fsds.DirEntry
	if rf, ok := ret.Get(0).(func(context.Context, string) []*fsds.DirEntry); ok {
		r0 = rf(ctx, path)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*fsds.DirEntry)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(ctx, path)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Open provides a mock function with given fields: ctx, path
func (_m *FSDataSource) Open(ctx context.Context, path string) (fsds.FileReadWriterCloser, error) {
	ret := _m.Called(ctx, path)

	var r0 fsds.FileReadWriterCloser
	if rf, ok := ret.Get(0).(func(context.Context, string) fsds.FileReadWriterCloser); ok {
		r0 = rf(ctx, path)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(fsds.FileReadWriterCloser)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(ctx, path)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// RenameEntry provides a mock function with given fields: ctx, oldPath, newPath
func (_m *FSDataSource) RenameEntry(ctx context.Context, oldPath string, newPath string) error {
	ret := _m.Called(ctx, oldPath, newPath)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
		r0 = rf(ctx, oldPath, newPath)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

================================================
FILE: mocks/fuse/FuseInstaller.go
================================================
// Code generated by mockery v2.0.3. DO NOT EDIT.
package fuse

import (
	context "context"
	mock "github.com/stretchr/testify/mock"
)

// FuseInstaller is an autogenerated mock type for the FuseInstaller type.
// Expectations and canned returns are handled by the embedded
// mock.Mock (testify).
type FuseInstaller struct {
	mock.Mock
}

// Install provides a mock function with given fields: ctx, args
func (_m *FuseInstaller) Install(ctx context.Context, args map[string]interface{}) error {
	ret := _m.Called(ctx, args)

	var r0 error
	// Prefer a caller-supplied function over a fixed return value.
	if rf, ok := ret.Get(0).(func(context.Context, map[string]interface{}) error); ok {
		r0 = rf(ctx, args)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// IsInstalled provides a mock function with given fields: ctx
func (_m *FuseInstaller) IsInstalled(ctx context.Context) (bool, error) {
	ret := _m.Called(ctx)

	var r0 bool
	if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
		r0 = rf(ctx)
	} else {
		r0 = ret.Get(0).(bool)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(ctx)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

================================================
FILE: mocks/mock.go
================================================
package mocks

import "github.com/stretchr/testify/mock"

// Common utils and helpers can go here

// NOTE: mocks generated using https://github.com/vektra/mockery

// Path is a hand-written mock. Only String is wired to testify;
// the remaining methods deliberately panic until implemented.
//
// NOTE(review): the methods use value receivers, so each call copies
// the embedded mock.Mock; testify mocks conventionally use pointer
// receivers — confirm the value receivers are intentional.
type Path struct {
	mock.Mock
}

func (m Path) String() string {
	args := m.Called()
	return args.String(0)
}

func (m Path) Namespace() string {
	panic("implement me")
}

func (m Path) Mutable() bool {
	panic("implement me")
}

func (m Path) IsValid() error {
	panic("implement me")
}

================================================
FILE: mocks/mock_config.go
================================================
// Code generated by mockery v2.2.1. DO NOT EDIT.
package mocks

import mock "github.com/stretchr/testify/mock"

// Config is an autogenerated mock type for the Config type.
// Expectations and canned returns are handled by the embedded
// mock.Mock (testify).
type Config struct {
	mock.Mock
}

// GetBool provides a mock function with given fields: key, defaultValue
func (_m *Config) GetBool(key string, defaultValue interface{}) bool {
	ret := _m.Called(key, defaultValue)

	var r0 bool
	// Prefer a caller-supplied function over a fixed return value.
	if rf, ok := ret.Get(0).(func(string, interface{}) bool); ok {
		r0 = rf(key, defaultValue)
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// GetInt provides a mock function with given fields: key, defaultValue
func (_m *Config) GetInt(key string, defaultValue interface{}) int {
	ret := _m.Called(key, defaultValue)

	var r0 int
	if rf, ok := ret.Get(0).(func(string, interface{}) int); ok {
		r0 = rf(key, defaultValue)
	} else {
		r0 = ret.Get(0).(int)
	}

	return r0
}

// GetString provides a mock function with given fields: key, defaultValue
func (_m *Config) GetString(key string, defaultValue interface{}) string {
	ret := _m.Called(key, defaultValue)

	var r0 string
	if rf, ok := ret.Get(0).(func(string, interface{}) string); ok {
		r0 = rf(key, defaultValue)
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

================================================
FILE: mocks/mock_env.go
================================================
// Code generated by mockery v2.0.0-alpha.2. DO NOT EDIT.
package mocks

import mock "github.com/stretchr/testify/mock"

// SpaceEnv is an autogenerated mock type for the SpaceEnv type.
// Expectations and canned returns are handled by the embedded
// mock.Mock (testify).
type SpaceEnv struct {
	mock.Mock
}

// CurrentFolder provides a mock function with given fields:
func (_m *SpaceEnv) CurrentFolder() (string, error) {
	ret := _m.Called()

	var r0 string
	// Prefer a caller-supplied function over a fixed return value.
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// LogLevel provides a mock function with given fields:
func (_m *SpaceEnv) LogLevel() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// WorkingFolder provides a mock function with given fields:
func (_m *SpaceEnv) WorkingFolder() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

================================================
FILE: mocks/mock_textile_handler.go
================================================
// Code generated by mockery v1.0.0. DO NOT EDIT.

package mocks

import (
	events "github.com/FleekHQ/space-daemon/core/events"
	mock "github.com/stretchr/testify/mock"
)

// TextileNotifier is an autogenerated mock type for the TextileNotifier type.
// Expectations are recorded via the embedded mock.Mock.
type TextileNotifier struct {
	mock.Mock
}

// SendTextileEvent provides a mock function with given fields: event
// The method has no return values; it only records the call.
func (_m *TextileNotifier) SendTextileEvent(event events.TextileEvent) {
	_m.Called(event)
}

================================================
FILE: mocks/mock_textile_users_client.go
================================================
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks import ( context "context" client "github.com/textileio/textile/v2/api/usersd/client" mock "github.com/stretchr/testify/mock" thread "github.com/textileio/go-threads/core/thread" ) // UsersClient is an autogenerated mock type for the UsersClient type type UsersClient struct { mock.Mock } // ListInboxMessages provides a mock function with given fields: ctx, opts func (_m *UsersClient) ListInboxMessages(ctx context.Context, opts ...client.ListOption) ([]client.Message, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] } var _ca []interface{} _ca = append(_ca, ctx) _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 []client.Message if rf, ok := ret.Get(0).(func(context.Context, ...client.ListOption) []client.Message); ok { r0 = rf(ctx, opts...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]client.Message) } } var r1 error if rf, ok := ret.Get(1).(func(context.Context, ...client.ListOption) error); ok { r1 = rf(ctx, opts...) } else { r1 = ret.Error(1) } return r0, r1 } // SendMessage provides a mock function with given fields: ctx, from, to, body func (_m *UsersClient) SendMessage(ctx context.Context, from thread.Identity, to thread.PubKey, body []byte) (client.Message, error) { ret := _m.Called(ctx, from, to, body) var r0 client.Message if rf, ok := ret.Get(0).(func(context.Context, thread.Identity, thread.PubKey, []byte) client.Message); ok { r0 = rf(ctx, from, to, body) } else { r0 = ret.Get(0).(client.Message) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, thread.Identity, thread.PubKey, []byte) error); ok { r1 = rf(ctx, from, to, body) } else { r1 = ret.Error(1) } return r0, r1 } // SetupMailbox provides a mock function with given fields: ctx func (_m *UsersClient) SetupMailbox(ctx context.Context) (thread.ID, error) { ret := _m.Called(ctx) var r0 thread.ID if rf, ok := ret.Get(0).(func(context.Context) thread.ID); ok { r0 = rf(ctx) } else { r0 = ret.Get(0).(thread.ID) } var r1 error 
if rf, ok := ret.Get(1).(func(context.Context) error); ok { r1 = rf(ctx) } else { r1 = ret.Error(1) } return r0, r1 } ================================================ FILE: scripts/windows.bat ================================================ @ECHO off REM Delete credentials associated with space daemon FOR /F "tokens=2" %%c IN ('@CMDKEY /list ^| @FINDSTR space:space') DO (@CMDKEY /delete:%%c) REM Delete folders created by space daemon @RD /S /Q "C:\\Users\\%USERNAME%\\.fleek-ipfs" @RD /S /Q "C:\\Users\\%USERNAME%\\.fleek-space" @RD /S /Q "C:\\Users\\%USERNAME%\\.buckd" @ECHO Removed credentials and deleted space daemon associated folders successfully. ================================================ FILE: swagger/ui/space.swagger.json ================================================ { "swagger": "2.0", "info": { "title": "space.proto", "version": "version not set" }, "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/v1/apiSessionTokens": { "get": { "operationId": "SpaceApi_GetAPISessionTokens", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetAPISessionTokensResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/appTokens": { "post": { "summary": "Generates an app token with scoped access.", "operationId": "SpaceApi_GenerateAppToken", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGenerateAppTokenResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceGenerateAppTokenRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/appTokens/master": { "post": { "summary": "Initialize master app token\nApp tokens are used to authorize scoped access 
to a range of methods\nMaster token can only be generated once and has access to all methods", "operationId": "SpaceApi_InitializeMasterAppToken", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceInitializeMasterAppTokenResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceInitializeMasterAppTokenRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/backup": { "post": { "operationId": "SpaceApi_ToggleBucketBackup", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceToggleBucketBackupResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceToggleBucketBackupRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/backup/restore": { "post": { "operationId": "SpaceApi_BucketBackupRestore", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceBucketBackupRestoreResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceBucketBackupRestoreRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/buckets": { "get": { "operationId": "SpaceApi_ListBuckets", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceListBucketsResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] }, "post": { "summary": "Create a new bucket owned by current user (aka keypair)", 
"operationId": "SpaceApi_CreateBucket", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceCreateBucketResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceCreateBucketRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/buckets/{bucket}/generatePublicFileLink": { "post": { "summary": "Generates a copy of the file that's accessible through IPFS gateways", "operationId": "SpaceApi_GeneratePublicFileLink", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGeneratePublicFileLinkResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "bucket", "in": "path", "required": true, "type": "string" }, { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceGeneratePublicFileLinkRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/buckets/{bucket}/join": { "post": { "summary": "Join bucket", "operationId": "SpaceApi_JoinBucket", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceJoinBucketResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "bucket", "in": "path", "required": true, "type": "string" }, { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceJoinBucketRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/buckets/{bucket}/share": { "post": { "summary": "Share bucket", "operationId": "SpaceApi_ShareBucket", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceShareBucketResponse" } }, "default": { "description": "An 
unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "bucket", "in": "path", "required": true, "type": "string" }, { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceShareBucketRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/deleteAccount": { "post": { "operationId": "SpaceApi_DeleteAccount", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceDeleteAccountResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceDeleteAccountRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/directories": { "get": { "summary": "Get the folder or files in the path directory.\nUnlike ListDirectories, this only returns immediate children at path.", "operationId": "SpaceApi_ListDirectory", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceListDirectoryResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "path", "in": "query", "required": false, "type": "string" }, { "name": "bucket", "in": "query", "required": false, "type": "string" }, { "name": "omitMembers", "in": "query", "required": false, "type": "boolean", "format": "boolean" } ], "tags": [ "SpaceApi" ] }, "post": { "summary": "Creates a folder/directory at the specified path", "operationId": "SpaceApi_CreateFolder", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceCreateFolderResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": 
"#/definitions/spaceCreateFolderRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/directories/all": { "get": { "summary": "Get all folder or files in the default bucket. It fetches all subdirectories too.", "operationId": "SpaceApi_ListDirectories", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceListDirectoriesResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "bucket", "in": "query", "required": false, "type": "string" }, { "name": "omitMembers", "in": "query", "required": false, "type": "boolean", "format": "boolean" } ], "tags": [ "SpaceApi" ] } }, "/v1/files": { "delete": { "summary": "Removes a file or dir from a bucket", "operationId": "SpaceApi_RemoveDirOrFile", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceRemoveDirOrFileResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "path", "in": "query", "required": false, "type": "string" }, { "name": "bucket", "in": "query", "required": false, "type": "string" } ], "tags": [ "SpaceApi" ] }, "post": { "summary": "Adds items (files/folders) to be uploaded to the bucket.", "operationId": "SpaceApi_AddItems", "responses": { "200": { "description": "A successful response.(streaming responses)", "schema": { "type": "object", "properties": { "result": { "$ref": "#/definitions/spaceAddItemsResponse" }, "error": { "$ref": "#/definitions/runtimeStreamError" } }, "title": "Stream result of spaceAddItemsResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceAddItemsRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/files/open": { 
"post": { "summary": "Open a file in the daemon.\nDaemon keeps track of all open files and closes them if no activity is noticed after a while", "operationId": "SpaceApi_OpenFile", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceOpenFileResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceOpenFileRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/files/openPublic": { "get": { "summary": "Open an encrypted public shared file in the daemon.\nThis requires the decryption key and file hash/cid to work", "operationId": "SpaceApi_OpenPublicFile", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceOpenPublicFileResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "fileCid", "in": "query", "required": false, "type": "string" }, { "name": "password", "in": "query", "required": false, "type": "string" }, { "name": "filename", "in": "query", "required": false, "type": "string" } ], "tags": [ "SpaceApi" ] } }, "/v1/files/sharedByMe": { "get": { "summary": "Gets the files that are shared by the sender", "operationId": "SpaceApi_GetSharedByMeFiles", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetSharedByMeFilesResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "seek", "in": "query", "required": false, "type": "string" }, { "name": "limit", "in": "query", "required": false, "type": "string", "format": "int64" } ], "tags": [ "SpaceApi" ] } }, "/v1/files/sharedWithMe": { "get": { "summary": "Gets the files that are shared with 
this recipient", "operationId": "SpaceApi_GetSharedWithMeFiles", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetSharedWithMeFilesResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "seek", "in": "query", "required": false, "type": "string" }, { "name": "limit", "in": "query", "required": false, "type": "string", "format": "int64" } ], "tags": [ "SpaceApi" ] } }, "/v1/filesinvitation/{invitationID}": { "post": { "operationId": "SpaceApi_HandleFilesInvitation", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceHandleFilesInvitationResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "invitationID", "in": "path", "required": true, "type": "string" }, { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceHandleFilesInvitationRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/fuse": { "get": { "summary": "Get status of FUSE drive. 
If mounted or unmounted", "operationId": "SpaceApi_GetFuseDriveStatus", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceFuseDriveResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/keypairs/delete": { "post": { "operationId": "SpaceApi_DeleteKeyPair", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceDeleteKeyPairResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceDeleteKeyPairRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/keypairs/forceGenerate": { "post": { "summary": "Force Generation of KeyPair. This will override existing keys stored in daemon.", "operationId": "SpaceApi_GenerateKeyPairWithForce", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGenerateKeyPairResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceGenerateKeyPairRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/keypairs/generate": { "post": { "summary": "Generate Key Pair for current account.\nThis will return error if daemon account already has keypairs", "operationId": "SpaceApi_GenerateKeyPair", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGenerateKeyPairResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": 
"#/definitions/spaceGenerateKeyPairRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/keypairs/mnemonic": { "get": { "operationId": "SpaceApi_GetStoredMnemonic", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetStoredMnemonicResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/keypairs/restoreWithMnemonic": { "post": { "summary": "Restores a keypair given a mnemonic.\nThis will override any existing key pair", "operationId": "SpaceApi_RestoreKeyPairViaMnemonic", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceRestoreKeyPairViaMnemonicResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceRestoreKeyPairViaMnemonicRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/localBackups/backup": { "post": { "operationId": "SpaceApi_CreateLocalKeysBackup", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceCreateLocalKeysBackupResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceCreateLocalKeysBackupRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/localBackups/recover": { "post": { "operationId": "SpaceApi_RecoverKeysByLocalBackup", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceRecoverKeysByLocalBackupResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": 
true, "schema": { "$ref": "#/definitions/spaceRecoverKeysByLocalBackupRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/notifications": { "get": { "operationId": "SpaceApi_GetNotifications", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetNotificationsResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "seek", "in": "query", "required": false, "type": "string" }, { "name": "limit", "in": "query", "required": false, "type": "string", "format": "int64" } ], "tags": [ "SpaceApi" ] } }, "/v1/notifications/lastSeenAt": { "post": { "summary": "This will set the last read timestamp for the user so that the client\ncan check if newer notifications are present for UX", "operationId": "SpaceApi_SetNotificationsLastSeenAt", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceSetNotificationsLastSeenAtResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceSetNotificationsLastSeenAtRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/notifications/{ID}/read": { "post": { "operationId": "SpaceApi_ReadNotification", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceReadNotificationResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "ID", "in": "path", "required": true, "type": "string" }, { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceReadNotificationRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/passphrases/backup": { "post": { "summary": "Backup Key by Passphrase", "operationId": 
"SpaceApi_BackupKeysByPassphrase", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceBackupKeysByPassphraseResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceBackupKeysByPassphraseRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/passphrases/recover": { "post": { "summary": "Recover Keys by Passphrase", "operationId": "SpaceApi_RecoverKeysByPassphrase", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceRecoverKeysByPassphraseResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceRecoverKeysByPassphraseRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/passphrases/test": { "post": { "summary": "Tests a passphrase to see if it matches the one previously used", "operationId": "SpaceApi_TestKeysPassphrase", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceTestKeysPassphraseResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceTestKeysPassphraseRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/publicKey": { "post": { "operationId": "SpaceApi_GetPublicKey", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetPublicKeyResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, 
"schema": { "$ref": "#/definitions/spaceGetPublicKeyRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/search/files": { "get": { "summary": "Search for files across all users bucket", "operationId": "SpaceApi_SearchFiles", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceSearchFilesResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "query", "in": "query", "required": false, "type": "string" } ], "tags": [ "SpaceApi" ] } }, "/v1/shareFilesViaPublicKey": { "post": { "summary": "Share bucket via public key using Textile Hub inboxing", "operationId": "SpaceApi_ShareFilesViaPublicKey", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceShareFilesViaPublicKeyResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceShareFilesViaPublicKeyRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/sharedWithList": { "get": { "summary": "Returns a list of addresses / public keys of clients to which files where shared or received, ordered by date", "operationId": "SpaceApi_GetRecentlySharedWith", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetRecentlySharedWithResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/subscriptions/file": { "get": { "summary": "Subscribe to file events. 
This streams responses to the caller", "operationId": "SpaceApi_Subscribe", "responses": { "200": { "description": "A successful response.(streaming responses)", "schema": { "type": "object", "properties": { "result": { "$ref": "#/definitions/spaceFileEventResponse" }, "error": { "$ref": "#/definitions/runtimeStreamError" } }, "title": "Stream result of spaceFileEventResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/subscriptions/notification": { "get": { "operationId": "SpaceApi_NotificationSubscribe", "responses": { "200": { "description": "A successful response.(streaming responses)", "schema": { "type": "object", "properties": { "result": { "$ref": "#/definitions/spaceNotificationEventResponse" }, "error": { "$ref": "#/definitions/runtimeStreamError" } }, "title": "Stream result of spaceNotificationEventResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/subscriptions/textile": { "get": { "summary": "Subscribe to textile events. 
This streams responses to the caller", "operationId": "SpaceApi_TxlSubscribe", "responses": { "200": { "description": "A successful response.(streaming responses)", "schema": { "type": "object", "properties": { "result": { "$ref": "#/definitions/spaceTextileEventResponse" }, "error": { "$ref": "#/definitions/runtimeStreamError" } }, "title": "Stream result of spaceTextileEventResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } }, "/v1/toggleFuse": { "post": { "summary": "Toggle FUSE drive to be mounted or unmounted", "operationId": "SpaceApi_ToggleFuseDrive", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceFuseDriveResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/spaceToggleFuseRequest" } } ], "tags": [ "SpaceApi" ] } }, "/v1/usage": { "get": { "operationId": "SpaceApi_GetUsageInfo", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/spaceGetUsageInfoResponse" } }, "default": { "description": "An unexpected error response", "schema": { "$ref": "#/definitions/runtimeError" } } }, "tags": [ "SpaceApi" ] } } }, "definitions": { "protobufAny": { "type": "object", "properties": { "type_url": { "type": "string" }, "value": { "type": "string", "format": "byte" } } }, "runtimeError": { "type": "object", "properties": { "error": { "type": "string" }, "code": { "type": "integer", "format": "int32" }, "message": { "type": "string" }, "details": { "type": "array", "items": { "$ref": "#/definitions/protobufAny" } } } }, "runtimeStreamError": { "type": "object", "properties": { "grpc_code": { "type": "integer", "format": "int32" }, "http_code": { "type": "integer", "format": "int32" }, 
"message": { "type": "string" }, "http_status": { "type": "string" }, "details": { "type": "array", "items": { "$ref": "#/definitions/protobufAny" } } } }, "spaceAddItemResult": { "type": "object", "properties": { "sourcePath": { "type": "string" }, "bucketPath": { "type": "string" }, "error": { "type": "string" } } }, "spaceAddItemsRequest": { "type": "object", "properties": { "sourcePaths": { "type": "array", "items": { "type": "string" }, "title": "full paths to file or Folder on FS. Needs to be a location available to the daemon" }, "targetPath": { "type": "string", "description": "target path in bucket." }, "bucket": { "type": "string", "title": "The bucket in which to save the item" } } }, "spaceAddItemsResponse": { "type": "object", "properties": { "result": { "$ref": "#/definitions/spaceAddItemResult" }, "totalFiles": { "type": "string", "format": "int64" }, "totalBytes": { "type": "string", "format": "int64" }, "completedFiles": { "type": "string", "format": "int64" }, "completedBytes": { "type": "string", "format": "int64" } } }, "spaceAllowedMethod": { "type": "object", "properties": { "methodName": { "type": "string" } } }, "spaceBackupKeysByPassphraseRequest": { "type": "object", "properties": { "uuid": { "type": "string" }, "passphrase": { "type": "string" }, "type": { "$ref": "#/definitions/spaceKeyBackupType" } } }, "spaceBackupKeysByPassphraseResponse": { "type": "object" }, "spaceBucket": { "type": "object", "properties": { "key": { "type": "string" }, "name": { "type": "string" }, "path": { "type": "string" }, "createdAt": { "type": "string", "format": "int64" }, "updatedAt": { "type": "string", "format": "int64" }, "members": { "type": "array", "items": { "$ref": "#/definitions/spaceBucketMember" } }, "isPersonalBucket": { "type": "boolean", "format": "boolean" }, "isBackupEnabled": { "type": "boolean", "format": "boolean" }, "itemsCount": { "type": "integer", "format": "int32" } } }, "spaceBucketBackupRestoreRequest": { "type": "object", 
"properties": { "bucket": { "type": "string" } } }, "spaceBucketBackupRestoreResponse": { "type": "object" }, "spaceBucketMember": { "type": "object", "properties": { "address": { "type": "string" }, "publicKey": { "type": "string" }, "isOwner": { "type": "boolean", "format": "boolean" }, "hasJoined": { "type": "boolean", "format": "boolean" } } }, "spaceCreateBucketRequest": { "type": "object", "properties": { "slug": { "type": "string" } } }, "spaceCreateBucketResponse": { "type": "object", "properties": { "bucket": { "$ref": "#/definitions/spaceBucket" } } }, "spaceCreateFolderRequest": { "type": "object", "properties": { "path": { "type": "string", "title": "target path in bucket to add new empty folder" }, "bucket": { "type": "string", "title": "The bucket in which to add the folder" } } }, "spaceCreateFolderResponse": { "type": "object", "title": "not sure we need to return anything other than an error if we failed" }, "spaceCreateLocalKeysBackupRequest": { "type": "object", "properties": { "pathToKeyBackup": { "type": "string", "title": "The path in which to save the backup" } } }, "spaceCreateLocalKeysBackupResponse": { "type": "object" }, "spaceDeleteAccountRequest": { "type": "object" }, "spaceDeleteAccountResponse": { "type": "object" }, "spaceDeleteKeyPairRequest": { "type": "object" }, "spaceDeleteKeyPairResponse": { "type": "object" }, "spaceEventType": { "type": "string", "enum": [ "ENTRY_ADDED", "ENTRY_DELETED", "ENTRY_UPDATED", "ENTRY_BACKUP_IN_PROGRESS", "ENTRY_BACKUP_READY", "ENTRY_RESTORE_IN_PROGRESS", "ENTRY_RESTORE_READY", "FOLDER_ADDED", "FOLDER_DELETED", "FOLDER_UPDATED" ], "default": "ENTRY_ADDED" }, "spaceFileEventResponse": { "type": "object", "properties": { "type": { "$ref": "#/definitions/spaceEventType" }, "entry": { "$ref": "#/definitions/spaceListDirectoryEntry" }, "bucket": { "type": "string" }, "dbId": { "type": "string" } } }, "spaceFileMember": { "type": "object", "properties": { "publicKey": { "type": "string" }, "address": { 
"type": "string" } } }, "spaceFullPath": { "type": "object", "properties": { "dbId": { "type": "string" }, "bucket": { "type": "string" }, "path": { "type": "string" } } }, "spaceFuseDriveResponse": { "type": "object", "properties": { "state": { "$ref": "#/definitions/spaceFuseState" } } }, "spaceFuseState": { "type": "string", "enum": [ "UNSUPPORTED", "NOT_INSTALLED", "UNMOUNTED", "MOUNTED" ], "default": "UNSUPPORTED" }, "spaceGenerateAppTokenRequest": { "type": "object", "properties": { "allowedMethods": { "type": "array", "items": { "$ref": "#/definitions/spaceAllowedMethod" } } } }, "spaceGenerateAppTokenResponse": { "type": "object", "properties": { "appToken": { "type": "string" } } }, "spaceGenerateKeyPairRequest": { "type": "object" }, "spaceGenerateKeyPairResponse": { "type": "object", "properties": { "mnemonic": { "type": "string" } } }, "spaceGeneratePublicFileLinkRequest": { "type": "object", "properties": { "bucket": { "type": "string" }, "itemPaths": { "type": "array", "items": { "type": "string" } }, "password": { "type": "string" }, "dbId": { "type": "string", "title": "optional field to specify db id\nfor shared with me files" } } }, "spaceGeneratePublicFileLinkResponse": { "type": "object", "properties": { "link": { "type": "string" }, "fileCid": { "type": "string" } } }, "spaceGetAPISessionTokensResponse": { "type": "object", "properties": { "hubToken": { "type": "string" }, "servicesToken": { "type": "string" } } }, "spaceGetNotificationsResponse": { "type": "object", "properties": { "notifications": { "type": "array", "items": { "$ref": "#/definitions/spaceNotification" } }, "nextOffset": { "type": "string" }, "lastSeenAt": { "type": "string", "format": "int64" } } }, "spaceGetPublicKeyRequest": { "type": "object" }, "spaceGetPublicKeyResponse": { "type": "object", "properties": { "publicKey": { "type": "string", "title": "Public key encoded in hex" } } }, "spaceGetRecentlySharedWithResponse": { "type": "object", "properties": { "members": { 
"type": "array", "items": { "$ref": "#/definitions/spaceFileMember" } } } }, "spaceGetSharedByMeFilesResponse": { "type": "object", "properties": { "items": { "type": "array", "items": { "$ref": "#/definitions/spaceSharedListDirectoryEntry" } }, "nextOffset": { "type": "string" } } }, "spaceGetSharedWithMeFilesResponse": { "type": "object", "properties": { "items": { "type": "array", "items": { "$ref": "#/definitions/spaceSharedListDirectoryEntry" } }, "nextOffset": { "type": "string" } } }, "spaceGetStoredMnemonicResponse": { "type": "object", "properties": { "mnemonic": { "type": "string" } } }, "spaceGetUsageInfoResponse": { "type": "object", "properties": { "localStarogeUsed": { "type": "string", "format": "uint64" }, "localBandwidthUsed": { "type": "string", "format": "uint64" }, "spaceStorageUsed": { "type": "string", "format": "uint64" }, "spaceBandwidthUsed": { "type": "string", "format": "uint64" }, "usageQuota": { "type": "string", "format": "uint64" } } }, "spaceHandleFilesInvitationRequest": { "type": "object", "properties": { "invitationID": { "type": "string" }, "accept": { "type": "boolean", "format": "boolean" } } }, "spaceHandleFilesInvitationResponse": { "type": "object" }, "spaceInitializeMasterAppTokenRequest": { "type": "object" }, "spaceInitializeMasterAppTokenResponse": { "type": "object", "properties": { "appToken": { "type": "string" } } }, "spaceInvitation": { "type": "object", "properties": { "inviterPublicKey": { "type": "string" }, "invitationID": { "type": "string" }, "status": { "$ref": "#/definitions/spaceInvitationStatus" }, "itemPaths": { "type": "array", "items": { "$ref": "#/definitions/spaceFullPath" } } } }, "spaceInvitationAccept": { "type": "object", "properties": { "invitationID": { "type": "string" } } }, "spaceInvitationStatus": { "type": "string", "enum": [ "PENDING", "ACCEPTED", "REJECTED" ], "default": "PENDING" }, "spaceJoinBucketRequest": { "type": "object", "properties": { "threadinfo": { "$ref": 
"#/definitions/spaceThreadInfo" }, "bucket": { "type": "string" } } }, "spaceJoinBucketResponse": { "type": "object", "properties": { "result": { "type": "boolean", "format": "boolean" } } }, "spaceKeyBackupType": { "type": "string", "enum": [ "PASSWORD", "ETH" ], "default": "PASSWORD" }, "spaceListBucketsResponse": { "type": "object", "properties": { "buckets": { "type": "array", "items": { "$ref": "#/definitions/spaceBucket" } } } }, "spaceListDirectoriesResponse": { "type": "object", "properties": { "entries": { "type": "array", "items": { "$ref": "#/definitions/spaceListDirectoryEntry" } } } }, "spaceListDirectoryEntry": { "type": "object", "properties": { "path": { "type": "string" }, "isDir": { "type": "boolean", "format": "boolean" }, "name": { "type": "string" }, "sizeInBytes": { "type": "string" }, "created": { "type": "string" }, "updated": { "type": "string" }, "fileExtension": { "type": "string" }, "ipfsHash": { "type": "string" }, "isLocallyAvailable": { "type": "boolean", "format": "boolean" }, "backupCount": { "type": "string", "format": "int64" }, "members": { "type": "array", "items": { "$ref": "#/definitions/spaceFileMember" } }, "isBackupInProgress": { "type": "boolean", "format": "boolean" }, "isRestoreInProgress": { "type": "boolean", "format": "boolean" } } }, "spaceListDirectoryResponse": { "type": "object", "properties": { "entries": { "type": "array", "items": { "$ref": "#/definitions/spaceListDirectoryEntry" } } } }, "spaceNotification": { "type": "object", "properties": { "ID": { "type": "string" }, "subject": { "type": "string" }, "body": { "type": "string" }, "invitationValue": { "$ref": "#/definitions/spaceInvitation" }, "usageAlert": { "$ref": "#/definitions/spaceUsageAlert" }, "invitationAccept": { "$ref": "#/definitions/spaceInvitationAccept" }, "type": { "$ref": "#/definitions/spaceNotificationType" }, "createdAt": { "type": "string", "format": "int64" }, "readAt": { "type": "string", "format": "int64" } } }, 
"spaceNotificationEventResponse": { "type": "object", "properties": { "notification": { "$ref": "#/definitions/spaceNotification" } } }, "spaceNotificationType": { "type": "string", "enum": [ "UNKNOWN", "INVITATION", "USAGEALERT", "INVITATION_REPLY" ], "default": "UNKNOWN" }, "spaceOpenFileRequest": { "type": "object", "properties": { "path": { "type": "string" }, "bucket": { "type": "string" }, "dbId": { "type": "string" } } }, "spaceOpenFileResponse": { "type": "object", "properties": { "location": { "type": "string" } } }, "spaceOpenPublicFileResponse": { "type": "object", "properties": { "location": { "type": "string" } } }, "spaceReadNotificationRequest": { "type": "object", "properties": { "ID": { "type": "string" } } }, "spaceReadNotificationResponse": { "type": "object" }, "spaceRecoverKeysByLocalBackupRequest": { "type": "object", "properties": { "pathToKeyBackup": { "type": "string" } } }, "spaceRecoverKeysByLocalBackupResponse": { "type": "object" }, "spaceRecoverKeysByPassphraseRequest": { "type": "object", "properties": { "uuid": { "type": "string" }, "passphrase": { "type": "string" } } }, "spaceRecoverKeysByPassphraseResponse": { "type": "object" }, "spaceRemoveDirOrFileResponse": { "type": "object" }, "spaceRestoreKeyPairViaMnemonicRequest": { "type": "object", "properties": { "mnemonic": { "type": "string" } } }, "spaceRestoreKeyPairViaMnemonicResponse": { "type": "object" }, "spaceSearchFilesDirectoryEntry": { "type": "object", "properties": { "entry": { "$ref": "#/definitions/spaceListDirectoryEntry" }, "dbId": { "type": "string" }, "bucket": { "type": "string" } } }, "spaceSearchFilesResponse": { "type": "object", "properties": { "entries": { "type": "array", "items": { "$ref": "#/definitions/spaceSearchFilesDirectoryEntry" } }, "query": { "type": "string" } } }, "spaceSetNotificationsLastSeenAtRequest": { "type": "object", "properties": { "timestamp": { "type": "string", "format": "int64" } } }, "spaceSetNotificationsLastSeenAtResponse": { 
"type": "object" }, "spaceShareBucketRequest": { "type": "object", "properties": { "bucket": { "type": "string" } } }, "spaceShareBucketResponse": { "type": "object", "properties": { "threadinfo": { "$ref": "#/definitions/spaceThreadInfo" } } }, "spaceShareFilesViaPublicKeyRequest": { "type": "object", "properties": { "publicKeys": { "type": "array", "items": { "type": "string" } }, "paths": { "type": "array", "items": { "$ref": "#/definitions/spaceFullPath" } } } }, "spaceShareFilesViaPublicKeyResponse": { "type": "object" }, "spaceSharedListDirectoryEntry": { "type": "object", "properties": { "entry": { "$ref": "#/definitions/spaceListDirectoryEntry" }, "dbId": { "type": "string" }, "bucket": { "type": "string" }, "isPublicLink": { "type": "boolean", "format": "boolean" } } }, "spaceTestKeysPassphraseRequest": { "type": "object", "properties": { "uuid": { "type": "string" }, "passphrase": { "type": "string" } } }, "spaceTestKeysPassphraseResponse": { "type": "object" }, "spaceTextileEventResponse": { "type": "object", "properties": { "bucket": { "type": "string" } } }, "spaceThreadInfo": { "type": "object", "properties": { "addresses": { "type": "array", "items": { "type": "string" } }, "key": { "type": "string" } } }, "spaceToggleBucketBackupRequest": { "type": "object", "properties": { "bucket": { "type": "string" }, "backup": { "type": "boolean", "format": "boolean" } } }, "spaceToggleBucketBackupResponse": { "type": "object" }, "spaceToggleFuseRequest": { "type": "object", "properties": { "mountDrive": { "type": "boolean", "format": "boolean" } } }, "spaceUsageAlert": { "type": "object", "properties": { "used": { "type": "string", "format": "int64" }, "limit": { "type": "string", "format": "int64" }, "message": { "type": "string" } } } } } ================================================ FILE: tracing/tracing.go ================================================ package tracing import ( "fmt" "io" "github.com/uber/jaeger-client-go/config" 
"github.com/opentracing/opentracing-go" ) // Initializes a tracer configuring the servicename as the app name func MustInit(appName string) (opentracing.Tracer, io.Closer) { cfg := &config.Configuration{ ServiceName: appName, Sampler: &config.SamplerConfig{ Type: "const", Param: 1, }, Reporter: &config.ReporterConfig{ LogSpans: true, }, } tracer, closer, err := cfg.NewTracer() if err != nil { panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err)) } return tracer, closer }