Repository: sahib/brig Branch: develop Commit: 6b7eccf8fcbd Files: 399 Total size: 9.3 MB Directory structure: gitextract_6878u1a7/ ├── .github/ │ └── ISSUE_TEMPLATE/ │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── .mailmap ├── .travis.yml ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── Taskfile.yml ├── autocomplete/ │ ├── bash_autocomplete │ └── zsh_autocomplete ├── backend/ │ ├── backend.go │ ├── httpipfs/ │ │ ├── gc.go │ │ ├── gc_test.go │ │ ├── io.go │ │ ├── io_test.go │ │ ├── net.go │ │ ├── net_test.go │ │ ├── pin.go │ │ ├── pin_test.go │ │ ├── pubsub.go │ │ ├── pubsub_test.go │ │ ├── resolve.go │ │ ├── resolve_test.go │ │ ├── shell.go │ │ ├── testing.go │ │ ├── testing_test.go │ │ └── version.go │ └── mock/ │ └── mock.go ├── bench/ │ ├── bench.go │ ├── inputs.go │ ├── runner.go │ └── stats.go ├── brig.go ├── catfs/ │ ├── backend.go │ ├── capnp/ │ │ ├── pinner.capnp │ │ └── pinner.capnp.go │ ├── core/ │ │ ├── coreutils.go │ │ ├── coreutils_test.go │ │ ├── gc.go │ │ ├── gc_test.go │ │ ├── linker.go │ │ ├── linker_test.go │ │ └── testing.go │ ├── db/ │ │ ├── database.go │ │ ├── database_badger.go │ │ ├── database_disk.go │ │ ├── database_memory.go │ │ └── database_test.go │ ├── errors/ │ │ └── errors.go │ ├── fs.go │ ├── fs_test.go │ ├── handle.go │ ├── handle_test.go │ ├── mio/ │ │ ├── chunkbuf/ │ │ │ ├── chunkbuf.go │ │ │ └── chunkbuf_test.go │ │ ├── compress/ │ │ │ ├── algorithm.go │ │ │ ├── compress_test.go │ │ │ ├── header.go │ │ │ ├── heuristic.go │ │ │ ├── heuristic_test.go │ │ │ ├── mime_db.go │ │ │ ├── reader.go │ │ │ └── writer.go │ │ ├── doc.go │ │ ├── encrypt/ │ │ │ ├── format.go │ │ │ ├── format_test.go │ │ │ ├── reader.go │ │ │ └── writer.go │ │ ├── pagecache/ │ │ │ ├── cache.go │ │ │ ├── doc.go │ │ │ ├── mdcache/ │ │ │ │ ├── l1.go │ │ │ │ ├── l1_test.go │ │ │ │ ├── l2.go │ │ │ │ ├── l2_test.go │ │ │ │ ├── mdcache.go │ │ │ │ └── mdcache_test.go │ │ │ ├── overlay.go │ │ │ ├── overlay_test.go 
│ │ │ ├── page/ │ │ │ │ ├── page.go │ │ │ │ └── page_test.go │ │ │ ├── util.go │ │ │ └── util_test.go │ │ ├── stream.go │ │ └── stream_test.go │ ├── nodes/ │ │ ├── base.go │ │ ├── capnp/ │ │ │ ├── nodes.capnp │ │ │ └── nodes.capnp.go │ │ ├── commit.go │ │ ├── commit_test.go │ │ ├── directory.go │ │ ├── directory_test.go │ │ ├── doc.go │ │ ├── file.go │ │ ├── file_test.go │ │ ├── ghost.go │ │ ├── ghost_test.go │ │ ├── linker.go │ │ └── node.go │ ├── pinner.go │ ├── pinner_test.go │ ├── repin.go │ ├── repin_test.go │ ├── rev.go │ ├── rev_test.go │ └── vcs/ │ ├── capnp/ │ │ ├── patch.capnp │ │ └── patch.capnp.go │ ├── change.go │ ├── change_test.go │ ├── debug.go │ ├── diff.go │ ├── diff_test.go │ ├── history.go │ ├── history_test.go │ ├── mapper.go │ ├── mapper_test.go │ ├── patch.go │ ├── patch_test.go │ ├── reset.go │ ├── reset_test.go │ ├── resolve.go │ ├── resolve_test.go │ ├── sync.go │ ├── sync_test.go │ └── undelete.go ├── client/ │ ├── .gitignore │ ├── client.go │ ├── clienttest/ │ │ └── daemon.go │ ├── fs_cmds.go │ ├── fs_test.go │ ├── net_cmds.go │ ├── net_test.go │ ├── repo_cmds.go │ └── vcs_cmds.go ├── cmd/ │ ├── bug.go │ ├── debug.go │ ├── exit_codes.go │ ├── fs_handlers.go │ ├── help.go │ ├── init.go │ ├── inode_other.go │ ├── inode_unix.go │ ├── iobench.go │ ├── log.go │ ├── net_handlers.go │ ├── parser.go │ ├── pwd/ │ │ ├── pwd-util/ │ │ │ └── pwd-util.go │ │ ├── pwd.go │ │ └── pwd_test.go │ ├── repo_handlers.go │ ├── suggest.go │ ├── tabwriter/ │ │ ├── example_test.go │ │ ├── tabwriter.go │ │ └── tabwriter_test.go │ ├── tree.go │ ├── util.go │ └── vcs_handlers.go ├── defaults/ │ ├── defaults.go │ └── defaults_v0.go ├── docs/ │ ├── .gitignore │ ├── Makefile │ ├── _static/ │ │ └── css/ │ │ └── custom.css │ ├── asciinema/ │ │ ├── 1_init.json │ │ ├── 1_init_with_pwm.json │ │ ├── 2_adding.json │ │ ├── 3_coreutils.json │ │ ├── 4_mount.json │ │ ├── 5_commits.json │ │ ├── 6_history.json │ │ ├── 7_remotes.json │ │ ├── 8_sync.json │ │ └── 9_pin.json │ ├── 
conf.py │ ├── contributing.rst │ ├── faq.rst │ ├── features.rst │ ├── index.rst │ ├── installation.rst │ ├── make.bat │ ├── quickstart.rst │ ├── requirements.txt │ ├── roadmap.rst │ ├── talk/ │ │ ├── Makefile │ │ ├── demo.rst │ │ ├── index.rst │ │ ├── requirements.txt │ │ └── style.css │ └── tutorial/ │ ├── config.rst │ ├── coreutils.rst │ ├── gateway.rst │ ├── init.rst │ ├── intro.rst │ ├── mounts.rst │ ├── pinning.rst │ ├── remotes.rst │ └── vcs.rst ├── events/ │ ├── backend/ │ │ └── backend.go │ ├── capnp/ │ │ ├── events_api.capnp │ │ └── events_api.capnp.go │ ├── docs.go │ ├── event.go │ ├── listener.go │ ├── listener_test.go │ └── mock/ │ └── mock.go ├── fuse/ │ ├── directory.go │ ├── doc.go │ ├── file.go │ ├── fs.go │ ├── fstab.go │ ├── fuse_test.go │ ├── fusetest/ │ │ ├── client.go │ │ ├── doc.go │ │ ├── helper.go │ │ └── server.go │ ├── handle.go │ ├── mount.go │ ├── stub.go │ └── util.go ├── gateway/ │ ├── db/ │ │ ├── capnp/ │ │ │ ├── user.capnp │ │ │ └── user.capnp.go │ │ ├── db.go │ │ └── db_test.go │ ├── elm/ │ │ ├── .gitignore │ │ ├── Makefile │ │ ├── elm.json │ │ └── src/ │ │ ├── Clipboard.elm │ │ ├── Commands.elm │ │ ├── Main.elm │ │ ├── Modals/ │ │ │ ├── History.elm │ │ │ ├── Mkdir.elm │ │ │ ├── MoveCopy.elm │ │ │ ├── RemoteAdd.elm │ │ │ ├── RemoteFolders.elm │ │ │ ├── RemoteRemove.elm │ │ │ ├── Remove.elm │ │ │ ├── Rename.elm │ │ │ ├── Share.elm │ │ │ └── Upload.elm │ │ ├── Pinger.elm │ │ ├── Routes/ │ │ │ ├── Commits.elm │ │ │ ├── DeletedFiles.elm │ │ │ ├── Diff.elm │ │ │ ├── Ls.elm │ │ │ └── Remotes.elm │ │ ├── Scroll.elm │ │ ├── Util.elm │ │ └── Websocket.elm │ ├── endpoints/ │ │ ├── all_dirs.go │ │ ├── all_dirs_test.go │ │ ├── copy.go │ │ ├── copy_test.go │ │ ├── deleted.go │ │ ├── deleted_test.go │ │ ├── events.go │ │ ├── events_test.go │ │ ├── get.go │ │ ├── get_test.go │ │ ├── history.go │ │ ├── history_test.go │ │ ├── index.go │ │ ├── log.go │ │ ├── log_test.go │ │ ├── login.go │ │ ├── login_test.go │ │ ├── ls.go │ │ ├── ls_test.go │ │ ├── 
mkdir.go │ │ ├── mkdir_test.go │ │ ├── move.go │ │ ├── move_test.go │ │ ├── pin.go │ │ ├── pin_test.go │ │ ├── ping.go │ │ ├── ping_test.go │ │ ├── redirect.go │ │ ├── remotes_add.go │ │ ├── remotes_add_test.go │ │ ├── remotes_diff.go │ │ ├── remotes_diff_test.go │ │ ├── remotes_list.go │ │ ├── remotes_list_test.go │ │ ├── remotes_remove.go │ │ ├── remotes_remove_test.go │ │ ├── remotes_self.go │ │ ├── remotes_self_test.go │ │ ├── remotes_sync.go │ │ ├── remotes_sync_test.go │ │ ├── remove.go │ │ ├── remove_test.go │ │ ├── reset.go │ │ ├── reset_test.go │ │ ├── testing.go │ │ ├── undelete.go │ │ ├── undelete_test.go │ │ ├── upload.go │ │ ├── upload_test.go │ │ └── util.go │ ├── remotesapi/ │ │ ├── api.go │ │ └── mock.go │ ├── server.go │ ├── server_test.go │ ├── static/ │ │ ├── css/ │ │ │ ├── default.css │ │ │ └── fontawesome.css │ │ ├── js/ │ │ │ ├── app.js │ │ │ ├── init.js │ │ │ └── smoothscroll.js │ │ ├── package.go │ │ └── resource.go │ └── templates/ │ ├── index.html │ ├── package.go │ └── resource.go ├── go.mod ├── go.sum ├── net/ │ ├── authrw.go │ ├── authrw_test.go │ ├── backend/ │ │ └── backend.go │ ├── capnp/ │ │ ├── api.capnp │ │ └── api.capnp.go │ ├── client.go │ ├── client_test.go │ ├── handlers.go │ ├── mock/ │ │ ├── mock.go │ │ └── pinger.go │ ├── peer/ │ │ ├── peer.go │ │ └── peer_test.go │ ├── pinger.go │ ├── pinger_test.go │ ├── resolve_test.go │ └── server.go ├── repo/ │ ├── backend.go │ ├── config.go │ ├── gc.go │ ├── hints/ │ │ ├── doc.go │ │ ├── hints.go │ │ └── hints_test.go │ ├── immutables.go │ ├── init.go │ ├── keys.go │ ├── keys_test.go │ ├── mock/ │ │ └── mock.go │ ├── readme.go │ ├── remotes.go │ ├── remotes_test.go │ ├── repo.go │ ├── repo_test.go │ ├── repopack/ │ │ └── repopack.go │ └── setup/ │ ├── ipfs.go │ └── ipfs_test.go ├── scripts/ │ ├── build.sh │ ├── count-lines-of-code.sh │ ├── create-release-bundle.sh │ ├── docker-normal-startup.sh │ ├── generate.sh │ ├── install-task.sh │ ├── install.sh │ ├── run-linter.sh │ ├── 
run-tests.sh │ └── test-bed.sh ├── server/ │ ├── api_handler.go │ ├── base.go │ ├── capnp/ │ │ ├── local_api.capnp │ │ └── local_api.capnp.go │ ├── fs_handler.go │ ├── net_handler.go │ ├── path.go │ ├── path_test.go │ ├── remotes_api.go │ ├── repo_handler.go │ ├── rlimit_linux.go │ ├── rlimit_other.go │ ├── server.go │ ├── stream.go │ ├── transfer.go │ └── vcs_handler.go ├── tests/ │ ├── test-init-no-pass.sh │ ├── test-init-pass-helper.sh │ └── test-init-several.sh ├── util/ │ ├── conductor/ │ │ ├── conductor.go │ │ └── conductor_test.go │ ├── hashlib/ │ │ ├── hash.go │ │ └── hash_test.go │ ├── key.go │ ├── log/ │ │ ├── logger.go │ │ └── logger_test.go │ ├── pwutil/ │ │ └── pwutil.go │ ├── server/ │ │ └── server.go │ ├── std.go │ ├── std_test.go │ ├── strings/ │ │ ├── README.md │ │ └── builder.go │ ├── testutil/ │ │ └── testutil.go │ ├── trie/ │ │ ├── buildpath.go │ │ ├── pathricia.go │ │ └── pathricia_test.go │ ├── zipper.go │ └── zipper_test.go └── version/ └── version.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve labels: --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Please always include the output of the following commands in your report!** * ``brig bug -s`` **Expected behavior** A clear and concise description of what you expected to happen. **Additional context** Add any other context about the problem here. 
================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project labels: --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. **Please keep in mind that new features should be orthogonal (i.e. complement) to the existing features.** ================================================ FILE: .gitignore ================================================ brig *.coverprofile coverage.out _vendor* TODO .idea mage ipfs repo/setup/ipfs .task tags cov.out ================================================ FILE: .mailmap ================================================ Christopher Pahl Christopher Pahl Christopher Pahl Christopher Pahl Christopher Pahl Christopher Pahl Christopher Pahl ================================================ FILE: .travis.yml ================================================ language: go sudo: required go: - "1.15" notifications: email: - sahib@online.de install: - sudo apt-get install fuse capnproto - mkdir -p ${GOPATH}/bin - export GOBIN=${GOPATH}/bin - export PATH="${GOPATH}/bin:${PATH}" - export GO111MODULE=on - go get -u github.com/rakyll/gotest - go get -u github.com/phogolabs/parcello - go get -u github.com/phogolabs/parcello/cmd/parcello - go get -u zombiezen.com/go/capnproto2/... 
- go get -u github.com/go-task/task/v3/cmd/task - wget https://dist.ipfs.io/go-ipfs/v0.7.0/go-ipfs_v0.7.0_linux-amd64.tar.gz -O /tmp/ipfs.tgz - tar -C /tmp -xvf /tmp/ipfs.tgz - cp /tmp/go-ipfs/ipfs $GOBIN - export PATH="${GOPATH}/bin:${PATH}" - task script: - export PATH="${GOPATH}/bin:${PATH}" - travis_wait 30 bash scripts/run-tests.sh ================================================ FILE: CHANGELOG.md ================================================ # Change Log All notable changes to this project will be documented in this file. The format follows [keepachangelog.com]. Please stick to it. ## [0.5.3] -- 2020-07-20 Drastic speed up of listing and show operation. In previous version simple IsCached operation on 500 MB file was taking more than 30 seconds. The reason is splitting a file in chunks of no more than 256 kB in ipfs. It took time to establish connection to ipfs and check status of every chunk. The new caching scheme of intermediate results helps to avoid unnecessary connection. There is an additional heuristic: if a reference/hash stores less or equal to 262158 bytes, then this hash will not have children links (hashes). This seems to be true for IPFS up to v0.6.0 but there is no guarantee that it will be true in further version. The initial check is now very fast, less than second for the same 500 MB file. Also, recursive check does not rerun full check on probed files now, so this is also done much faster. The down side: the caching result are stale and can be misreported for up to 5 minutes (with current cache expiration settings). ### Changed Added caching mechanism for ipfs interaction. ## [0.5.2] -- 2020-07-16 Bug fix release. ### Fixed - Report correct cache status for a hash with multiple children links. The IsCached reported as yes for a large files (>256 kB with ipfs backend) since such files are split into multiple blocks. 
Strangely, the parent node is somehow precached without asking (maybe it happens when brig checks for the backend size), but its children are not unless we pin or read file content. ### Changed - `brig ls /file.name` will return listing for a single file. Before it worked only with directories. Now it behaves similar to the standard file system listing (ls) command. ## [0.5.1] -- 2020-07-15 Improvements and bug fixes in the fuse layer. The fuse layer consequent read is factor of 20 faster now. ### Fixed - Fix reading larger than 64 kB files. The read was from limitedStream with 64 kB size. It was spitting EOF when end of the buffer was hit, which is not the same as true end of file. ### Changed - Fuse file handle keeps track of the seek position. So, in consequent reads it does not have to reseek the stream which costs a lot of time. On my machine the speed went up from about 200 kB/s to 5 MB/s. It is still much slower than direct read from disk (30 MB/s) but probably expected due to ipfs, compression, and encryption layers. ## [0.5.0] -- 2020-07-13 This version is mostly bug fixes of unreleased version 0.4.2 by Chris Pahl, who is the original author and maintainer of the »brig«. Output of the diff and sync command is now different from the behaviour outlined in the old manual. There are also fixes to make fuse mounts to work properly. Compatibility wise, metadata exchange expects to see the cachedSize field for proper handling, older versions do not provide it. So, I think it justifies the bump in the minor version. TODO: documentation does not reflect all the changes. ### Fixed - Fix the behaviour of fast forward changes (i.e. when the file changed only at one location). - Fix the merge over deleted (ghost) file - Fix handling of the resurrected at remote entries. - Fix gateway 'get' for authorized user - Fix gateway to block download/view files outside of permitted folders - Fix bug with pinning at sync. 
The program used source node info to pin instead of destination. So a random node at destination was pinned. - Fix bug in repinner size calculator, which could end up with negative numbers in unsigned integer and thus report crazily high values. - Fix in capnp template to be compatible with a more modern version of capnp - Fix handling the pin state border case, when our data report a pin but ipfs does not. We ask to repin at ipfs backend. - Fix the destination pin status preservation during merge. - Multiple fixes in the file system frontend (fuse). - Correct file sizes which propagate to the directory size - Make sure attributes of a file are not stale - Redone writing and staging handling. Now if we open a file for writing we store the modified content in memory. When we flush to the backend we compare content change with this memory-stored content. The old way did not catch such changes. - Touch can now create a new file over ghost nodes. - We can move/rename things within fuse mount via brig backend. TODO: there is a small time window (several seconds) when file info is not picked up by fuse after rename. It might be a bug in the fuse library itself. It is not too critical right now. ### Changed - Repinner can unpin explicitly pinned files, if they are scheduled for deletion (i.e. beyond min-max version requirement). Otherwise, we will have stale old versions which will take storage space forever. - Diff and Sync show the direction of the merge. Diff takes precedence according to the modification time. TODO: I need to add a new conflict resolution strategy: chronological/time and use it only if required. I felt the chronological strategy is a more natural way for syncing different repos, so I programmed time resolution as the default. - Changes are specific to files not directories. If you create a directory in one repo and do the same on the other, it is not a conflict unless there are files with different content. Otherwise, it can be easily merged. - Preserve source modification time when merging. 
- Better display of missing or removed entries. - When syncing, create patches for every commit; before, the patch was done between the old known commit and the current state. That was breaking possibly non-conflicting merges, since the common parent was lost during the commit skip. Sync is a bit longer now, but I think it is worth it. - Modules are compatible with go v1.14 - Shorten time format for »brig ls« ### Added - New option: fs.repin.pin_unpinned. When set to 'false' it saves traffic and does not pin what is already pinned. When 'true' pins files within pinning requirements (the old behavior); this essentially pre-caches files at the backend but uses traffic and disk space. - New option: fs.sync.pin_added. If false, the sync does not pin/cache files newly added at the remote. This is handy if we want to sync only the metadata but not the content itself. I.e. bandwidth saving mode. The opposite (the old behavior) is also possible if we want to sync the content and are not concerned about bandwidth. - Help for »brig gateway add«. - Added cached size information to listings, show, and internal file or directory info. Technically, it is the same as size of backend, since the cached size could be zero at a given time (TODO rename accordingly). - The cached size is transmitted via capnp. ## [0.4.1 Capricious Clownfish] -- 2019-03-31 A smaller release with some bug fixes and a few new features. Also one bigger stability and speed improvement. Thanks to everyone that gave feedback! ### Fixed - Fix two badger db related crashes that led to a crash in the daemon. One was related to having nested transactions, the other was related to having an open iterator while committing data to the database. - Fix some dependencies that led to errors for some users (thanks @vasket) - The gateway code now tries to reconnect the websocket whenever it was closed due to bad connectivity or similar issues. This led to a state where files were only updated after reloading the page. 
- Several smaller fixes in the remotes view, i.e. the owner name was displayed wrong and most of the settings could not be set outside the test environment. Also the diff output was different in the UI and brig diff. - We now error out early if e.g. »brig ls« was issued, but there is no repo. Before it tried to start a daemon and waited a long time before timing out. - Made »brig mkdir« always prefix a »/« to a path which would lead to funny issues otherwise. ### Added - Add a --offline flag to the following subcommands: ``cat``, ``tar``, ``mount`` and ``fstab add``. These flags will only output files that are locally cached and will not cause timeouts therefore. Trying other files will result in an error. - »brig show« now outputs if a file/directory is locally cached. This is not the same as pinned, since you can pin a file but it might not be cached yet. - Make the gateway host all of its JavaScript, fonts and CSS code itself by baking it into the binary. This will enable people running the gateway in environments where no internet connection is available to reach the CDN used before. - Add the possibility to copy the fingerprint in the UI via a button click. Before the fingerprint was shown over two lines which made copying tricky. - A PKGBUILD for ArchLinux was added, which builds ``brig`` from the ``develop`` branch. Thanks @vasket! ### Changed - The ``brig remote ls`` command no longer does active I/O between nodes to check if a node is authenticated. Instead it relies on info from the peer server which can apply better caching. The peer server is also able to use information from dials and requests to/from other peers to update the ping information. - Switch the internal checksum algorithm to ``blake2s-256`` from ``sha3-256``. This change was made for speed reasons and leads to a slightly different looking checksum format in the command line output. This change MIGHT lead to incompatibilities. 
- Also swap ``scrypt`` with ``argon2`` for key derivation and lower the hashing settings until acceptable performance was achieved. - Replace the Makefile with a magefile, i.e. a build script written in Go only which has no dependencies and can bootstrap itself. - Include IPFS config output in »brig bug«. ### Removed * The old Makefile was removed and replaced with a Go only solution. ## [0.4.0 Capricious Clownfish] -- 2019-03-19 It's only been a few months since the last release (December 2018), but there are a ton of new features / general changes that total in about 15k added lines of code. The biggest changes are definitely refactoring IPFS into its own process and providing a nice UI written in Elm. But those are just two of the biggest ones, see the full list below. As always, ``brig`` is **always looking for contributors.** Anything from feedback to pull requests is greatly appreciated. ### Fixed - Many documentation fixes and updates. - Gateway: Prefer server cipher suites over client's choice. - Gateway: Make sure to enable timeouts. - Bugfix in catfs that could lead to truncated file streams. * Lower the memory hunger of BadgerDB. * Fix a bug that stopped badger transactions when they got too big. ### Added * The IPFS daemon does not live in the ``brig`` process itself anymore. It can now use any existing / running IPFS daemon. If ``ipfs`` is not installed, it will download a local copy and setup a repository in the default place. Notice that this is a completely backwards-incompatible change. * New UI: The Gateway feature was greatly extended and an UI was developed that exposes many features in an easily usable way to people that are used to a Dropbox like interface. See [here](https://brig.readthedocs.io/en/develop/tutorial/gateway.html) for some screenshots of the UI and documentation on how to set it up. 
The gateway supports users with different roles (``admin``, ``editor``, ``collaborator``, ``viewer``, ``link-only``) and also supports logging as anonymous user (not by default!). You can also limit what users can see which folders. * New event subsystems. This enables users to receive updates in "realtime" from other remotes. This is built on top of the experimental pubsub feature of IPFS and thus needs a daemon that was started with ``--enable-pubsub-experiment``. Users can decide to receive updates from a remote by issuing ``brig remote auto-update enable ``. [More details in the documentation](https://brig.readthedocs.io/en/develop/tutorial/remotes.html#automatic-updating). * Change the way pinning works. ``brig`` will not unpin old versions anymore, but leave that to the [repinning settings](https://brig.readthedocs.io/en/develop/tutorial/pinning.html#repinning). This is an automatic process that will make sure to keep at least ``x`` versions, unpin all versions greater than ``y`` and make sure that only a certain filesystem quota is used. * New ``trash`` subcommand that makes it easy to show deleted files (``brig trash ls``) and undelete them again (``brig trash undelete ``). * New ``brig push`` command to ask a remote to sync with us. For this to work the remote needs to allow this to us via ``brig remote auto-push enable ``. See also the [documentation](https://brig.readthedocs.io/en/develop/tutorial/remotes.html#pushing-changes). * New way to handle conflicts: ``embrace`` will always pick the version of the remote you are syncing with. This is especially useful if you are building an archival node where you can push changes to. See also the [documentation](https://brig.readthedocs.io/en/develop/tutorial/remotes.html#conflicts). You can configure the conflict strategy now either globally, per remote or for a specific folder. * Read only folders. 
Those are folders that can be shared with others, but when we synchronize with them, the folder is exempted from any modifications. * Implement automated invocation of the garbage collector of IPFS. By default it is called once per hour and will clean up files that were unpinned. Note that this will also unpin files that are not owned by ``brig``! If you don't want this, you should use a separate IPFS instance for ``brig``. * It's now possible to create ``.tar`` files that are filtered by certain patterns. This functionality is currently only exposed in the gateway, not in the command line. * Easier debugging by having a ``pprof`` server open by default (until we consider the daemon to be stable enough to disable it by default). You can get a performance graph of the last 30s by issuing ``go tool pprof -web "http://localhost:$(brig d p)/debug/pprof/profile?seconds=30"`` * One way install script to easily get a ``brig`` binary in seconds on your computer: ``bash <(curl -s https://raw.githubusercontent.com/sahib/brig/master/scripts/install.sh)`` ### Changed * Starting with this release we will provide pre-compiled binaries for the most common platforms on the [release page](https://github.com/sahib/brig/releases). * Introduce proper linting process (``make lint``) * ``init`` will now set some IPFS config values that improve connectivity and performance of ``brig``. You can disable this via ``--no-ipfs-optimization``. * Disable pre-caching by default due to extreme slow-ness. * Migrate to ``go mod`` since we do not need to deal with ``gx`` packages anymore. * There is no (broken) ``make install`` target anymore. Simply do ``make`` and ``sudo cp brig /usr/local/bin`` or wherever you want to put it. ### Removed * A lot of old code that was there to support running IPFS inside the daemon process. As a side effect, ``brig`` is now much snappier. 
## [0.3.0 Galloping Galapagos] -- 2018-12-07 ### Fixed - Compression guessing is now using Go's http.DetectContentType() ### Added * New gateway subcommand and feature. Now files and directories can be easily shared to non-brig users via a normal webserver. Also includes easy https setup. ### Changed ### Removed ### Deprecated ## [0.2.0 Baffling Buck] -- 2018-11-21 ### Fixed All features mentioned in the documentation should work now. ### Added Many new features, including password management, partial diffs and partial syncing. ### Changed Many internal things. Too many to list in this early stage. ### Removed Nothing substantial. ### Deprecated Nothing. ## [0.1.0 Analphabetic Antelope] -- 2018-04-21 Initial release on the Linux Info Day 2018 in Augsburg. [unreleased]: https://github.com/sahib/rmlint/compare/master...develop [0.1.0]: https://github.com/sahib/brig/releases/tag/v0.1.0 [keepachangelog.com]: http://keepachangelog.com/ ================================================ FILE: Dockerfile ================================================ FROM golang MAINTAINER sahib@online.de # Most test cases can use the pre-defined BRIG_PATH. ENV BRIG_PATH /var/repo RUN mkdir -p $BRIG_PATH ENV BRIG_USER="charlie@wald.de/container" # Build the brig binary: ENV BRIG_SOURCE /go/src/github.com/sahib/brig ENV BRIG_BINARY_PATH /usr/bin/brig COPY . $BRIG_SOURCE WORKDIR $BRIG_SOURCE RUN make # Download IPFS, so the container can startup faster. 
# (brig can also download the binary for you, but later) RUN wget https://dist.ipfs.io/go-ipfs/v0.4.19/go-ipfs_v0.4.19_linux-amd64.tar.gz -O /tmp/ipfs.tar.gz RUN tar xfv /tmp/ipfs.tar.gz -C /tmp RUN cp /tmp/go-ipfs/ipfs /usr/bin EXPOSE 6666 EXPOSE 4001 COPY scripts/docker-normal-startup.sh /bin/run.sh CMD ["/bin/bash", "/bin/run.sh"] ================================================ FILE: LICENSE ================================================ GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. 
A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. 
The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>. ================================================ FILE: PULL_REQUEST_TEMPLATE.md ================================================ Here's a small checklist before publishing your pull request: * Did you ``go fmt`` all code? * Does your code style fit with the rest of the code base? * Did you run ``go run mage.go dev:lint``? * Did you write tests if necessary? * Did you consider if changes to the docs are necessary? * Did you check if you need to add something to CHANGELOG.md? Thank you for your contribution. ================================================ FILE: README.md ================================================ # `brig`: Ship your data around the world
a brig
[![go reportcard](https://goreportcard.com/badge/github.com/sahib/brig)](https://goreportcard.com/report/github.com/sahib/brig) [![GoDoc](https://godoc.org/github.com/sahib/brig?status.svg)](https://godoc.org/github.com/sahib/brig) [![Build Status](https://travis-ci.org/sahib/brig.svg?branch=master)](https://travis-ci.org/sahib/brig) [![Documentation](https://readthedocs.org/projects/rmlint/badge/?version=latest)](http://brig.readthedocs.io/en/latest) [![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1558/badge)](https://bestpractices.coreinfrastructure.org/en/projects/1558) ![brig gateway in the files tab](docs/_static/gateway-files.png) ## Table of Contents - [`brig`: Ship your data around the world](#brig-ship-your-data-around-the-world) - [Table of Contents](#table-of-contents) - [About](#about) - [Installation](#installation) - [Getting started](#getting-started) - [Status](#status) - [Documentation](#documentation) - [Donations](#donations) - [Focus](#focus) ## About `brig` is a distributed & secure file synchronization tool with version control. It is based on `IPFS`, written in Go and will feel familiar to `git` users. **Key feature highlights:** * Encryption of data at rest and in transport + compression on the fly. * Simplified `git` version control. * Sync algorithm that can handle moved files and empty directories and files. * Your data does not need to be stored on the device you are currently using. * FUSE filesystem that feels like a normal (sync) folder. * No central server at all. Still, central architectures can be built with `brig`. * Simple user identification and discovery with users that look like email addresses. Also take a look [at the documentation](http://brig.readthedocs.io/en/latest/index.html) for more details. 
## Installation You can download the latest script with the following oneliner: ```bash # Before you execute this, ask yourself if you trust me. $ bash <(curl -s https://raw.githubusercontent.com/sahib/brig/master/scripts/install.sh) ``` Alternatively, you can simply grab the latest binary from the [release tab](https://github.com/sahib/brig/releases). Development versions can be installed easily by compiling yourself. If you have a recent version of `go` (`>= 1.10`) installed, it should be as easy as this: ```bash $ go get -d -v -u github.com/sahib/brig # Download the sources. $ cd $GOPATH/src/github.com/sahib/brig # Go to the source directory. $ git checkout develop # Checkout the develop branch. $ go run mage.go # Build the software. $ $GOPATH/bin/brig help # Run the binary. ``` Please refer to the [install docs](https://brig.readthedocs.io/en/latest/installation.html) for more details. ## Getting started [![asciicast](https://asciinema.org/a/163713.png)](https://asciinema.org/a/163713) ...If you want to know, what to do after you can read the [Quickstart](http://brig.readthedocs.io/en/latest/quickstart.html). There is also a ``#brig`` room on ``matrix.org`` you can join with any [Matrix](https://matrix.org) client. Click [this link](https://riot.im/app/#/room/#brig:matrix.org) to join the room directly via [Riot.im](https://about.riot.im). ## Status This software is in a **beta phase** currently. All mentioned features should work. Things might still change rapidly and there will be no guarantees given before version `1.0.0`. Do not use `brig` yet as only storage for your production data. There are still bugs, but it should be safe enough to toy around with it quite a bit. This project has started end of 2015 and has seen many conceptual changes in the meantime. It started out as research project. 
After writing my [master's thesis](https://github.com/disorganizer/brig-thesis) on it, it was put down for a few months until I picked it up again and am currently trying to push it to usable software. If you want to open a bug report, just type `brig bug` to get a readily filled template for you.

## Documentation

All documentation can be found on [ReadTheDocs.org](http://brig.readthedocs.io/en/latest/index.html).

## Donations

If you're interested in the development and would think about supporting me financially, then please [contact me!](mailto:sahib@online.de)

If you'd like to give me a small & steady donation, you can always use *Liberapay*:

*Thank you!*

================================================
FILE: Taskfile.yml
================================================
# This file controls how brig is built.
# It is a nicer to use alternative to Makefiles.
# Please read the documentation over at:
#
# https://taskfile.dev
#
# The actual commands that do the work are written in bash.
# See the scripts/ folder for them.
#
# When changing the structure of the repository, please remember
# to update the "sources" list in this file if dependencies
# of a build target were added, removed or changed.

version: '3'

tasks:
  default:
    deps: [build]

  elm:
    desc: "Compile elm sources to Javascript"
    cmds:
      - cd gateway/elm && elm make src/Main.elm --output ../static/js/app.js
    sources:
      - ./gateway/elm/**/*.elm
    generates:
      - ./gateway/static/js/app.js
    method: checksum
    summary: |
      Build the elm frontend.
generate: desc: "Generate build dependencies" cmds: - scripts/generate.sh sources: - scripts/generate.sh - ./**/*.capnp - ./gateway/static/**/**/**/** build: deps: [generate] desc: "Build the brig binary" cmds: - ./scripts/build.sh sources: - ./scripts/build.sh - go.mod - ./*.go - ./**/*.go test: desc: "Run integration & unit tests" cmds: - bash scripts/run-tests.sh lint: desc: "Run static linters on the code" cmds: - bash scripts/run-linter.sh sloc: desc: "Count the lines of code" cmds: - bash scripts/count-lines-of-code.sh ================================================ FILE: autocomplete/bash_autocomplete ================================================ #!/bin/bash # This should be installed to /etc/bash_completion.d/brig and sourced. # If you want to try out the autocompletion, just source this file. _cli_bash_autocomplete() { local cur opts base COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) return 0 } complete -F _cli_bash_autocomplete brig ================================================ FILE: autocomplete/zsh_autocomplete ================================================ _cli_zsh_autocomplete() { local -a opts opts=("${(@f)$(_CLI_ZSH_AUTOCOMPLETE_HACK=1 ${words[@]:0:#words[@]-1} --generate-bash-completion)}") _describe 'values' opts return } compdef _cli_zsh_autocomplete brig ================================================ FILE: backend/backend.go ================================================ package backend import ( "errors" "io" "os" "github.com/sahib/brig/backend/httpipfs" "github.com/sahib/brig/backend/mock" "github.com/sahib/brig/catfs" eventsBackend "github.com/sahib/brig/events/backend" netBackend "github.com/sahib/brig/net/backend" "github.com/sahib/brig/repo" log "github.com/sirupsen/logrus" ) var ( // ErrNoSuchBackend is returned when passing an invalid backend name ErrNoSuchBackend = errors.New("No such backend") ) // VersionInfo 
is a small interface that will return version info about the // backend. type VersionInfo interface { SemVer() string Name() string Rev() string } // Backend is a amalgamation of all backend interfaces required for brig to work. type Backend interface { repo.Backend catfs.FsBackend netBackend.Backend eventsBackend.Backend } // ForwardLogByName will forward the logs of the backend `name` to `w`. func ForwardLogByName(name string, w io.Writer) error { switch name { case "httpipfs": return nil case "mock": return nil } return ErrNoSuchBackend } // FromName returns a suitable backend for a human readable name. // If an invalid name is passed, nil is returned. func FromName(name, path, fingerprint string) (Backend, error) { switch name { case "httpipfs": return httpipfs.NewNode(path, fingerprint) case "mock": user := "alice" if envUser := os.Getenv("BRIG_MOCK_USER"); envUser != "" { user = envUser } if envNetDbPath := os.Getenv("BRIG_MOCK_NET_DB_PATH"); envNetDbPath != "" { path = envNetDbPath } return mock.NewMockBackend(path, user), nil } return nil, ErrNoSuchBackend } // Version returns version info for the backend `name`. func Version(name, path string) VersionInfo { switch name { case "mock": return mock.Version() case "httpipfs": nd, err := httpipfs.NewNode(path, "") if err != nil { log.Debugf("failed to get version") return nil } defer nd.Close() return nd.Version() default: return nil } } ================================================ FILE: backend/httpipfs/gc.go ================================================ package httpipfs import ( "bufio" "bytes" "context" "encoding/json" e "github.com/pkg/errors" h "github.com/sahib/brig/util/hashlib" log "github.com/sirupsen/logrus" ) // GC will trigger the garbage collector of IPFS. 
// Cleaned up hashes will be returned as a list // (note that those hashes are not always ours) func (nd *Node) GC() ([]h.Hash, error) { ctx := context.Background() resp, err := nd.sh.Request("repo/gc").Send(ctx) if err != nil { return nil, e.Wrapf(resp.Error, "gc request") } defer resp.Close() if resp.Error != nil { return nil, e.Wrapf(resp.Error, "gc resp") } hs := []h.Hash{} br := bufio.NewReader(resp.Output) for { line, err := br.ReadBytes('\n') if err != nil { break } raw := struct { Key map[string]string }{} lr := bytes.NewReader(line) if err := json.NewDecoder(lr).Decode(&raw); err != nil { return nil, e.Wrapf(err, "json decode") } for _, cid := range raw.Key { h, err := h.FromB58String(cid) if err != nil { return nil, e.Wrapf(err, "gc: hash decode") } hs = append(hs, h) } } log.Debugf("GC returned %d hashes", len(hs)) return hs, nil } ================================================ FILE: backend/httpipfs/gc_test.go ================================================ package httpipfs import ( "bytes" "testing" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func TestGC(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithIpfs(t, 1, func(t *testing.T, ipfsPath string) { nd, err := NewNode(ipfsPath, "") require.Nil(t, err) data := testutil.CreateDummyBuf(4096 * 1024) hash, err := nd.Add(bytes.NewReader(data)) require.Nil(t, err) require.Nil(t, nd.Unpin(hash)) hashes, err := nd.GC() require.Nil(t, err) require.True(t, len(hashes) > 0) }) } ================================================ FILE: backend/httpipfs/io.go ================================================ package httpipfs import ( "context" "encoding/json" "fmt" "io" "io/ioutil" "sync" shell "github.com/ipfs/go-ipfs-api" "github.com/sahib/brig/catfs/mio" h "github.com/sahib/brig/util/hashlib" ) func cat(s *shell.Shell, path string, offset int64) (io.ReadCloser, error) { rb := s.Request("cat", path) rb.Option("offset", offset) resp, err := 
rb.Send(context.Background()) if err != nil { return nil, err } if resp.Error != nil { return nil, resp.Error } return resp.Output, nil } type streamWrapper struct { mu sync.Mutex io.ReadCloser nd *Node hash h.Hash off int64 size int64 } func (sw *streamWrapper) Read(buf []byte) (int, error) { sw.mu.Lock() defer sw.mu.Unlock() n, err := sw.ReadCloser.Read(buf) if err != nil { return n, err } sw.off += int64(n) return n, err } func (sw *streamWrapper) WriteTo(w io.Writer) (int64, error) { sw.mu.Lock() defer sw.mu.Unlock() return io.Copy(w, sw) } func (sw *streamWrapper) cachedSize() (int64, error) { ctx := context.Background() resp, err := sw.nd.sh.Request( "files/stat", "/ipfs/"+sw.hash.B58String(), ).Send(ctx) if err != nil { return -1, err } defer resp.Close() if resp.Error != nil { return -1, resp.Error } raw := struct { Size int64 }{} if err := json.NewDecoder(resp.Output).Decode(&raw); err != nil { return -1, err } return raw.Size, nil } func (sw *streamWrapper) getAbsOffset(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: sw.off = offset return offset, nil case io.SeekCurrent: sw.off += offset return sw.off, nil case io.SeekEnd: size, err := sw.cachedSize() if err != nil { return -1, err } sw.off = size + offset return sw.off, nil default: return -1, fmt.Errorf("invalid whence: %v", whence) } } // TODO: Seek is currently freaking expensive. // Does IPFS maybe offer a better way to do this? func (sw *streamWrapper) Seek(offset int64, whence int) (int64, error) { sw.mu.Lock() defer sw.mu.Unlock() absOffset, err := sw.getAbsOffset(offset, whence) if err != nil { return -1, err } rc, err := cat(sw.nd.sh, sw.hash.B58String(), absOffset) if err != nil { return -1, err } if sw.ReadCloser != nil { // Not sure if that is even needed... // TODO: measure memory consumption and see if we can do // without discarding left over bytes. 
go func(rc io.ReadCloser) { io.Copy(ioutil.Discard, rc) rc.Close() }(sw.ReadCloser) } sw.off = absOffset sw.ReadCloser = rc return absOffset, nil } // Cat returns a stream associated with `hash`. func (nd *Node) Cat(hash h.Hash) (mio.Stream, error) { rc, err := cat(nd.sh, hash.B58String(), 0) if err != nil { return nil, err } return &streamWrapper{ nd: nd, hash: hash, ReadCloser: rc, off: 0, size: -1, }, nil } // Add puts the contents of `r` into IPFS and returns its hash. func (nd *Node) Add(r io.Reader) (h.Hash, error) { hs, err := nd.sh.Add(r) if err != nil { return nil, err } return h.FromB58String(hs) } ================================================ FILE: backend/httpipfs/io_test.go ================================================ package httpipfs import ( "bytes" "fmt" "io" "io/ioutil" "testing" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func TestAddCatBasic(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithIpfs(t, 1, func(t *testing.T, ipfsPath string) { nd, err := NewNode(ipfsPath, "") require.Nil(t, err) data := testutil.CreateDummyBuf(4096 * 1024) hash, err := nd.Add(bytes.NewReader(data)) require.Nil(t, err) fmt.Println(hash) stream, err := nd.Cat(hash) require.Nil(t, err) echoData, err := ioutil.ReadAll(stream) require.Nil(t, err) require.Equal(t, data, echoData) }) } func TestAddCatSize(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithIpfs(t, 1, func(t *testing.T, ipfsPath string) { nd, err := NewNode(ipfsPath, "") require.Nil(t, err) data := testutil.CreateDummyBuf(4096 * 1024) hash, err := nd.Add(bytes.NewReader(data)) require.Nil(t, err) stream, err := nd.Cat(hash) require.Nil(t, err) size, err := stream.Seek(0, io.SeekEnd) require.Nil(t, err) require.Equal(t, int64(len(data)), size) off, err := stream.Seek(0, io.SeekStart) require.Nil(t, err) require.Equal(t, int64(0), off) echoData, err := ioutil.ReadAll(stream) require.Nil(t, err) require.Equal(t, data, echoData) }) 
} ================================================ FILE: backend/httpipfs/net.go ================================================ package httpipfs import ( "context" "encoding/json" "errors" "fmt" "io/ioutil" "net" "os" "path" "path/filepath" "sync" "time" shell "github.com/ipfs/go-ipfs-api" netBackend "github.com/sahib/brig/net/backend" "github.com/sahib/brig/util" log "github.com/sirupsen/logrus" ) type connWrapper struct { net.Conn peer string protocol string targetAddr string sh *shell.Shell } func (cw *connWrapper) LocalAddr() net.Addr { return &addrWrapper{ protocol: cw.protocol, peer: "", } } func (cw *connWrapper) RemoteAddr() net.Addr { return &addrWrapper{ protocol: cw.protocol, peer: cw.peer, } } func (cw *connWrapper) Close() error { defer cw.Conn.Close() return closeStream(cw.sh, cw.protocol, "", cw.targetAddr) } // Dial will open a connection to the peer identified by `peerHash`, // running `protocol` over it. func (nd *Node) Dial(peerHash, fingerprint, protocol string) (net.Conn, error) { if !nd.isOnline() { return nil, ErrOffline } self, err := nd.Identity() if err != nil { return nil, err } if self.Addr == peerHash { // Special case: // When we use the same IPFS daemon for different // brig repositiories, we want still to be able to dial // other brig instances. 
Since we cannot dial over ipfs // we simply have the port written to /tmp where // we can pick it up on Dial() addr, err := readLocalAddr(peerHash, fingerprint) if err != nil { return nil, err } return net.Dial("tcp", addr) } protocol = path.Join(protocol, peerHash) port := util.FindFreePort() addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port) if err := forward(nd.sh, protocol, addr, peerHash); err != nil { return nil, err } tcpAddr := fmt.Sprintf("127.0.0.1:%d", port) log.Debugf("dial to »%s« over port %d", peerHash, port) conn, err := net.Dial("tcp", tcpAddr) if err != nil { return nil, err } return &connWrapper{ Conn: conn, peer: peerHash, protocol: protocol, targetAddr: addr, sh: nd.sh, }, nil } ////////////////////////// func forward(sh *shell.Shell, protocol, targetAddr, peerID string) error { ctx := context.Background() peerID = "/ipfs/" + peerID rb := sh.Request("p2p/forward", protocol, targetAddr, peerID) rb.Option("allow-custom-protocol", true) resp, err := rb.Send(ctx) if err != nil { return err } defer resp.Close() if resp.Error != nil { return resp.Error } return nil } func openListener(sh *shell.Shell, protocol, targetAddr string) error { ctx := context.Background() rb := sh.Request("p2p/listen", protocol, targetAddr) rb.Option("allow-custom-protocol", true) resp, err := rb.Send(ctx) if err != nil { return err } defer resp.Close() if err := resp.Error; err != nil { return err } return nil } func closeStream(sh *shell.Shell, protocol, targetAddr, listenAddr string) error { ctx := context.Background() rb := sh.Request("p2p/close") rb.Option("protocol", protocol) if targetAddr != "" { rb.Option("target-address", targetAddr) } if listenAddr != "" { rb.Option("listen-address", listenAddr) } resp, err := rb.Send(ctx) if err != nil { return err } defer resp.Close() if resp.Error != nil { return resp.Error } return nil } type addrWrapper struct { protocol string peer string } func (sa *addrWrapper) Network() string { return sa.protocol } func (sa 
*addrWrapper) String() string { return sa.peer } type listenerWrapper struct { lst net.Listener protocol string peer string targetAddr string fingerprint string sh *shell.Shell } func (lw *listenerWrapper) Accept() (net.Conn, error) { conn, err := lw.lst.Accept() if err != nil { return nil, err } return &connWrapper{ Conn: conn, peer: lw.peer, protocol: lw.protocol, targetAddr: lw.targetAddr, sh: lw.sh, }, nil } func (lw *listenerWrapper) Addr() net.Addr { return &addrWrapper{ protocol: lw.protocol, peer: lw.peer, } } func (lw *listenerWrapper) Close() error { defer lw.lst.Close() defer deleteLocalAddr(lw.peer, lw.fingerprint) return closeStream(lw.sh, lw.protocol, lw.targetAddr, "") } func buildLocalAddrPath(id, fingerprint string) string { return filepath.Join(os.TempDir(), fmt.Sprintf("brig-%s:%s.addr", id, fingerprint)) } func readLocalAddr(id, fingerprint string) (string, error) { path := buildLocalAddrPath(id, fingerprint) data, err := ioutil.ReadFile(path) if err != nil { return "", err } return string(data), nil } func deleteLocalAddr(id, fingerprint string) error { path := buildLocalAddrPath(id, fingerprint) return os.RemoveAll(path) } func writeLocalAddr(id, fingerprint, addr string) error { path := buildLocalAddrPath(id, fingerprint) return ioutil.WriteFile(path, []byte(addr), 0644) } // Listen will listen to the protocol func (nd *Node) Listen(protocol string) (net.Listener, error) { if !nd.isOnline() { return nil, ErrOffline } self, err := nd.Identity() if err != nil { return nil, err } // TODO: Is this even needed still? // Do we want support for having more than one brig per ipfs. 
// Append the id to the protocol: protocol = path.Join(protocol, self.Addr) port := util.FindFreePort() addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port) // Prevent errors by closing any previously opened listeners: if err := closeStream(nd.sh, protocol, "", ""); err != nil { return nil, err } log.Debugf("backend: listening for %s over port %d", protocol, port) if err := openListener(nd.sh, protocol, addr); err != nil { return nil, err } localAddr := fmt.Sprintf("127.0.0.1:%d", port) lst, err := net.Listen("tcp", localAddr) if err != nil { return nil, err } if err := writeLocalAddr(self.Addr, nd.fingerprint, localAddr); err != nil { return nil, err } return &listenerWrapper{ lst: lst, protocol: protocol, peer: self.Addr, targetAddr: addr, fingerprint: nd.fingerprint, sh: nd.sh, }, nil } ///////////////////////////////// type pinger struct { lastSeen time.Time roundtrip time.Duration err error mu sync.Mutex cancel func() nd *Node } // LastSeen returns the time we pinged the remote last time. func (p *pinger) LastSeen() time.Time { p.mu.Lock() defer p.mu.Unlock() return p.lastSeen } // Roundtrip returns the time needed send a single package to // the remote and receive the answer. func (p *pinger) Roundtrip() time.Duration { p.mu.Lock() defer p.mu.Unlock() return p.roundtrip } // Err will return a non-nil error when the current ping did not succeed. func (p *pinger) Err() error { p.mu.Lock() defer p.mu.Unlock() return p.err } // Close will clean up the pinger. func (p *pinger) Close() error { if p.cancel != nil { p.cancel() p.cancel = nil } return nil } func (p *pinger) update(ctx context.Context, addr, self string) { // Edge case: test setups where we ping ourselves. 
if self == addr {
		p.mu.Lock()
		p.err = nil
		p.lastSeen = time.Now()
		p.roundtrip = time.Duration(0)
		p.mu.Unlock()
		return
	}

	// Do the network op without a lock:
	roundtrip, err := ping(p.nd.sh, addr)

	p.mu.Lock()
	if err != nil {
		p.err = err
	} else {
		p.err = nil
		p.lastSeen = time.Now()
		p.roundtrip = roundtrip
	}
	p.mu.Unlock()
}

// Run pings `addr` once immediately and then every 10 seconds
// until `ctx` is canceled.
func (p *pinger) Run(ctx context.Context, addr string) error {
	self, err := p.nd.Identity()
	if err != nil {
		return err
	}

	p.update(ctx, addr, self.Addr)

	tckr := time.NewTicker(10 * time.Second)

	// BUGFIX: stop the ticker once Run returns; otherwise its
	// resources are never released.
	defer tckr.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-tckr.C:
			p.update(ctx, addr, self.Addr)
		}
	}
}

// ping issues a single ping request to `peerID` and returns
// the measured roundtrip time.
func ping(sh *shell.Shell, peerID string) (time.Duration, error) {
	ctx := context.Background()
	resp, err := sh.Request("ping", peerID).Send(ctx)
	if err != nil {
		return 0, err
	}

	defer resp.Close()

	if resp.Error != nil {
		return 0, resp.Error
	}

	raw := struct {
		Success bool
		Time    int64
	}{}

	if err := json.NewDecoder(resp.Output).Decode(&raw); err != nil {
		return 0, err
	}

	if raw.Success {
		return time.Duration(raw.Time), nil
	}

	return 0, fmt.Errorf("no ping")
}

// ErrWaiting is the initial error state of a pinger.
// The error will be unset once a successful ping was made.
var ErrWaiting = errors.New("waiting for route")

// Ping will return a pinger for `addr`.
func (nd *Node) Ping(addr string) (netBackend.Pinger, error) { if !nd.isOnline() { return nil, ErrOffline } log.Debugf("backend: start ping »%s«", addr) p := &pinger{ nd: nd, err: ErrWaiting, } ctx, cancel := context.WithCancel(context.Background()) p.cancel = cancel go p.Run(ctx, addr) return p, nil } ================================================ FILE: backend/httpipfs/net_test.go ================================================ package httpipfs import ( "bytes" "io" "testing" "time" "github.com/stretchr/testify/require" ) const ( TestProtocol = "/brig/test/1.0" ) var ( TestMessage = []byte("Hello World!") ) func testClientSide(t *testing.T, ipfsPathB string, addr string) { nd, err := NewNode(ipfsPathB, "") require.Nil(t, err) conn, err := nd.Dial(addr, "", TestProtocol) require.Nil(t, err) defer func() { require.Nil(t, conn.Close()) }() _, err = conn.Write(TestMessage) require.Nil(t, err) } func TestDialAndListen(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithDoubleIpfs(t, 1, func(t *testing.T, ipfsPathA, ipfsPathB string) { nd, err := NewNode(ipfsPathA, "") require.Nil(t, err) lst, err := nd.Listen(TestProtocol) require.Nil(t, err) defer func() { require.Nil(t, lst.Close()) }() id, err := nd.Identity() require.Nil(t, err) go testClientSide(t, ipfsPathB, id.Addr) conn, err := lst.Accept() require.Nil(t, err) buf := &bytes.Buffer{} _, err = io.Copy(buf, conn) require.Nil(t, err) require.Equal(t, TestMessage, buf.Bytes()) }) } func TestPing(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithDoubleIpfs(t, 1, func(t *testing.T, ipfsPathA, ipfsPathB string) { ndA, err := NewNode(ipfsPathA, "") require.NoError(t, err) idA, err := ndA.Identity() require.NoError(t, err) pinger, err := ndA.Ping(idA.Addr) require.NoError(t, err) defer func() { require.NoError(t, pinger.Close()) }() for idx := 0; idx < 60; idx++ { if pinger.Err() != ErrWaiting { break } time.Sleep(1 * time.Second) } require.Nil(t, pinger.Err()) 
require.True(t, pinger.Roundtrip() < time.Second) require.True(t, time.Since(pinger.LastSeen()) < 2*time.Second) }) } func TestDialAndListenOnSingleNode(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithIpfs(t, 1, func(t *testing.T, ipfsPath string) { nd, err := NewNode(ipfsPath, "") require.Nil(t, err) lst, err := nd.Listen(TestProtocol) require.Nil(t, err) defer func() { require.Nil(t, lst.Close()) }() id, err := nd.Identity() require.Nil(t, err) go testClientSide(t, ipfsPath, id.Addr) conn, err := lst.Accept() require.Nil(t, err) buf := &bytes.Buffer{} _, err = io.Copy(buf, conn) require.Nil(t, err) require.Equal(t, TestMessage, buf.Bytes()) }) } func TestPingSelf(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") WithIpfs(t, 1, func(t *testing.T, ipfsPath string) { nd, err := NewNode(ipfsPath, "") require.Nil(t, err) id, err := nd.Identity() require.Nil(t, err) pinger, err := nd.Ping(id.Addr) require.Nil(t, err) defer func() { require.Nil(t, pinger.Close()) }() for idx := 0; idx < 60; idx++ { if pinger.Err() != ErrWaiting { break } time.Sleep(250 * time.Millisecond) } require.Nil(t, pinger.Err()) require.True(t, pinger.Roundtrip() < time.Second) require.True(t, time.Since(pinger.LastSeen()) < 2*time.Second) }) } ================================================ FILE: backend/httpipfs/pin.go ================================================ package httpipfs import ( "context" "encoding/json" "io" "strings" "github.com/patrickmn/go-cache" h "github.com/sahib/brig/util/hashlib" ) // IsPinned returns true when `hash` is pinned in some way. 
func (nd *Node) IsPinned(hash h.Hash) (bool, error) { ctx := context.Background() resp, err := nd.sh.Request("pin/ls", hash.B58String()).Send(ctx) if err != nil { return false, err } defer resp.Close() if resp.Error != nil { if strings.HasSuffix(resp.Error.Message, "is not pinned") { return false, nil } return false, resp.Error } raw := struct { Keys map[string]struct { Type string } }{} if err := json.NewDecoder(resp.Output).Decode(&raw); err != nil { return false, err } if len(raw.Keys) == 0 { return false, nil } return true, nil } // Pin will pin `hash`. func (nd *Node) Pin(hash h.Hash) error { return nd.sh.Pin(hash.B58String()) } // Unpin will unpin `hash`. func (nd *Node) Unpin(hash h.Hash) error { err := nd.sh.Unpin(hash.B58String()) if err == nil || err.Error() == "pin/rm: not pinned or pinned indirectly" { return nil } return err } type objectRef struct { Ref string // hash of the ref Err string } // Link is a child of a hash. // Used by IPFS when files get bigger. type Link struct { Name string Hash string Size uint64 } // IsCached checks if hash and all its children are cached func (nd *Node) IsCached(hash h.Hash) (bool, error) { locallyCached := nd.cache.locallyCached stat, found := locallyCached.Get(hash.B58String()) if found { return stat.(bool), nil } // Nothing in the cache, we have to figure it out. 
// We will execute equivalent of
	// ipfs refs --offline --recursive hash
	// note the `--recursive` switch, we need to check all children links
	// if command fails at least one child link/hash is missing
	ctx := context.Background()
	req := nd.sh.Request("refs", hash.B58String())
	req.Option("offline", "true")
	req.Option("recursive", "true")
	resp, err := req.Send(ctx)
	if err != nil {
		return false, err
	}

	defer resp.Close()

	if resp.Error != nil {
		return false, resp.Error
	}

	ref := objectRef{}
	jsonDecoder := json.NewDecoder(resp.Output)
	for {
		// The daemon streams one JSON object per referenced child:
		if err := jsonDecoder.Decode(&ref); err == io.EOF {
			break
		} else if err != nil {
			return false, err
		}

		if ref.Err != "" {
			// Either main hash or one of its refs/links is not available locally
			// consequently the whole hash is not cached
			locallyCached.Set(hash.B58String(), false, cache.DefaultExpiration)
			return false, nil
		}
	}

	// if we are here, the parent hash and all its children links/hashes are cached
	locallyCached.Set(hash.B58String(), true, cache.DefaultExpiration)
	return true, nil
}

// CachedSize returns the cached size of the node.
// A negative size indicates "unknown", either due to an error or
// because the hash is not stored locally.
func (nd *Node) CachedSize(hash h.Hash) (int64, error) {
	ctx := context.Background()
	req := nd.sh.Request("object/stat", hash.B58String())

	// provides backend size only for cached objects
	req.Option("offline", "true")
	resp, err := req.Send(ctx)
	if err != nil {
		return -1, err
	}

	defer resp.Close()

	if resp.Error != nil {
		return -1, resp.Error
	}

	raw := struct {
		CumulativeSize int64
		Key            string
	}{}

	if err := json.NewDecoder(resp.Output).Decode(&raw); err != nil {
		return -1, err
	}

	return raw.CumulativeSize, nil
}


================================================
FILE: backend/httpipfs/pin_test.go
================================================
package httpipfs

import (
	"bytes"
	"testing"

	h "github.com/sahib/brig/util/hashlib"
	"github.com/sahib/brig/util/testutil"
	"github.com/stretchr/testify/require"
)

func TestPinUnpin(t *testing.T) {
	t.Skipf("will be replaced by bash based e2e tests")

	WithIpfs(t, 1, func(t *testing.T, ipfsPath string) {
		nd, err := NewNode(ipfsPath, "")
		require.Nil(t, err)

		data := testutil.CreateDummyBuf(4096 * 1024)
		hash, err := nd.Add(bytes.NewReader(data))
		require.Nil(t, err)

		isPinned, err := nd.IsPinned(hash)
		require.Nil(t, err)
		require.True(t, isPinned)

		require.Nil(t, nd.Unpin(hash))

		isPinned, err = nd.IsPinned(hash)
		require.Nil(t, err)
		require.False(t, isPinned)

		require.Nil(t, nd.Pin(hash))

		isPinned, err = nd.IsPinned(hash)
		require.Nil(t, err)
		require.True(t, isPinned)
	})
}

func TestIsCached(t *testing.T) {
	t.Skipf("will be replaced by bash based e2e tests")

	WithIpfs(t, 1, func(t *testing.T, ipfsPath string) {
		nd, err := NewNode(ipfsPath, "")
		require.Nil(t, err)

		hash, err := nd.Add(bytes.NewReader([]byte{1, 2, 3}))
		require.Nil(t, err)

		isCached, err := nd.IsCached(hash)
		require.Nil(t, err)
		require.True(t, isCached)

		// Let's just hope this hash does not exist locally:
		dummyHash, err := h.FromB58String("QmanyEbg6appBzzGaGMZm9NKqPVCbrWaB8ayGDerWh6aMB")
		require.Nil(t, err)
isCached, err = nd.IsCached(dummyHash) require.Nil(t, err) require.False(t, isCached) }) } ================================================ FILE: backend/httpipfs/pubsub.go ================================================ package httpipfs import ( "context" shell "github.com/ipfs/go-ipfs-api" eventsBackend "github.com/sahib/brig/events/backend" ) type subWrapper struct { sub *shell.PubSubSubscription } type msgWrapper struct { msg *shell.Message } func (msg *msgWrapper) Data() []byte { return msg.msg.Data } func (msg *msgWrapper) Source() string { return string(msg.msg.From) } func (s *subWrapper) Next(ctx context.Context) (eventsBackend.Message, error) { msg, err := s.sub.Next() if err != nil { return nil, err } return &msgWrapper{msg: msg}, nil } func (s *subWrapper) Close() error { return s.sub.Cancel() } // Subscribe will create a subscription for `topic`. // You can use the subscription to wait for the next incoming message. // This will only work if the daemon supports/has enabled pub sub. func (nd *Node) Subscribe(ctx context.Context, topic string) (eventsBackend.Subscription, error) { if !nd.isOnline() { return nil, ErrOffline } sub, err := nd.sh.PubSubSubscribe(topic) if err != nil { return nil, err } return &subWrapper{sub: sub}, nil } // PublishEvent will publish `data` on `topic`. func (nd *Node) PublishEvent(topic string, data []byte) error { if !nd.isOnline() { return ErrOffline } return nd.sh.PubSubPublish(topic, string(data)) } ================================================ FILE: backend/httpipfs/pubsub_test.go ================================================ package httpipfs import ( "context" "testing" "time" "github.com/stretchr/testify/require" ) func TestPubSub(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") // Only use one ipfs instance, for test performance. 
WithIpfs(t, 1, func(t *testing.T, ipfsPath string) { nd, err := NewNode(ipfsPath, "") require.Nil(t, err) self, err := nd.Identity() require.Nil(t, err) ctx := context.Background() sub, err := nd.Subscribe(ctx, "test-topic") require.Nil(t, err) defer func() { require.Nil(t, sub.Close()) }() time.Sleep(1 * time.Second) data := []byte("hello world!") go nd.PublishEvent("test-topic", data) msg, err := sub.Next(ctx) require.Nil(t, err) require.Equal(t, data, msg.Data()) require.Equal(t, self.Addr, msg.Source()) }) } ================================================ FILE: backend/httpipfs/resolve.go ================================================ package httpipfs import ( "bufio" "bytes" "context" "encoding/json" shell "github.com/ipfs/go-ipfs-api" ipfsutil "github.com/ipfs/go-ipfs-util" mh "github.com/multiformats/go-multihash" "github.com/sahib/brig/net/peer" h "github.com/sahib/brig/util/hashlib" log "github.com/sirupsen/logrus" ) // PublishName will announce `name` to the network // and make us discoverable. func (nd *Node) PublishName(name string) error { if !nd.isOnline() { return ErrOffline } fullName := "brig:" + string(name) key, err := nd.sh.BlockPut([]byte(fullName), "v0", "sha2-256", -1) log.Debugf("published name: »%s« (key %s)", name, key) return err } // Identity returns our own identity. // It will cache the identity after the first request. 
func (nd *Node) Identity() (peer.Info, error) { nd.mu.Lock() if nd.cachedIdentity != "" { defer nd.mu.Unlock() return peer.Info{ Name: "httpipfs", Addr: nd.cachedIdentity, }, nil } // Do not hold the lock during net ops: nd.mu.Unlock() id, err := nd.sh.ID() if err != nil { return peer.Info{}, err } nd.mu.Lock() nd.cachedIdentity = id.ID nd.mu.Unlock() return peer.Info{ Name: "httpipfs", Addr: id.ID, }, nil } func findProvider(ctx context.Context, sh *shell.Shell, hash h.Hash) ([]string, error) { resp, err := sh.Request("dht/findprovs", hash.B58String()).Send(ctx) if err != nil { return nil, err } defer resp.Output.Close() if resp.Error != nil { return nil, resp.Error } ids := make(map[string]bool) br := bufio.NewReader(resp.Output) interrupted := false for len(ids) < 20 && !interrupted { line, err := br.ReadBytes('\n') if err != nil { break } raw := struct { Responses []struct { ID string } }{} lr := bytes.NewReader(line) if err := json.NewDecoder(lr).Decode(&raw); err != nil { return nil, err } for _, resp := range raw.Responses { ids[resp.ID] = true } select { case <-ctx.Done(): interrupted = true break } } linearIDs := []string{} for id := range ids { linearIDs = append(linearIDs, id) } return linearIDs, nil } // ResolveName will return all peers that identify themselves as `name`. // If ctx is canceled it will return early, but return no error. 
func (nd *Node) ResolveName(ctx context.Context, name string) ([]peer.Info, error) { if !nd.isOnline() { return nil, ErrOffline } name = "brig:" + name mhash, err := mh.Sum([]byte(name), ipfsutil.DefaultIpfsHash, -1) if err != nil { return nil, err } log.Debugf("backend: resolve »%s« (%s)", name, mhash.B58String()) ids, err := findProvider(ctx, nd.sh, h.Hash(mhash)) if err != nil { return nil, err } infos := []peer.Info{} for _, id := range ids { infos = append(infos, peer.Info{ Addr: id, Name: peer.Name(name), }) } return infos, nil } ================================================ FILE: backend/httpipfs/resolve_test.go ================================================ package httpipfs import ( "context" "fmt" "testing" "time" "github.com/stretchr/testify/require" ) func TestPublishResolve(t *testing.T) { t.Skipf("will be replaced by bash based e2e tests") // Only use one ipfs instance, for test performance. WithDoubleIpfs(t, 1, func(t *testing.T, ipfsPathA, ipfsPathB string) { ndA, err := NewNode(ipfsPathA, "") require.Nil(t, err) ndB, err := NewNode(ipfsPathB, "") require.Nil(t, err) // self, err := ndA.Identity() // require.Nil(t, err) require.Nil(t, ndA.PublishName("alice")) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() infos, err := ndB.ResolveName(ctx, "alice") require.Nil(t, err) // TODO: This test doesn't produce results yet, // most likely because of time issues (would need to run longer?) 
fmt.Println(infos) }) } ================================================ FILE: backend/httpipfs/shell.go ================================================ package httpipfs import ( "context" "encoding/json" "errors" "os" "path/filepath" "sync" "time" "github.com/blang/semver" shell "github.com/ipfs/go-ipfs-api" ma "github.com/multiformats/go-multiaddr" "github.com/patrickmn/go-cache" "github.com/sahib/brig/repo/setup" log "github.com/sirupsen/logrus" ) var ( // ErrOffline is returned by operations that need online support // to work when the backend is in offline mode. ErrOffline = errors.New("backend is in offline mode") ) // IpfsStateCache contains various backend related caches type IpfsStateCache struct { locallyCached *cache.Cache // shows if the hash and its children is locally cached by ipfs } // Node is the struct that holds the httpipfs backend together. // It is a shallow type that has not much own state and is very light. type Node struct { sh *shell.Shell mu sync.Mutex cachedIdentity string allowNetOps bool fingerprint string version *semver.Version cache *IpfsStateCache quiet bool } func getExperimentalFeatures(sh *shell.Shell) (map[string]bool, error) { ctx := context.Background() resp, err := sh.Request("config/show").Send(ctx) if err != nil { return nil, err } defer resp.Close() if resp.Error != nil { return nil, resp.Error } raw := struct { Experimental map[string]bool }{} if err := json.NewDecoder(resp.Output).Decode(&raw); err != nil { return nil, err } return raw.Experimental, nil } // Option is a option you can pass to NewNode() // It controls the behavior of the node. type Option func(nd *Node) // WithNoLogging will make the node not print log messages. // Useful for commandline use cases. 
func WithNoLogging() Option { return func(nd *Node) { nd.quiet = true } } func toMultiAddr(ipfsPathOrMultiaddr string) (ma.Multiaddr, error) { if !filepath.IsAbs(ipfsPathOrMultiaddr) { // multiaddr always start with a slash, // this branch affects only file paths. var err error ipfsPathOrMultiaddr, err = filepath.Abs(ipfsPathOrMultiaddr) if err != nil { return nil, err } } if _, err := os.Stat(ipfsPathOrMultiaddr); err == nil { return setup.GetAPIAddrForPath(ipfsPathOrMultiaddr) } return ma.NewMultiaddr(ipfsPathOrMultiaddr) } // NewNode returns a new http based IPFS backend. func NewNode(ipfsPathOrMultiaddr string, fingerprint string, opts ...Option) (*Node, error) { nd := &Node{ allowNetOps: true, fingerprint: fingerprint, cache: &IpfsStateCache{ locallyCached: cache.New(5*time.Minute, 10*time.Minute), }, } for _, opt := range opts { opt(nd) } m, err := toMultiAddr(ipfsPathOrMultiaddr) if err != nil { return nil, err } if !nd.quiet { log.Infof("Connecting to IPFS HTTP API at %s", m.String()) } nd.sh = shell.NewShell(m.String()) versionString, _, err := nd.sh.Version() if err != nil && !nd.quiet { log.Warningf("failed to get version: %v", err) } version, err := semver.Parse(versionString) if err != nil && !nd.quiet { log.Warningf("failed to parse version string of IPFS (»%s«): %v", versionString, err) } if !nd.quiet { log.Infof("The IPFS version is »%s«.", version) if version.LT(semver.MustParse("0.4.18")) { log.Warningf("This version is quite old. 
Please update, if possible.\n") log.Warningf("We only test on newer versions (>= 0.4.18).\n") } } nd.version = &version if !nd.quiet { features, err := getExperimentalFeatures(nd.sh) if err != nil { log.Warningf("Failed to get experimental feature list: %v", err) } else { if !features["Libp2pStreamMounting"] { log.Warningf("Stream mounting does not seem to be enabled.") log.Warningf("Please execute the following to change that:") log.Warningf("$ ipfs config --json Experimental.Libp2pStreamMounting true") } } } return nd, nil } // IsOnline returns true if the node is in online mode and the daemon is reachable. func (nd *Node) IsOnline() bool { nd.mu.Lock() allowNetOps := nd.allowNetOps nd.mu.Unlock() return nd.sh.IsUp() && allowNetOps } // Connect implements Backend.Connect func (nd *Node) Connect() error { nd.mu.Lock() defer nd.mu.Unlock() nd.allowNetOps = true return nil } // Disconnect implements Backend.Disconnect func (nd *Node) Disconnect() error { nd.mu.Lock() defer nd.mu.Unlock() nd.allowNetOps = false return nil } func (nd *Node) isOnline() bool { nd.mu.Lock() defer nd.mu.Unlock() return nd.allowNetOps } // Close implements Backend.Close func (nd *Node) Close() error { return nil } // Name returns "httpipfs" as name of the backend. func (nd *Node) Name() string { return "httpipfs" } ================================================ FILE: backend/httpipfs/testing.go ================================================ package httpipfs import ( "fmt" "io/ioutil" "os" "os/exec" "testing" "time" shell "github.com/ipfs/go-ipfs-api" "github.com/stretchr/testify/require" ) // WithIpfs starts a new IPFS instance and calls `fn` with the API port to it. // `portOff` is the offset to add on all standard ports. 
func WithIpfs(t *testing.T, portOff int, fn func(t *testing.T, ipfsPath string)) { ipfsPath, err := ioutil.TempDir("", "brig-httpipfs-test-") require.Nil(t, err) defer os.RemoveAll(ipfsPath) gwtPort := 8081 + portOff swmPort := 4001 + portOff apiPort := 5011 + portOff os.Setenv("IPFS_PATH", ipfsPath) script := [][]string{ {"ipfs", "init"}, {"ipfs", "config", "--json", "Addresses.Swarm", fmt.Sprintf("[\"/ip4/127.0.0.1/tcp/%d\"]", swmPort)}, {"ipfs", "config", "--json", "Experimental.Libp2pStreamMounting", "true"}, {"ipfs", "config", "Addresses.API", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort)}, {"ipfs", "config", "Addresses.Gateway", fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gwtPort)}, } for _, line := range script { cmd := exec.Command(line[0], line[1:]...) cmd.Env = append(cmd.Env, fmt.Sprintf("IPFS_PATH=%s", ipfsPath)) err := cmd.Run() require.NoError(t, err) } daemonCmd := exec.Command("ipfs", "daemon", "--enable-pubsub-experiment") // daemonCmd.Stdout = os.Stdout // daemonCmd.Stderr = os.Stdout daemonCmd.Env = append(daemonCmd.Env, fmt.Sprintf("IPFS_PATH=%s", ipfsPath)) require.Nil(t, daemonCmd.Start()) defer func() { require.Nil(t, daemonCmd.Process.Kill()) }() // Wait until the daemon actually offers the API interface: localAddr := fmt.Sprintf("localhost:%d", apiPort) for tries := 0; tries < 200; tries++ { if shell.NewShell(localAddr).IsUp() { break } time.Sleep(100 * time.Millisecond) } // Actually call the test: fn(t, ipfsPath) } // WithDoubleIpfs starts two IPFS instances in parallel. 
func WithDoubleIpfs(t *testing.T, portOff int, fn func(t *testing.T, ipfsPathA, ipfsPathB string)) {
	// Each goroutine announces its repo path once the daemon is up and
	// then blocks on `stop`, keeping the daemon alive while `fn` runs.
	chPathA := make(chan string)
	chPathB := make(chan string)
	// Buffered with capacity 2 so both sends at the end never block.
	stop := make(chan bool, 2)

	// NOTE(review): WithIpfs calls require.* from these goroutines;
	// testify's FailNow is only reliable on the test goroutine -
	// confirm this is acceptable for a test helper.
	go WithIpfs(t, portOff, func(t *testing.T, ipfsPathA string) {
		chPathA <- ipfsPathA
		<-stop
	})

	go WithIpfs(t, portOff+1, func(t *testing.T, ipfsPathB string) {
		chPathB <- ipfsPathB
		<-stop
	})

	fn(t, <-chPathA, <-chPathB)
	stop <- true
	stop <- true
}

================================================
FILE: backend/httpipfs/testing_test.go
================================================
package httpipfs

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestIpfsStartup(t *testing.T) {
	t.Skipf("will be replaced by bash based e2e tests")

	WithIpfs(t, 1, func(t *testing.T, ipfsPath string) {
		// Smoke test: adding a fixed payload must yield its known CID.
		nd, err := NewNode(ipfsPath, "")
		require.Nil(t, err)

		hash, err := nd.Add(bytes.NewReader([]byte("hello")))
		require.Nil(t, err, fmt.Sprintf("%v", err))
		require.Equal(t, "QmWfVY9y3xjsixTgbd9AorQxH7VtMpzfx2HaWtsoUYecaX", hash.String())
	})
}

func TestDoubleIpfsStartup(t *testing.T) {
	t.Skipf("will be replaced by bash based e2e tests")

	WithDoubleIpfs(t, 1, func(t *testing.T, ipfsPathA, ipfsPathB string) {
		// Both daemons must come up and report distinct identities.
		ndA, err := NewNode(ipfsPathA, "")
		require.Nil(t, err)

		ndB, err := NewNode(ipfsPathB, "")
		require.Nil(t, err)

		idA, err := ndA.Identity()
		require.Nil(t, err, fmt.Sprintf("%v", err))

		idB, err := ndB.Identity()
		require.Nil(t, err)

		require.NotEqual(t, idA.Addr, idB.Addr)
	})
}

================================================
FILE: backend/httpipfs/version.go
================================================
package httpipfs

// VersionInfo holds version info (yeah, golint)
type VersionInfo struct {
	semVer, name, rev string
}

// SemVer returns a VersionInfo string complying semantic versioning
func (v *VersionInfo) SemVer() string { return v.semVer }

// Name returns the name of the backend
func (v *VersionInfo) Name() string { return v.name }

// Rev returns the git revision of the backend
func (v *VersionInfo) Rev() string { return v.rev } // Version returns detailed VersionInfo info as struct func (n *Node) Version() *VersionInfo { v, rev, err := n.sh.Version() if err != nil { return nil } return &VersionInfo{ semVer: v, name: "go-ipfs", rev: rev, } } ================================================ FILE: backend/mock/mock.go ================================================ package mock import ( "github.com/sahib/brig/catfs" eventsMock "github.com/sahib/brig/events/mock" netMock "github.com/sahib/brig/net/mock" repoMock "github.com/sahib/brig/repo/mock" ) // Backend is used for local testing. type Backend struct { *catfs.MemFsBackend *repoMock.RepoBackend *netMock.NetBackend *eventsMock.EventsBackend } // NewMockBackend returns a backend.Backend that operates only in memory // and does not use any resources outliving the own process, except the net // part which stores connection info on disk. func NewMockBackend(path, owner string) *Backend { return &Backend{ MemFsBackend: catfs.NewMemFsBackend(), RepoBackend: repoMock.NewMockRepoBackend(), NetBackend: netMock.NewNetBackend(path, owner), EventsBackend: eventsMock.NewEventsBackend(owner), } } // VersionInfo holds version info (yeah, golint) type VersionInfo struct { semVer, name, rev string } // SemVer returns a version string complying semantic versioning func (v *VersionInfo) SemVer() string { return v.semVer } // Name returns the name of the backend func (v *VersionInfo) Name() string { return v.name } // Rev returns the git revision of the backend func (v *VersionInfo) Rev() string { return v.rev } // Version returns detailed version info as struct func Version() *VersionInfo { return &VersionInfo{ semVer: "0.0.1", name: "mock", rev: "HEAD", } } ================================================ FILE: bench/bench.go ================================================ package bench import ( "bytes" "context" "fmt" "io" "io/ioutil" "math/rand" "os" "path/filepath" "runtime" "sort" "strings" "syscall" 
"time" "github.com/pkg/xattr" "github.com/sahib/brig/backend/httpipfs" "github.com/sahib/brig/catfs/mio" "github.com/sahib/brig/client" "github.com/sahib/brig/client/clienttest" "github.com/sahib/brig/fuse/fusetest" "github.com/sahib/brig/repo/hints" "github.com/sahib/brig/server" "github.com/sahib/brig/util/testutil" ) // Run is a single benchmark run type Run struct { Took time.Duration Allocs int64 CompressionRatio float32 } // Runs is a list of individual runs type Runs []Run // Average returns a fictional average run out of all runs func (runs Runs) Average() Run { sum := Run{} for _, run := range runs { sum.Took += run.Took sum.Allocs += run.Allocs sum.CompressionRatio += run.CompressionRatio } return Run{ Took: sum.Took / time.Duration(len(runs)), Allocs: sum.Allocs / int64(len(runs)), CompressionRatio: sum.CompressionRatio / float32(len(runs)), } } // Bench is the interface every benchmark needs to implement. type Bench interface { // SupportHints should return true for benchmarks where // passing hint influences the benchmark result. SupportHints() bool // CanBeVerified should return true when the test // can use the verifier (i.e. is a read test) CanBeVerified() bool // Bench should read the input from `r` and apply `hint` if applicable. // The time needed to process all of `r` should be returned. Bench(hint hints.Hint, size int64, r io.Reader, w io.Writer) (*Run, error) // Close should clean up the benchmark. 
Close() error } var ( dummyKey = make([]byte, 32) ) func withRunStats(size int64, fn func() (int64, error)) (*Run, error) { start := time.Now() var memBefore, memAfter runtime.MemStats runtime.ReadMemStats(&memBefore) written, err := fn() runtime.ReadMemStats(&memAfter) took := time.Since(start) return &Run{ Took: took, CompressionRatio: float32(written) / float32(size), Allocs: int64(memAfter.Mallocs) - int64(memBefore.Mallocs), }, err } ////////// type memcpyBench struct{} func newMemcpyBench(_ string, _ bool) (Bench, error) { return memcpyBench{}, nil } func (n memcpyBench) SupportHints() bool { return false } func (n memcpyBench) CanBeVerified() bool { return true } func (n memcpyBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { // NOTE: Use DumbCopy, since io.Copy would use the // ReadFrom of ioutil.Discard. This is lightning fast. // We want to measure actual time to copy in memory. return withRunStats(size, func() (int64, error) { return testutil.DumbCopy(verifier, r, false, false) }) } func (n memcpyBench) Close() error { return nil } ////////// type serverCommon struct { daemon *server.Server client *client.Client } func newServerCommon(ipfsPath string) (*serverCommon, error) { backendName := "mock" if ipfsPath != "" { backendName = "httpipfs" } srv, err := clienttest.StartDaemon("ali", backendName, ipfsPath) if err != nil { return nil, err } ctl, err := client.Dial(context.Background(), srv.DaemonURL()) if err != nil { return nil, err } return &serverCommon{ daemon: srv, client: ctl, }, nil } func (sc *serverCommon) Close() error { sc.daemon.Close() sc.client.Close() return nil } type serverStageBench struct { common *serverCommon } func newServerStageBench(ipfsPath string, _ bool) (Bench, error) { common, err := newServerCommon(ipfsPath) if err != nil { return nil, err } return &serverStageBench{common: common}, nil } func (s *serverStageBench) SupportHints() bool { return true } func (s *serverStageBench) 
CanBeVerified() bool { return false } func (s *serverStageBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { path := fmt.Sprintf("/path_%d", rand.Int31()) c := string(hint.CompressionAlgo) e := string(hint.EncryptionAlgo) if err := s.common.client.HintSet(path, &c, &e); err != nil { return nil, err } // That's just for cleaning up after each test. defer s.common.client.Remove(path) return withRunStats(size, func() (int64, error) { return size, s.common.client.StageFromReader(path, r) }) } func (s *serverStageBench) Close() error { return s.common.Close() } type serverCatBench struct { common *serverCommon } func newServerCatBench(ipfsPath string, _ bool) (Bench, error) { common, err := newServerCommon(ipfsPath) if err != nil { return nil, err } return &serverCatBench{common: common}, nil } func (s *serverCatBench) SupportHints() bool { return true } func (s *serverCatBench) CanBeVerified() bool { return true } func (s *serverCatBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { path := fmt.Sprintf("/path_%d", rand.Int31()) c := string(hint.CompressionAlgo) e := string(hint.EncryptionAlgo) if err := s.common.client.HintSet(path, &c, &e); err != nil { return nil, err } if err := s.common.client.StageFromReader(path, r); err != nil { return nil, err } // That's just for cleaning up after each test. 
defer s.common.client.Remove(path) return withRunStats(size, func() (int64, error) { stream, err := s.common.client.Cat(path, true) if err != nil { return 0, err } defer stream.Close() return testutil.DumbCopy(verifier, stream, false, false) }) } func (s *serverCatBench) Close() error { return s.common.Close() } ////////// type mioWriterBench struct{} func newMioWriterBench(_ string, _ bool) (Bench, error) { return &mioWriterBench{}, nil } func (m *mioWriterBench) SupportHints() bool { return true } func (m *mioWriterBench) CanBeVerified() bool { return false } func (m *mioWriterBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { stream, _, err := mio.NewInStream(r, "", dummyKey, hint) if err != nil { return nil, err } return withRunStats(size, func() (int64, error) { defer stream.Close() return testutil.DumbCopy(ioutil.Discard, stream, false, false) }) } func (m *mioWriterBench) Close() error { return nil } ////////// type mioReaderBench struct{} func newMioReaderBench(_ string, _ bool) (Bench, error) { return &mioReaderBench{}, nil } func (m *mioReaderBench) SupportHints() bool { return true } func (m *mioReaderBench) CanBeVerified() bool { return true } func (m *mioReaderBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { // Produce a buffer with encoded data in the right size. // This is not benched, only the reading of it is. inStream, _, err := mio.NewInStream(r, "", dummyKey, hint) if err != nil { return nil, err } defer inStream.Close() // Read it to memory before measuring. // We do not want to count the encoding in the bench time. 
streamData, err := ioutil.ReadAll(inStream) if err != nil { return nil, err } return withRunStats(size, func() (int64, error) { outStream, err := mio.NewOutStream( bytes.NewReader(streamData), hint.IsRaw(), dummyKey, ) if err != nil { return -1, err } defer outStream.Close() return testutil.DumbCopy(verifier, outStream, false, false) }) } func (m *mioReaderBench) Close() error { return nil } ////////// type ipfsAddOrCatBench struct { ipfsPath string isAdd bool } func newIPFSAddBench(ipfsPath string, isAdd bool) (Bench, error) { return &ipfsAddOrCatBench{ipfsPath: ipfsPath, isAdd: isAdd}, nil } func (ia *ipfsAddOrCatBench) SupportHints() bool { return false } func (ia *ipfsAddOrCatBench) CanBeVerified() bool { return !ia.isAdd } func (ia *ipfsAddOrCatBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { nd, err := httpipfs.NewNode(ia.ipfsPath, "") if err != nil { return nil, err } defer nd.Close() if ia.isAdd { return withRunStats(size, func() (int64, error) { _, err := nd.Add(r) return size, err }) } hash, err := nd.Add(r) if err != nil { return nil, err } return withRunStats(size, func() (int64, error) { stream, err := nd.Cat(hash) if err != nil { return -1, err } return testutil.DumbCopy(verifier, stream, false, false) }) } func (ia *ipfsAddOrCatBench) Close() error { return nil } ////////// type fuseWriteOrReadBench struct { ipfsPath string isWrite bool tmpDir string ctl *fusetest.Client proc *os.Process } func newFuseWriteOrReadBench(ipfsPath string, isWrite bool) (Bench, error) { tmpDir, err := ioutil.TempDir("", "brig-fuse-bench-*") if err != nil { return nil, err } unixSocket := "unix:" + filepath.Join(tmpDir, "socket") proc, err := fusetest.LaunchAsProcess(fusetest.Options{ MountPath: filepath.Join(tmpDir, "mount"), CatfsPath: filepath.Join(tmpDir, "catfs"), IpfsPathOrMultiaddr: ipfsPath, URL: unixSocket, }) if err != nil { return nil, err } // bit time to start things up: time.Sleep(500 * time.Millisecond) ctl, err := 
fusetest.Dial(unixSocket) if err != nil { return nil, err } return &fuseWriteOrReadBench{ ipfsPath: ipfsPath, isWrite: isWrite, tmpDir: tmpDir, proc: proc, ctl: ctl, }, nil } func (fb *fuseWriteOrReadBench) SupportHints() bool { return true } func (fb *fuseWriteOrReadBench) CanBeVerified() bool { return !fb.isWrite } func (fb *fuseWriteOrReadBench) Bench(hint hints.Hint, size int64, r io.Reader, verifier io.Writer) (*Run, error) { mountDir := filepath.Join(fb.tmpDir, "mount") testPath := filepath.Join(mountDir, fmt.Sprintf("/path_%d", rand.Int31())) const ( xattrEnc = "user.brig.hints.encryption" xattrZip = "user.brig.hints.compression" ) // Make sure hints are followed: if err := xattr.Set(mountDir, xattrEnc, []byte(hint.EncryptionAlgo)); err != nil { return nil, err } if err := xattr.Set(mountDir, xattrZip, []byte(hint.CompressionAlgo)); err != nil { return nil, err } took, err := withRunStats(size, func() (int64, error) { fd, err := os.OpenFile(testPath, os.O_CREATE|os.O_WRONLY, 0600) if err != nil { return -1, err } defer fd.Close() return testutil.DumbCopy(fd, r, false, false) }) if err != nil { return nil, err } if fb.isWrite { // test is done already, no need to read-back. return took, nil } took, err = withRunStats(size, func() (int64, error) { // NOTE: We have to use syscall.O_DIRECT here in order to // bypass the kernel page cache. 
The write above fills it with // data immediately, thus this read can yield 10x times higher // results (which you still might get in practice, if lucky) fd, err := os.OpenFile(testPath, os.O_RDONLY|syscall.O_DIRECT, 0600) if err != nil { return -1, err } defer fd.Close() return testutil.DumbCopy(verifier, fd, false, false) }) return took, err } func (fb *fuseWriteOrReadBench) Close() error { fb.ctl.QuitServer() time.Sleep(2 * time.Second) fb.proc.Signal(syscall.SIGTERM) var lastError error for retries := 0; retries < 10; retries++ { if err := os.RemoveAll(fb.tmpDir); err != nil { time.Sleep(200 * time.Millisecond) lastError = err continue } lastError = nil break } return lastError } ////////// var ( // Convention: // - If it's using ipfs, put it in the name. // - If it's writing things, put that in the name too as "write". benchMap = map[string]func(string, bool) (Bench, error){ "memcpy": newMemcpyBench, "brig-write-mem": newServerStageBench, "brig-read-mem": newServerCatBench, "brig-write-ipfs": newServerStageBench, "brig-read-ipfs": newServerCatBench, "mio-write": newMioWriterBench, "mio-read": newMioReaderBench, "ipfs-write": newIPFSAddBench, "ipfs-read": newIPFSAddBench, "fuse-write-mem": newFuseWriteOrReadBench, "fuse-write-ipfs": newFuseWriteOrReadBench, "fuse-read-mem": newFuseWriteOrReadBench, "fuse-read-ipfs": newFuseWriteOrReadBench, } ) // ByName returns the benchmark with this name, or an error // if none. If IPFS is used, it should be given as `ipfsPath`. func ByName(name, ipfsPath string) (Bench, error) { newBench, ok := benchMap[name] if !ok { return nil, fmt.Errorf("no such bench: %s", name) } return newBench(ipfsPath, strings.Contains(name, "write")) } // BenchmarkNames returns all possible benchmark names // in an defined & stable sorting. 
func BenchmarkNames() []string { names := []string{} for name := range benchMap { names = append(names, name) } sort.Slice(names, func(i, j int) bool { if names[i] == names[j] { return false } specials := []string{ "memcpy", "mio", } for _, special := range specials { v := strings.HasSuffix(names[i], special) if v || strings.HasSuffix(names[j], special) { return v } } return names[i] < names[j] }) return names } ================================================ FILE: bench/inputs.go ================================================ package bench import ( "bytes" "encoding/binary" "fmt" "io" "sort" "github.com/sahib/brig/util/testutil" ) // Verifier is a io.Writer that should be used for benchmarks // that read encoded data. It verifies that the data is actually // correct in the sense that it is equal to the original input. type Verifier interface { io.Writer // MissingBytes returns the diff of bytes to the original input. // This number can be negative when too much data was written. // Only 0 is a valid value after the benchmark finished. MissingBytes() int64 } // Input generates input for a benchmark. It defines how the data looks that // is fed to the streaming system. 
type Input interface { Reader(seed uint64) (io.Reader, error) Size() int64 Verifier() (Verifier, error) Close() error } func benchData(size uint64, name string) []byte { switch name { case "random": return testutil.CreateRandomDummyBuf(int64(size), 23) case "ten": return testutil.CreateDummyBuf(int64(size)) case "mixed": return testutil.CreateMixedDummyBuf(int64(size), 42) default: return nil } } ////////// type memVerifier struct { expect []byte counter int64 } func (m *memVerifier) Write(buf []byte) (int, error) { if int64(len(buf))+m.counter > int64(len(m.expect)) { return -1, fmt.Errorf("verify: got too much data") } slice := m.expect[m.counter : m.counter+int64(len(buf))] if !bytes.Equal(slice, buf) { return -1, fmt.Errorf("verify: data differs in block at %d", m.counter) } m.counter += int64(len(buf)) // Just nod off the data and let GC do the rest. return len(buf), nil } func (m *memVerifier) MissingBytes() int64 { return int64(len(m.expect)) - m.counter } type memInput struct { buf []byte } func newMemInput(size uint64, name string) Input { return &memInput{buf: benchData(size, name)} } func (ni *memInput) Reader(seed uint64) (io.Reader, error) { // Put a few bytes difference at the start to make the complete // stream different than the last seed. This is here to avoid // that consequent runs of a benchmark get speed ups because // they can cache inputs. 
binary.LittleEndian.PutUint64(ni.buf, seed) return bytes.NewReader(ni.buf), nil } func (ni *memInput) Verifier() (Verifier, error) { return &memVerifier{ expect: ni.buf, counter: 0, }, nil } func (ni *memInput) Size() int64 { return int64(len(ni.buf)) } func (ni *memInput) Close() error { return nil } ////////// var ( inputMap = map[string]func(size uint64) (Input, error){ "ten": func(size uint64) (Input, error) { return newMemInput(size, "ten"), nil }, "random": func(size uint64) (Input, error) { return newMemInput(size, "random"), nil }, "mixed": func(size uint64) (Input, error) { return newMemInput(size, "mixed"), nil }, } ) // InputByName fetches the input by it's name and returns an input // that will produce data with `size` bytes. func InputByName(name string, size uint64) (Input, error) { newInput, ok := inputMap[name] if !ok { return nil, fmt.Errorf("no such input: %s", name) } return newInput(size) } // InputNames returns the sorted list of all possible inputs. func InputNames() []string { names := []string{} for name := range inputMap { names = append(names, name) } sort.Strings(names) return names } ================================================ FILE: bench/runner.go ================================================ package bench import ( "fmt" "io/ioutil" "os" "os/signal" "runtime" "sort" "strings" "syscall" "time" "github.com/sahib/brig/repo/hints" "github.com/sahib/brig/repo/setup" log "github.com/sirupsen/logrus" ) // Config define how the benchmarks are run. type Config struct { InputName string `json:"input_name"` BenchName string `json:"bench_name"` Size uint64 `json:"size"` Encryption string `json:"encryption"` Compression string `json:"compression"` Samples int `json:"samples"` } // Result is the result of a single benchmark run. 
type Result struct {
	Name            string        `json:"name"`
	Config          Config        `json:"config"`
	Encryption      string        `json:"encryption"`
	Compression     string        `json:"compression"`
	Took            time.Duration `json:"took"`
	Throughput      float64       `json:"throughput"`
	CompressionRate float32       `json:"compression_rate"`
	Allocs          int64         `json:"allocs"`
}

// buildHints handles wildcards for compression and/or encryption.
// If no wildcards are specified, we just take what is set in `cfg`.
func buildHints(cfg Config) []hints.Hint {
	encIsWildcard := cfg.Encryption == "*"
	zipIsWildcard := cfg.Compression == "*"

	if encIsWildcard && zipIsWildcard {
		// Both are wildcards: run the full cross product.
		return hints.AllPossibleHints()
	}

	if encIsWildcard {
		// Fixed compression, every valid encryption algorithm:
		hs := []hints.Hint{}
		for _, encAlgo := range hints.ValidEncryptionHints() {
			hs = append(hs, hints.Hint{
				CompressionAlgo: hints.CompressionHint(cfg.Compression),
				EncryptionAlgo:  hints.EncryptionHint(encAlgo),
			})
		}

		return hs
	}

	if zipIsWildcard {
		// Fixed encryption, every valid compression algorithm:
		hs := []hints.Hint{}
		for _, zipAlgo := range hints.ValidCompressionHints() {
			hs = append(hs, hints.Hint{
				CompressionAlgo: hints.CompressionHint(zipAlgo),
				EncryptionAlgo:  hints.EncryptionHint(cfg.Encryption),
			})
		}

		return hs
	}

	// No wildcards: a single hint exactly as configured.
	return []hints.Hint{{
		CompressionAlgo: hints.CompressionHint(cfg.Compression),
		EncryptionAlgo:  hints.EncryptionHint(cfg.Encryption),
	}}
}

// sortHints brings `hs` into a deterministic order, so benchmark
// output is stable between runs.
func sortHints(hs []hints.Hint) []hints.Hint {
	sort.Slice(hs, func(i, j int) bool {
		return hs[i].Less(hs[j])
	})

	// sorts in-place, but also return for ease of use.
	return hs
}

// benchmarkSingle runs one benchmark config for every hint derived from
// it and calls `fn` with one averaged Result per hint. It can be
// aborted early via SIGINT.
func benchmarkSingle(cfg Config, fn func(result Result), ipfsPath string) error {
	in, err := InputByName(cfg.InputName, cfg.Size)
	if err != nil {
		return err
	}

	defer in.Close()

	out, err := ByName(cfg.BenchName, ipfsPath)
	if err != nil {
		return err
	}

	defer out.Close()

	// Allow aborting long benchmark runs with Ctrl-C:
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT)
	defer signal.Stop(sigCh)

	for _, hint := range sortHints(buildHints(cfg)) {
		// Non-blocking check for a pending interrupt:
		select {
		case <-sigCh:
			fmt.Println("Interrupted")
			return nil
		default:
			// just continue
		}

		supportsHints := out.SupportHints()
		if !supportsHints {
			// Indicate in output that nothing was encrypted or compressed.
			hint.CompressionAlgo = hints.CompressionNone
			hint.EncryptionAlgo = hints.EncryptionNone
		}

		if hint.CompressionAlgo == hints.CompressionGuess {
			// NOTE: We do not benchmark guessing here.
			// Simply reason is that we do not know from the output
			// which algorithm was actually used.
			continue
		}

		var runs Runs

		// probably doesn't do much, just to clean any leftover memory.
		runtime.GC()

		// The seed doubles as sample index; each sample gets a
		// slightly different input stream (see Input.Reader).
		for seed := uint64(0); seed < uint64(cfg.Samples); seed++ {
			r, err := in.Reader(seed)
			if err != nil {
				return err
			}

			v, err := in.Verifier()
			if err != nil {
				return err
			}

			run, err := out.Bench(hint, in.Size(), r, v)
			if err != nil {
				return err
			}

			runs = append(runs, *run)

			// Most write-only benchmarks cannot be verified, since
			// we modify the stream and the verifier checks that the stream
			// is equal to the input. Most read tests involve the same logic
			// as writing though, so the writer has to work for that.
			if out.CanBeVerified() {
				if missing := v.MissingBytes(); missing != 0 {
					log.Warnf("not all or too much data received in verify: %d", missing)
				}
			}
		}

		avgRun := runs.Average()

		// Throughput in MB/s (decimal megabytes):
		throughput := (float64(cfg.Size) / 1000 / 1000) / (float64(avgRun.Took) / float64(time.Second))
		fn(Result{
			Name:            fmt.Sprintf("%s:%s_%s", cfg.BenchName, cfg.InputName, hint),
			Encryption:      string(hint.EncryptionAlgo),
			Compression:     string(hint.CompressionAlgo),
			Config:          cfg,
			Took:            avgRun.Took,
			Throughput:      throughput,
			CompressionRate: avgRun.CompressionRatio,
			Allocs:          avgRun.Allocs,
		})

		if !supportsHints {
			// If there are no hints there is no point.
			// of repeating the benchmark several times.
			break
		}
	}

	return nil
}

// IPFS is expensive to set-up, so let's do it only once.
func ipfsIsNeeded(cfgs []Config) bool {
	for _, cfg := range cfgs {
		if strings.Contains(strings.ToLower(cfg.BenchName), "ipfs") {
			return true
		}
	}

	return false
}

// Benchmark runs the benchmarks specified by `cfgs` and call `fn` on each result.
func Benchmark(cfgs []Config, fn func(result Result)) error {
	needsIPFS := ipfsIsNeeded(cfgs)

	var result *setup.Result
	if needsIPFS {
		var err error
		log.Infof("Setting up IPFS for the benchmarks...")
		ipfsPath, err := ioutil.TempDir("", "brig-iobench-ipfs-repo-*")
		if err != nil {
			return err
		}

		result, err = setup.IPFS(setup.Options{
			LogWriter:        ioutil.Discard,
			Setup:            true,
			SetDefaultConfig: true,
			SetExtraConfig:   true,
			IpfsPath:         ipfsPath,
			InitProfile:      "test",
		})

		if err != nil {
			return err
		}
	}

	for _, cfg := range cfgs {
		var ipfsPath string
		if result != nil {
			ipfsPath = result.IpfsPath
		}

		if err := benchmarkSingle(cfg, fn, ipfsPath); err != nil {
			return err
		}
	}

	if needsIPFS {
		// Tear down the temporary IPFS repo and its daemon process.
		// NOTE(review): error returns above skip this cleanup and leak
		// the temp dir/daemon — consider a deferred cleanup.
		if result.IpfsPath != "" {
			os.RemoveAll(result.IpfsPath)
		}

		if result.PID > 0 {
			proc, err := os.FindProcess(result.PID)
			if err != nil {
				log.WithError(err).Warnf("failed to get IPFS PID")
			} else {
				if err := proc.Kill(); err != nil {
					log.WithError(err).Warnf("failed to kill IPFS PID")
				}
			}
		}
	}

	return nil
}
================================================
FILE: bench/stats.go
================================================
package bench

import (
	"time"

	"github.com/klauspost/cpuid/v2"
)

// Stats are system statistics that might influence the benchmark result.
type Stats struct {
	Time         time.Time `json:"time"`
	CPUBrandName string    `json:"cpu_brand_name"`
	LogicalCores int       `json:"logical_cores"`
	HasAESNI     bool      `json:"has_aesni"`
}

// FetchStats returns the current statistics.
func FetchStats() Stats {
	return Stats{
		Time:         time.Now(),
		CPUBrandName: cpuid.CPU.BrandName,
		LogicalCores: cpuid.CPU.LogicalCores,
		HasAESNI:     cpuid.CPU.Supports(cpuid.AESNI),
	}
}

================================================
FILE: brig.go
================================================
package main

import (
	"os"

	"github.com/sahib/brig/cmd"
)

// main hands the command line over to the cmd package and exits
// with whatever status code it returns.
func main() {
	os.Exit(cmd.RunCmdline(os.Args))
}

================================================
FILE: catfs/backend.go
================================================
package catfs

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/sahib/brig/catfs/mio"
	"github.com/sahib/brig/catfs/mio/chunkbuf"
	h "github.com/sahib/brig/util/hashlib"
	"github.com/sahib/brig/util/testutil"
)

// ErrNoSuchHash should be returned whenever the backend is unable
// to find an object referenced to by this hash.
type ErrNoSuchHash struct {
	what h.Hash
}

// Error implements the error interface.
func (eh ErrNoSuchHash) Error() string {
	return fmt.Sprintf("No such hash: %s", eh.what.B58String())
}

// FsBackend is the interface that needs to be implemented by the data
// management layer.
type FsBackend interface {
	// Cat should find the object referenced to by `hash` and
	// make its data available as mio.Stream.
	Cat(hash h.Hash) (mio.Stream, error)

	// Add should read all data in `r` and return the hash under
	// which it can be accessed on later.
	Add(r io.Reader) (h.Hash, error)

	// Pin gives the object at `hash` a "pin".
	// (i.e. it marks the file to be stored indefinitely in local storage)
	// When pinning an explicit pin with an implicit pin, the explicit pin
	// will stay. Upgrading from implicit to explicit is possible though.
	Pin(hash h.Hash) error

	// Unpin removes a previously added pin.
	// If an object is already unpinned this is a no op.
	Unpin(hash h.Hash) error

	// IsPinned checks if the file is pinned.
	IsPinned(hash h.Hash) (bool, error)

	// IsCached checks if the file contents are available locally.
	IsCached(hash h.Hash) (bool, error)

	// CachedSize returns the backend size for a given hash
	// Negative indicates that cachedSize is unknown
	CachedSize(hash h.Hash) (int64, error)
}

// MemFsBackend is a mock structure that implements FsBackend.
type MemFsBackend struct {
	// data maps base58 hash -> stored content.
	data map[string][]byte
	// pins maps base58 hash -> pin marker.
	pins map[string]bool
}

// NewMemFsBackend returns a MemFsBackend (useful for writing tests)
func NewMemFsBackend() *MemFsBackend {
	return &MemFsBackend{
		data: make(map[string][]byte),
		pins: make(map[string]bool),
	}
}

// Cat implements FsBackend.Cat by querying memory.
func (mb *MemFsBackend) Cat(hash h.Hash) (mio.Stream, error) {
	data, ok := mb.data[hash.B58String()]
	if !ok {
		return nil, ErrNoSuchHash{hash}
	}

	chunkBuf := chunkbuf.NewChunkBuffer(data)

	// Randomize read chunk sizes to make the mock stream behave less
	// predictably, like a real backend would.
	randRead := testutil.RandomizeReads(chunkBuf, 512, true)
	return struct {
		io.Reader
		io.Seeker
		io.Closer
		io.WriterTo
	}{
		Reader:   randRead,
		Seeker:   chunkBuf,
		WriterTo: chunkBuf,
		Closer:   ioutil.NopCloser(chunkBuf),
	}, nil
}

// Add implements FsBackend.Add by storing the data in memory.
func (mb *MemFsBackend) Add(r io.Reader) (h.Hash, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	hash := h.SumWithBackendHash(data)
	mb.data[hash.B58String()] = data
	return hash, nil
}

// Pin implements FsBackend.Pin by storing a marker in memory.
func (mb *MemFsBackend) Pin(hash h.Hash) error {
	mb.pins[hash.B58String()] = true
	return nil
}

// Unpin implements FsBackend.Unpin by removing a marker in memory.
func (mb *MemFsBackend) Unpin(hash h.Hash) error { mb.pins[hash.B58String()] = false return nil } // IsPinned implements FsBackend.IsPinned by querying a marker in memory. func (mb *MemFsBackend) IsPinned(hash h.Hash) (bool, error) { isPinned, ok := mb.pins[hash.B58String()] if !ok { return false, nil } return isPinned, nil } // IsCached implements FsBackend.IsCached by checking if the file exists. // If hash found, the file is always cached. func (mb *MemFsBackend) IsCached(hash h.Hash) (bool, error) { _, ok := mb.data[hash.B58String()] return ok, nil } // CachedSize implements FsBackend.CachedSize by returnig data size // If hash found, the file is always cached. func (mb *MemFsBackend) CachedSize(hash h.Hash) (int64, error) { data, ok := mb.data[hash.B58String()] if !ok { return -1, nil // negative indicates unknown size } return int64(len(data)), nil } ================================================ FILE: catfs/capnp/pinner.capnp ================================================ using Go = import "/go.capnp"; @0xba762188b0a6e4cf; $Go.package("capnp"); $Go.import("github.com/sahib/brig/catfs/capnp"); struct Pin { inode @0 :UInt64; isPinned @1 :Bool; } struct PinEntry $Go.doc("A single entry for a certain content node") { # Following attributes will be part of the hash: pins @0 :List(Pin); } ================================================ FILE: catfs/capnp/pinner.capnp.go ================================================ // Code generated by capnpc-go. DO NOT EDIT. package capnp import ( capnp "zombiezen.com/go/capnproto2" text "zombiezen.com/go/capnproto2/encoding/text" schemas "zombiezen.com/go/capnproto2/schemas" ) type Pin struct{ capnp.Struct } // Pin_TypeID is the unique identifier for the type Pin. 
const Pin_TypeID = 0x985d53e01674ee95 func NewPin(s *capnp.Segment) (Pin, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) return Pin{st}, err } func NewRootPin(s *capnp.Segment) (Pin, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) return Pin{st}, err } func ReadRootPin(msg *capnp.Message) (Pin, error) { root, err := msg.RootPtr() return Pin{root.Struct()}, err } func (s Pin) String() string { str, _ := text.Marshal(0x985d53e01674ee95, s.Struct) return str } func (s Pin) Inode() uint64 { return s.Struct.Uint64(0) } func (s Pin) SetInode(v uint64) { s.Struct.SetUint64(0, v) } func (s Pin) IsPinned() bool { return s.Struct.Bit(64) } func (s Pin) SetIsPinned(v bool) { s.Struct.SetBit(64, v) } // Pin_List is a list of Pin. type Pin_List struct{ capnp.List } // NewPin creates a new list of Pin. func NewPin_List(s *capnp.Segment, sz int32) (Pin_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}, sz) return Pin_List{l}, err } func (s Pin_List) At(i int) Pin { return Pin{s.List.Struct(i)} } func (s Pin_List) Set(i int, v Pin) error { return s.List.SetStruct(i, v.Struct) } func (s Pin_List) String() string { str, _ := text.MarshalList(0x985d53e01674ee95, s.List) return str } // Pin_Promise is a wrapper for a Pin promised by a client call. type Pin_Promise struct{ *capnp.Pipeline } func (p Pin_Promise) Struct() (Pin, error) { s, err := p.Pipeline.Struct() return Pin{s}, err } // A single entry for a certain content node type PinEntry struct{ capnp.Struct } // PinEntry_TypeID is the unique identifier for the type PinEntry. 
const PinEntry_TypeID = 0xdb74f7cf7bc815c6 func NewPinEntry(s *capnp.Segment) (PinEntry, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) return PinEntry{st}, err } func NewRootPinEntry(s *capnp.Segment) (PinEntry, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) return PinEntry{st}, err } func ReadRootPinEntry(msg *capnp.Message) (PinEntry, error) { root, err := msg.RootPtr() return PinEntry{root.Struct()}, err } func (s PinEntry) String() string { str, _ := text.Marshal(0xdb74f7cf7bc815c6, s.Struct) return str } func (s PinEntry) Pins() (Pin_List, error) { p, err := s.Struct.Ptr(0) return Pin_List{List: p.List()}, err } func (s PinEntry) HasPins() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s PinEntry) SetPins(v Pin_List) error { return s.Struct.SetPtr(0, v.List.ToPtr()) } // NewPins sets the pins field to a newly // allocated Pin_List, preferring placement in s's segment. func (s PinEntry) NewPins(n int32) (Pin_List, error) { l, err := NewPin_List(s.Struct.Segment(), n) if err != nil { return Pin_List{}, err } err = s.Struct.SetPtr(0, l.List.ToPtr()) return l, err } // PinEntry_List is a list of PinEntry. type PinEntry_List struct{ capnp.List } // NewPinEntry creates a new list of PinEntry. func NewPinEntry_List(s *capnp.Segment, sz int32) (PinEntry_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) return PinEntry_List{l}, err } func (s PinEntry_List) At(i int) PinEntry { return PinEntry{s.List.Struct(i)} } func (s PinEntry_List) Set(i int, v PinEntry) error { return s.List.SetStruct(i, v.Struct) } func (s PinEntry_List) String() string { str, _ := text.MarshalList(0xdb74f7cf7bc815c6, s.List) return str } // PinEntry_Promise is a wrapper for a PinEntry promised by a client call. 
type PinEntry_Promise struct{ *capnp.Pipeline } func (p PinEntry_Promise) Struct() (PinEntry, error) { s, err := p.Pipeline.Struct() return PinEntry{s}, err } const schema_ba762188b0a6e4cf = "x\xda\\\xd0\xb1K\xebP\x14\x06\xf0\xef\xbbI_[" + "x\xef\xb5WT\xe8\xd4\x08]\x14\xb5\xd6I\\l\x07" + "\x07\x05!Wg\x85\x90\xa6% 7!\xb9(\xc5\x7f" + "@\\Eps\x13\xdc\xdc\xa4\x82\xa3\xe2\xd6\xcd\xc5\xc5" + "\xc1I\xd0\xd51\x92\xc5\x8a\xd3\x81\x8f\xc3\xf9\x1d\xbej" + "\xbf-d\xe1\x06P\xa5\xc2\x9f\xec\xec\xc3L\xbf\xec\xec" + "\x9eC\xd5(\xb2\xd1\xeb\xe5\xf5\xf1\xcc\xc1-\xec\" " + ";or+\x9f\x1b\x87`\xf60\xf5x4\xfa4\xcf" + "\x905\x8e\xf7\x0a,\x02\xad\xab\x09\xcaaQ\x0e\xeb\xf2" + "}\x0d\xcc|\xcf\xf4\xd2\xa6\xef\x89X\xc7\xcd8\xd4:" + "H\x16}/\xd6qe\xd5\x0d\xb5K\xaa\x92e\x036" + "\x019\xbb\x0c\xa8\x86E\xb5$(\xd9\x9ed\x1e.l" + "\x02j\xde\xa2Z\x11\xac\x87:\xea\x06,C\xb0\x0cf" + "a\xea\xe6\x07\xbb\x00H\x08\xf2\x87g\xfd\xf6rn]" + "\x9b\x84\x83\x1c\xb5)\xb2\xbd\xd3\x0bu\xf7tr\x0fe" + "\x0bv\x1a\xe4_\xa0\xc5mf\x1d'\x0du\x7f?\xb0" + "\x9d@\x9bd\xe0\xf4\xa2\xc4\xf1\x1c?H\x8c\x17j\xc7" + "\x8f\xb4\x09\xb4qt\xd4e\x00(\xfb\xfb\xff\x7fsy" + "\x91\x16UC\xb0\x12\x87:\xe5\x7f\xd0\xb5\xc8\xea\xb8Z" + "0\x0f\xbf\x02\x00\x00\xff\xff\x05\xdde\x03" func init() { schemas.Register(schema_ba762188b0a6e4cf, 0x985d53e01674ee95, 0xdb74f7cf7bc815c6) } ================================================ FILE: catfs/core/coreutils.go ================================================ package core import ( "errors" "fmt" "path" "strings" "time" e "github.com/pkg/errors" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" h "github.com/sahib/brig/util/hashlib" log "github.com/sirupsen/logrus" ) var ( // ErrIsGhost is returned by Remove() when calling it on a ghost. ErrIsGhost = errors.New("Is a ghost") ) // mkdirParents takes the dirname of repoPath and makes sure all intermediate // directories are created. The last directory will be returned. // If any directory exist already, it will not be touched. 
// You can also think of it as mkdir -p.
func mkdirParents(lkr *Linker, repoPath string) (*n.Directory, error) {
	repoPath = path.Clean(repoPath)

	elems := strings.Split(repoPath, "/")
	for idx := 0; idx < len(elems)-1; idx++ {
		dirname := strings.Join(elems[:idx+1], "/")
		if dirname == "" {
			dirname = "/"
		}

		dir, err := Mkdir(lkr, dirname, false)
		if err != nil {
			return nil, err
		}

		// Return it, if it's the last path component:
		if idx+1 == len(elems)-1 {
			return dir, nil
		}
	}

	return nil, fmt.Errorf("Empty path given")
}

// Mkdir creates the directory at repoPath and any intermediate directories if
// createParents is true. It will fail if there is already a file at `repoPath`
// and it is not a directory.
func Mkdir(lkr *Linker, repoPath string, createParents bool) (dir *n.Directory, err error) {
	dirname, basename := path.Split(repoPath)

	// Take special care of the root node:
	if basename == "" {
		return lkr.Root()
	}

	// Check if the parent exists:
	parent, lerr := lkr.LookupDirectory(dirname)
	if lerr != nil && !ie.IsNoSuchFileError(lerr) {
		err = e.Wrap(lerr, "dirname lookup failed")
		return
	}

	// NOTE(review): the closure's first return value presumably signals
	// "rollback" when true — confirm with Linker.Atomic.
	err = lkr.Atomic(func() (bool, error) {
		// If it's nil, we might need to create it:
		if parent == nil {
			if !createParents {
				return false, ie.NoSuchFile(dirname)
			}

			parent, err = mkdirParents(lkr, repoPath)
			if err != nil {
				return true, err
			}
		}

		child, err := parent.Child(lkr, basename)
		if err != nil {
			return true, err
		}

		if child != nil {
			switch child.Type() {
			case n.NodeTypeDirectory:
				// Nothing to do really. Return the old child.
				dir = child.(*n.Directory)
				return false, nil
			case n.NodeTypeFile:
				return true, fmt.Errorf("`%s` exists and is a file", repoPath)
			case n.NodeTypeGhost:
				// Remove the ghost and continue with adding:
				if err := parent.RemoveChild(lkr, child); err != nil {
					return true, err
				}
			default:
				return true, ie.ErrBadNode
			}
		}

		// Create it then!
		dir, err = n.NewEmptyDirectory(lkr, parent, basename, lkr.owner, lkr.NextInode())
		if err != nil {
			return true, err
		}

		if err := lkr.StageNode(dir); err != nil {
			return true, e.Wrapf(err, "stage dir")
		}

		log.Debugf("mkdir: %s", dirname)
		return false, nil
	})

	return
}

// Remove removes a single node from a directory.
// `nd` is the node that shall be removed and may not be root.
// The parent directory is returned.
func Remove(lkr *Linker, nd n.ModNode, createGhost, force bool) (parentDir *n.Directory, ghost *n.Ghost, err error) {
	if !force && nd.Type() == n.NodeTypeGhost {
		err = ErrIsGhost
		return
	}

	parentDir, err = n.ParentDirectory(lkr, nd)
	if err != nil {
		return
	}

	// We shouldn't delete the root directory
	// (only directory with a parent)
	if parentDir == nil {
		err = fmt.Errorf("refusing to delete root")
		return
	}

	err = lkr.Atomic(func() (bool, error) {
		if err := parentDir.RemoveChild(lkr, nd); err != nil {
			return true, fmt.Errorf("failed to remove child: %v", err)
		}

		lkr.MemIndexPurge(nd)

		if err := lkr.StageNode(parentDir); err != nil {
			return true, err
		}

		if createGhost {
			// Leave a ghost behind so the removal can be tracked
			// (used e.g. by Move below).
			newGhost, err := n.MakeGhost(nd, lkr.NextInode())
			if err != nil {
				return true, err
			}

			if err := parentDir.Add(lkr, newGhost); err != nil {
				return true, err
			}

			if err := lkr.StageNode(newGhost); err != nil {
				return true, err
			}

			ghost = newGhost
			return false, nil
		}

		return false, nil
	})

	return
}

// prepareParent tries to figure out the correct parent directory when attempting
// to move `nd` to `dstPath`. It also removes any nodes that are "in the way" if possible.
func prepareParent(lkr *Linker, nd n.ModNode, dstPath string) (*n.Directory, error) {
	// Check if the destination already exists:
	destNode, err := lkr.LookupModNode(dstPath)
	if err != nil && !ie.IsNoSuchFileError(err) {
		return nil, err
	}

	if destNode == nil {
		// No node at this place yet, attempt to look it up.
		return lkr.LookupDirectory(path.Dir(dstPath))
	}

	switch destNode.Type() {
	case n.NodeTypeDirectory:
		// Move inside of this directory.
		// Check if there is already a file
		destDir, ok := destNode.(*n.Directory)
		if !ok {
			return nil, ie.ErrBadNode
		}

		child, err := destDir.Child(lkr, nd.Name())
		if err != nil {
			return nil, err
		}

		// Oh, something is in there?
		if child != nil {
			if nd.Type() == n.NodeTypeFile {
				return nil, fmt.Errorf(
					"cannot overwrite a directory (%s) with a file (%s)",
					destNode.Path(),
					child.Path(),
				)
			}

			childDir, ok := child.(*n.Directory)
			if !ok {
				return nil, ie.ErrBadNode
			}

			if childDir.Size() > 0 {
				return nil, fmt.Errorf(
					"cannot move over: %s; directory is not empty",
					child.Path(),
				)
			}

			// Okay, there is an empty directory. Let's remove it to
			// replace it with our source node.
			log.Warningf("Remove child dir: %v", childDir)
			if _, _, err := Remove(lkr, childDir, false, false); err != nil {
				return nil, err
			}
		}

		return destDir, nil
	case n.NodeTypeFile:
		log.Infof("Remove file: %v", destNode.Path())
		parentDir, _, err := Remove(lkr, destNode, false, false)
		return parentDir, err
	case n.NodeTypeGhost:
		// It is already a ghost. Overwrite it and do not create a new one.
		log.Infof("Remove ghost: %v", destNode.Path())
		parentDir, _, err := Remove(lkr, destNode, false, true)
		return parentDir, err
	default:
		return nil, ie.ErrBadNode
	}
}

// Copy copies the node `nd` to the path at `dstPath`.
func Copy(lkr *Linker, nd n.ModNode, dstPath string) (newNode n.ModNode, err error) {
	// Forbid moving a node inside of one of it's subdirectories.
	if nd.Path() == dstPath {
		err = fmt.Errorf("source and dest are the same file: %v", dstPath)
		return
	}

	if strings.HasPrefix(path.Dir(dstPath), nd.Path()) {
		err = fmt.Errorf(
			"cannot copy `%s` into it's own subdir `%s`",
			nd.Path(),
			dstPath,
		)
		return
	}

	err = lkr.Atomic(func() (bool, error) {
		parentDir, err := prepareParent(lkr, nd, dstPath)
		if err != nil {
			return true, e.Wrapf(err, "handle parent")
		}

		// We might copy something into a directory.
		// In this case, dstPath specifies the directory we move into,
		// not the file we moved to (which we need here)
		if parentDir.Path() == dstPath {
			dstPath = path.Join(parentDir.Path(), path.Base(nd.Path()))
		}

		// And add it to the right destination dir:
		newNode = nd.Copy(lkr.NextInode())
		newNode.SetName(path.Base(dstPath))

		if err := newNode.SetParent(lkr, parentDir); err != nil {
			return true, e.Wrapf(err, "set parent")
		}

		if err := newNode.NotifyMove(lkr, parentDir, newNode.Path()); err != nil {
			return true, e.Wrapf(err, "notify move")
		}

		return false, lkr.StageNode(newNode)
	})

	return
}

// Move moves the node `nd` to the path at `dstPath` and leaves
// a ghost at the old place.
func Move(lkr *Linker, nd n.ModNode, dstPath string) error {
	// Forbid moving a node inside of one of it's subdirectories.
	if nd.Type() == n.NodeTypeGhost {
		return errors.New("cannot move ghosts")
	}

	if nd.Path() == dstPath {
		return fmt.Errorf("Source and Dest are the same file: %v", dstPath)
	}

	if strings.HasPrefix(path.Dir(dstPath), nd.Path()) {
		return fmt.Errorf(
			"Cannot move `%s` into it's own subdir `%s`",
			nd.Path(),
			dstPath,
		)
	}

	return lkr.Atomic(func() (bool, error) {
		parentDir, err := prepareParent(lkr, nd, dstPath)
		if err != nil {
			return true, err
		}

		// Remove the old node:
		oldPath := nd.Path()
		_, ghost, err := Remove(lkr, nd, true, true)
		if err != nil {
			return true, e.Wrapf(err, "remove old")
		}

		if parentDir.Path() == dstPath {
			dstPath = path.Join(parentDir.Path(), path.Base(oldPath))
		}

		// The node needs to be told that it's path changed,
		// since it might need to change it's hash value now.
		if err := nd.NotifyMove(lkr, parentDir, dstPath); err != nil {
			return true, e.Wrapf(err, "notify move")
		}

		// Re-stage the whole subtree below the moved node:
		err = n.Walk(lkr, nd, true, func(child n.Node) error {
			return e.Wrapf(lkr.StageNode(child), "stage node")
		})

		if err != nil {
			return true, err
		}

		// Link old ghost and moved node so history can follow the move:
		if err := lkr.AddMoveMapping(nd.Inode(), ghost.Inode()); err != nil {
			return true, e.Wrapf(err, "add move mapping")
		}

		return false, nil
	})
}

// StageFromFileNode is a convinience helper that will call Stage() with all necessary params from `f`.
func StageFromFileNode(lkr *Linker, f *n.File) (*n.File, error) {
	return Stage(
		lkr,
		f.Path(),
		f.ContentHash(),
		f.BackendHash(),
		f.Size(),
		f.CachedSize(),
		f.Key(),
		f.ModTime(),
		f.IsRaw(),
	)
}

// Stage adds a file to brigs DAG this is lesser version since it does not use cachedSize
// Do not use it if you can, use StageWithFullInfo couple lines below!
// TODO rename Stage calls everywhere (especially in tests) and then
// rename Stage -> StageWithoutCacheSize, and StageWithFullInfo -> Stage
func Stage(
	lkr *Linker,
	repoPath string,
	contentHash, backendHash h.Hash,
	size uint64,
	cachedSize int64,
	key []byte,
	modTime time.Time,
	isRaw bool,
) (file *n.File, err error) {
	node, lerr := lkr.LookupNode(repoPath)
	if lerr != nil && !ie.IsNoSuchFileError(lerr) {
		err = lerr
		return
	}

	err = lkr.Atomic(func() (bool, error) {
		if node != nil {
			if node.Type() == n.NodeTypeGhost {
				// A ghost is in the way: remove it and pretend the
				// path was empty.
				ghostParent, err := n.ParentDirectory(lkr, node)
				if err != nil {
					return true, err
				}

				if ghostParent == nil {
					return true, fmt.Errorf(
						"bug: %s has no parent. Is root a ghost?",
						node.Path(),
					)
				}

				if err := ghostParent.RemoveChild(lkr, node); err != nil {
					return true, err
				}

				// Act like there was no previous node.
				// New node will have a different Inode.
				file = nil
			} else {
				var ok bool
				file, ok = node.(*n.File)
				if !ok {
					return true, ie.ErrBadNode
				}
			}
		}

		needRemove := false
		if file != nil {
			// We know this file already.
			log.WithFields(log.Fields{"file": repoPath}).Info("File exists; modifying.")
			needRemove = true

			if file.BackendHash().Equal(backendHash) {
				log.Debugf("Hash was not modified. Not doing any update.")
				return false, nil
			}
		} else {
			parent, err := mkdirParents(lkr, repoPath)
			if err != nil {
				return true, err
			}

			// Create a new file at specified path:
			file = n.NewEmptyFile(parent, path.Base(repoPath), lkr.owner, lkr.NextInode())
		}

		parentDir, err := n.ParentDirectory(lkr, file)
		if err != nil {
			return true, err
		}

		if parentDir == nil {
			return true, fmt.Errorf("%s has no parent yet (BUG)", repoPath)
		}

		if needRemove {
			// Remove the child before changing the hash:
			if err := parentDir.RemoveChild(lkr, file); err != nil {
				return true, err
			}
		}

		file.SetSize(size)
		file.SetCachedSize(cachedSize)
		file.SetModTime(modTime)
		file.SetContent(lkr, contentHash)
		file.SetBackend(lkr, backendHash)
		file.SetKey(key)
		file.SetUser(lkr.owner)
		file.SetIsRaw(isRaw)

		// Add it again when the hash was changed.
		log.Debugf("adding %s (%v)", file.Path(), file.BackendHash())
		if err := parentDir.Add(lkr, file); err != nil {
			return true, err
		}

		if err := lkr.StageNode(file); err != nil {
			return true, err
		}

		return false, nil
	})

	return
}

// Log will call `fn` on every commit we currently have, starting
// with the most current one (CURR, then HEAD, ...).
// If `fn` will return an error, the iteration is being stopped.
func Log(lkr *Linker, start *n.Commit, fn func(cmt *n.Commit) error) error {
	curr := start
	for curr != nil {
		// Visit the current commit; a non-nil error aborts the walk.
		if err := fn(curr); err != nil {
			return err
		}

		parent, err := curr.Parent(lkr)
		if err != nil {
			return err
		}

		// The very first commit has no parent; we are done.
		if parent == nil {
			break
		}

		// Parents of commits must be commits themselves.
		parentCmt, ok := parent.(*n.Commit)
		if !ok {
			return ie.ErrBadNode
		}

		curr = parentCmt
	}

	return nil
}

================================================
FILE: catfs/core/coreutils_test.go
================================================
package core

import (
	"path"
	"sort"
	"strings"
	"testing"
	"time"

	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	h "github.com/sahib/brig/util/hashlib"
	"github.com/stretchr/testify/require"
)

// TestMkdir checks Mkdir() with and without the "create parents"
// flag, i.e. the equivalent of `mkdir` vs. `mkdir -p`.
func TestMkdir(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		// Test nested creation without -p like flag:
		dir, err := Mkdir(lkr, "/deep/nested", false)
		if err == nil || dir != nil {
			t.Fatalf("Nested mkdir without -p should have failed: %v", err)
		}

		AssertDir(t, lkr, "/", true)
		AssertDir(t, lkr, "/deep", false)
		AssertDir(t, lkr, "/deep/nested", false)

		// Test mkdir -p like creating of nested dirs:
		dir, err = Mkdir(lkr, "/deep/nested", true)
		if err != nil {
			t.Fatalf("mkdir -p failed: %v", err)
		}

		AssertDir(t, lkr, "/", true)
		AssertDir(t, lkr, "/deep", true)
		AssertDir(t, lkr, "/deep/nested", true)

		// Attempt to mkdir the same directory once more:
		dir, err = Mkdir(lkr, "/deep/nested", true)
		if err != nil {
			t.Fatalf("second mkdir -p failed: %v", err)
		}

		// Also without -p, it should just return the respective dir.
		// (i.e. work like LookupDirectory)
		// Note: This is a difference to the traditional mkdir.
dir, err = Mkdir(lkr, "/deep/nested", false) if err != nil { t.Fatalf("second mkdir without -p failed: %v", err) } // See if an attempt at creating the root failed, // should not and just work like lkr.LookupDirectory("/") dir, err = Mkdir(lkr, "/", false) if err != nil { t.Fatalf("mkdir root failed (without -p): %v", err) } root, err := lkr.Root() if err != nil { t.Fatalf("Failed to retrieve root: %v", err) } if !dir.TreeHash().Equal(root.TreeHash()) { t.Fatal("Root and mkdir('/') differ!") } // Try to mkdir over a regular file: MustTouch(t, lkr, "/cat.png", 1) // This should fail, since we cannot create it. dir, err = Mkdir(lkr, "/cat.png", false) if err == nil { t.Fatal("Creating directory on file should have failed!") } // Same even for -p dir, err = Mkdir(lkr, "/cat.png", true) if err == nil { t.Fatal("Creating directory on file should have failed!") } }) } func TestRemove(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { dir, err := Mkdir(lkr, "/some/nested/directory", true) if err != nil { t.Fatalf("Failed to mkdir a nested directory: %v", err) } AssertDir(t, lkr, "/some/nested/directory", true) path := "/some/nested/directory/cat.png" MustTouch(t, lkr, path, 1) // Check file removal with ghost creation: file, err := lkr.LookupFile(path) if err != nil { t.Fatalf("Failed to lookup nested file: %v", err) } // Fill in a dummy file hash, so we get a ghost instance parentDir, _, err := Remove(lkr, file, true, false) if err != nil { t.Fatalf("Remove failed: %v", err) } if !parentDir.TreeHash().Equal(dir.TreeHash()) { t.Fatalf("Hash differs on %s and %s", dir.Path(), parentDir.TreeHash()) } // Check that a ghost was created for the removed file: ghost, err := lkr.LookupGhost(path) if err != nil { t.Fatalf("Looking up ghost failed: %v", err) } oldFile, err := ghost.OldFile() if err != nil { t.Fatalf("Failed to retrieve old file from ghost: %v", err) } if !oldFile.TreeHash().Equal(file.TreeHash()) { t.Fatal("Old file and original file hashes differ!") } // Check 
directory removal: nestedDir, err := lkr.LookupDirectory("/some/nested") if err != nil { t.Fatalf("Lookup on /some/nested failed: %v", err) } nestedParentDir, err := nestedDir.Parent(lkr) if err != nil { t.Fatalf("Getting parent of /some/nested failed: %v", err) } // Just fill in a dummy moved to ref, to get a ghost. parentDir, ghost, err = Remove(lkr, nestedDir, true, false) if err != nil { t.Fatalf("Directory removal failed: %v", err) } if ghost == nil || ghost.Type() != n.NodeTypeGhost { t.Fatalf("Ghost node does not look like a ghost: %v", ghost) } if !parentDir.TreeHash().Equal(nestedParentDir.TreeHash()) { t.Fatalf("Hash differs on %s and %s", nestedParentDir.Path(), parentDir.TreeHash()) } }) } func TestRemoveGhost(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { file := MustTouch(t, lkr, "/x", 1) par, err := n.ParentDirectory(lkr, file) if err != nil { t.Fatalf("Failed to get get parent directory of /x: %v", err) } if err := par.RemoveChild(lkr, file); err != nil { t.Fatalf("Removing child /x failed: %v", err) } ghost, err := n.MakeGhost(file, 42) if err != nil { t.Fatalf("Failed to summon ghost: %v", err) } if err := par.Add(lkr, ghost); err != nil { t.Fatalf("Re-adding ghost failed: %v", err) } if err := lkr.StageNode(ghost); err != nil { t.Fatalf("Staging ghost failed: %v", err) } // Try to remove a ghost: if _, _, err := Remove(lkr, ghost, true, false); err != ErrIsGhost { t.Fatalf("Removing ghost failed other than expected: %v", err) } }) } func TestRemoveExistingGhost(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { nd := MustTouch(t, lkr, "/x", 1) _, ghost, err := Remove(lkr, nd, true, true) require.Nil(t, err) _, _, err = Remove(lkr, ghost, false, true) require.Nil(t, err) _, _, err = Remove(lkr, ghost, true, true) require.NotNil(t, err) }) } func moveValidCheck(t *testing.T, lkr *Linker, srcPath, dstPath string) { nd, err := lkr.LookupNode(srcPath) if err == nil { if nd.Type() != n.NodeTypeGhost { t.Fatalf("Source node still exists! 
(%v): %v", srcPath, nd.Type()) } } else if !ie.IsNoSuchFileError(err) { t.Fatalf("Looking up source node failed: %v", err) } lkDestNode, err := lkr.LookupNode(dstPath) if err != nil { t.Fatalf("Looking up dest path failed: %v", err) } if lkDestNode.Path() != dstPath { t.Fatalf("Dest nod and dest path differ: %v <-> %v", lkDestNode.Path(), dstPath) } } func moveInvalidCheck(t *testing.T, lkr *Linker, srcPath, dstPath string) { node, err := lkr.LookupNode(srcPath) if err != nil { t.Fatalf("Source node vanished during errorneous move: %v", err) } if node.Type() == n.NodeTypeGhost { t.Fatalf("Source node was converted to a ghost: %v", node.Path()) } } var moveAndCopyTestCases = []struct { name string isErrorCase bool setup func(t *testing.T, lkr *Linker) (n.ModNode, string) }{ { name: "basic", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustMkdir(t, lkr, "/a/b/c") return MustTouch(t, lkr, "/a/b/c/x", 1), "/a/b/y" }, }, { name: "basic-directory", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { return MustMkdir(t, lkr, "/a/b/short"), "/a/b/looooong" }, }, { name: "basic-same-level", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { return MustTouch(t, lkr, "/a", 1), "/b" }, }, { name: "basic-root-to-sub", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustTouch(t, lkr, "/README.md", 1) MustMkdir(t, lkr, "/sub") return MustTouch(t, lkr, "/x", 1), "/sub" }, }, { name: "into-directory", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustMkdir(t, lkr, "/a/b/c") MustMkdir(t, lkr, "/a/b/d") return MustTouch(t, lkr, "/a/b/c/x", 1), "/a/b/d" }, }, { name: "into-nonempty-directory", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustMkdir(t, lkr, "/a/b/c") MustMkdir(t, lkr, "/a/b/d") MustTouch(t, lkr, "/a/b/d/y", 1) return MustTouch(t, lkr, "/a/b/c/x", 1), "/a/b/d" }, }, { name: 
"error-to-directory-contains-file", isErrorCase: true, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustMkdir(t, lkr, "/src") MustMkdir(t, lkr, "/dst") MustTouch(t, lkr, "/dst/x", 1) return MustTouch(t, lkr, "/src/x", 1), "/dst" }, }, { name: "error-file-over-existing", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustMkdir(t, lkr, "/src") MustMkdir(t, lkr, "/dst") MustTouch(t, lkr, "/dst/x", 1) return MustTouch(t, lkr, "/src/x", 1), "/dst/x" }, }, { name: "error-file-over-ghost", isErrorCase: false, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { MustMkdir(t, lkr, "/src") MustMkdir(t, lkr, "/dst") destFile := MustTouch(t, lkr, "/dst/x", 1) MustRemove(t, lkr, destFile) return MustTouch(t, lkr, "/src/x", 1), "/dst/x" }, }, { name: "error-src-equal-dst", isErrorCase: true, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { return MustTouch(t, lkr, "/x", 1), "/x" }, }, { name: "error-into-own-subdir", isErrorCase: true, setup: func(t *testing.T, lkr *Linker) (n.ModNode, string) { // We should not be able to move "/dir" into itself. dir := MustMkdir(t, lkr, "/dir") MustTouch(t, lkr, "/dir/x", 1) return dir, "/dir/own" }, }, } func TestMoveSingle(t *testing.T) { // Cases to cover for move(): // 1. Dest exists: // 1.1. Is a directory. // 1.1.1 E This directory contains basename(src) and it is a file. // 1.1.2 E This directory contains basename(src) and it is a non-empty dir. // 1.1.3 V This directory contains basename(src) and it is a empty dir. // 2. Dest does not exist. // 2.1 V dirname(dest) exists and is a directory. // 2.2 E dirname(dest) does not exists. // 2.2 E dirname(dest) exists and is not a directory. // 3. E Overlap of src and dest paths (src in dest) // Checks for valid cases (V): // 1) src is gone. // 2) dest is the same node as before. // 3) dest has the correct path. // Checks for invalid cases (E): // 1) src is not gone. 
for _, tc := range moveAndCopyTestCases { t.Run(tc.name, func(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { // Setup src and dest dir with a file in it named like src. srcNd, dstPath := tc.setup(t, lkr) srcPath := srcNd.Path() if err := Move(lkr, srcNd, dstPath); err != nil { if tc.isErrorCase { moveInvalidCheck(t, lkr, srcPath, dstPath) } else { t.Fatalf("Move failed unexpectly: %v", err) } } else { moveValidCheck(t, lkr, srcPath, dstPath) } }) }) } } func TestMoveDirectoryWithChild(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { MustMkdir(t, lkr, "/src") oldFile := MustTouch(t, lkr, "/src/x", 1) oldFile = oldFile.Copy(oldFile.Inode()).(*n.File) MustCommit(t, lkr, "before move") dir, err := lkr.LookupDirectory("/src") require.Nil(t, err) MustMove(t, lkr, dir, "/dst") MustCommit(t, lkr, "after move") file, err := lkr.LookupFile("/dst/x") require.Nil(t, err) require.Equal(t, h.TestDummy(t, 1), file.BackendHash()) _, err = lkr.LookupGhost("/src") require.Nil(t, err) // This will resolve to the old file: _, err = lkr.LookupFile("/src/x") require.NotNil(t, err) }) } func TestMoveDirectory(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { srcDir := MustMkdir(t, lkr, "/src") MustMkdir(t, lkr, "/src/sub") MustTouch(t, lkr, "/src/sub/x", 23) MustTouch(t, lkr, "/src/y", 23) dstDir := MustMove(t, lkr, srcDir, "/dst") expect := []string{ "/dst/sub/x", "/dst/sub", "/dst/y", "/dst", } got := []string{} require.Nil(t, n.Walk(lkr, dstDir, true, func(child n.Node) error { got = append(got, child.Path()) return nil })) sort.Strings(expect) sort.Strings(got) require.Equal(t, len(expect), len(got)) for idx := range expect { if got[idx] != expect[idx] { t.Errorf( "Moved node child `%s` does not match `%s`", got[idx], expect[idx], ) } } }) } func TestMoveDirectoryWithGhosts(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { srcDir := MustMkdir(t, lkr, "/src") MustMkdir(t, lkr, "/src/sub") xFile := MustTouch(t, lkr, "/src/sub/x", 23) MustTouch(t, lkr, "/src/y", 
23) MustMove(t, lkr, xFile, "/src/z") dstDir := MustMove(t, lkr, srcDir, "/dst") expect := []string{ "/dst", "/dst/sub", "/dst/sub/x", "/dst/y", "/dst/z", } // Be evil and clear the mem cache in order to check if all changes // were checked into the staging area. lkr.MemIndexClear() got := []string{} require.Nil(t, n.Walk(lkr, dstDir, true, func(child n.Node) error { got = append(got, child.Path()) return nil })) // Check if the moved directory contains the right paths: sort.Strings(got) for idx, expectPath := range expect { if expectPath != got[idx] { t.Fatalf("%d: %s != %s", idx, expectPath, got[idx]) } } ghost, err := lkr.LookupNode(got[2]) require.Nil(t, err) status, err := lkr.Status() require.Nil(t, err) require.Equal(t, "/src/sub/x", ghost.(*n.Ghost).OldNode().Path()) twin, _, err := lkr.MoveMapping(status, ghost) require.Nil(t, err) require.Equal(t, "/dst/z", twin.Path()) }) } func TestStage(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { // Initial stage of the file: key := make([]byte, 32) contentHash1 := h.TestDummy(t, 1) backendHash1 := h.TestDummy(t, 1) file, err := Stage( lkr, "/photos/moose.png", contentHash1, backendHash1, 2, -1, key, time.Now(), false, ) if err != nil { t.Fatalf("Adding of /photos/moose.png failed: %v", err) } contentHash2 := h.TestDummy(t, 2) backendHash2 := h.TestDummy(t, 2) file, err = Stage( lkr, "/photos/moose.png", contentHash2, backendHash2, 3, -1, key, time.Now(), false, ) if err != nil { t.Fatalf("Adding of /photos/moose.png failed: %v", err) } if !file.BackendHash().Equal(h.TestDummy(t, 2)) { t.Fatalf( "File content after update is not what's advertised: %v", file.TreeHash(), ) } }) } func TestStageDirOverGhost(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { empty := MustMkdir(t, lkr, "/empty") MustMove(t, lkr, empty, "/moved_empty") MustMkdir(t, lkr, "/empty") dir, err := lkr.LookupDirectory("/empty") require.Nil(t, err) require.Equal(t, dir.Path(), "/empty") if dir.Type() != n.NodeTypeDirectory { 
t.Fatalf("/empty is not a directory") } }) } func TestCopy(t *testing.T) { for _, tc := range moveAndCopyTestCases { t.Run(tc.name, func(t *testing.T) { WithDummyLinker(t, func(lkr *Linker) { // Setup src and dest dir with a file in it named like src. srcNd, dstPath := tc.setup(t, lkr) srcPath := srcNd.Path() newNd, err := Copy(lkr, srcNd, dstPath) if newNd != nil { if !strings.HasPrefix(newNd.Path(), dstPath) { t.Fatalf( "Node was copied to wrong path: %v (want %v)", newNd.Path(), dstPath, ) } // Make sure the new copy is reachable from parent: par, err := lkr.LookupDirectory(path.Dir(newNd.Path())) if err != nil { t.Fatalf("Failed to lookup parent: %v", err) } newChildNd, err := par.Child(lkr, newNd.Name()) if err != nil { t.Fatalf("Failed to get base path: %v", err) } newNd = newChildNd.(n.ModNode) } if oldNd, lookErr := lkr.LookupNode(srcPath); oldNd == nil || lookErr != nil { t.Fatalf("source node does not exist or is not accesible: %v %v", err, tc.isErrorCase) } if err != nil { if tc.isErrorCase { node, err := lkr.LookupNode(srcPath) if err != nil { t.Fatalf("Source node vanished during errorneous copy: %v", err) } if node.Type() == n.NodeTypeGhost { t.Fatalf("Source node was converted to a ghost: %v", node.Path()) } } else { t.Fatalf("Copy failed unexpectly: %v", err) } // No need to test more. return } if tc.isErrorCase { t.Fatalf("test should have failed") } if newNd == nil { t.Fatalf("Dest node does not exist after copy: %v", err) } if !newNd.BackendHash().Equal(srcNd.BackendHash()) { t.Logf("Content of src and dst differ after copy") t.Logf("WANT: %v", srcNd.BackendHash()) t.Logf("GOT : %v", newNd.BackendHash()) t.Fatalf("Check Copy()") } if newNd.Inode() < srcNd.Inode() { t.Fatalf("New inode has <= inode of src") } // Sanity check: do not rely on Copy() to return a valid, staged node. // Check if we can look it up after the Copy too. 
nd, err := lkr.LookupNode(newNd.Path()) require.Nil(t, err) require.NotNil(t, nd) }) }) } } ================================================ FILE: catfs/core/gc.go ================================================ package core import ( "github.com/sahib/brig/catfs/db" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" h "github.com/sahib/brig/util/hashlib" log "github.com/sirupsen/logrus" ) // GarbageCollector implements a small mark & sweep garbage collector. // It exists more for the sake of fault tolerance than it being an // essential part of brig. This is different from the ipfs garbage collector. type GarbageCollector struct { lkr *Linker kv db.Database notifier func(nd n.Node) bool markMap map[string]struct{} } // NewGarbageCollector will return a new GC, operating on `lkr` and `kv`. // It will call `kc` on every collected node. func NewGarbageCollector(lkr *Linker, kv db.Database, kc func(nd n.Node) bool) *GarbageCollector { return &GarbageCollector{ lkr: lkr, kv: kv, notifier: kc, } } func (gc *GarbageCollector) markMoveMap(key []string) error { keys, err := gc.kv.Keys(key...) if err != nil { return err } for _, key := range keys { data, err := gc.kv.Get(key...) 
if err != nil { return err } node, _, err := gc.lkr.parseMoveMappingLine(string(data)) if err != nil { return err } if node != nil { gc.markMap[node.TreeHash().B58String()] = struct{}{} } } return nil } func (gc *GarbageCollector) mark(cmt *n.Commit, recursive bool) error { if cmt == nil { return nil } root, err := gc.lkr.DirectoryByHash(cmt.Root()) if err != nil { return err } gc.markMap[cmt.TreeHash().B58String()] = struct{}{} err = n.Walk(gc.lkr, root, true, func(child n.Node) error { gc.markMap[child.TreeHash().B58String()] = struct{}{} return nil }) if err != nil { return err } parent, err := cmt.Parent(gc.lkr) if err != nil { return err } if recursive && parent != nil { parentCmt, ok := parent.(*n.Commit) if !ok { return ie.ErrBadNode } return gc.mark(parentCmt, recursive) } return nil } func (gc *GarbageCollector) sweep(prefix []string) (int, error) { removed := 0 return removed, gc.lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) { keys, err := gc.kv.Keys(prefix...) if err != nil { return hintRollback(err) } for _, key := range keys { b58Hash := key[len(key)-1] if _, ok := gc.markMap[b58Hash]; ok { continue } hash, err := h.FromB58String(b58Hash) if err != nil { return hintRollback(err) } node, err := gc.lkr.NodeByHash(hash) if err != nil { return hintRollback(err) } if node == nil { continue } // Allow the gc caller to check if he really // wants to delete this node. if gc.notifier != nil && !gc.notifier(node) { continue } // Actually get rid of the node: gc.lkr.MemIndexPurge(node) batch.Erase(key...) 
removed++ } return false, nil }) } func (gc *GarbageCollector) findAllMoveLocations(head *n.Commit) ([][]string, error) { locations := [][]string{ {"stage", "moves"}, } for { parent, err := head.Parent(gc.lkr) if err != nil { return nil, err } if parent == nil { break } parentCmt, ok := parent.(*n.Commit) if !ok { return nil, ie.ErrBadNode } head = parentCmt location := []string{"moves", head.TreeHash().B58String()} locations = append(locations, location) } return locations, nil } // Run will trigger a GC run. If `allObjects` is false, // only the staging commit will be checked. Otherwise // all objects in the key value store. func (gc *GarbageCollector) Run(allObjects bool) error { gc.markMap = make(map[string]struct{}) head, err := gc.lkr.Status() if err != nil { return err } if err := gc.mark(head, allObjects); err != nil { return err } // Staging might contain moved files that are not reachable anymore, // but still are referenced by the move mapping. // Keep them for now, they will die most likely on MakeCommit() moveMapLocations := [][]string{ {"stage", "moves"}, } if allObjects { moveMapLocations, err = gc.findAllMoveLocations(head) if err != nil { return err } } for _, location := range moveMapLocations { if err := gc.markMoveMap(location); err != nil { return err } } removed, err := gc.sweep([]string{"stage", "objects"}) if err != nil { log.Debugf("removed %d unreachable staging objects.", removed) } if allObjects { removed, err = gc.sweep([]string{"objects"}) if err != nil { return err } if removed > 0 { log.Warningf("removed %d unreachable permanent objects.", removed) log.Warningf("this might indiciate a bug in catfs somewhere.") } } return nil } ================================================ FILE: catfs/core/gc_test.go ================================================ package core import ( "testing" "github.com/sahib/brig/catfs/db" n "github.com/sahib/brig/catfs/nodes" "github.com/stretchr/testify/require" ) func assertNodeExists(t *testing.T, kv 
db.Database, nd n.Node) { if _, err := kv.Get("stage", "objects", nd.TreeHash().B58String()); err != nil { t.Fatalf("Stage object %v does not exist: %v", nd, err) } } func TestGC(t *testing.T) { mdb := db.NewMemoryDatabase() lkr := NewLinker(mdb) killExpected := make(map[string]bool) killActual := make(map[string]bool) gc := NewGarbageCollector(lkr, mdb, func(nd n.Node) bool { killActual[nd.TreeHash().B58String()] = true return true }) root, err := lkr.Root() if err != nil { t.Fatalf("Failed to retrieve the root: %v", root) } killExpected[root.TreeHash().B58String()] = true sub1, err := n.NewEmptyDirectory(lkr, root, "a", "u", 3) if err != nil { t.Fatalf("Creating sub2 failed: %v", err) } if err := lkr.StageNode(sub1); err != nil { t.Fatalf("Staging root failed: %v", err) } killExpected[root.TreeHash().B58String()] = true killExpected[sub1.TreeHash().B58String()] = true sub2, err := n.NewEmptyDirectory(lkr, sub1, "b", "u", 4) if err != nil { t.Fatalf("Creating sub2 failed: %v", err) } if err := lkr.StageNode(sub2); err != nil { t.Fatalf("Staging root failed: %v", err) } root, err = lkr.Root() require.Nil(t, err) if err := gc.Run(true); err != nil { t.Fatalf("gc run failed: %v", err) } if len(killExpected) != len(killActual) { t.Fatalf( "GC killed %d nodes, but should have killed %d", len(killActual), len(killExpected), ) } for killedHash := range killActual { if _, ok := killExpected[killedHash]; !ok { t.Fatalf("%s was killed, but should not!", killedHash) } if _, err := mdb.Get("stage", "objects", killedHash); err != db.ErrNoSuchKey { t.Fatalf("GC did not wipe key from db: %v", killedHash) } } // Double check that the gc did not delete other stuff from the db: assertNodeExists(t, mdb, root) assertNodeExists(t, mdb, sub1) assertNodeExists(t, mdb, sub2) gc = NewGarbageCollector(lkr, mdb, func(nd n.Node) bool { t.Fatalf("Second gc run found something, first didn't") return true }) if err := gc.Run(true); err != nil { t.Fatalf("Second gc run failed: %v", err) } if err 
:= lkr.MakeCommit(n.AuthorOfStage, "some message"); err != nil { t.Fatalf("MakeCommit() failed: %v", err) } gc = NewGarbageCollector(lkr, mdb, func(nd n.Node) bool { t.Fatalf("Third gc run found something, first didn't") return true }) if err := gc.Run(true); err != nil { t.Fatalf("Third gc run failed: %v", err) } } ================================================ FILE: catfs/core/linker.go ================================================ package core // Layout of the key/value store: // // objects/ => NODE_METADATA // tree/ => NODE_HASH // index/ => COMMIT_HASH // inode/ => NODE_HASH // moves/ => MOVE_INFO // moves/overlay/ => MOVE_INFO // // stage/objects/ => NODE_METADATA // stage/tree/ => NODE_HASH // stage/STATUS => COMMIT_METADATA // stage/moves/ => MOVE_INFO // stage/moves/overlay/ => MOVE_INFO // // stats/max-inode => UINT64 // refs/ => NODE_HASH // // Defined by caller: // // metadata/ => BYTES (Caller defined data) // // NODE is either a Commit, a Directory or a File. // FULL_NODE_PATH may contain slashes and in case of directories, // it will contain a trailing slash. // // The following refs are defined by the system: // HEAD -> Points to the latest finished commit, or nil. // CURR -> Points to the staging commit. // // In git terminology, this file implements the following commands: // // - git add: StageNode(): Create and Update Nodes. // - git status: Status() // - git commit: MakeCommit() // // All write operations are written in one batch or are rolled back // on errors. 
import (
	"encoding/binary"
	"fmt"
	"path"
	"runtime/debug"
	"strconv"
	"strings"
	"time"

	e "github.com/pkg/errors"
	"github.com/sahib/brig/catfs/db"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	h "github.com/sahib/brig/util/hashlib"
	"github.com/sahib/brig/util/trie"
	log "github.com/sirupsen/logrus"
	capnp "zombiezen.com/go/capnproto2"
)

// Linker implements the basic logic of brig's data model.
// It uses an underlying key/value database to
// store a Merkle-DAG with versioned metadata,
// similar to what git does internally.
type Linker struct {
	// Underlying key/value store; all persistent state lives here.
	kv db.Database

	// root of the filesystem (staging root directory)
	root *n.Directory

	// Path lookup trie (path -> node)
	ptrie *trie.Node

	// B58Hash to node
	index map[string]n.Node

	// UID (inode) to node
	inodeIndex map[uint64]n.Node

	// Cache for the linker owner.
	owner string
}

// NewLinker returns a new lkr, ready to use. It assumes the key value store
// is working and does no check on this.
func NewLinker(kv db.Database) *Linker {
	lkr := &Linker{kv: kv}
	lkr.MemIndexClear()
	return lkr
}

// MemIndexAdd adds `nd` to the in memory index (hash index, inode index
// and — if `updatePathIndex` is set — the path trie).
func (lkr *Linker) MemIndexAdd(nd n.Node, updatePathIndex bool) {
	lkr.index[nd.TreeHash().B58String()] = nd
	lkr.inodeIndex[nd.Inode()] = nd

	if updatePathIndex {
		path := nd.Path()
		// Directories are stored with a trailing "/." marker in the trie:
		if nd.Type() == n.NodeTypeDirectory {
			path = appendDot(path)
		}
		lkr.ptrie.InsertWithData(path, nd)
	}
}

// MemIndexSwap updates an entry of the in memory index, by deleting
// the old entry referenced by oldHash (may be nil). This is necessary
// to ensure that old hashes do not resolve to the new, updated instance.
// If the old instance is needed, it will be loaded as new instance.
// You should not need to call this function, except when implementing own Nodes.
func (lkr *Linker) MemIndexSwap(nd n.Node, oldHash h.Hash, updatePathIndex bool) {
	if oldHash != nil {
		delete(lkr.index, oldHash.B58String())
	}

	lkr.MemIndexAdd(nd, updatePathIndex)
}

// MemSetRoot sets the current root, but does not store it yet. It's supposed
// to be called after in-memory modifications. Only implementors of new Nodes
// might need to call this function.
func (lkr *Linker) MemSetRoot(root *n.Directory) {
	if lkr.root != nil {
		// Replace the previous root entry so its old hash no longer resolves:
		lkr.MemIndexSwap(root, lkr.root.TreeHash(), true)
	} else {
		lkr.MemIndexAdd(root, true)
	}

	lkr.root = root
}

// MemIndexPurge removes `nd` from the memory index.
func (lkr *Linker) MemIndexPurge(nd n.Node) {
	delete(lkr.inodeIndex, nd.Inode())
	delete(lkr.index, nd.TreeHash().B58String())
	lkr.ptrie.Lookup(nd.Path()).Remove()
}

// MemIndexClear resets the memory index to zero.
// This should not be called mid-flight in operations,
// but should be okay to call between atomic operations.
func (lkr *Linker) MemIndexClear() {
	lkr.ptrie = trie.NewNode()
	lkr.index = make(map[string]n.Node)
	lkr.inodeIndex = make(map[uint64]n.Node)
	lkr.root = nil
}

//////////////////////////
// COMMON NODE HANDLING //
//////////////////////////

// NextInode returns a unique identifier, used to identify a single node. You
// should not need to call this function, except when implementing own nodes.
//
// NOTE(review): all error paths silently return 0 here, so callers cannot
// distinguish a failure from a real inode — confirm 0 is treated as invalid.
func (lkr *Linker) NextInode() uint64 {
	nodeCount, err := lkr.kv.Get("stats", "max-inode")
	if err != nil && err != db.ErrNoSuchKey {
		return 0
	}

	// nodeCount might be nil on startup:
	cnt := uint64(1)
	if nodeCount != nil {
		cnt = binary.BigEndian.Uint64(nodeCount) + 1
	}

	cntBuf := make([]byte, 8)
	binary.BigEndian.PutUint64(cntBuf, cnt)

	// Persist the incremented counter atomically:
	err = lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		batch.Put(cntBuf, "stats", "max-inode")
		return false, nil
	})

	if err != nil {
		return 0
	}

	return cnt
}

// FilesByContents checks what files are associated with the content hashes in
// `contents`. It returns a map of content hash b58 to file. This method is
// quite heavy and should not be used in loops. There is room for optimizations.
func (lkr *Linker) FilesByContents(contents []h.Hash) (map[string]*n.File, error) { keys, err := lkr.kv.Keys() if err != nil { return nil, err } result := make(map[string]*n.File) for _, key := range keys { // Filter non-node storage: fullKey := strings.Join(key, "/") if !strings.HasPrefix(fullKey, "objects") && !strings.HasPrefix(fullKey, "stage/objects") { continue } data, err := lkr.kv.Get(key...) if err != nil { return nil, err } nd, err := n.UnmarshalNode(data) if err != nil { return nil, err } if nd.Type() != n.NodeTypeFile { continue } file, ok := nd.(*n.File) if !ok { return nil, ie.ErrBadNode } for _, content := range contents { if content.Equal(file.BackendHash()) { result[content.B58String()] = file } } } return result, nil } // loadNode loads an individual object by its hash from the object store. It // will return nil if the hash is not there. func (lkr *Linker) loadNode(hash h.Hash) (n.Node, error) { var data []byte var err error b58hash := hash.B58String() // First look in the stage: loadableBuckets := [][]string{ {"stage", "objects", b58hash}, {"objects", b58hash}, } for _, bucketPath := range loadableBuckets { data, err = lkr.kv.Get(bucketPath...) if err != nil && err != db.ErrNoSuchKey { return nil, err } if data != nil { return n.UnmarshalNode(data) } } // Damn, no hash found: return nil, nil } // NodeByHash returns the node identified by hash. // If no such hash could be found, nil is returned. func (lkr *Linker) NodeByHash(hash h.Hash) (n.Node, error) { // Check if we have this this node in the memory cache already: b58Hash := hash.B58String() if cachedNode, ok := lkr.index[b58Hash]; ok { return cachedNode, nil } // Node was not in the cache, load directly from kv. nd, err := lkr.loadNode(hash) if err != nil { return nil, err } if nd == nil { return nil, nil } lkr.MemIndexAdd(nd, false) return nd, nil } func appendDot(path string) string { // path.Join() calls path.Clean() which in turn // removes the '.' at the end when trying to join that. 
// But since we use the dot to mark directories we shouldn't do that. if strings.HasSuffix(path, "/") { return path + "." } return path + "/." } // ResolveNode resolves a path to a hash and resolves the corresponding node by // calling NodeByHash(). If no node could be resolved, nil is returned. // It does not matter if the node was deleted in the meantime. If so, // a Ghost node is returned which stores the last known state. func (lkr *Linker) ResolveNode(nodePath string) (n.Node, error) { // Check if it's cached already: trieNode := lkr.ptrie.Lookup(nodePath) if trieNode != nil && trieNode.Data != nil { return trieNode.Data.(n.Node), nil } fullPaths := [][]string{ {"stage", "tree", nodePath}, {"tree", nodePath}, } for _, fullPath := range fullPaths { b58Hash, err := lkr.kv.Get(fullPath...) if err != nil && err != db.ErrNoSuchKey { return nil, e.Wrapf(err, "db-lookup") } if err == db.ErrNoSuchKey { continue } bhash, err := h.FromB58String(string(b58Hash)) if err != nil { return nil, err } if bhash != nil { return lkr.NodeByHash(h.Hash(bhash)) } } // Return nil if nothing found: return nil, nil } // StageNode inserts a modified node to the staging area, making sure the // modification is persistent and part of the staging commit. All parent // directories of the node in question will be staged automatically. If there // was no modification it will be a (quite expensive) NOOP. 
func (lkr *Linker) StageNode(nd n.Node) error { return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) { if err := lkr.stageNodeRecursive(batch, nd); err != nil { return true, e.Wrapf(err, "recursive stage") } // Update the staging commit's root hash: status, err := lkr.Status() if err != nil { return true, fmt.Errorf("failed to retrieve status: %v", err) } root, err := lkr.Root() if err != nil { return true, err } status.SetModTime(time.Now()) status.SetRoot(root.TreeHash()) lkr.MemSetRoot(root) return hintRollback(lkr.saveStatus(status)) }) } // CommitByIndex returns the commit referenced by `index`. // `0` will return the very first commit. Negative numbers will yield // a ErrNoSuchKey error. func (lkr *Linker) CommitByIndex(index int64) (*n.Commit, error) { status, err := lkr.Status() if err != nil { return nil, err } if index < 0 { // Interpret an index of -n as curr-(n+1), // so that -1 means "curr". index = status.Index() + index + 1 } b58Hash, err := lkr.kv.Get("index", strconv.FormatInt(index, 10)) if err != nil && err != db.ErrNoSuchKey { return nil, err } // Special case: status is not in the index bucket. // Do a separate check for it. if err == db.ErrNoSuchKey { if status.Index() == index { return status, nil } owner, _ := lkr.Owner() errmsg := fmt.Sprintf("No commit with index %v for owner `%v`", index, owner) log.Error(errmsg) return nil, ie.NoSuchCommitIndex(index) } hash, err := h.FromB58String(string(b58Hash)) if err != nil { return nil, err } return lkr.CommitByHash(hash) } // NodeByInode resolves a node by it's unique ID. // It will return nil if no corresponding node was found. 
func (lkr *Linker) NodeByInode(uid uint64) (n.Node, error) { b58Hash, err := lkr.kv.Get("inode", strconv.FormatUint(uid, 10)) if err != nil && err != db.ErrNoSuchKey { return nil, err } hash, err := h.FromB58String(string(b58Hash)) if err != nil { return nil, err } return lkr.NodeByHash(hash) } func (lkr *Linker) stageNodeRecursive(batch db.Batch, nd n.Node) error { if nd.Type() == n.NodeTypeCommit { return fmt.Errorf("bug: commits cannot be staged; use MakeCommit()") } data, err := n.MarshalNode(nd) if err != nil { return e.Wrapf(err, "marshal") } b58Hash := nd.TreeHash().B58String() batch.Put(data, "stage", "objects", b58Hash) uidKey := strconv.FormatUint(nd.Inode(), 10) batch.Put([]byte(nd.TreeHash().B58String()), "inode", uidKey) hashPath := []string{"stage", "tree", nd.Path()} if nd.Type() == n.NodeTypeDirectory { hashPath = append(hashPath, ".") } batch.Put([]byte(b58Hash), hashPath...) // Remember/Update this node in the cache if it's not yet there: lkr.MemIndexAdd(nd, true) // We need to save parent directories too, in case the hash changed: // Note that this will create many pointless directories in staging. // That's okay since we garbage collect it every few seconds // on a higher layer. if nd.Path() == "/" { // Can' go any higher. Save this dir as new virtual root. root, ok := nd.(*n.Directory) if !ok { return ie.ErrBadNode } lkr.MemSetRoot(root) return nil } par, err := lkr.ResolveDirectory(path.Dir(nd.Path())) if err != nil { return e.Wrapf(err, "resolve") } if par != nil { if err := lkr.stageNodeRecursive(batch, par); err != nil { return err } } return nil } ///////////////////// // COMMIT HANDLING // ///////////////////// // SetMergeMarker sets the current status to be a merge commit. // Note that this function only will have a result when MakeCommit() is called afterwards. // Otherwise, the changes will not be written to disk. 
func (lkr *Linker) SetMergeMarker(with string, remoteHead h.Hash) error {
	status, err := lkr.Status()
	if err != nil {
		return err
	}

	status.SetMergeMarker(with, remoteHead)
	return lkr.saveStatus(status)
}

// MakeCommit creates a new full commit in the version history.
// The current staging commit is finalized with `author` and `message`
// and gets saved. A new, identical staging commit is created pointing
// to the root of the now new HEAD.
//
// If nothing changed since the last call to MakeCommit, it will
// return ErrNoChange, which can be reacted upon.
func (lkr *Linker) MakeCommit(author string, message string) error {
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		// Only an unexpected error triggers a rollback;
		// ErrNoChange is passed through without undoing anything.
		switch err := lkr.makeCommit(batch, author, message); err {
		case ie.ErrNoChange:
			return false, err
		case nil:
			return false, nil
		default:
			return true, err
		}
	})
}

// makeCommitPutCurrToPersistent copies every node reachable from `rootDir`
// into the persistent "objects" and "tree" buckets of `batch`.
// It returns the set of inodes that were exported this way.
func (lkr *Linker) makeCommitPutCurrToPersistent(batch db.Batch, rootDir *n.Directory) (map[uint64]bool, error) {
	exportedInodes := make(map[uint64]bool)
	return exportedInodes, n.Walk(lkr, rootDir, true, func(child n.Node) error {
		data, err := n.MarshalNode(child)
		if err != nil {
			return err
		}

		b58Hash := child.TreeHash().B58String()
		batch.Put(data, "objects", b58Hash)
		exportedInodes[child.Inode()] = true

		childPath := child.Path()
		if child.Type() == n.NodeTypeDirectory {
			// Directories are keyed as "<path>/." in the tree bucket.
			childPath = appendDot(childPath)
		}

		batch.Put([]byte(b58Hash), "tree", childPath)
		return nil
	})
}

// makeCommit implements MakeCommit; it must run inside `batch`.
func (lkr *Linker) makeCommit(batch db.Batch, author string, message string) error {
	head, err := lkr.Head()
	if err != nil && !ie.IsErrNoSuchRef(err) {
		return err
	}

	status, err := lkr.Status()
	if err != nil {
		return err
	}

	// Only compare with previous if we have a HEAD yet.
	if head != nil {
		if status.Root().Equal(head.Root()) {
			return ie.ErrNoChange
		}
	}

	rootDir, err := lkr.Root()
	if err != nil {
		return err
	}

	// Go over all files/directories and save them in tree & objects.
	// Note that this will only move nodes that are reachable from the current
	// commit root. Intermediate nodes will not be copied.
	exportedInodes, err := lkr.makeCommitPutCurrToPersistent(batch, rootDir)
	if err != nil {
		return err
	}

	// NOTE: `head` may be nil, if it couldn't be resolved,
	// or (maybe more likely) if this is the first commit.
	if head != nil {
		if err := status.SetParent(lkr, head); err != nil {
			return err
		}
	}

	if err := status.BoxCommit(author, message); err != nil {
		return err
	}

	statusData, err := n.MarshalNode(status)
	if err != nil {
		return err
	}

	statusB58Hash := status.TreeHash().B58String()
	batch.Put(statusData, "objects", statusB58Hash)

	// Remember this commit under its index:
	batch.Put([]byte(statusB58Hash), "index", strconv.FormatInt(status.Index(), 10))

	if err := lkr.SaveRef("HEAD", status); err != nil {
		return err
	}

	// Check if we have already tagged the initial commit.
	// (SaveRef lowercases names, so "INIT" and "init" refer to the same ref.)
	if _, err := lkr.ResolveRef("init"); err != nil {
		if !ie.IsErrNoSuchRef(err) {
			// Some other error happened.
			return err
		}

		// This is probably the first commit. Tag it.
		if err := lkr.SaveRef("INIT", status); err != nil {
			return err
		}
	}

	// Fixate the moved paths in the stage:
	if err := lkr.commitMoveMapping(status, exportedInodes); err != nil {
		return err
	}

	if err := lkr.clearStage(batch); err != nil {
		return err
	}

	// Create the new, empty staging commit on top of the new HEAD:
	newStatus, err := n.NewEmptyCommit(lkr.NextInode(), status.Index()+1)
	if err != nil {
		return err
	}

	newStatus.SetRoot(status.Root())
	if err := newStatus.SetParent(lkr, status); err != nil {
		return err
	}

	return lkr.saveStatus(newStatus)
}

// clearStage wipes all staging buckets from the key value store.
func (lkr *Linker) clearStage(batch db.Batch) error {
	// Clear the staging area.
	toClear := [][]string{
		{"stage", "objects"},
		{"stage", "tree"},
		{"stage", "moves"},
	}

	for _, key := range toClear {
		if err := batch.Clear(key...); err != nil {
			return err
		}
	}

	return nil
}

///////////////////////
// METADATA HANDLING //
///////////////////////

// MetadataPut remembers a value persistently identified by `key`.
// It can be used as single-level key value store for user purposes.
func (lkr *Linker) MetadataPut(key string, value []byte) error {
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		batch.Put([]byte(value), "metadata", key)
		return false, nil
	})
}

// MetadataGet retrieves a previously put key value pair.
// It will return nil if no such value could be retrieved.
func (lkr *Linker) MetadataGet(key string) ([]byte, error) {
	return lkr.kv.Get("metadata", key)
}

////////////////////////
// OWNERSHIP HANDLING //
////////////////////////

// Owner returns the owner of the linker.
func (lkr *Linker) Owner() (string, error) {
	if lkr.owner != "" {
		return lkr.owner, nil
	}

	data, err := lkr.MetadataGet("owner")
	if err != nil {
		return "", err
	}

	// Cache owner, we don't want to reload it again and again.
	// It will usually not change during runtime, except SetOwner
	// is called (which is invalidating the cache anyways)
	lkr.owner = string(data)
	return lkr.owner, nil
}

// SetOwner will set the owner to `owner`.
func (lkr *Linker) SetOwner(owner string) error {
	lkr.owner = owner
	return lkr.MetadataPut("owner", []byte(owner))
}

// SetABIVersion will set the ABI version to `version`.
func (lkr *Linker) SetABIVersion(version int) error {
	sv := strconv.Itoa(version)
	return lkr.MetadataPut("version", []byte(sv))
}

////////////////////////
// REFERENCE HANDLING //
////////////////////////

// ResolveRef resolves the hash associated with `refname`. If the ref could not
// be resolved, ErrNoSuchRef is returned. Typically, Node will be a Commit.
// But there are no technical restrictions on which node type to use.
// A trailing run of '^' characters walks that many parents up the chain
// (similar to git's rev^ syntax).
// NOTE: ResolveRef("HEAD") != ResolveRef("head") due to case.
func (lkr *Linker) ResolveRef(refname string) (n.Node, error) {
	origRefname := refname

	// Count the trailing ^s:
	nUps := 0
	for idx := len(refname) - 1; idx >= 0; idx-- {
		if refname[idx] == '^' {
			nUps++
		} else {
			break
		}
	}

	// Strip the ^s:
	refname = refname[:len(refname)-nUps]

	// Special case: the status commit is not part of the normal object store.
	// Still make it able to resolve it by its refname "curr".
	if refname == "curr" || refname == "status" {
		return lkr.Status()
	}

	b58Hash, err := lkr.kv.Get("refs", refname)
	if err != nil && err != db.ErrNoSuchKey {
		return nil, err
	}

	if len(b58Hash) == 0 {
		// Try to interpret the refname as b58hash directly.
		// This path will hit when passing a commit hash directly
		// as `refname` to this method.
		b58Hash = []byte(refname)
	}

	hash, err := h.FromB58String(string(b58Hash))
	if err != nil {
		// Could not parse hash, so it's probably none.
		return nil, ie.ErrNoSuchRef(refname)
	}

	status, err := lkr.Status()
	if err != nil {
		return nil, err
	}

	// Special case: Allow the resolving of `curr`
	// by using its status hash and check it explicitly.
	var nd n.Node
	if status.TreeHash().Equal(hash) {
		nd = status
	} else {
		nd, err = lkr.NodeByHash(h.Hash(hash))
		if err != nil {
			return nil, err
		}
	}

	if nd == nil {
		return nil, ie.ErrNoSuchRef(refname)
	}

	// Possibly advance a few commits until we hit the one
	// the user required.
	cmt, ok := nd.(*n.Commit)
	if ok {
		for i := 0; i < nUps; i++ {
			parentNd, err := cmt.Parent(lkr)
			if err != nil {
				return nil, err
			}

			if parentNd == nil {
				log.Warningf("ref `%s` is too far back; stopping at `init`", origRefname)
				break
			}

			parentCmt, ok := parentNd.(*n.Commit)
			if !ok {
				break
			}

			cmt = parentCmt
		}

		nd = cmt
	}

	return nd, nil
}

// SaveRef stores a reference to `nd` persistently. The caller is responsible
// to ensure that the node is already in the blockstore, otherwise it won't be
// resolvable. Note that `refname` is lowercased before storing.
func (lkr *Linker) SaveRef(refname string, nd n.Node) error {
	refname = strings.ToLower(refname)
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		batch.Put([]byte(nd.TreeHash().B58String()), "refs", refname)
		return false, nil
	})
}

// ListRefs lists all currently known refs.
func (lkr *Linker) ListRefs() ([]string, error) {
	refs := []string{}
	keys, err := lkr.kv.Keys("refs")
	if err != nil {
		return nil, err
	}

	for _, key := range keys {
		// Keys come back as full paths ("refs", "<name>"); skip malformed ones.
		if len(key) <= 1 {
			continue
		}

		refs = append(refs, key[1])
	}

	return refs, nil
}

// RemoveRef removes the ref named `refname`.
func (lkr *Linker) RemoveRef(refname string) error {
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		batch.Erase("refs", refname)
		return false, nil
	})
}

// Head is just a shortcut for ResolveRef("HEAD").
func (lkr *Linker) Head() (*n.Commit, error) {
	nd, err := lkr.ResolveRef("head")
	if err != nil {
		return nil, err
	}

	cmt, ok := nd.(*n.Commit)
	if !ok {
		return nil, fmt.Errorf("oh-oh, HEAD is not a Commit... %v", nd)
	}

	return cmt, nil
}

// Root returns the current root directory of CURR.
// It is never nil when err is nil.
func (lkr *Linker) Root() (*n.Directory, error) {
	if lkr.root != nil {
		return lkr.root, nil
	}

	status, err := lkr.Status()
	if err != nil {
		return nil, err
	}

	rootNd, err := lkr.DirectoryByHash(status.Root())
	if err != nil {
		return nil, err
	}

	lkr.MemSetRoot(rootNd)
	return rootNd, nil
}

// Status returns the current staging commit.
// It is never nil, unless err is non-nil.
func (lkr *Linker) Status() (*n.Commit, error) {
	var cmt *n.Commit
	var err error

	// NOTE(review): this relies on the closure assigning `cmt` before the
	// return operands are read; gc evaluates the function call first, so
	// this works in practice, but the spec leaves the order unspecified.
	return cmt, lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		cmt, err = lkr.status(batch)
		return hintRollback(err)
	})
}

// status loads the staging commit, creating and persisting an initial one
// (including a fresh root directory, if needed) on first use.
func (lkr *Linker) status(batch db.Batch) (cmt *n.Commit, err error) {
	cmt, err = lkr.loadStatus()
	if err != nil {
		return nil, err
	}

	if cmt != nil {
		return cmt, nil
	}

	// Shoot, no commit exists yet.
	// We need to create an initial one.
	cmt, err = n.NewEmptyCommit(lkr.NextInode(), 0)
	if err != nil {
		return nil, err
	}

	// Setup a new commit and set root from last HEAD or new one.
	head, err := lkr.Head()
	if err != nil && !ie.IsErrNoSuchRef(err) {
		return nil, err
	}

	var rootHash h.Hash

	if ie.IsErrNoSuchRef(err) {
		// There probably wasn't a HEAD yet.
		if root, err := lkr.ResolveDirectory("/"); err == nil && root != nil {
			rootHash = root.TreeHash()
		} else {
			// No root directory then. Create a shiny new one and stage it.
			inode := lkr.NextInode()
			newRoot, err := n.NewEmptyDirectory(lkr, nil, "/", lkr.owner, inode)
			if err != nil {
				return nil, err
			}

			// Can't call StageNode(), since that would call Status(),
			// causing an endless loop of grief and doom.
			if err := lkr.stageNodeRecursive(batch, newRoot); err != nil {
				return nil, err
			}

			rootHash = newRoot.TreeHash()
		}
	} else {
		if err := cmt.SetParent(lkr, head); err != nil {
			return nil, err
		}

		rootHash = head.Root()
	}

	cmt.SetRoot(rootHash)

	if err := lkr.saveStatus(cmt); err != nil {
		return nil, err
	}

	return cmt, nil
}

// loadStatus reads the staging commit from stage/STATUS.
// It returns (nil, nil) when no status was persisted yet.
func (lkr *Linker) loadStatus() (*n.Commit, error) {
	data, err := lkr.kv.Get("stage", "STATUS")
	if err != nil && err != db.ErrNoSuchKey {
		return nil, err
	}

	if data == nil {
		return nil, nil
	}

	msg, err := capnp.Unmarshal(data)
	if err != nil {
		return nil, err
	}

	// It's there already. Just unmarshal it.
	cmt := &n.Commit{}
	if err := cmt.FromCapnp(msg); err != nil {
		return nil, err
	}

	return cmt, nil
}

// saveStatus copies cmt to stage/STATUS.
func (lkr *Linker) saveStatus(cmt *n.Commit) error {
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		head, err := lkr.Head()
		if err != nil && !ie.IsErrNoSuchRef(err) {
			return hintRollback(err)
		}

		if head != nil {
			if err := cmt.SetParent(lkr, head); err != nil {
				return hintRollback(err)
			}
		}

		if err := cmt.BoxCommit(n.AuthorOfStage, ""); err != nil {
			return hintRollback(err)
		}

		data, err := n.MarshalNode(cmt)
		if err != nil {
			return hintRollback(err)
		}

		inode := strconv.FormatUint(cmt.Inode(), 10)
		batch.Put(data, "stage", "STATUS")
		batch.Put([]byte(cmt.TreeHash().B58String()), "inode", inode)
		return hintRollback(lkr.SaveRef("CURR", cmt))
	})
}

/////////////////////////////////
// CONVENIENT ACCESS FUNCTIONS //
/////////////////////////////////

// LookupNode takes the root node and tries to resolve the path from there.
// Deleted paths are recognized in contrast to ResolveNode. // If a path does not exist NoSuchFile is returned. func (lkr *Linker) LookupNode(repoPath string) (n.Node, error) { root, err := lkr.Root() if err != nil { return nil, err } return root.Lookup(lkr, repoPath) } // LookupNodeAt works like LookupNode but returns the node at the state of `cmt`. func (lkr *Linker) LookupNodeAt(cmt *n.Commit, repoPath string) (n.Node, error) { root, err := lkr.DirectoryByHash(cmt.Root()) if err != nil { return nil, err } if root == nil { return nil, nil } return root.Lookup(lkr, repoPath) } // LookupModNode is like LookupNode but returns a readily cast ModNode. func (lkr *Linker) LookupModNode(repoPath string) (n.ModNode, error) { node, err := lkr.LookupNode(repoPath) if err != nil { return nil, err } if node == nil { return nil, nil } snode, ok := node.(n.ModNode) if !ok { return nil, ie.ErrBadNode } return snode, nil } // LookupModNodeAt is like LookupNodeAt but with readily cast type. func (lkr *Linker) LookupModNodeAt(cmt *n.Commit, repoPath string) (n.ModNode, error) { node, err := lkr.LookupNodeAt(cmt, repoPath) if err != nil { return nil, err } if node == nil { return nil, nil } snode, ok := node.(n.ModNode) if !ok { return nil, ie.ErrBadNode } return snode, nil } // DirectoryByHash calls NodeByHash and attempts to convert // it to a Directory as convinience. func (lkr *Linker) DirectoryByHash(hash h.Hash) (*n.Directory, error) { nd, err := lkr.NodeByHash(hash) if err != nil { return nil, err } if nd == nil { return nil, nil } dir, ok := nd.(*n.Directory) if !ok { return nil, ie.ErrBadNode } return dir, nil } // ResolveDirectory calls ResolveNode and converts the result to a Directory. // This only accesses nodes from the filesystem and does not differentiate // between ghosts and living nodes. 
func (lkr *Linker) ResolveDirectory(dirpath string) (*n.Directory, error) { nd, err := lkr.ResolveNode(appendDot(path.Clean(dirpath))) if err != nil { return nil, err } if nd == nil { return nil, nil } dir, ok := nd.(*n.Directory) if !ok { return nil, ie.ErrBadNode } return dir, nil } // LookupDirectory calls LookupNode and converts the result to a Directory. func (lkr *Linker) LookupDirectory(repoPath string) (*n.Directory, error) { nd, err := lkr.LookupNode(repoPath) if err != nil { return nil, err } if nd == nil { return nil, nil } dir, ok := nd.(*n.Directory) if !ok { return nil, ie.ErrBadNode } return dir, nil } // FileByHash calls NodeByHash and converts the result to a File. func (lkr *Linker) FileByHash(hash h.Hash) (*n.File, error) { nd, err := lkr.NodeByHash(hash) if err != nil { return nil, err } file, ok := nd.(*n.File) if !ok { return nil, ie.ErrBadNode } return file, nil } // LookupFile calls LookupNode and converts the result to a file. func (lkr *Linker) LookupFile(repoPath string) (*n.File, error) { nd, err := lkr.LookupNode(repoPath) if err != nil { return nil, err } if nd == nil { return nil, nil } file, ok := nd.(*n.File) if !ok { return nil, ie.ErrBadNode } return file, nil } // LookupGhost calls LookupNode and converts the result to a ghost. func (lkr *Linker) LookupGhost(repoPath string) (*n.Ghost, error) { nd, err := lkr.LookupNode(repoPath) if err != nil { return nil, err } if nd == nil { return nil, nil } ghost, ok := nd.(*n.Ghost) if !ok { return nil, ie.ErrBadNode } return ghost, nil } // CommitByHash lookups a commit by it's hash. // If the commit could not be found, nil is returned. func (lkr *Linker) CommitByHash(hash h.Hash) (*n.Commit, error) { nd, err := lkr.NodeByHash(hash) if err != nil { return nil, err } if nd == nil { return nil, nil } cmt, ok := nd.(*n.Commit) if !ok { return nil, ie.ErrBadNode } return cmt, nil } // HaveStagedChanges returns true if there were changes in the staging area. 
// If an error occurs, the first return value is undefined.
func (lkr *Linker) HaveStagedChanges() (bool, error) {
	head, err := lkr.Head()
	if err != nil && !ie.IsErrNoSuchRef(err) {
		return false, err
	}

	if ie.IsErrNoSuchRef(err) {
		// There is no HEAD yet. Assume we have changes.
		return true, nil
	}

	status, err := lkr.Status()
	if err != nil {
		return false, err
	}

	// Check if the root hashes of CURR and HEAD differ.
	return !status.Root().Equal(head.Root()), nil
}

// CheckoutCommit resets the current staging commit back to the commit
// referenced by cmt. If force is false, it will check if there are any staged
// changes in the staging area and return ErrStageNotEmpty if there are any.
// If force is true, all changes will be overwritten.
func (lkr *Linker) CheckoutCommit(cmt *n.Commit, force bool) error {
	// Check if the staging area is empty if no force given:
	if !force {
		haveStaged, err := lkr.HaveStagedChanges()
		if err != nil {
			return err
		}

		if haveStaged {
			return ie.ErrStageNotEmpty
		}
	}

	status, err := lkr.Status()
	if err != nil {
		return err
	}

	root, err := lkr.DirectoryByHash(cmt.Root())
	if err != nil {
		return err
	}

	return lkr.Atomic(func() (bool, error) {
		// Set the current virtual in-memory cached root
		lkr.MemSetRoot(root)
		status.SetRoot(cmt.Root())

		// Invalidate the cache, causing NodeByHash and ResolveNode to load the
		// file from the boltdb again:
		lkr.MemIndexClear()
		return hintRollback(lkr.saveStatus(status))
	})
}

// AddMoveMapping takes note that the node with `fromInode` has been moved
// to `toInode` in the staging commit.
func (lkr *Linker) AddMoveMapping(fromInode, toInode uint64) (err error) {
	// Make sure the actual checkout will land as one batch on disk:
	srcInode := strconv.FormatUint(fromInode, 10)
	srcToDstKey := []string{"stage", "moves", srcInode}

	dstInode := strconv.FormatUint(toInode, 10)
	dstToSrcKey := []string{"stage", "moves", dstInode}

	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		// Only write the mapping if none exists yet for this inode:
		if _, err = lkr.kv.Get(srcToDstKey...); err == db.ErrNoSuchKey {
			line := []byte(fmt.Sprintf("> inode %d", toInode))
			batch.Put(line, srcToDstKey...)
			batch.Put(line, "stage", "moves", "overlay", srcInode)
		} else if err != nil {
			return hintRollback(err)
		}

		// Also remember the move in the other direction.
		// NOTE(review): unlike the branch above, err == nil takes the `else`
		// here and returns early — the net effect (return (false, nil)) is
		// the same as falling through, but the asymmetry looks accidental.
		if _, err = lkr.kv.Get(dstToSrcKey...); err == db.ErrNoSuchKey {
			line := []byte(fmt.Sprintf("< inode %d", fromInode))
			batch.Put(line, dstToSrcKey...)
			batch.Put(line, "stage", "moves", "overlay", dstInode)
		} else {
			return hintRollback(err)
		}

		return false, nil
	})
}

// parseMoveMappingLine decodes one persisted move mapping line of the form
// "<dir> inode <uid>" or "<dir> hash <b58>" and resolves the referenced node.
func (lkr *Linker) parseMoveMappingLine(line string) (n.Node, MoveDir, error) {
	splitLine := strings.SplitN(line, " ", 3)
	if len(splitLine) < 3 {
		return nil, 0, fmt.Errorf("Malformed stage move line: `%s`", line)
	}

	dir := moveDirFromString(splitLine[0])
	if dir == MoveDirUnknown {
		return nil, 0, fmt.Errorf("Unrecognized move direction `%s`", splitLine[0])
	}

	switch splitLine[1] {
	case "inode":
		inode, err := strconv.ParseUint(splitLine[2], 10, 64)
		if err != nil {
			return nil, 0, err
		}

		node, err := lkr.NodeByInode(inode)
		if err != nil {
			return nil, 0, err
		}

		return node, dir, nil
	case "hash":
		hash, err := h.FromB58String(splitLine[2])
		if err != nil {
			return nil, 0, err
		}

		node, err := lkr.NodeByHash(hash)
		if err != nil {
			return nil, 0, err
		}

		return node, dir, nil
	default:
		return nil, 0, fmt.Errorf("Unsupported move map type: %s", splitLine[1])
	}
}

// Process a single key of the move mapping:
func (lkr *Linker) commitMoveMappingKey(
	batch db.Batch,
	status *n.Commit,
	exported map[uint64]bool,
	key []string,
) error {
	// The last key component is the inode of the move source:
	inode, err := strconv.ParseUint(key[len(key)-1], 10, 64)
	if err != nil {
		return err
	}

	// Only export move mapping that relate to nodes that were actually
	// exported from staging. We do not want to export intermediate moves.
	if _, ok := exported[inode]; !ok {
		return nil
	}

	data, err := lkr.kv.Get(key...)
	if err != nil {
		return err
	}

	dstNode, moveDirection, err := lkr.parseMoveMappingLine(string(data))
	if err != nil {
		return err
	}

	if moveDirection == MoveDirDstToSrc {
		return nil
	}

	if dstNode == nil {
		return fmt.Errorf("Failed to find dest node for commit map: %v", string(data))
	}

	srcNode, err := lkr.NodeByInode(inode)
	if err != nil {
		return err
	}

	if srcNode == nil {
		return fmt.Errorf("Failed to find source node for commit map: %d", inode)
	}

	// Write a bidirectional mapping for this node:
	dstB58 := dstNode.TreeHash().B58String()
	srcB58 := srcNode.TreeHash().B58String()

	forwardLine := fmt.Sprintf("%v hash %s", moveDirection, dstB58)
	batch.Put(
		[]byte(forwardLine),
		"moves", status.TreeHash().B58String(), srcB58,
	)

	batch.Put(
		[]byte(forwardLine),
		"moves", "overlay", srcB58,
	)

	reverseLine := fmt.Sprintf(
		"%v hash %s",
		moveDirection.Invert(),
		srcB58,
	)

	batch.Put(
		[]byte(reverseLine),
		"moves", status.TreeHash().B58String(), dstB58,
	)

	batch.Put(
		[]byte(reverseLine),
		"moves", "overlay", dstB58,
	)

	// We need to verify that all ghosts will be copied out from staging.
	// In some special cases, not all used ghosts are reachable in
	// MakeCommit.
	//
	// Consider for example this case:
	//
	// $ touch x
	// $ commit
	// $ move x y
	// $ touch x
	// $ commit
	//
	// => In the last commit the ghost from the move (x) is overwritten by
	// a new file and thus will not be reachable anymore. In order to store
	// the full history of the file we need to also keep this ghost.
	for _, checkHash := range []string{dstB58, srcB58} {
		srcKey := []string{"stage", "objects", checkHash}
		dstKey := []string{"objects", checkHash}

		_, err = lkr.kv.Get(dstKey...)
		if err == db.ErrNoSuchKey {
			err = nil

			// This part of the move was not reachable, we need to copy it
			// to the object store additionally.
			if err := db.CopyKey(lkr.kv, srcKey, dstKey); err != nil {
				return err
			}
		}

		if err != nil {
			return err
		}
	}

	// We already have a bidir mapping for this node, no need to mention
	// them further. (would not hurt, but would be duplicated work)
	delete(exported, srcNode.Inode())
	delete(exported, dstNode.Inode())
	return nil
}

// commitMoveMapping fixates all staged move mappings for `status`,
// converting inode references to hash references.
func (lkr *Linker) commitMoveMapping(status *n.Commit, exported map[uint64]bool) error {
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		keys, err := lkr.kv.Keys("stage", "moves")
		if err != nil {
			return hintRollback(err)
		}

		for _, key := range keys {
			if err := lkr.commitMoveMappingKey(batch, status, exported, key); err != nil {
				return hintRollback(err)
			}
		}

		return false, nil
	})
}

const (
	// MoveDirUnknown should only be used for init purposes.
	MoveDirUnknown = iota
	// MoveDirSrcToDst means that this file was moved from source to dest.
	// (Therefore it is the new destination file and probably not a ghost)
	MoveDirSrcToDst
	// MoveDirDstToSrc means that this place was moved somewhere else.
	// (Therefore it is a likely a ghost and the new file lives somewhere else)
	MoveDirDstToSrc
	// MoveDirNone tells us that this file did not move.
	MoveDirNone
)

// MoveDir describes the direction of a move.
type MoveDir int

func (md MoveDir) String() string {
	switch md {
	case MoveDirSrcToDst:
		return ">"
	case MoveDirDstToSrc:
		return "<"
	case MoveDirNone:
		return "*"
	default:
		return ""
	}
}

// Invert changes the direction of a move, if it has one.
func (md MoveDir) Invert() MoveDir {
	switch md {
	case MoveDirSrcToDst:
		return MoveDirDstToSrc
	case MoveDirDstToSrc:
		return MoveDirSrcToDst
	default:
		return md
	}
}

// moveDirFromString parses the single-character direction marker used on disk.
func moveDirFromString(spec string) MoveDir {
	switch spec {
	case ">":
		return MoveDirSrcToDst
	case "<":
		return MoveDirDstToSrc
	case "*":
		return MoveDirNone
	default:
		return MoveDirUnknown
	}
}

// MoveEntryPoint tells us if a node participated in a move.
// If so, the new node and the corresponding move direction is returned.
func (lkr *Linker) MoveEntryPoint(nd n.Node) (n.Node, MoveDir, error) {
	// Check the staging overlay (keyed by inode) first:
	moveData, err := lkr.kv.Get(
		"stage", "moves", "overlay",
		strconv.FormatUint(nd.Inode(), 10),
	)

	if err != nil && err != db.ErrNoSuchKey {
		return nil, MoveDirUnknown, err
	}

	if moveData == nil {
		// Fall back to the committed overlay (keyed by tree hash):
		moveData, err = lkr.kv.Get("moves", "overlay", nd.TreeHash().B58String())
		if err != nil && err != db.ErrNoSuchKey {
			return nil, MoveDirUnknown, err
		}

		if moveData == nil {
			return nil, MoveDirNone, nil
		}
	}

	node, moveDir, err := lkr.parseMoveMappingLine(string(moveData))
	if err != nil {
		return nil, MoveDirUnknown, err
	}

	if node == nil {
		// No move mapping found for this node.
		// Note that this is not an error.
		return nil, MoveDirNone, nil
	}

	return node, moveDir, err
}

// MoveMapping will lookup if the node pointed to by `nd` was part of a moving
// operation and if so, to what node it was moved and if it was the source or
// the dest node.
func (lkr *Linker) MoveMapping(cmt *n.Commit, nd n.Node) (n.Node, MoveDir, error) {
	// Stage and committed space use a different format to store move mappings.
	// This is because in staging nodes can still be modified, so the "dest"
	// part of the mapping is a moving target. Therefore we store the destination
	// not as hash or path (which also might be moved), but as inode reference.
	// Inodes always resolve to the latest version of a node.
	// When committing, the mappings will be "fixed" by converting the inode to
	// a hash value, to make sure we link to a specific version.
	status, err := lkr.Status()
	if err != nil {
		return nil, MoveDirUnknown, err
	}

	// Only look into staging if we are actually in the STATUS commit.
	// The lookups in the stage level are on an inode base. This would
	// cause jumping around in the history for older commits.
	if cmt == nil || cmt.TreeHash().Equal(status.TreeHash()) {
		inodeKey := strconv.FormatUint(nd.Inode(), 10)
		moveData, err := lkr.kv.Get("stage", "moves", inodeKey)
		if err != nil && err != db.ErrNoSuchKey {
			return nil, MoveDirUnknown, err
		}

		if err != db.ErrNoSuchKey {
			node, moveDir, err := lkr.parseMoveMappingLine(string(moveData))
			if err != nil {
				return nil, MoveDirUnknown, err
			}

			if node != nil {
				return node, moveDir, err
			}
		}
	}

	if cmt == nil {
		return nil, MoveDirNone, nil
	}

	moveData, err := lkr.kv.Get("moves", cmt.TreeHash().B58String(), nd.TreeHash().B58String())
	if err != nil && err != db.ErrNoSuchKey {
		return nil, MoveDirUnknown, err
	}

	if moveData == nil {
		return nil, MoveDirNone, nil
	}

	node, moveDir, err := lkr.parseMoveMappingLine(string(moveData))
	if err != nil {
		return nil, MoveDirUnknown, err
	}

	if node == nil {
		// No move mapping found for this node.
		// Note that this is not an error.
		return nil, MoveDirNone, nil
	}

	return node, moveDir, err
}

// ExpandAbbrev tries to find an object reference that starts with `abbrev`.
// If so, it will return the respective hash for it.
// If none is found, it is considered as an error.
// If more than one was found ie.ErrAmbigiousRev is returned.
func (lkr *Linker) ExpandAbbrev(abbrev string) (h.Hash, error) {
	prefixes := [][]string{
		{"stage", "objects"},
		{"objects"},
	}

	// Special case: Make it possible to abbrev the commit
	// of ``curr`` - it does live in stage/STATUS, not somewhere else.
	curr, err := lkr.Status()
	if err != nil {
		return nil, err
	}

	if strings.HasPrefix(curr.TreeHash().B58String(), abbrev) {
		return curr.TreeHash(), nil
	}

	for _, prefix := range prefixes {
		matches, err := lkr.kv.Glob(append(prefix, abbrev))
		if err != nil {
			return nil, err
		}

		if len(matches) > 1 {
			return nil, ie.ErrAmbigiousRev
		}

		if len(matches) == 0 {
			continue
		}

		match := matches[0]
		return h.FromB58String(match[len(match)-1])
	}

	return nil, fmt.Errorf("No such abbrev: %v", abbrev)
}

// IterAll goes over all nodes in the commit range `from` until (including) `to`.
// Already visited nodes will not be visited again if they did not change.
// If `from` is nil, CURR (the staging commit) is assumed — note: not HEAD.
// If `to` is nil, INIT is assumed.
func (lkr *Linker) IterAll(from, to *n.Commit, fn func(n.ModNode, *n.Commit) error) error {
	visited := make(map[string]struct{})
	return lkr.iterAll(from, to, visited, fn)
}

func (lkr *Linker) iterAll(from, to *n.Commit, visited map[string]struct{}, fn func(n.ModNode, *n.Commit) error) error {
	if from == nil {
		// NOTE: despite the variable name, this is the staging commit (CURR):
		head, err := lkr.Status()
		if err != nil {
			return err
		}

		from = head
	}

	root, err := lkr.DirectoryByHash(from.Root())
	if err != nil {
		return err
	}

	walker := func(child n.Node) error {
		if _, ok := visited[child.TreeHash().B58String()]; ok {
			return n.ErrSkipChild
		}

		modChild, ok := child.(n.ModNode)
		if !ok {
			return ie.ErrBadNode
		}

		visited[child.TreeHash().B58String()] = struct{}{}
		return fn(modChild, from)
	}

	if err := n.Walk(lkr, root, false, walker); err != nil {
		return e.Wrapf(err, "iter-all: walk")
	}

	// Check if we're already at the lowest commit:
	if to != nil && from.TreeHash().Equal(to.TreeHash()) {
		return nil
	}

	prev, err := from.Parent(lkr)
	if err != nil {
		return err
	}

	if prev == nil {
		// Definite end of line.
		return nil
	}

	prevCmt, ok := prev.(*n.Commit)
	if !ok {
		return ie.ErrBadNode
	}

	return lkr.iterAll(prevCmt, to, visited, fn)
}

// Atomic is like AtomicWithBatch but does not require using a batch.
// Use this for read-only operations.
// It's only syntactic sugar though.
func (lkr *Linker) Atomic(fn func() (bool, error)) (err error) {
	return lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		return fn()
	})
}

// AtomicWithBatch will execute `fn` in one transaction.
// If anything goes wrong (i.e. `fn` returns an error and asks for a
// rollback), the batch is rolled back and the in-memory index is
// invalidated so it will be re-read from the old on-disk state.
func (lkr *Linker) AtomicWithBatch(fn func(batch db.Batch) (bool, error)) (err error) {
	batch := lkr.kv.Batch()

	// A panicking program should not leave the persistent linker state
	// inconsistent. This is really a last defence against all odds.
	defer func() {
		if r := recover(); r != nil {
			batch.Rollback()
			lkr.MemIndexClear()
			err = fmt.Errorf("panic rollback: %v; stack: %s", r, string(debug.Stack()))
		}
	}()

	needRollback, err := fn(batch)
	if needRollback && err != nil {
		hadWrites := batch.HaveWrites()
		batch.Rollback()

		// Only clear the whole index if something was written.
		// Also, this prevents the slightly misleading log message below
		// in case of read-only operations.
		if hadWrites {
			// clearing the mem index will cause it to be read freshly from disk
			// with the old state. This costs a little performance but saves me
			// from writing special in-memory rollback logic for now.
			lkr.MemIndexClear()
			log.Warningf("rolled back due to error: %v %s", err, debug.Stack())
		}

		return err
	}

	// Attempt to write it to disk.
	// If that fails we're better off deleting our internal cache.
	// so memory and disk is in sync.
	if flushErr := batch.Flush(); flushErr != nil {
		lkr.MemIndexClear()
		log.Warningf("flush to db failed, resetting mem index: %v", flushErr)
	}

	return err
}

// helper to return errors that should trigger a rollback in AtomicWithBatch()
func hintRollback(err error) (bool, error) {
	if err != nil {
		return true, err
	}

	return false, nil
}

// KV returns the key value store passed when constructing the linker.
func (lkr *Linker) KV() db.Database {
	return lkr.kv
}

================================================
FILE: catfs/core/linker_test.go
================================================

package core

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"strings"
	"testing"
	"unsafe"

	"github.com/sahib/brig/catfs/db"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	h "github.com/sahib/brig/util/hashlib"
	"github.com/stretchr/testify/require"
)

// Basic test to see if the root node can be inserted and stored.
// A new staging commit should be also created in the background.
// On the second run, the root node should be already cached.
func TestLinkerInsertRoot(t *testing.T) {
	WithDummyKv(t, func(kv db.Database) {
		lkr := NewLinker(kv)
		root, err := n.NewEmptyDirectory(lkr, nil, "/", "u", 2)
		if err != nil {
			t.Fatalf("Creating empty root dir failed: %v", err)
		}

		if err := lkr.StageNode(root); err != nil {
			t.Fatalf("Staging root failed: %v", err)
		}

		sameRoot, err := lkr.ResolveDirectory("/")
		if err != nil {
			t.Fatalf("Resolving root failed: %v", err)
		}

		if sameRoot == nil {
			t.Fatal("Resolving root failed (is nil)")
		}

		if path := sameRoot.Path(); path != "/" {
			t.Fatalf("Path of root is not /: %s", path)
		}

		// The second lookup must be served from the memory index,
		// i.e. yield the exact same pointer:
		ptrRoot, err := lkr.ResolveDirectory("/")
		if err != nil {
			t.Fatalf("Second lookup of root failed: %v", err)
		}

		if unsafe.Pointer(ptrRoot) != unsafe.Pointer(sameRoot) {
			t.Fatal("Second root did not come from the cache")
		}

		status, err := lkr.Status()
		if err != nil {
			t.Fatalf("Failed to retrieve status: %v", err)
		}

		if !status.Root().Equal(root.TreeHash()) {
			t.Fatalf("status.root and root differ: %v <-> %v", status.Root(), root.TreeHash())
		}
	})
}

// TestLinkerRefs checks that HEAD/CURR behave correctly around the first
// commit and that committing twice without a change is refused.
func TestLinkerRefs(t *testing.T) {
	author := n.AuthorOfStage
	WithDummyKv(t, func(kv db.Database) {
		lkr := NewLinker(kv)
		root, err := lkr.Root()
		if err != nil {
			t.Fatalf("Failed to create root: %v", err)
		}

		newFile := n.NewEmptyFile(root, "cat.png", "u", 2)
		// NOTE(review): NewEmptyFile does not return an error here; this
		// check re-tests the stale `err` from lkr.Root() above.
		if err != nil {
			t.Fatalf("Failed to create empty file: %v", err)
		}

		newFile.SetSize(10)
		newFile.SetContent(lkr, h.TestDummy(t, 1))

		if err := root.Add(lkr, newFile); err != nil {
			t.Fatalf("Adding empty file failed: %v", err)
		}

		if err := lkr.StageNode(newFile); err != nil {
			t.Fatalf("Staging new file failed: %v", err)
		}

		if _, err := lkr.Head(); !ie.IsErrNoSuchRef(err) {
			t.Fatalf("There is a HEAD from start?!")
		}

		cmt, err := lkr.Status()
		if err != nil || cmt == nil {
			t.Fatalf("Failed to retrieve status: %v", err)
		}

		if err := lkr.MakeCommit(author, "First commit"); err != nil {
			t.Fatalf("Making commit failed: %v", err)
		}

		// Assert that staging is empty (except the "/stage/STATUS" part)
		foundKeys := []string{}
		keys, err := kv.Keys("stage")
		require.Nil(t, err)

		for _, key := range keys {
			foundKeys = append(foundKeys, strings.Join(key, "/"))
		}

		require.Equal(t, []string{"stage/STATUS"}, foundKeys)

		head, err := lkr.Head()
		if err != nil {
			t.Fatalf("Obtaining HEAD failed: %v", err)
		}

		status, err := lkr.Status()
		if err != nil {
			t.Fatalf("Failed to obtain the status: %v", err)
		}

		if !head.Root().Equal(status.Root()) {
			t.Fatalf("HEAD and CURR are not equal after first commit.")
		}

		if err := lkr.MakeCommit(author, "No."); err != ie.ErrNoChange {
			t.Fatalf("Committing without change led to a new commit: %v", err)
		}
	})
}

// TestLinkerNested stages nested directories and verifies parent links,
// tree walking, the memory index size and that GC + commit still work.
func TestLinkerNested(t *testing.T) {
	WithDummyKv(t, func(kv db.Database) {
		lkr := NewLinker(kv)
		root, err := lkr.Root()
		if err != nil {
			t.Fatalf("Fetching initial root failed: %v", err)
			return
		}

		sub, err := n.NewEmptyDirectory(lkr, root, "sub", "u", 3)
		if err != nil {
			t.Fatalf("Creating empty sub dir failed: %v", err)
			return
		}

		par, err := sub.Parent(lkr)
		if err != nil {
			t.Fatalf("Failed to get parent of /sub")
		}

		if par.Path() != "/" {
			t.Fatalf("Parent path of /sub is not /")
		}

		if topPar, err := par.Parent(lkr); topPar != nil || err != nil {
			t.Fatalf("Parent of / is not nil: %v (%v)", topPar, err)
		}

		if err := lkr.StageNode(sub); err != nil {
			t.Fatalf("Staging /sub failed: %v", err)
		}

		sameSubDir, err := lkr.ResolveDirectory("/sub")
		if err != nil {
			t.Fatalf("Resolving /sub failed: %v", err)
		}

		_, err = lkr.NodeByInode(sameSubDir.Inode())
		if err != nil {
			t.Fatalf("Resolving /sub by ID (%d) failed: %v", sameSubDir.Inode(), err)
		}

		subpub, err := n.NewEmptyDirectory(lkr, sameSubDir, "pub", "u", 4)
		if err != nil {
			t.Fatalf("Creating of deep sub failed")
		}

		if err := lkr.StageNode(subpub); err != nil {
			t.Fatalf("Staging /sub/pub failed: %v", err)
		}

		newRootDir, err := lkr.ResolveDirectory("/")
		if err != nil {
			t.Fatalf("Failed to resolve new root dir")
		}

		if !newRootDir.TreeHash().Equal(root.TreeHash()) {
			t.Fatalf("New / and old / have different hashes, despite being same instance %p %p", newRootDir, root)
		}

		count := 0
		if err := n.Walk(lkr, root, true, func(c n.Node) error { count++; return nil }); err != nil {
			t.Fatalf("Failed to walk the tree: %v", err)
		}

		// /, /sub and /sub/pub:
		if count != 3 {
			t.Fatalf("There are more or less than 3 elems in the tree: %d", count)
		}

		// Index shall only contain the nodes with their most current hash values.
		if len(lkr.index) != 3 {
			t.Fatalf("Index does not contain the expected 3 elements.")
		}

		gc := NewGarbageCollector(lkr, kv, nil)
		if err := gc.Run(true); err != nil {
			t.Fatalf("Garbage collector failed to run: %v", err)
		}

		if err := lkr.MakeCommit(n.AuthorOfStage, "first message"); err != nil {
			t.Fatalf("Making first commit failed: %v", err)
		}
	})
}

// Test if Linker can load objects after closing/re-opening the kv.
func TestLinkerPersistence(t *testing.T) {
	dbPath, err := ioutil.TempDir("", "brig-test")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dbPath)

	kv, err := db.NewDiskDatabase(dbPath)
	if err != nil {
		t.Fatalf("Could not create dummy kv for tests: %v", err)
	}

	lkr := NewLinker(kv)
	if err := lkr.MakeCommit(n.AuthorOfStage, "initial commit"); err != nil {
		t.Fatalf("Failed to create initial commit out of nothing: %v", err)
	}

	head, err := lkr.Head()
	if err != nil {
		t.Fatalf("Failed to retrieve Head after initial commit: %v", err)
	}

	oldHeadHash := head.TreeHash().Clone()

	if err := kv.Close(); err != nil {
		t.Fatalf("Closing the dummy kv failed: %v", err)
	}

	// Re-open the kv from the same directory; HEAD must survive the reload.
	kv, err = db.NewDiskDatabase(dbPath)
	if err != nil {
		t.Fatalf("Could not create second dummy kv: %v", err)
	}

	lkr = NewLinker(kv)
	head, err = lkr.Head()
	if err != nil {
		t.Fatalf("Failed to retrieve head after kv reload: %v", err)
	}

	if !oldHeadHash.Equal(head.TreeHash()) {
		t.Fatalf("HEAD hash differs before and after reload: %v <-> %v", oldHeadHash, head.TreeHash())
	}

	if err := kv.Close(); err != nil {
		t.Fatalf("Closing the second kv failed: %v", err)
	}
}

// TestCollideSameObjectHash checks that files with identical content
// (same object hash) but different paths still get distinct tree hashes.
func TestCollideSameObjectHash(t *testing.T) {
	WithDummyKv(t, func(kv db.Database) {
		lkr := NewLinker(kv)
		root, err := lkr.Root()
		if err != nil {
			t.Fatalf("Failed to retrieve root: %v", err)
		}

		sub, err := n.NewEmptyDirectory(lkr, root, "sub", "u", 3)
		if err != nil {
			t.Fatalf("Creating empty sub dir failed: %v", err)
			return
		}

		if err := lkr.StageNode(sub); err != nil {
			t.Fatalf("Staging /sub failed: %v", err)
		}

		// NOTE(review): NewEmptyFile does not return an error; the three
		// `if err != nil` checks below re-test the stale `err` from above.
		file1 := n.NewEmptyFile(sub, "a.png", "u", 4)
		if err != nil {
			t.Fatalf("Failed to create empty file1: %v", err)
		}

		file2 := n.NewEmptyFile(root, "a.png", "u", 5)
		if err != nil {
			t.Fatalf("Failed to create empty file2: %v", err)
		}

		file3 := n.NewEmptyFile(root, "b.png", "u", 6)
		if err != nil {
			t.Fatalf("Failed to create empty file3: %v", err)
		}

		// All three files share the same content hash:
		file1.SetContent(lkr, h.TestDummy(t, 1))
		file2.SetContent(lkr, h.TestDummy(t, 1))
		file3.SetContent(lkr, h.TestDummy(t, 1))

		if err := sub.Add(lkr, file1); err != nil {
			t.Fatalf("Failed to add file1: %v", err)
		}

		if err := root.Add(lkr, file2); err != nil {
			t.Fatalf("Failed to add file2: %v", err)
		}

		if err := root.Add(lkr, file3); err != nil {
			t.Fatalf("Failed to add file3: %v", err)
		}

		if err := lkr.StageNode(file1); err != nil {
			t.Fatalf("Failed to stage file1: %v", err)
		}

		if err := lkr.StageNode(file2); err != nil {
			t.Fatalf("Failed to stage file2: %v", err)
		}

		if err := lkr.StageNode(file3); err != nil {
			t.Fatalf("Failed to stage file3: %v", err)
		}

		if file1.TreeHash().Equal(file2.TreeHash()) {
			t.Fatalf("file1 and file2 hash is equal: %v", file1.TreeHash())
		}

		if file2.TreeHash().Equal(file3.TreeHash()) {
			t.Fatalf("file2 and file3 hash is equal: %v", file2.TreeHash())
		}

		// Make sure we load the actual hashes from disk:
		lkr.MemIndexClear()
		file1Reset, err := lkr.LookupFile("/sub/a.png")
		if err != nil {
			t.Fatalf("Re-Lookup of file1 failed: %v", err)
		}

		file2Reset, err := lkr.LookupFile("/a.png")
		if err != nil {
			t.Fatalf("Re-Lookup of file2 failed: %v", err)
		}

		file3Reset, err := lkr.LookupFile("/b.png")
		if err != nil {
			t.Fatalf("Re-Lookup of file3 failed: %v", err)
		}

		if file1Reset.TreeHash().Equal(file2Reset.TreeHash()) {
			t.Fatalf("file1Reset and file2Reset hash is equal: %v", file1.TreeHash())
		}

		if file2Reset.TreeHash().Equal(file3Reset.TreeHash()) {
			t.Fatalf("file2Reset and file3Reset hash is equal: %v", file2.TreeHash())
		}
	})
}

// TestHaveStagedChanges verifies the staged-changes flag before a touch,
// after a touch and after the following commit.
func TestHaveStagedChanges(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		hasChanges, err := lkr.HaveStagedChanges()
		if err != nil {
			t.Fatalf("have staged changes failed before touch: %v", err)
		}
		if hasChanges {
			t.Fatalf("HaveStagedChanges has changes before something happened")
		}

		MustTouch(t, lkr, "/x.png", 1)

		hasChanges, err = lkr.HaveStagedChanges()
		if err != nil {
			t.Fatalf("have staged changes failed after touch: %v", err)
		}
		if !hasChanges {
			t.Fatalf("HaveStagedChanges has no changes after something happened")
		}

		MustCommit(t, lkr, "second")

		hasChanges, err = lkr.HaveStagedChanges()
		if err != nil {
			t.Fatalf("have staged changes failed after commit: %v", err)
		}
		if hasChanges {
			t.Fatalf("HaveStagedChanges has changes after commit")
		}
	})
}

// TestFilesByContent checks the backend-hash -> file reverse lookup.
func TestFilesByContent(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		file := MustTouch(t, lkr, "/x.png", 1)

		contents := []h.Hash{file.BackendHash()}
		result, err := lkr.FilesByContents(contents)
		require.Nil(t, err)

		resultFile, ok := result[file.BackendHash().B58String()]
		require.True(t, ok)
		require.Len(t, result, 1)
		require.Equal(t, file, resultFile)
	})
}

// TestResolveRef resolves "head", "head^", "head^^", ... and expects the
// matching commit; going up past INIT keeps yielding the init commit.
func TestResolveRef(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		initCmt, err := lkr.Head()
		require.Nil(t, err)

		cmts := []*n.Commit{initCmt}

		for idx := 0; idx < 10; idx++ {
			_, cmt := MustTouchAndCommit(t, lkr, "/x", byte(idx))
			cmts = append([]*n.Commit{cmt}, cmts...)
		}

		// Insert the init cmt a few times as fodder:
		cmts = append(cmts, initCmt)
		cmts = append(cmts, initCmt)
		cmts = append(cmts, initCmt)

		for nUp := 0; nUp < len(cmts)+3; nUp++ {
			refname := "head"
			for idx := 0; idx < nUp; idx++ {
				refname += "^"
			}

			expect := initCmt
			if nUp < len(cmts) {
				expect = cmts[nUp]
			}

			ref, err := lkr.ResolveRef(refname)
			require.Nil(t, err)
			require.Equal(t, expect, ref)
		}

		// NOTE(review): require.Equal takes (t, expected, actual);
		// the arguments here are swapped (harmless for the comparison).
		_, err = lkr.ResolveRef("he^^ad")
		require.Equal(t, err, ie.ErrNoSuchRef("he^^ad"))
	})
}

// iterResult is a (path, commit hash) pair collected by TestIterAll.
type iterResult struct {
	path, commit string
}

// TestIterAll walks three commits (init, "first", status) and checks that
// each node is reported exactly once per commit in which it changed.
func TestIterAll(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		init, err := lkr.Head()
		require.Nil(t, err)

		c0 := init.TreeHash().B58String()

		x := MustTouch(t, lkr, "/x", 1)
		MustTouch(t, lkr, "/y", 1)

		first := MustCommit(t, lkr, "first")
		c1 := first.TreeHash().B58String()

		MustModify(t, lkr, x, 2)

		status, err := lkr.Status()
		require.Nil(t, err)
		c2 := status.TreeHash().B58String()

		results := []iterResult{}
		require.Nil(t, lkr.IterAll(nil, nil, func(nd n.ModNode, cmt *n.Commit) error {
			results = append(results, iterResult{nd.Path(), cmt.TreeHash().B58String()})
			return nil
		}))

		// NOTE(review): sort.Slice is not stable; a comparator that returns
		// false for entries of different commits does not actually preserve
		// the commit order — sort.SliceStable would be needed for that.
		sort.Slice(results, func(i, j int) bool {
			// Do not change orderings between commits:
			if results[i].commit != results[j].commit {
				return false
			}

			return results[i].path < results[j].path
		})

		expected := []iterResult{
			{"/", c2},
			{"/x", c2},
			{"/y", c2},
			{"/", c1},
			{"/x", c1},
			{"/", c0},
		}

		for idx, result := range results {
			require.Equal(t, result.path, expected[idx].path)
			require.Equal(t, result.commit, expected[idx].commit)
		}
	})
}

// TestAtomic checks that Atomic() commits on success, rolls back on a
// signalled error and also rolls back when `fn` panics.
func TestAtomic(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		err := lkr.Atomic(func() (bool, error) {
			MustTouch(t, lkr, "/x", 1)
			return false, nil
		})
		require.Nil(t, err)

		err = lkr.Atomic(func() (bool, error) {
			MustTouch(t, lkr, "/y", 1)
			return true, errors.New("artificial error")
		})
		require.NotNil(t, err)

		err = lkr.Atomic(func() (bool, error) {
			MustTouch(t, lkr, "/z", 1)
			panic("woah")
		})
		require.NotNil(t, err)

		// /x survived, /y and /z were rolled back:
		x, err := lkr.LookupFile("/x")
		require.Nil(t, err)
		require.Equal(t, x.Path(), "/x")

		_, err = lkr.LookupFile("/y")
		require.NotNil(t, err)
		require.True(t, ie.IsNoSuchFileError(err))

		_, err = lkr.LookupFile("/z")
		require.NotNil(t, err)
		require.True(t, ie.IsNoSuchFileError(err))
	})
}

// TestCommitByIndex looks up commits by their monotonic index.
func TestCommitByIndex(t *testing.T) {
	// Note: WithReloadingLinker creates an init commit.
	WithDummyLinker(t, func(lkr *Linker) {
		head, err := lkr.Head()
		require.Nil(t, err)
		require.Equal(t, head.Index(), int64(0))

		status, err := lkr.Status()
		require.Nil(t, err)
		require.Equal(t, int64(1), status.Index())

		// Must modify something to commit:
		MustTouch(t, lkr, "/x", 1)
		require.Nil(t, lkr.MakeCommit("me", "is mario"))

		newHead, err := lkr.Head()
		require.Nil(t, err)
		require.Equal(t, int64(1), newHead.Index())

		status, err = lkr.Status()
		require.Nil(t, err)
		require.Equal(t, int64(2), status.Index())

		// Lookup the just created commits:
		// Pre-existing init commit:
		c1, err := lkr.CommitByIndex(0)
		require.Nil(t, err)
		require.Equal(t, "init", c1.Message())

		// Our commit:
		c2, err := lkr.CommitByIndex(1)
		require.Nil(t, err)
		require.Equal(t, "is mario", c2.Message())

		// Same as the status commit:
		c3, err := lkr.CommitByIndex(2)
		require.Nil(t, err)
		require.NotNil(t, c3)
		require.Equal(t, status.TreeHash(), c3.TreeHash())

		// Not existing:
		c4, err := lkr.CommitByIndex(3)
		require.True(t, ie.IsErrNoSuchCommitIndex(err))
		require.Nil(t, c4)
	})
}

// TestLookupNodeAt checks that a path can be resolved at a specific commit
// and yields the content hash that was current at that commit.
func TestLookupNodeAt(t *testing.T) {
	WithDummyLinker(t, func(lkr *Linker) {
		// NOTE(review): the two fmt.Println calls look like leftover
		// debug output; consider removing them.
		fmt.Println("start")
		for idx := byte(0); idx < 10; idx++ {
			MustTouchAndCommit(t, lkr, "/x", idx)
		}
		fmt.Println("done")

		for idx := 0; idx < 10; idx++ {
			// commit index of 0 is init, so + 1
			cmt, err := lkr.CommitByIndex(int64(idx + 1))
			require.Nil(t, err)

			nd, err := lkr.LookupNodeAt(cmt, "/x")
			require.Nil(t, err)
			require.Equal(t, nd.ContentHash(), h.TestDummy(t, byte(idx)))
		}

		// Init should not exist:
		init, err := lkr.CommitByIndex(0)
		require.Nil(t, err)

		nd, err := lkr.LookupNodeAt(init, "/x")
		require.Nil(t, nd)
		require.True(t, ie.IsNoSuchFileError(err))

		// Stage should have the last change:
		stage, err := lkr.CommitByIndex(11)
		require.Nil(t, err)

		stageNd, err := lkr.LookupNodeAt(stage, "/x")
		require.Nil(t, err)
		require.Equal(t, stageNd.ContentHash(), h.TestDummy(t, 9))

		// quick check to see if the next commit is really empty
		// (tests only the test setup)
		last, err := lkr.CommitByIndex(12)
		require.True(t, ie.IsErrNoSuchCommitIndex(err))
		require.Nil(t, last)
	})
}

================================================
FILE: catfs/core/testing.go
================================================

package core

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"testing"
	"time"

	"github.com/sahib/brig/catfs/db"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	h "github.com/sahib/brig/util/hashlib"
	"github.com/stretchr/testify/require"
)

// WithDummyKv creates a testing key value store and passes it to `fn`.
func WithDummyKv(t *testing.T, fn func(kv db.Database)) {
	dbPath, err := ioutil.TempDir("", "brig-test")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dbPath)

	kv, err := db.NewDiskDatabase(dbPath)
	if err != nil {
		t.Fatalf("Could not create dummy kv for tests: %v", err)
	}

	fn(kv)

	if err := kv.Close(); err != nil {
		t.Fatalf("Closing the dummy kv failed: %v", err)
	}
}

// WithDummyLinker creates a testing linker and passes it to `fn`.
func WithDummyLinker(t *testing.T, fn func(lkr *Linker)) {
	WithDummyKv(t, func(kv db.Database) {
		lkr := NewLinker(kv)
		require.Nil(t, lkr.SetOwner("alice"))
		MustCommit(t, lkr, "init")

		fn(lkr)
	})
}

// WithReloadingLinker creates a testing linker and passes it to `fn1`.
// It then closes the linker and lets it load a second time and passes it to `fn2`.
// This is useful to test persistency issues.
func WithReloadingLinker(t *testing.T, fn1 func(lkr *Linker), fn2 func(lkr *Linker)) {
	WithDummyKv(t, func(kv db.Database) {
		lkr1 := NewLinker(kv)
		require.Nil(t, lkr1.SetOwner("alice"))
		MustCommit(t, lkr1, "init")

		fn1(lkr1)

		lkr2 := NewLinker(kv)
		fn2(lkr2)
	})
}

// WithLinkerPair creates two linkers, useful for testing syncing.
func WithLinkerPair(t *testing.T, fn func(lkrSrc, lkrDst *Linker)) {
	WithDummyLinker(t, func(lkrSrc *Linker) {
		WithDummyLinker(t, func(lkrDst *Linker) {
			require.Nil(t, lkrSrc.SetOwner("src"))
			require.Nil(t, lkrDst.SetOwner("dst"))
			fn(lkrSrc, lkrDst)
		})
	})
}

// AssertDir asserts the existence of a directory.
func AssertDir(t *testing.T, lkr *Linker, path string, shouldExist bool) {
	dir, err := lkr.LookupDirectory(path)
	if shouldExist {
		if err != nil {
			t.Fatalf("exist-check: Directory lookup failed for %s: %v", path, err)
		}

		if dir == nil || dir.Path() != path {
			t.Fatalf("exist-check: directory does not exist: %s -> %v", path, dir)
		}
	} else {
		if dir != nil {
			t.Fatalf("exist-check: Dir exists, but should not: %v", path)
		}
	}
}

// MustMkdir creates a directory or fails on `t`.
func MustMkdir(t *testing.T, lkr *Linker, repoPath string) *n.Directory {
	dir, err := Mkdir(lkr, repoPath, true)
	if err != nil {
		t.Fatalf("Failed to create directories %s: %v", repoPath, err)
	}

	return dir
}

// MustTouch creates a new node at `touchPath` and sets its content hash
// to a hash derived from `seed`.
func MustTouch(t *testing.T, lkr *Linker, touchPath string, seed byte) *n.File {
	dirname := path.Dir(touchPath)
	parent, err := lkr.LookupDirectory(dirname)
	if err != nil {
		t.Fatalf("touch: Failed to lookup: %s", dirname)
	}

	basePath := path.Base(touchPath)
	file := n.NewEmptyFile(parent, basePath, lkr.owner, lkr.NextInode())

	file.SetBackend(lkr, h.TestDummy(t, seed))
	file.SetContent(lkr, h.TestDummy(t, seed))
	file.SetKey(make([]byte, 32))

	// If a node with the same name exists already, replace it:
	child, err := parent.Child(lkr, basePath)
	if err != nil {
		t.Fatalf("touch: Failed to lookup child: %v %v", touchPath, err)
	}

	if child != nil {
		if err := parent.RemoveChild(lkr, child); err != nil {
			t.Fatalf("touch: failed to remove previous node: %v", err)
		}
	}

	if err := parent.Add(lkr, file); err != nil {
		t.Fatalf("touch: Adding %s to root failed: %v", touchPath, err)
	}

	if err := lkr.StageNode(file); err != nil {
		t.Fatalf("touch: Staging %s failed: %v", touchPath, err)
	}

	return file
}

// MustMove moves the node `nd` to `destPath` or fails `t`.
func MustMove(t *testing.T, lkr *Linker, nd n.ModNode, destPath string) n.ModNode {
	if err := Move(lkr, nd, destPath); err != nil {
		t.Fatalf("move of %s to %s failed: %v", nd.Path(), destPath, err)
	}

	newNd, err := lkr.LookupModNode(destPath)
	if err != nil {
		t.Fatalf("Failed to lookup dest path `%s` of new node: %v", destPath, err)
	}

	return newNd
}

// MustRemove removes the node `nd` or fails.
func MustRemove(t *testing.T, lkr *Linker, nd n.ModNode) n.ModNode {
	if _, _, err := Remove(lkr, nd, true, false); err != nil {
		t.Fatalf("Failed to remove %s: %v", nd.Path(), err)
	}

	// The node still exists as a ghost; return the refreshed version:
	newNd, err := lkr.LookupModNode(nd.Path())
	if err != nil {
		t.Fatalf("Failed to lookup dest path `%s` of deleted node: %v", nd.Path(), err)
	}

	return newNd
}

// MustCommit commits the current state with `msg`.
func MustCommit(t *testing.T, lkr *Linker, msg string) *n.Commit {
	if err := lkr.MakeCommit(n.AuthorOfStage, msg); err != nil {
		t.Fatalf("Failed to make commit with msg %s: %v", msg, err)
	}

	head, err := lkr.Head()
	if err != nil {
		t.Fatalf("Failed to retrieve head after commit: %v", err)
	}

	return head
}

// MustCommitIfPossible with is like MustCommit, but allows empty changesets.
func MustCommitIfPossible(t *testing.T, lkr *Linker, msg string) *n.Commit {
	haveChanges, err := lkr.HaveStagedChanges()
	if err != nil {
		t.Fatalf("Failed to check for changes: %v", err)
	}

	if haveChanges {
		return MustCommit(t, lkr, msg)
	}

	// Nothing staged; no commit was made.
	return nil
}

// MustTouchAndCommit is a combined MustTouch and MustCommit.
func MustTouchAndCommit(t *testing.T, lkr *Linker, path string, seed byte) (*n.File, *n.Commit) {
	file, err := Stage(
		lkr,
		path,
		h.TestDummy(t, seed),
		h.TestDummy(t, seed),
		uint64(seed),
		-1,
		nil,
		time.Now(),
		false,
	)
	if err != nil {
		t.Fatalf("Failed to stage %s at %d: %v", path, seed, err)
	}

	return file, MustCommit(t, lkr, fmt.Sprintf("cmt %d", seed))
}

// MustModify changes the content of an existing node.
func MustModify(t *testing.T, lkr *Linker, file *n.File, seed int) {
	parent, err := lkr.LookupDirectory(path.Dir(file.Path()))
	// root, err := lkr.Root()
	if err != nil {
		t.Fatalf("Failed to get root: %v", err)
	}

	// Re-add the file with new size/hashes derived from `seed`:
	if err := parent.RemoveChild(lkr, file); err != nil && !ie.IsNoSuchFileError(err) {
		t.Fatalf("Unable to remove %s from /: %v", file.Path(), err)
	}

	file.SetSize(uint64(seed))
	file.SetBackend(lkr, h.TestDummy(t, byte(seed)))
	file.SetContent(lkr, h.TestDummy(t, byte(seed)))

	if err := parent.Add(lkr, file); err != nil {
		t.Fatalf("Unable to add %s to /: %v", file.Path(), err)
	}

	if err := lkr.StageNode(file); err != nil {
		t.Fatalf("Failed to stage %s for second: %v", file.Path(), err)
	}
}

// MustLookupDirectory loads an existing dir or fails.
func MustLookupDirectory(t *testing.T, lkr *Linker, path string) *n.Directory {
	dir, err := lkr.LookupDirectory(path)
	if err != nil {
		t.Fatalf("Failed to lookup directory %v: %v", path, err)
	}

	return dir
}

================================================
FILE: catfs/db/database.go
================================================

package db

import (
	"errors"
	"io"
)

var (
	// ErrNoSuchKey is returned when Get() was passed a non-existent key
	ErrNoSuchKey = errors.New("This key does not exist")
)

// Batch is an API object used to model a transaction.
type Batch interface {
	// Put sets `val` at `key`.
	Put(val []byte, key ...string)

	// Clear all contents below and including `key`.
	Clear(key ...string) error

	// Erase a key from the database.
	Erase(key ...string)

	// Flush the batch to the database.
	// Only now, all changes will be written to disk.
	Flush() error

	// Rollback will forget all changes without executing them.
	Rollback()

	// HaveWrites returns true when the batch contains something
	// we can write to the disk on Flush().
	HaveWrites() bool
}

// Database is a key/value store that offers different buckets
// for storage. Keys are strings, values are arbitrary untyped data.
type Database interface {
	// Get retrieves the key `key` out of bucket.
	// If no such key exists, it will return (nil, ErrNoSuchKey)
	// If a batch is currently open, Get() shall still return the
	// most current value currently set by the last Put() call
	// to `key`.
	Get(key ...string) ([]byte, error)

	// Keys iterates over all keys in the database. If the error is returned
	// by `fn` the iteration stops and the error value is returned.
	// The keys are returned in lexical ordering.
	Keys(prefix ...string) ([][]string, error)

	// Batch returns a new Batch object, that will allow modifications
	// of the state. Batch() can be called recursive: The changes will
	// only be flushed to disk if batch.Flush() was called equal times
	// to the number Batch() was called.
Batch() Batch // Export backups all database content to `w` in // an implemenation specific format that can be read by Import. Export(w io.Writer) error // Import reads a previously exported db dump by Export from `r`. // Existing keys might be overwritten if the dump also contains them. Import(r io.Reader) error // Close closes the database. Since I/O may happen, an error is returned. Close() error // Glob finds all existing keys in the store, starting with prefix. Glob(prefix []string) ([][]string, error) } // CopyKey is a helper method to copy a bunch of keys in `src` to `dst`. func CopyKey(db Database, src, dst []string) error { data, err := db.Get(src...) if err != nil { return err } batch := db.Batch() batch.Put(data, dst...) return batch.Flush() } ================================================ FILE: catfs/db/database_badger.go ================================================ package db import ( "io" "strings" "sync" "sync/atomic" "time" badger "github.com/dgraph-io/badger/v3" log "github.com/sirupsen/logrus" ) // BadgerDatabase is a database implementation based on BadgerDB type BadgerDatabase struct { mu sync.Mutex isStopped int64 db *badger.DB txn *badger.Txn refCount int haveWrites bool writeTimes []time.Time gcTicker *time.Ticker } // NewBadgerDatabase creates a new badger database. func NewBadgerDatabase(path string) (*BadgerDatabase, error) { opts := badger.DefaultOptions(path). WithValueLogFileSize(10 * 1024 * 1024). //default is 2GB we should not need 2GB WithMemTableSize(10 * 1024 * 1024). //default is 64MB WithNumVersionsToKeep(1). // it is default but it's better to force it WithCompactL0OnClose(true). WithSyncWrites(false). 
WithLogger(nil) db, err := badger.Open(opts) if err != nil { return nil, err } gcTicker := time.NewTicker(5 * time.Minute) bdb := &BadgerDatabase{ db: db, gcTicker: gcTicker, } go func() { for range gcTicker.C { if atomic.LoadInt64(&bdb.isStopped) > 0 { return } err := bdb.runGC() if err != nil { log.WithError(err).Error("badger GC failed") } } }() return bdb, nil } func (bdb *BadgerDatabase) runGC() error { opts := bdb.db.Opts() bdb.mu.Lock() defer bdb.mu.Unlock() log.Debugf("Performing GC for badger DB in %s", opts.Dir) tStart := time.Now() defer func() { log.Debugf("GC collection on %s took %v", opts.Dir, time.Now().Sub(tStart)) }() // we will go through array of write times to see if it is time to run GC var gcStatsUpdateDelay = 5 * time.Minute var deadlineMet = false n := 0 for _, t := range bdb.writeTimes { if time.Now().Before(t.Add(gcStatsUpdateDelay)) { bdb.writeTimes[n] = t n++ } else { deadlineMet = true } } bdb.writeTimes = bdb.writeTimes[:n] if !deadlineMet { log.Debugf("DB in %s has no new stats for GC", opts.Dir) return nil } // In large DB, GC will happen automatically, because compaction will find garbage // but we are to small and compactors do not run (150 MB is small). // So we need to run Flatten bdb.db.Flatten(5) // Very likely Flatten will not do much because the hard coded priority is too small. // At this point, we hope that there is something for GC var errGC error var success = false for errGC == nil { // cleans DB online and it is safe to rerun on success errGC = bdb.db.RunValueLogGC(0.5) if errGC == nil && !success { success = true } } if success { log.Debugf("Cleaned some garbage for DB in %s", opts.Dir) return nil } // Now we have a dilemma: we could trust badger GC mechanism and stop here. // But unfortunately for our typical size (even as high as 150 MB) // compaction, even with Flatten(), does not kick in. 
// The only way to truly force compaction (to update stats for GC) is to close DB // see Note in https://github.com/dgraph-io/badger/issues/767#issuecomment-485713746 // After Close() the GC on a next run will have updated statistic // Actually even Close() does not guaranteed success, it requires more than a minute // to update stats after DB was modified. But eventually GC stats will be ready. if bdb.txn != nil { // someone still using DB, we will try to Close/Open next time return nil } var err error for retries := 0; retries < 10; retries++ { err = bdb.db.Close() if err == nil { continue } log.Errorf("Could not close DB in %s", opts.Dir) time.Sleep(1 * time.Second) } if err != nil { log.Fatalf("Could not close DB in %s", opts.Dir) return err } for retries := 0; retries < 10; retries++ { bdb.db, err = badger.Open(opts) if err == nil { return nil } log.Errorf("Could not reopen DB in %s", opts.Dir) time.Sleep(1 * time.Second) } log.Fatalf("Could not reopen DB in %s", opts.Dir) return err } func (db *BadgerDatabase) view(fn func(txn *badger.Txn) error) error { // If we have an open transaction, retrieve the values from there. // Otherwise we would not be able to retrieve in-memory values. if db.txn != nil { return fn(db.txn) } // If no transaction is running (no Batch()-call), use a fresh view txn. return db.db.View(fn) } // Get is the badger implementation of Database.Get. func (db *BadgerDatabase) Get(key ...string) ([]byte, error) { db.mu.Lock() defer db.mu.Unlock() data := []byte{} err := db.view(func(txn *badger.Txn) error { if db.txn != nil { txn = db.txn } keyPath := strings.Join(key, ".") item, err := txn.Get([]byte(keyPath)) if err == badger.ErrKeyNotFound { return ErrNoSuchKey } if err != nil { return err } data, err = item.ValueCopy(nil) return err }) if err != nil { return nil, err } return data, nil } // Keys is the badger implementation of Database.Keys. 
func (db *BadgerDatabase) Keys(prefix ...string) ([][]string, error) {
	db.mu.Lock()
	defer db.mu.Unlock()

	keys := [][]string{}
	return keys, db.view(func(txn *badger.Txn) error {
		iter := txn.NewIterator(badger.IteratorOptions{})
		defer iter.Close()

		for iter.Rewind(); iter.Valid(); iter.Next() {
			item := iter.Item()
			fullKey := string(item.Key())
			splitKey := strings.Split(fullKey, ".")

			// Compare the prefix segment-wise against the dotted key:
			hasPrefix := len(prefix) <= len(splitKey)
			for i := 0; hasPrefix && i < len(prefix) && i < len(splitKey); i++ {
				if prefix[i] != splitKey[i] {
					hasPrefix = false
				}
			}

			if hasPrefix {
				keys = append(keys, strings.Split(fullKey, "."))
			}
		}

		return nil
	})
}

// Export is the badger implementation of Database.Export.
func (db *BadgerDatabase) Export(w io.Writer) error {
	db.mu.Lock()
	defer db.mu.Unlock()

	_, err := db.db.Backup(w, 0)
	return err
}

// Import is the badger implementation of Database.Import.
func (db *BadgerDatabase) Import(r io.Reader) error {
	db.mu.Lock()
	defer db.mu.Unlock()

	return db.db.Load(r, 1)
}

// Glob is the badger implementation of Database.Glob
func (db *BadgerDatabase) Glob(prefix []string) ([][]string, error) {
	db.mu.Lock()
	defer db.mu.Unlock()

	fullPrefix := strings.Join(prefix, ".")
	results := [][]string{}
	err := db.view(func(txn *badger.Txn) error {
		iter := txn.NewIterator(badger.IteratorOptions{})
		defer iter.Close()

		for iter.Seek([]byte(fullPrefix)); iter.Valid(); iter.Next() {
			fullKey := string(iter.Item().Key())
			if !strings.HasPrefix(fullKey, fullPrefix) {
				break
			}

			// Don't do recursive globbing:
			leftOver := fullKey[len(fullPrefix):]
			if !strings.Contains(leftOver, ".") {
				results = append(results, strings.Split(fullKey, "."))
			}
		}

		return nil
	})

	return results, err
}

// Batch is the badger implementation of Database.Batch
func (db *BadgerDatabase) Batch() Batch {
	db.mu.Lock()
	defer db.mu.Unlock()

	return db.batch()
}

// batch opens a write transaction on first use and counts nested
// Batch() calls; Flush()/Rollback() decrement the counter again.
func (db *BadgerDatabase) batch() Batch {
	if db.txn == nil {
		db.txn = db.db.NewTransaction(true)
	}

	db.refCount++
	return db
}

// Put is the badger implementation of Database.Put
func (db *BadgerDatabase) Put(val []byte, key ...string) {
	db.mu.Lock()
	defer db.mu.Unlock()

	db.haveWrites = true

	fullKey := []byte(strings.Join(key, "."))
	err := db.withRetry(func() error {
		return db.txn.Set(fullKey, val)
	})

	if err != nil {
		log.Warningf("badger: failed to set key %s: %v", fullKey, err)
	}
}

// withRetry runs `fn` and, if the transaction grew too big, commits it,
// opens a fresh one and retries `fn` exactly once.
func (db *BadgerDatabase) withRetry(fn func() error) error {
	if err := fn(); err != badger.ErrTxnTooBig {
		// This also returns nil.
		return err
	}

	// Commit previous (almost too big) transaction:
	if err := db.txn.Commit(); err != nil {
		// Something seems pretty wrong.
		return err
	}

	db.txn = db.db.NewTransaction(true)

	// If this fails again, we're out of luck.
	return fn()
}

// Clear is the badger implementation of Database.Clear
// NOTE(review): the string HasPrefix check below does not enforce a `.`
// segment boundary, so clearing "a.b" would also hit "a.bc.*" — confirm
// whether callers rely on segment-exact semantics. It also scans all keys
// instead of seeking to the prefix.
func (db *BadgerDatabase) Clear(key ...string) error {
	db.mu.Lock()
	defer db.mu.Unlock()

	db.haveWrites = true

	iter := db.txn.NewIterator(badger.IteratorOptions{})
	prefix := strings.Join(key, ".")
	keys := [][]byte{}

	// Collect the keys first; deleting while iterating is not allowed.
	for iter.Rewind(); iter.Valid(); iter.Next() {
		item := iter.Item()
		key := make([]byte, len(item.Key()))
		copy(key, item.Key())
		keys = append(keys, key)
	}

	// This has to happen here, since withRetry might call
	// txn.Discard() which will complain about open iterators.
	// (I previously used a defer which executed too late)
	iter.Close()

	for _, key := range keys {
		if !strings.HasPrefix(string(key), prefix) {
			continue
		}

		err := db.withRetry(func() error {
			return db.txn.Delete(key)
		})

		if err != nil {
			return err
		}
	}

	return nil
}

// Erase is the badger implementation of Database.Erase
func (db *BadgerDatabase) Erase(key ...string) {
	db.mu.Lock()
	defer db.mu.Unlock()

	db.haveWrites = true

	fullKey := []byte(strings.Join(key, "."))
	err := db.withRetry(func() error {
		return db.txn.Delete(fullKey)
	})

	if err != nil {
		log.Warningf("badger: failed to del key %s: %v", fullKey, err)
	}
}

// Flush is the badger implementation of Database.Flush
func (db *BadgerDatabase) Flush() error {
	db.mu.Lock()
	defer db.mu.Unlock()

	// Only the outermost Batch()/Flush() pair actually commits:
	db.refCount--
	if db.refCount > 0 {
		return nil
	}

	if db.refCount < 0 {
		log.Errorf("negative batch ref count: %d", db.refCount)
		return nil
	}

	defer db.txn.Discard()
	if err := db.txn.Commit(); err != nil {
		return err
	}

	db.txn = nil
	if db.haveWrites {
		// Remember the write time for the GC heuristic in runGC().
		db.writeTimes = append(db.writeTimes, time.Now())
	}
	db.haveWrites = false
	return nil
}

// Rollback is the badger implementation of Database.Rollback
func (db *BadgerDatabase) Rollback() {
	db.mu.Lock()
	defer db.mu.Unlock()

	db.refCount--
	if db.refCount > 0 {
		return
	}

	if db.refCount < 0 {
		log.Errorf("negative batch ref count: %d", db.refCount)
		return
	}

	db.txn.Discard()
	db.txn = nil
	db.haveWrites = false
	db.refCount = 0
}

// HaveWrites is the badger implementation of Database.HaveWrites
func (db *BadgerDatabase) HaveWrites() bool {
	db.mu.Lock()
	defer db.mu.Unlock()

	return db.haveWrites
}

// Close is the badger implementation of Database.Close
func (db *BadgerDatabase) Close() error {
	db.mu.Lock()
	defer db.mu.Unlock()

	db.gcTicker.Stop()
	// Signals the GC goroutine in NewBadgerDatabase to exit.
	atomic.StoreInt64(&db.isStopped, 1)

	// With an open transaction it would deadlock:
	if db.txn != nil {
		db.txn.Discard()
		db.txn = nil
		db.haveWrites = false
	}

	if db.db != nil {
		oldDb := db.db
		db.db = nil
		if err := oldDb.Close(); err != nil {
			return err
		}
	}

	return nil
}
================================================
FILE: catfs/db/database_disk.go
================================================
package db

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/sahib/brig/util"
)

const (
	debug = false
)

// DiskDatabase is a database that simply uses the filesystem as storage.
// Each bucket is one directory. Leaf keys are simple files.
// The exported form of the database is simply a gzipped .tar of the directory.
//
// Note that this database backends was written for easy debugging.
// It is currently by no means optimized for fast reads and writes and
// could be probably made a lot faster if we ever need that.
type DiskDatabase struct {
	// basePath is the directory all keys live under
	basePath string
	// cache holds values written in the current batch (not yet on disk)
	cache map[string][]byte
	// ops are the deferred filesystem modifications, applied on Flush
	ops []func() error
	// refs counts nested Batch() calls
	refs int64
	// deletes marks keys removed in the current batch
	deletes map[string]struct{}
}

// NewDiskDatabase creates a new database at `basePath`.
func NewDiskDatabase(basePath string) (*DiskDatabase, error) {
	return &DiskDatabase{
		basePath: basePath,
		cache:    make(map[string][]byte),
		deletes:  make(map[string]struct{}),
	}, nil
}

// fixDirectoryKeys maps a key to a relative file path, escaping
// components that would clash with filesystem semantics ("..", ".").
// NOTE(review): keyCopy := key[:0] writes into the caller's backing
// array, so ".." elements are rewritten in the caller's slice too —
// confirm this aliasing is intended.
func fixDirectoryKeys(key []string) string {
	if len(key) == 0 {
		return ""
	}

	// Filter potential ".." elements that could be used
	// to break out of the database store and read whatever files
	// outside of it due to bad intentions.
	keyCopy := key[:0]
	for _, val := range key {
		if val != ".." {
			keyCopy = append(keyCopy, val)
		} else {
			keyCopy = append(keyCopy, "DOTDOT")
		}
	}

	key = keyCopy

	switch lastPart := key[len(key)-1]; {
	case lastPart == "DOT":
		return filepath.Join(key[:len(key)-1]...) + "/__NO_DOT__"
	case lastPart == ".":
		return filepath.Join(key[:len(key)-1]...) + "/DOT"
	case strings.HasSuffix(lastPart, "/."):
		return filepath.Join(key[:len(key)-1]...) + strings.TrimRight(lastPart, ".") + "/DOT"
	default:
		return filepath.Join(key...)
	}
}

// reverseDirectoryKeys is the inverse of fixDirectoryKeys: it maps a
// relative file path back to the key components ("DOT" -> ".", etc.).
func reverseDirectoryKeys(key string) []string {
	parts := strings.Split(key, string(filepath.Separator))
	if len(parts) > 0 && parts[0] == "" {
		parts = parts[1:]
	}

	switch parts[len(parts)-1] {
	case "DOT":
		parts[len(parts)-1] = "."
	case "__NO_DOT__":
		parts[len(parts)-1] = "DOT"
	}

	return parts
}

// Flush is the disk implementation of Database.Flush
func (db *DiskDatabase) Flush() error {
	db.refs--
	if db.refs < 0 {
		db.refs = 0
	}

	if db.refs > 0 {
		return nil
	}

	if debug {
		fmt.Println("FLUSH")
	}

	// Clear the cache first, if any of the next step fail,
	// we have at least the current state.
	db.cache = make(map[string][]byte)
	db.deletes = make(map[string]struct{})

	// Make sure that db.ops is nil, even if Flush failed.
	ops := db.ops
	db.ops = nil

	// Currently no revertible operations are implemented. If something goes
	// wrong on the filesystem, chances are high that we're not able to revert
	// previous ops anyways.
	for _, op := range ops {
		if err := op(); err != nil {
			return err
		}
	}

	return nil
}

// Rollback is the disk implementation of Database.Rollback
// It simply drops all queued ops and cached writes/deletes.
func (db *DiskDatabase) Rollback() {
	if debug {
		fmt.Println("ROLLBACK")
	}

	db.refs = 0
	db.ops = nil
	db.cache = make(map[string][]byte)
	db.deletes = make(map[string]struct{})
}

// Get a single value from `bucket` by `key`.
// Lookup order: batch deletes, then batch cache, then the disk.
func (db *DiskDatabase) Get(key ...string) ([]byte, error) {
	if debug {
		fmt.Println("GET", key)
	}

	fullKey := path.Join(key...)

	// if it's a key that was already deleted in a transaction,
	// we should acknowledge it as deleted.
	if _, ok := db.deletes[fullKey]; ok {
		return nil, ErrNoSuchKey
	}

	data, ok := db.cache[fullKey]
	if ok {
		return data, nil
	}

	// We have to go to the disk to find the right key:
	filePath := filepath.Join(db.basePath, fixDirectoryKeys(key))
	data, err := ioutil.ReadFile(filePath) // #nosec
	if os.IsNotExist(err) {
		return nil, ErrNoSuchKey
	}

	return data, err
}

// Batch is the disk implementation of Database.Batch
func (db *DiskDatabase) Batch() Batch {
	db.refs++
	return db
}

// removeNonDirs walks from `path` up to the root and removes the first
// regular file it encounters, so a directory can be created in its place.
func removeNonDirs(path string) error {
	if path == "/" || path == "" {
		return nil
	}

	info, err := os.Stat(path)
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	if info != nil && !info.IsDir() {
		return os.Remove(path)
	}

	return removeNonDirs(filepath.Dir(path))
}

// Put stores a new `val` under `key` at `bucket`.
// Implementation detail: `key` may contain slashes (/). If used, those keys
// will result in a nested directory structure.
func (db *DiskDatabase) Put(val []byte, key ...string) {
	if debug {
		fmt.Println("SET", key)
	}

	// The actual write is deferred until Flush:
	db.ops = append(db.ops, func() error {
		filePath := filepath.Join(db.basePath, fixDirectoryKeys(key))

		// If any of the parent are non-directories,
		// we need to remove them, since more nesting is requested.
		// (e.g. set /a/b/c/d over /a/b/c, where c is a file)
		parentDir := filepath.Dir(filePath)
		if err := removeNonDirs(parentDir); err != nil {
			return err
		}

		if err := os.MkdirAll(parentDir, 0700); err != nil {
			return err
		}

		// It is allowed to set a key over an existing one.
		// i.e. set "a/b" over "a/b/c". This requires us to potentially
		// delete nested directories (c).
		info, err := os.Stat(filePath)
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		if info != nil && info.IsDir() {
			if err := os.RemoveAll(filePath); err != nil {
				return err
			}
		}

		return ioutil.WriteFile(filePath, val, 0600)
	})

	// Make the write immediately visible to Get() within this batch:
	fullKey := path.Join(key...)
	db.cache[fullKey] = val
	delete(db.deletes, fullKey)
}

// Clear removes all keys below and including `key`.
func (db *DiskDatabase) Clear(key ...string) error {
	if debug {
		fmt.Println("CLEAR", key)
	}

	// Cache the real modification for later:
	db.ops = append(db.ops, func() error {
		filePrefix := filepath.Join(db.basePath, fixDirectoryKeys(key))
		walker := func(path string, info os.FileInfo, err error) error {
			if os.IsNotExist(err) {
				return nil
			}

			if err != nil {
				return err
			}

			if !info.IsDir() {
				return os.Remove(path)
			}

			return nil
		}

		return filepath.Walk(filePrefix, walker)
	})

	// Make sure we also modify the currently cached objects:
	prefix := path.Join(key...)
	for key := range db.cache {
		if strings.HasPrefix(key, prefix) {
			delete(db.cache, key)
			db.deletes[key] = struct{}{}
		}
	}

	// Also check what keys we actually need to delete.
	filePrefix := filepath.Join(db.basePath, fixDirectoryKeys(key))
	walker := func(filePath string, info os.FileInfo, err error) error {
		if os.IsNotExist(err) {
			return nil
		}

		if err != nil {
			return err
		}

		if !info.IsDir() {
			key := reverseDirectoryKeys(filePath[len(db.basePath):])
			db.deletes[path.Join(key...)] = struct{}{}
		}

		return nil
	}

	return filepath.Walk(filePrefix, walker)
}

// Erase is the disk implementation of Database.Erase
func (db *DiskDatabase) Erase(key ...string) {
	if debug {
		fmt.Println("ERASE", key)
	}

	db.ops = append(db.ops, func() error {
		fullPath := filepath.Join(db.basePath, fixDirectoryKeys(key))
		err := os.Remove(fullPath)
		if os.IsNotExist(err) {
			return ErrNoSuchKey
		}

		return err
	})

	fullKey := path.Join(key...)
	db.deletes[fullKey] = struct{}{}
	delete(db.cache, fullKey)
}

// HaveWrites is the disk implementation of Database.HaveWrites
func (db *DiskDatabase) HaveWrites() bool {
	return len(db.ops) > 0
}

// Keys is the disk implementation of Database.Keys
// NOTE(review): this reads only from disk (plus the deletes set) —
// keys written in the current, unflushed batch are not listed.
func (db *DiskDatabase) Keys(prefix ...string) ([][]string, error) {
	fullPath := filepath.Join(db.basePath, fixDirectoryKeys(prefix))
	if _, err := os.Stat(fullPath); os.IsNotExist(err) {
		return nil, nil
	}

	keys := [][]string{}
	return keys, filepath.Walk(fullPath, func(filePath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if !info.IsDir() {
			key := reverseDirectoryKeys(filePath[len(db.basePath):])
			if _, ok := db.deletes[path.Join(key...)]; !ok {
				keys = append(keys, key)
			}
		}

		return nil
	})
}

// Glob is the disk implementation of Database.Glob
func (db *DiskDatabase) Glob(prefix []string) ([][]string, error) {
	fullPrefix := filepath.Join(db.basePath, filepath.Join(prefix...))

	matches, err := filepath.Glob(fullPrefix + "*")
	if err != nil {
		return nil, err
	}

	results := [][]string{}
	for _, match := range matches {
		info, err := os.Stat(match)
		if err != nil {
			return nil, err
		}

		if !info.IsDir() {
			// +1 strips the path separator after basePath.
			key := match[len(db.basePath)+1:]
			if _, ok := db.deletes[key]; !ok {
				results = append(results, strings.Split(key, string(filepath.Separator)))
			}
		}
	}

	return results, nil
}

// Export writes all key/values into a gzipped .tar that is written to `w`.
func (db *DiskDatabase) Export(w io.Writer) error {
	archiveName := fmt.Sprintf("brigmeta-%s.gz", time.Now().Format(time.RFC3339))
	return util.Tar(db.basePath, archiveName, w)
}

// Import a gzipped tar from `r` into the current database.
func (db *DiskDatabase) Import(r io.Reader) error {
	return util.Untar(r, db.basePath)
}

// Close the database
func (db *DiskDatabase) Close() error {
	return nil
}

================================================
FILE: catfs/db/database_memory.go
================================================
package db

import (
	"encoding/gob"
	"io"
	"path"
	"sort"
	"strings"
)

// MemoryDatabase is a purely in memory database.
type MemoryDatabase struct {
	// data maps slash-joined keys to their values
	data map[string][]byte
	// oldData is the snapshot taken at the start of the outermost batch,
	// restored by Rollback
	oldData map[string][]byte
	// haveWrites is true while an open batch has modifications
	haveWrites bool
	// refCount counts nested Batch() calls
	refCount int
}

// a shallow copy is enough here.
func shallowCopyMap(src map[string][]byte) map[string][]byte {
	dst := make(map[string][]byte)
	for k, v := range src {
		copyV := make([]byte, len(v))
		copy(copyV, v)
		dst[k] = copyV
	}
	return dst
}

// NewMemoryDatabase allocates a new empty MemoryDatabase
func NewMemoryDatabase() *MemoryDatabase {
	return &MemoryDatabase{
		data: make(map[string][]byte),
	}
}

// Batch is a no-op for a memory database.
// A snapshot for Rollback is taken only for the outermost batch.
func (mdb *MemoryDatabase) Batch() Batch {
	if mdb.refCount == 0 {
		mdb.oldData = shallowCopyMap(mdb.data)
	}

	mdb.refCount++
	return mdb
}

// Flush is a no-op for a memory database.
func (mdb *MemoryDatabase) Flush() error {
	mdb.refCount--
	if mdb.refCount == 0 {
		mdb.haveWrites = false
	}
	return nil
}

// Rollback is a no-op for a memory database
func (mdb *MemoryDatabase) Rollback() {
	if mdb.oldData != nil {
		mdb.data = shallowCopyMap(mdb.oldData)
		mdb.oldData = nil
	}

	mdb.refCount = 0
}

// Get returns `key` of `bucket`.
func (mdb *MemoryDatabase) Get(key ...string) ([]byte, error) {
	data, ok := mdb.data[path.Join(key...)]
	if !ok {
		return nil, ErrNoSuchKey
	}

	return data, nil
}

// Put sets `key` in `bucket` to `data`.
func (mdb *MemoryDatabase) Put(data []byte, key ...string) {
	mdb.haveWrites = true
	mdb.data[path.Join(key...)] = data
}

// Clear removes all keys including and below `key`.
// NOTE(review): plain string prefix match — Clear("a") also removes "ab".
func (mdb *MemoryDatabase) Clear(key ...string) error {
	mdb.haveWrites = true
	joinedKey := path.Join(key...)
	for mapKey := range mdb.data {
		if strings.HasPrefix(mapKey, joinedKey) {
			delete(mdb.data, mapKey)
		}
	}

	return nil
}

// Erase removes `key`
func (mdb *MemoryDatabase) Erase(key ...string) {
	fullKey := path.Join(key...)
	mdb.haveWrites = true
	delete(mdb.data, fullKey)
}

// Keys will return all keys currently stored in the memory map
func (mdb *MemoryDatabase) Keys(prefix ...string) ([][]string, error) {
	keys := [][]string{}
	for key := range mdb.data {
		splitKey := strings.Split(key, "/")
		hasPrefix := len(prefix) <= len(splitKey)

		// Component-wise prefix check:
		for i := 0; hasPrefix && i < len(prefix) && i < len(splitKey); i++ {
			if prefix[i] != splitKey[i] {
				hasPrefix = false
			}
		}

		if hasPrefix {
			keys = append(keys, splitKey)
		}
	}

	// Deterministic output (map iteration order is random):
	sort.Slice(keys, func(i, j int) bool {
		a := strings.Join(keys[i], ".")
		b := strings.Join(keys[j], ".")
		return a < b
	})

	return keys, nil
}

// HaveWrites returns true if there are any open writes.
func (mdb *MemoryDatabase) HaveWrites() bool {
	return mdb.haveWrites
}

// Glob returns all keys starting with `prefix`.
func (mdb *MemoryDatabase) Glob(prefix []string) ([][]string, error) {
	prefixKey := path.Join(prefix...)

	var result [][]string
	keys, err := mdb.Keys()
	if err != nil {
		return nil, err
	}

	for _, key := range keys {
		fullKey := path.Join(key...)
		if strings.HasPrefix(fullKey, prefixKey) {
			// Filter "directories":
			suffix := fullKey[len(prefixKey):]
			if !strings.Contains(suffix, "/") {
				result = append(result, strings.Split(fullKey, "/"))
			}
		}
	}

	sort.Slice(result, func(i, j int) bool {
		return path.Join(result[i]...) < path.Join(result[j]...)
	})

	return result, nil
}

// Export encodes the internal memory map to a gob structure,
// and writes it to `w`.
func (mdb *MemoryDatabase) Export(w io.Writer) error {
	return gob.NewEncoder(w).Encode(mdb.data)
}

// Import imports a previously exported dump and decodes the gob structure.
func (mdb *MemoryDatabase) Import(r io.Reader) error {
	return gob.NewDecoder(r).Decode(&mdb.data)
}

// Close the memory - a no op.
func (mdb *MemoryDatabase) Close() error {
	return nil
}

================================================
FILE: catfs/db/database_test.go
================================================
package db

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"strings"
	"testing"

	"github.com/sahib/brig/util/testutil"
	"github.com/stretchr/testify/require"
)

// withDiskDatabase runs `fn` with a DiskDatabase in a temp dir
// and cleans up afterwards.
func withDiskDatabase(fn func(db *DiskDatabase)) error {
	testDir, _ := ioutil.TempDir("", "brig-")
	defer os.RemoveAll(testDir)

	db, err := NewDiskDatabase(testDir)
	if err != nil {
		return err
	}

	fn(db)
	return db.Close()
}

// withBadgerDatabase runs `fn` with a BadgerDatabase in a temp dir
// and cleans up afterwards.
func withBadgerDatabase(fn func(db *BadgerDatabase)) error {
	testDir, _ := ioutil.TempDir("", "brig-")
	defer os.RemoveAll(testDir)

	db, err := NewBadgerDatabase(testDir)
	if err != nil {
		return err
	}

	fn(db)
	return db.Close()
}

// withMemDatabase runs `fn` with a fresh in-memory database.
func withMemDatabase(fn func(db *MemoryDatabase)) error {
	mdb := NewMemoryDatabase()
	fn(mdb)
	return mdb.Close()
}

// withDbByName dispatches to the backend-specific helper by name.
func withDbByName(name string, fn func(db Database)) error {
	switch name {
	case "memory":
		return withMemDatabase(func(db *MemoryDatabase) {
			fn(db)
		})
	case "disk":
		return withDiskDatabase(func(db *DiskDatabase) {
			fn(db)
		})
	case "badger":
		return withBadgerDatabase(func(db *BadgerDatabase) {
			fn(db)
		})
	default:
		panic("bad db name: " + name)
	}
}

// withDbsByName runs `fn` with two independent databases of the same kind.
func withDbsByName(name string, fn func(db1, db2 Database)) error {
	return withDbByName(name, func(db1 Database) {
		withDbByName(name, func(db2 Database) {
			fn(db1, db2)
		})
	})
}

//////////

func TestDatabase(t *testing.T) {
	t.Run("memory", func(t *testing.T) {
		testDatabaseOneDb(t, "memory")
		testDatabaseTwoDbs(t, "memory")
	})
	t.Run("disk", func(t *testing.T) {
		testDatabaseOneDb(t, "disk")
		testDatabaseTwoDbs(t, "disk")
	})
	t.Run("badger", func(t *testing.T) {
		testDatabaseOneDb(t, "badger")
		testDatabaseTwoDbs(t, "badger")
	})
}

func TestGCRace(t *testing.T) {
	withDbByName("badger", func(db Database) {
		b := db.Batch()
		b.Put([]byte{0}, "0")
		require.Nil(t, b.Flush())

		keys, err := db.Keys()
		require.Nil(t, err)

		for _, key := range keys {
			b := db.Batch()
			b.Erase(key...)
		}
	})
}

//////////

// testDatabaseTwoDbs drives all table-tests that need two databases.
func testDatabaseTwoDbs(t *testing.T, name string) {
	tcs := []struct {
		name string
		test func(t *testing.T, db1, db2 Database)
	}{
		{
			name: "export-import",
			test: testExportImport,
		},
	}

	t.Run("double", func(t *testing.T) {
		for _, tc := range tcs {
			t.Run(tc.name, func(t *testing.T) {
				require.Nil(t, withDbsByName(name, func(db1, db2 Database) {
					tc.test(t, db1, db2)
				}))
			})
		}
	})
}

// testDatabaseOneDb drives all table-tests that need a single database.
func testDatabaseOneDb(t *testing.T, name string) {
	tcs := []struct {
		name string
		test func(t *testing.T, db Database)
	}{
		{
			name: "put-and-get",
			test: testPutAndGet,
		}, {
			name: "glob",
			test: testGlob,
		}, {
			name: "clear",
			test: testClear,
		}, {
			name: "clear-prefix",
			test: testClearPrefix,
		}, {
			name: "invalid-access",
			test: testInvalidAccess,
		}, {
			name: "recursive-batch",
			test: testRecursiveBatch,
		}, {
			name: "rollback",
			test: testRollback,
		}, {
			name: "erase",
			test: testErase,
		}, {
			name: "keys",
			test: testKeys,
		},
	}

	t.Run("single", func(t *testing.T) {
		for _, tc := range tcs {
			t.Run(tc.name, func(t *testing.T) {
				require.Nil(t, withDbByName(name, func(db Database) {
					tc.test(t, db)
				}))
			})
		}
	})
}

///////////

func testErase(t *testing.T, db Database) {
	batch := db.Batch()
	batch.Put([]byte{1}, "existing_key")
	batch.Flush()

	batch = db.Batch()
	batch.Erase("existing_key")

	// The erase must be visible before and after the flush:
	_, err := db.Get("existing_key")
	require.Equal(t, ErrNoSuchKey, err)

	batch.Flush()

	_, err = db.Get("existing_key")
	require.Equal(t, ErrNoSuchKey, err)
}

func testKeys(t *testing.T, db Database) {
	batch := db.Batch()
	expect := [][]string{}
	for i := 0; i < 15; i++ {
		key := fmt.Sprintf("%d", i)
		batch.Put([]byte{byte(i)}, key)
		expect = append(expect, []string{key})
	}

	batch.Flush()

	// Keys() returns sorted output; sort the expectation the same way.
	sort.Slice(expect, func(i, j int) bool {
		a := strings.Join(expect[i], ".")
		b := strings.Join(expect[j], ".")
		return a < b
	})

	extractKeys := func(prefixes []string) [][]string {
		keys, err := db.Keys(prefixes...)
		require.Nil(t, err)
		return keys
	}

	keys := extractKeys(nil)
	require.Equal(t, expect, keys)

	keys = extractKeys([]string{"1"})
	require.Equal(t,
		[][]string{{"1"}},
		keys,
	)
}

func testRollback(t *testing.T, db Database) {
	batch := db.Batch()
	batch.Put([]byte{1}, "existing_key")
	batch.Flush()

	batch = db.Batch()
	batch.Put([]byte{2}, "existing_key")
	batch.Put([]byte{2}, "some_key")

	data, err := db.Get("some_key")
	require.Nil(t, err)
	require.Equal(t, []byte{2}, data)

	batch.Rollback()

	// After rollback the first flushed state must be visible again:
	data, err = db.Get("existing_key")
	require.Nil(t, err)
	require.Equal(t, []byte{1}, data)

	data, err = db.Get("some_key")
	require.Equal(t, ErrNoSuchKey, err)
	require.Nil(t, data)
}

func testRecursiveBatch(t *testing.T, db Database) {
	batch1 := db.Batch()
	batch2 := db.Batch()

	batch2.Put([]byte{1}, "batch2_key")

	val, err := db.Get("batch2_key")
	require.Nil(t, err)
	require.Equal(t, []byte{1}, val)

	require.True(t, batch1.HaveWrites())
	require.True(t, batch2.HaveWrites())

	// Inner flush must not clear the write state...
	require.Nil(t, batch2.Flush())

	require.True(t, batch1.HaveWrites())
	require.True(t, batch2.HaveWrites())

	// ...only the outermost flush does.
	require.Nil(t, batch1.Flush())

	require.False(t, batch1.HaveWrites())
	require.False(t, batch2.HaveWrites())
}

func testPutAndGet(t *testing.T, db Database) {
	// Keys that clash with the disk backend's escape words on purpose:
	testKeys := [][]string{
		{"some", "stuff", "x"},
		{"some", "stuff", "."},
		{".", ".", "."},
		{"some", "stuff", "__NO_DOT__"},
		{"some", "stuff", "DOT"},
	}

	for _, key := range testKeys {
		t.Run(strings.Join(key, "."), func(t *testing.T) {
			batch := db.Batch()
			batch.Put([]byte("hello"), key...)
			require.Nil(t, batch.Flush())

			data, err := db.Get(key...)
			require.Nil(t, err)
			require.Equal(t, []byte("hello"), data)
		})
	}
}

func testInvalidAccess(t *testing.T, db Database) {
	val, err := db.Get("hello", "world")
	require.Equal(t, ErrNoSuchKey, err)
	require.Nil(t, val)
}

func testClear(t *testing.T, db Database) {
	batch := db.Batch()
	for i := 0; i < 100; i++ {
		batch.Put([]byte{1}, "a", "b", "c", fmt.Sprintf("%d", i))
	}

	require.Nil(t, batch.Flush())

	batch = db.Batch()
	require.Nil(t, batch.Clear())

	// before flush:
	for i := 0; i < 100; i++ {
		_, err := db.Get("a", "b", "c", fmt.Sprintf("%d", i))
		require.Equal(t, ErrNoSuchKey, err)
	}

	require.Nil(t, batch.Flush())

	// after flush:
	for i := 0; i < 100; i++ {
		_, err := db.Get("a", "b", "c", fmt.Sprintf("%d", i))
		require.Equal(t, ErrNoSuchKey, err)
	}
}

func testClearPrefix(t *testing.T, db Database) {
	batch := db.Batch()
	for i := 0; i < 10; i++ {
		batch.Put([]byte{1}, "a", "b", "c", fmt.Sprintf("%d", i))
	}

	for i := 0; i < 10; i++ {
		batch.Put([]byte{1}, "x", "y", "z", fmt.Sprintf("%d", i))
	}

	require.Nil(t, batch.Flush())

	batch = db.Batch()
	require.Nil(t, batch.Clear("a"))

	// before flush:
	for i := 0; i < 10; i++ {
		_, err := db.Get("a", "b", "c", fmt.Sprintf("%d", i))
		require.Equal(t, ErrNoSuchKey, err)
	}

	for i := 0; i < 10; i++ {
		data, err := db.Get("x", "y", "z", fmt.Sprintf("%d", i))
		require.Nil(t, err)
		require.Equal(t, []byte{1}, data)
	}

	require.Nil(t, batch.Flush())

	// after flush:
	for i := 0; i < 10; i++ {
		_, err := db.Get("a", "b", "c", fmt.Sprintf("%d", i))
		require.Equal(t, ErrNoSuchKey, err)
	}

	for i := 0; i < 10; i++ {
		data, err := db.Get("x", "y", "z", fmt.Sprintf("%d", i))
		require.Nil(t, err)
		require.Equal(t, []byte{1}, data)
	}
}

func testGlob(t *testing.T, db Database) {
	batch := db.Batch()
	batch.Put([]byte{1}, "a", "b", "pref_1")
	batch.Put([]byte{2}, "a", "b", "pref_2")
	batch.Put([]byte{3}, "a", "b", "prev_3")
	batch.Put([]byte{4}, "a", "b", "pref_dir", "x")

	err := batch.Flush()
	require.Nil(t, err)

	// Glob must not match "prev_3" and must not recurse into "pref_dir":
	matches, err := db.Glob([]string{"a", "b", "pref_"})
	require.Nil(t, err)

	require.Equal(t, [][]string{
		{"a", "b", "pref_1"},
		{"a", "b", "pref_2"},
	}, matches)
}

func testExportImport(t *testing.T, db1, db2 Database) {
	testKeys := [][]string{
		{"some", "stuff", "x"},
		{"some", "stuff", "."},
		{"some", "stuff", "__NO_DOT__"},
		{"some", "stuff", "DOT"},
	}

	batch := db1.Batch()
	for _, key := range testKeys {
		batch.Put([]byte{1, 2, 3}, key...)
	}

	require.Nil(t, batch.Flush())

	for _, key := range testKeys {
		data, err := db1.Get(key...)
		require.Nil(t, err)
		require.Equal(t, []byte{1, 2, 3}, data)
	}

	// Round-trip through Export/Import into a second database:
	buf := &bytes.Buffer{}
	require.Nil(t, db1.Export(buf))
	require.Nil(t, db2.Import(buf))

	for _, key := range testKeys {
		data, err := db2.Get(key...)
		require.Nil(t, err)
		require.Equal(t, []byte{1, 2, 3}, data)
	}
}

// Regression bug fix: too many key/values in a transaction
// will cause badger to return ErrTxnTooBig, which should
// be handled as retry. This code triggers this.
func TestLargeBatch(t *testing.T) {
	nKeys := 1000 * 10
	for _, name := range []string{"badger", "memory", "disk"} {
		t.Run(name, func(t *testing.T) {
			withDbByName(name, func(db Database) {
				batch := db.Batch()
				for idx := 0; idx < nKeys; idx++ {
					val := testutil.CreateRandomDummyBuf(int64(1024), int64(idx))
					key := fmt.Sprintf("idx-%d", idx)
					batch.Put(val, key)
				}

				require.Nil(t, batch.Flush())

				for idx := 0; idx < nKeys; idx++ {
					expect := testutil.CreateRandomDummyBuf(int64(1024), int64(idx))
					key := fmt.Sprintf("idx-%d", idx)
					got, err := db.Get(key)
					require.Nil(t, err)
					require.Equal(t, expect, got)
				}
			})
		})
	}
}

func BenchmarkDatabase(b *testing.B) {
	benchmarks := map[string]func(*testing.B, Database){
		"put": benchmarkDatabasePut,
		"get": benchmarkDatabaseGet,
	}

	for benchmarkName, benchmark := range benchmarks {
		b.Run(benchmarkName, func(b *testing.B) {
			for _, name := range []string{"badger", "memory", "disk"} {
				b.Run(name, func(b *testing.B) {
					withDbByName(name, func(db Database) {
						b.ResetTimer()
						benchmark(b, db)
						b.StopTimer()
					})
				})
			}
		})
	}
}

func benchmarkDatabasePut(b *testing.B, db Database) {
	batch := db.Batch()
	for i := 0; i < b.N; i++ {
		keyName := fmt.Sprintf("key_%d", i%(1024*1024))
		batch.Put(testutil.CreateDummyBuf(128), keyName)
	}

	batch.Flush()
}

func benchmarkDatabaseGet(b *testing.B, db Database) {
	batch := db.Batch()
	for i := 0; i < b.N; i++ {
		keyName := fmt.Sprintf("key_%d", i%(1024*1024))
		batch.Put(testutil.CreateDummyBuf(128), "prefix", keyName)
	}

	batch.Flush()

	// Only the Get loop is timed:
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		keyName := fmt.Sprintf("key_%d", i%(1024*1024))
		db.Get("prefix", keyName)
	}
}

================================================
FILE: catfs/errors/errors.go
================================================
package catfs

import (
	"errors"
	"fmt"
)

var (
	// ErrStageNotEmpty is returned by Reset() when it was called without force.
	// and there are still changes in the staging area.
	ErrStageNotEmpty = errors.New("there are changes in the staging area; use the --force")

	// ErrNoChange is returned when trying to commit, but there is no change.
	ErrNoChange = errors.New("nothing changed between the given versions")

	// ErrAmbigiousRev is returned when a ref string could mean several commits.
	ErrAmbigiousRev = errors.New("there is more than one rev with this prefix")

	// ErrExists is returned if a node already exists at a path, but should not.
	ErrExists = errors.New("File exists")

	// ErrBadNode is returned when a wrong node type was passed to a method.
	ErrBadNode = errors.New("Cannot convert to concrete type. Broken input data?")
)

//////////////

// ErrNoSuchRef is returned when a bad ref was used
type ErrNoSuchRef string

func (e ErrNoSuchRef) Error() string {
	return fmt.Sprintf("No ref found named `%s`", string(e))
}

// IsErrNoSuchRef checks if `err` is a no such ref error.
func IsErrNoSuchRef(err error) bool {
	_, ok := err.(ErrNoSuchRef)
	return ok
}

/////////////////

// ErrNoSuchCommitIndex is returned when a bad commit was used
type ErrNoSuchCommitIndex struct {
	index int64
}

func (e ErrNoSuchCommitIndex) Error() string {
	return fmt.Sprintf("No commit with index `%d` found", e.index)
}

// NoSuchCommitIndex returns an error for a missing index at `ind`
func NoSuchCommitIndex(ind int64) error {
	return &ErrNoSuchCommitIndex{ind}
}

// IsErrNoSuchCommitIndex checks if `err` is a no such commit index error.
func IsErrNoSuchCommitIndex(err error) bool {
	_, ok := err.(*ErrNoSuchCommitIndex)
	return ok
}

/////////////////

type errNoSuchFile struct {
	path string
}

// Error will return an error description detailing what path is missing.
func (e *errNoSuchFile) Error() string {
	return "No such file or directory: " + e.path
}

//////////////

// NoSuchFile creates a new error that reports `path` as missing
func NoSuchFile(path string) error {
	return &errNoSuchFile{path}
}

// IsNoSuchFileError asserts that `err` means that the file could not be found
func IsNoSuchFileError(err error) bool {
	_, ok := err.(*errNoSuchFile)
	return ok
}

================================================
FILE: catfs/fs.go
================================================
package catfs

import (
	"archive/tar"
	"bytes"
	"crypto/rand"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/sahib/config"
	log "github.com/sirupsen/logrus"
	capnp "zombiezen.com/go/capnproto2"

	e "github.com/pkg/errors"
	c "github.com/sahib/brig/catfs/core"
	"github.com/sahib/brig/catfs/db"
	ie "github.com/sahib/brig/catfs/errors"
	"github.com/sahib/brig/catfs/mio"
	"github.com/sahib/brig/catfs/mio/pagecache"
	n "github.com/sahib/brig/catfs/nodes"
	"github.com/sahib/brig/catfs/vcs"
	"github.com/sahib/brig/repo/hints"
	"github.com/sahib/brig/util"
	h "github.com/sahib/brig/util/hashlib"
)

const (
	abiVersion                 = 1
	defaultEncryptionKeyLength = 32
)

// emptyFileEncryptionKey returns an all-zero key of the default length.
func emptyFileEncryptionKey() []byte {
	return make([]byte, defaultEncryptionKeyLength)
}

// HintManager is the API for looking up hints.
type HintManager interface {
	// Lookup should return stream hints for the path.
	// Hints are recursive, so we iterate until the root path
	// to find the correct hint.
	Lookup(path string) hints.Hint

	// Set should remember `hint` for `path` and below.
	Set(path string, hint hints.Hint) error
}

// dummy hint manager that will always yield the default.
type defaultHintManager struct{}

func (dhm defaultHintManager) Lookup(path string) hints.Hint {
	return hints.Default()
}

func (dhm defaultHintManager) Set(path string, hint hints.Hint) error {
	return fmt.Errorf("no hint manager, cannot remember hints")
}

// FS (short for Filesystem) is the central API entry for everything related to
// paths. It exposes a POSIX-like interface where path are mapped to the
// actual underlying hashes and the associated metadata.
//
// Additionally it supports version control commands like MakeCommit(),
// Checkout() etc. The API is file-centric, i.e. directories are created on
// the fly for some operations like Stage(). Empty directories can be created
// via Mkdir() though.
type FS struct {
	mu sync.Mutex

	// underlying key/value store
	kv db.Database

	// linker (holds all nodes together)
	lkr *c.Linker

	// garbage collector for dead metadata links
	gc *c.GarbageCollector

	// channel to schedule gc runs and quit the gc loop
	gcControl chan bool

	// channel to schedule auto commits and quit the loop
	autoCommitControl chan bool

	// channel to schedule repins and quit the loop
	repinControl chan string

	// Actual storage backend (e.g. ipfs or memory)
	bk FsBackend

	// internal config
	cfg *config.Config

	// cache for the isPinned operation
	pinner *Pinner

	// whether this fs is read only and cannot be changed.
	// It can be change by applying patches though.
	readOnly bool

	// interface to load stream hints
	hintManager HintManager

	// cache for storing pages written to catfs.Handle
	// (may be nil if not used, e.g. for tests)
	pageCache pagecache.Cache
}

// ErrReadOnly is returned when a file system was created in read only mode
// and a modifying operation was called on it.
var ErrReadOnly = errors.New("fs is read only")

// StatInfo describes the metadata of a single node.
// The concept is comparable to the POSIX stat() call.
type StatInfo struct {
	// Path is the full path to the file
	Path string

	// TreeHash is the hash of the node in the DAG
	TreeHash h.Hash

	// ContentHash is the actual hash of the content
	// (used to test for content equality)
	ContentHash h.Hash

	// BackendHash is the hash under which the file is reachable
	// in the backend.
	BackendHash h.Hash

	// User is the name of the user that modified this node last.
	User string

	// Size in bytes
	Size uint64

	// Cached Size in bytes, i.e. size at backend
	CachedSize int64

	// Inode is a unique number specific to this node
	Inode uint64

	// Depth is the hierarchy level inside of this node (root has 0)
	Depth int

	// ModTime is the last modification timestamp
	ModTime time.Time

	// IsDir tells you if this node is a dir
	IsDir bool

	// IsPinned tells you if this node is pinned (either implicit or explicit)
	IsPinned bool

	// IsExplicit is true when the user pinned this node on purpose
	IsExplicit bool

	// IsRaw indicates if the stream associated with the file (if any)
	// was encoded by brig or can be consumed from ipfs directly.
	IsRaw bool

	// Key is the encryption key for the file.
	Key []byte
}

// DiffPair is a pair of nodes.
// It is returned by MakeDiff(), where the source
// is a node on the remote side and the dst node is
// a node on our side.
type DiffPair struct {
	Src StatInfo `json:"src"`
	Dst StatInfo `json:"dst"`
}

// Diff is a list of things that changed between to commits
type Diff struct {
	// Added is a list of nodes that were added newly
	Added []StatInfo `json:"added"`

	// Removed is a list of nodes that were removed
	Removed []StatInfo `json:"removed"`

	// Ignored is a list of nodes that were not considered
	Ignored []StatInfo `json:"ignored"`

	// Missing is a list of nodes that the remote side is missing
	Missing []StatInfo `json:"missing"`

	// Moved is a list of nodes that changed path
	Moved []DiffPair `json:"moved"`

	// Merged is a list of nodes that can be merged automatically
	Merged []DiffPair `json:"merged"`

	// Conflict is a list of nodes that cannot be merged automatically
	Conflict []DiffPair `json:"conflict"`
}

// Commit gives information about a single commit.
type Commit struct {
	// Hash is the id of this commit
	Hash h.Hash

	// Msg describes the committed contents
	Msg string

	// Tags is a user defined list of tags
	// (tags like HEAD, CURR and INIT are assigned dynamically as exception)
	Tags []string

	// Date is the time when the commit was made
	Date time.Time

	// Index is the index of the commit:
	Index int64
}

// Change describes a single change to a node between two versions
type Change struct {
	// Path is the node that was changed
	Path string

	// IsPinned tells you if the content is pinned at this stage
	IsPinned bool

	// IsExplicit tells you if the content is pinned explicitly.
	IsExplicit bool

	// Change describes what was changed
	Change string

	// MovedTo indicates that the node at this Path was moved to
	// another location and that there is no node at this location now.
	MovedTo string

	// WasPreviouslyAt is filled when the node was moved
	// and was previously at another location.
	WasPreviouslyAt string

	// Head is the commit after the change
	Head *Commit

	// Next is the commit before the change
	Next *Commit
}

// ExplicitPin is a pair of path and commit id.
type ExplicitPin struct {
	Path   string
	Commit string
}

/////////////////////
// UTILITY HELPERS //
/////////////////////

// nodeToStat converts an internal node to the public StatInfo view.
func (fs *FS) nodeToStat(nd n.Node) *StatInfo {
	isPinned, isExplicit, err := fs.pinner.IsNodePinned(nd)
	if err != nil {
		log.Warningf("stat: failed to acquire pin state: %v", err)
	}

	var isDir bool
	var isRaw bool
	var key []byte

	switch nd.Type() {
	case n.NodeTypeFile:
		file, ok := nd.(*n.File)
		if ok {
			// Copy the key so callers cannot mutate the node's key.
			key = make([]byte, len(file.Key()))
			copy(key, file.Key())
		}

		// NOTE(review): if the type assertion above failed, `file` is nil
		// here and IsRaw() would panic — confirm that NodeTypeFile always
		// implies *n.File.
		isRaw = file.IsRaw()
	case n.NodeTypeDirectory:
		isDir = true
	case n.NodeTypeGhost:
		ghost, ok := nd.(*n.Ghost)
		if ok {
			isDir = (ghost.OldNode().Type() == n.NodeTypeDirectory)
		}
	}

	return &StatInfo{
		Path:        nd.Path(),
		User:        nd.User(),
		ModTime:     nd.ModTime(),
		IsDir:       isDir,
		Inode:       nd.Inode(),
		Size:        nd.Size(),
		CachedSize:  nd.CachedSize(),
		Depth:       n.Depth(nd),
		IsPinned:    isPinned,
		IsExplicit:  isExplicit,
		IsRaw:       isRaw,
		ContentHash: nd.ContentHash().Clone(),
		BackendHash: nd.BackendHash().Clone(),
		TreeHash:    nd.TreeHash().Clone(),
		Key:         key,
	}
}

// lookupFileOrDir resolves `path` to a modifiable node,
// treating ghosts (deleted nodes) as missing.
func lookupFileOrDir(lkr *c.Linker, path string) (n.ModNode, error) {
	nd, err := lkr.LookupNode(path)
	if err != nil {
		return nil, err
	}

	if nd == nil || nd.Type() == n.NodeTypeGhost {
		return nil, ie.NoSuchFile(path)
	}

	modNd, ok := nd.(n.ModNode)
	if !ok {
		return nil, ie.ErrBadNode
	}

	return modNd, nil
}

// handleGcEvent unpins file content that became unreachable.
// It always returns true so the GC keeps running.
func (fs *FS) handleGcEvent(nd n.Node) bool {
	if nd.Type() != n.NodeTypeFile {
		return true
	}

	file, ok := nd.(*n.File)
	if !ok {
		return true
	}

	content := file.BackendHash()
	log.Infof("unpinning gc'd node %v", content.B58String())

	// This node will not be reachable anymore by brig.
	// Make sure it is also unpinned to save space.
	if err := fs.pinner.Unpin(file.Inode(), file.BackendHash(), true); err != nil {
		log.Warningf("unpinning attempt failed: %v", err)
	}

	// Still return true, no need to stop the GC
	return true
}

///////////////////////////////
// ACTUAL API IMPLEMENTATION //
///////////////////////////////

// doGcRun performs one metadata GC run while holding the FS lock.
func (fs *FS) doGcRun() {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	owner, err := fs.lkr.Owner()
	if err != nil {
		log.Warningf("gc: failed to get owner: %v", err)
		return
	}

	log.Debugf("filesystem GC (for %s): running", owner)
	if err := fs.gc.Run(true); err != nil {
		log.Warnf("failed to run GC: %v", err)
	}
}

// NewFilesystem creates a new CATFS filesystem.
// This filesystem stores all its data in a Merkle DAG and is fully versioned.
func NewFilesystem(
	backend FsBackend,
	dbPath string,
	owner string,
	readOnly bool,
	fsCfg *config.Config,
	hintManager HintManager,
	pageCache pagecache.Cache,
) (*FS, error) {
	kv, err := db.NewBadgerDatabase(dbPath)
	if err != nil {
		return nil, err
	}

	lkr := c.NewLinker(kv)
	if err := lkr.SetOwner(owner); err != nil {
		return nil, err
	}

	// NOTE: This is the place to start migrations in the future.
	if err := lkr.SetABIVersion(abiVersion); err != nil {
		return nil, err
	}

	pinCache, err := NewPinner(lkr, backend)
	if err != nil {
		return nil, err
	}

	if hintManager == nil {
		hintManager = defaultHintManager{}
	}

	// NOTE: We do not need to validate fsCfg here.
	// This is already done on the side of our config module.
	// (we just need to convert a few keys to the vcs.SyncOptions enum later).
	fs := &FS{
		kv:                kv,
		lkr:               lkr,
		bk:                backend,
		cfg:               fsCfg,
		readOnly:          readOnly,
		gcControl:         make(chan bool, 1),
		autoCommitControl: make(chan bool, 1),
		repinControl:      make(chan string, 1),
		pinner:            pinCache,
		hintManager:       hintManager,
		pageCache:         pageCache,
	}

	// Start the garbage collection background task.
	// It will run locked every few seconds and removes unreachable
	// objects from the staging area.
fs.gc = c.NewGarbageCollector(lkr, kv, fs.handleGcEvent) go fs.gcLoop() go fs.autoCommitLoop() go fs.repinLoop() return fs, nil } func (fs *FS) gcLoop() { gcTicker := time.NewTicker(120 * time.Second) defer gcTicker.Stop() for { select { case state := <-fs.gcControl: if state { fs.doGcRun() } else { // Quit the gc loop: log.Debugf("quitting the GC loop") return } case <-gcTicker.C: fs.doGcRun() } } } func (fs *FS) autoCommitLoop() { lastCheck := time.Now() checkTicker := time.NewTicker(1 * time.Second) defer checkTicker.Stop() for { select { case <-fs.autoCommitControl: log.Debugf("quitting the auto commit loop") return case <-checkTicker.C: isEnabled := fs.cfg.Bool("autocommit.enabled") if !isEnabled { continue } if time.Since(lastCheck) >= fs.cfg.Duration("autocommit.interval") { lastCheck = time.Now() msg := fmt.Sprintf("auto commit at »%s«", time.Now().Format(time.RFC822)) if err := fs.MakeCommit(msg); err != nil && err != ie.ErrNoChange { log.Warningf("failed to create auto commit: %v", err) } } } } } func (fs *FS) repinLoop() { if fs.readOnly { return } lastCheck := time.Now() checkTicker := time.NewTicker(1 * time.Second) defer checkTicker.Stop() for { select { case root := <-fs.repinControl: if root == "" { log.Debugf("quitting the repin loop") return } // Execute a repin immediately otherwise. // (and reset the timer, so we don't get it twice) if err := fs.repin(root); err != nil { log.Warningf("repin failed: %v", err) } lastCheck = time.Now() case <-checkTicker.C: isEnabled := fs.cfg.Bool("repin.enabled") if !isEnabled { continue } if time.Since(lastCheck) >= fs.cfg.Duration("repin.interval") { lastCheck = time.Now() if err := fs.repin("/"); err != nil { log.Warningf("repin failed: %v", err) } } } } } // Close will clean up internal storage. 
func (fs *FS) Close() error { fs.mu.Lock() defer fs.mu.Unlock() go func() { fs.gcControl <- false }() go func() { fs.autoCommitControl <- false }() go func() { fs.repinControl <- "" }() if err := fs.pinner.Close(); err != nil { log.Warnf("Failed to close pin cache: %v", err) } return fs.kv.Close() } // Export will export a serialized version of the filesystem to `w`. func (fs *FS) Export(w io.Writer) error { fs.mu.Lock() defer fs.mu.Unlock() return fs.kv.Export(w) } // Import will read a previously FS dump from `r`. func (fs *FS) Import(r io.Reader) error { fs.mu.Lock() defer fs.mu.Unlock() if err := fs.kv.Import(r); err != nil { return err } // disk (probably) changed, delete memcache: fs.lkr.MemIndexClear() return nil } ///////////////////// // CORE OPERATIONS // ///////////////////// // Move will move the file or directory at `src` to `dst`. // If it does not exist, an error will be returned. func (fs *FS) Move(src, dst string) error { fs.mu.Lock() defer fs.mu.Unlock() if fs.readOnly { return ErrReadOnly } srcNd, err := lookupFileOrDir(fs.lkr, src) if err != nil { return err } return c.Move(fs.lkr, srcNd, dst) } // Copy will copy the file or directory at `src` to `dst`. // If it does not exist, an error will be returned. func (fs *FS) Copy(src, dst string) error { fs.mu.Lock() defer fs.mu.Unlock() if fs.readOnly { return ErrReadOnly } srcNd, err := lookupFileOrDir(fs.lkr, src) if err != nil { return err } _, err = c.Copy(fs.lkr, srcNd, dst) return err } // Mkdir creates a new empty directory at `dir`, possibly creating // all intermediate parents if `createParents` is set. func (fs *FS) Mkdir(dir string, createParents bool) error { fs.mu.Lock() defer fs.mu.Unlock() if fs.readOnly { return ErrReadOnly } // "brig mkdir ." somehow is able to overwrite everything: dir = strings.TrimLeft(path.Clean(dir), ".") _, err := c.Mkdir(fs.lkr, dir, createParents) return err } // Remove removes the file or directory at `path`. 
func (fs *FS) Remove(path string) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	if fs.readOnly {
		return ErrReadOnly
	}

	nd, err := lookupFileOrDir(fs.lkr, path)
	if err != nil {
		return err
	}

	// TODO: What should remove do with the pin state?
	_, _, err = c.Remove(fs.lkr, nd, true, true)
	return err
}

// Stat delivers detailed information about the node at `path`.
func (fs *FS) Stat(path string) (*StatInfo, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	nd, err := fs.lkr.LookupNode(path)
	if err != nil {
		return nil, err
	}

	// Ghosts (deleted nodes) act like the file does not exist:
	if nd.Type() == n.NodeTypeGhost {
		return nil, ie.NoSuchFile(path)
	}

	return fs.nodeToStat(nd), nil
}

// Filter implements a quick and easy way to search over all files
// by using a query that checks if it is part of the path.
// The query is matched case-insensitively against the path relative
// to `root`. Results are sorted by depth first, then by path.
func (fs *FS) Filter(root, query string) ([]*StatInfo, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	rootNd, err := fs.lkr.LookupNode(root)
	if err != nil {
		return nil, err
	}

	if rootNd.Type() == n.NodeTypeGhost {
		return nil, ie.NoSuchFile(root)
	}

	query = strings.ToLower(query)
	result := []*StatInfo{}
	err = n.Walk(fs.lkr, rootNd, false, func(child n.Node) error {
		// Ghost nodes should not be visible to the outside.
		if child.Type() == n.NodeTypeGhost {
			return nil
		}

		// Special case: Forget about the root node.
		// It should not be part of the results.
		childPath := child.Path()
		if childPath == root {
			return nil
		}

		// Match only against the part of the path below `root`:
		childPath = strings.ToLower(childPath[len(root):])
		if !strings.Contains(childPath, query) {
			return nil
		}

		result = append(result, fs.nodeToStat(child))

		// NOTE: Once a node matched, its children are skipped —
		// the match of the parent already covers them.
		return n.ErrSkipChild
	})

	sort.Slice(result, func(i, j int) bool {
		iDepth := result[i].Depth
		jDepth := result[j].Depth

		if iDepth == jDepth {
			return result[i].Path < result[j].Path
		}

		return iDepth < jDepth
	})

	if err != nil {
		return nil, err
	}

	return result, nil
}

// List returns stat info for each node below (and including) root.
// Nodes deeper than maxDepth will not be shown. If maxDepth is a
// negative number, all nodes will be shown.
func (fs *FS) List(root string, maxDepth int) ([]*StatInfo, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	// NOTE: This method is highly inefficient:
	//       - iterates over all nodes even if maxDepth is >= 0
	//
	// Fix whenever it proves to be a problem.
	// I don't want to engineer something now until I know what's needed.
	rootNd, err := fs.lkr.LookupNode(root)
	if err != nil {
		return nil, err
	}

	if rootNd.Type() == n.NodeTypeGhost {
		return nil, ie.NoSuchFile(root)
	}

	// Start counting max depth relative to the root:
	if maxDepth >= 0 {
		maxDepth += n.Depth(rootNd)
	}

	result := []*StatInfo{}
	if rootNd.Type() == n.NodeTypeFile {
		// There is no point to Walk through file, it has no children
		// but we need to report on itself
		result = append(result, fs.nodeToStat(rootNd))
		return result, nil
	}

	err = n.Walk(fs.lkr, rootNd, false, func(child n.Node) error {
		if maxDepth < 0 || n.Depth(child) <= maxDepth {
			// When a depth limit is given, the root itself is excluded:
			if maxDepth >= 0 && child.Path() == root {
				return nil
			}

			// Ghost nodes should not be visible to the outside.
			if child.Type() == n.NodeTypeGhost {
				return nil
			}

			result = append(result, fs.nodeToStat(child))
		}

		return nil
	})

	// Sort by depth first, then lexicographically by path:
	sort.Slice(result, func(i, j int) bool {
		iDepth := result[i].Depth
		jDepth := result[j].Depth

		if iDepth == jDepth {
			return result[i].Path < result[j].Path
		}

		return iDepth < jDepth
	})

	if err != nil {
		return nil, err
	}

	return result, nil
}

////////////////////////
// PINNING OPERATIONS //
////////////////////////

// preCache makes the backend fetch the data already from the network,
// even though it might not be needed yet.
func (fs *FS) preCache(hash h.Hash) error {
	stream, err := fs.bk.Cat(hash)
	if err != nil {
		return err
	}

	// Draining the stream is enough to make the backend cache it:
	_, err = io.Copy(ioutil.Discard, stream)
	return err
}

// preCacheInBackground starts preCache in a goroutine if enabled by
// the "pre_cache.enabled" config key. Failures are only logged.
func (fs *FS) preCacheInBackground(hash h.Hash) {
	if !fs.cfg.Bool("pre_cache.enabled") {
		return
	}

	go func() {
		if err := fs.preCache(hash); err != nil {
			log.Debugf("failed to pre-cache `%s`: %v", hash, err)
		}
	}()
}

// Pin will pin the file or directory at `path` explicitly.
func (fs *FS) Pin(path, rev string, explicit bool) error {
	return fs.doPin(path, rev, fs.pinner.PinNode, explicit)
}

// Unpin will unpin the file or directory at `path` explicitly.
func (fs *FS) Unpin(path, rev string, explicit bool) error {
	return fs.doPin(path, rev, fs.pinner.UnpinNode, explicit)
}

// doPin resolves `path` in the commit referenced by `rev` and applies
// the pin operation `op` (PinNode or UnpinNode) to the resulting node.
// For files, a background pre-cache is triggered afterwards.
func (fs *FS) doPin(path, rev string, op func(nd n.Node, explicit bool) error, explicit bool) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	cmt, err := parseRev(fs.lkr, rev)
	if err != nil {
		return err
	}

	root, err := fs.lkr.DirectoryByHash(cmt.Root())
	if err != nil {
		return err
	}

	nd, err := root.Lookup(fs.lkr, path)
	if err != nil {
		return err
	}

	if nd == nil || nd.Type() == n.NodeTypeGhost {
		return ie.NoSuchFile(path)
	}

	if err := op(nd, explicit); err != nil {
		return err
	}

	// Make sure the data is available (if requested):
	if nd.Type() == n.NodeTypeFile {
		fs.preCacheInBackground(nd.BackendHash())
	}

	return nil
}

// IsPinned returns true for files and directories that are pinned.
// A directory only counts as pinned if all files and directories
// in it are also pinned.
func (fs *FS) IsPinned(path string) (bool, bool, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	nd, err := lookupFileOrDir(fs.lkr, path)
	if err != nil {
		return false, false, err
	}

	return fs.pinner.IsNodePinned(nd)
}

////////////////////////
// STAGING OPERATIONS //
////////////////////////

// prefixSlash makes sure `s` starts with exactly one leading slash.
func prefixSlash(s string) string {
	if !strings.HasPrefix(s, "/") {
		return "/" + s
	}

	return s
}

// Touch creates an empty file at `path` if it does not exist yet.
// If it exists, it's mod time is being updated to the current time.
func (fs *FS) Touch(path string) error {
	fs.mu.Lock()

	if fs.readOnly {
		fs.mu.Unlock()
		return ErrReadOnly
	}

	nd, err := fs.lkr.LookupNode(path)
	if err != nil && !ie.IsNoSuchFileError(err) {
		fs.mu.Unlock()
		return err
	}

	if nd != nil {
		modNd, ok := nd.(n.ModNode)
		if !ok {
			// What could it be if lookup returns not a node?
			fs.mu.Unlock()
			return nil
		}

		if modNd.Type() != n.NodeTypeGhost {
			// Existing, non-deleted node: only bump its mod time.
			modNd.SetModTime(time.Now())
			fs.mu.Unlock()
			return nil
		}
	}

	// We may not call Stage() with a lock.
	fs.mu.Unlock()

	// Nothing or a ghost there, stage an empty file
	// 0 sized (newly touched) files should have the same key
	// to point to the same backend file
	key := emptyFileEncryptionKey()
	return fs.stageWithKey(prefixSlash(path), bytes.NewReader([]byte{}), key)
}

// Truncate cuts of the output of the file at `path` to `size`.
// `size` should be between 0 and the size of the file,
// all other values will be ignored.
//
// Note that this is not implemented as an actual IO operation.
// It is possible to go back to a bigger size until the actual
// content was changed via Stage().
func (fs *FS) Truncate(path string, size uint64) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	if fs.readOnly {
		return ErrReadOnly
	}

	nd, err := fs.lkr.LookupModNode(path)
	if err != nil {
		return err
	}

	if nd.Type() != n.NodeTypeFile {
		return fmt.Errorf("`%s` is not a file", path)
	}

	nd.SetSize(size)
	return fs.lkr.StageNode(nd)
}

// renewPins transfers the pin state of `oldFile` (may be nil) to `newFile`.
// An explicit pin on the old version is carried over to the new one.
func (fs *FS) renewPins(oldFile, newFile *n.File) error {
	pinExplicit := false

	if oldFile != nil {
		oldBackendHash := oldFile.BackendHash()
		if oldBackendHash.Equal(newFile.BackendHash()) {
			// Nothing changed, nothing to do...
			return nil
		}

		_, isExplicit, err := fs.pinner.IsNodePinned(oldFile)
		if err != nil {
			return err
		}

		// If the old file was pinned explicitly, we should also pin
		// the new file explicitly to carry over that info.
		pinExplicit = isExplicit

		if !isExplicit {
			if err := fs.pinner.UnpinNode(oldFile, pinExplicit); err != nil {
				return err
			}
		}
	}

	return fs.pinner.PinNode(newFile, pinExplicit)
}

// preStageKeyGen returns the encryption key to use when staging `path`:
// the existing key for non-empty existing files, a fresh random key otherwise.
func (fs *FS) preStageKeyGen(path string) ([]byte, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	// See if we already have such a file.
	// If not we gonna need to generate new key for it
	// based on the content hash.
	oldNode, err := fs.lkr.LookupNode(path)

	// Check that we're handling the right kind of node.
	// We should be able to add on-top of ghosts, but directories
	// are pointless as input.
	var oldFile *n.File
	if err == nil {
		switch oldNode.Type() {
		case n.NodeTypeDirectory:
			return nil, fmt.Errorf("Cannot stage over directory: %v", path)
		case n.NodeTypeGhost:
			// Act like there was no such node:
			err = ie.NoSuchFile(path)
		case n.NodeTypeFile:
			var ok bool
			oldFile, ok = oldNode.(*n.File)
			if !ok {
				return nil, ie.ErrBadNode
			}
		}
	}

	if err != nil && !ie.IsNoSuchFileError(err) {
		return nil, err
	}

	if oldFile != nil && oldFile.Size() != 0 {
		return oldFile.Key(), nil
	}

	// only create a new key for new files.
	// The key depends on the content hash and the size.
	key := make([]byte, defaultEncryptionKeyLength)
	if _, err := rand.Read(key); err != nil {
		return nil, e.Wrapf(err, "failed to generate random key")
	}

	return key, nil
}

// Stage reads all data from `r` and stores as content of the node at `path`.
// If `path` already exists, it will be updated.
func (fs *FS) Stage(path string, r io.Reader) error {
	if fs.readOnly {
		return ErrReadOnly
	}

	path = prefixSlash(path)
	key, err := fs.preStageKeyGen(path)
	if err != nil {
		return err
	}

	return fs.stageWithKey(path, r, key)
}

// stageWithKey reads all data from `r` and stores as content of the node at `path`.
// It uses provided encryption key
// If `path` already exists, it will be updated.
func (fs *FS) stageWithKey(path string, r io.Reader, key []byte) error {
	if fs.readOnly {
		return ErrReadOnly
	}

	path = prefixSlash(path)

	// NOTE: fs.mu is not locked here since I/O can be done in parallel.
	// If you need locking, you can do it at the bottom of this method.

	// Branch off a part of the stream and pipe it through
	// a hash writer to compute the hash while reading the stream:
	hashWriter := h.NewHashWriter()
	hashReader := io.TeeReader(r, hashWriter)

	// Do the same with the size.
	// This actually measures the size of the stream and is
	// therefore guaranteed to find out the actual stream size.
	sizeAcc := &util.SizeAccumulator{}
	sizeReader := io.TeeReader(hashReader, sizeAcc)

	hint := fs.hintManager.Lookup(path)
	stream, isRaw, err := mio.NewInStream(sizeReader, path, key, hint)
	if err != nil {
		return err
	}

	backendHash, err := fs.bk.Add(stream)
	if err != nil {
		return err
	}

	// The stream was consumed, we now know those attrs:
	size := sizeAcc.Size()
	contentHash := hashWriter.Finalize()

	// Lock it again for the metadata staging:
	fs.mu.Lock()
	defer fs.mu.Unlock()

	cachedSize, err := fs.bk.CachedSize(backendHash)
	if err != nil {
		return err
	}

	// Remember the metadata:
	newFile, err := c.Stage(
		fs.lkr,
		path,
		contentHash,
		backendHash,
		size,
		cachedSize,
		key,
		time.Now(),
		isRaw,
	)
	if err != nil {
		return err
	}

	return fs.pinner.PinNode(newFile, false)
}

////////////////////
// I/O OPERATIONS //
////////////////////

// tarEntry is one file scheduled to be written to a tar archive,
// with its stream already opened.
type tarEntry struct {
	path   string
	size   int64
	stream mio.Stream
}

// getTarableEntries collects all files below `root` that pass `filter`
// and opens a stream for each. It also returns the path prefix that
// should be stripped from the archive entry names.
func (fs *FS) getTarableEntries(root string, filter func(node *StatInfo) bool) ([]tarEntry, string, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	rootNd, err := fs.lkr.LookupNode(root)
	if err != nil {
		return nil, "", err
	}

	entries := []tarEntry{}
	err = n.Walk(fs.lkr, rootNd, false, func(child n.Node) error {
		if filter != nil && rootNd.Path() != child.Path() {
			// Ask the API user if he wants this node in his archive:
			if !filter(fs.nodeToStat(child)) {
				return n.ErrSkipChild
			}
		}

		if child.Type() != n.NodeTypeFile {
			return nil
		}

		file, ok := child.(*n.File)
		if !ok {
			return ie.ErrBadNode
		}

		stream, err := fs.catHash(
			file.BackendHash(),
			file.Key(),
			file.Size(),
			file.IsRaw(),
		)

		if err != nil {
			return e.Wrapf(err, "failed to open stream for %s", file.Path())
		}

		entries = append(entries, tarEntry{
			path:   child.Path(),
			size:   int64(child.Size()),
			stream: stream,
		})

		return nil
	})

	// Make sure that the entries are served in lexicographical order.
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].path < entries[j].path
	})

	prefixPath := root
	if rootNd.Type() != n.NodeTypeDirectory {
		prefixPath = path.Dir(root)
	}

	return entries, prefixPath, err
}

// Tar produces a tar archive from the file or directory at `root` and writes
// the output to `w`. If you want compression, supply a gzip writer.
func (fs *FS) Tar(root string, w io.Writer, filter func(node *StatInfo) bool) error {
	// getTarableEntries is locking fs.mu while it is running.
	// the rest of the code in this method should NOT use any nodes
	// or anything that is open to race conditions!
	entries, prefixPath, err := fs.getTarableEntries(root, filter)
	if err != nil {
		return err
	}

	tw := tar.NewWriter(w)

	// Make sure to close all remaining streams when any error happens.
	// Also clean up the tar writer. This might flush some data still.
	// The user of this API should not use `w` if an error happens.
	cleanup := func(idx int) {
		for ; idx < len(entries); idx++ {
			entry := entries[idx]
			if err := entry.stream.Close(); err != nil {
				log.Debugf("could not close stream: %v (file descriptor leak?)", entry.path)
			}
		}

		tw.Close()
	}

	for idx, entry := range entries {
		hdr := &tar.Header{
			Name: entry.path[len(prefixPath):],
			Mode: 0600,
			Size: entry.size,
		}

		if err := tw.WriteHeader(hdr); err != nil {
			cleanup(idx)
			return err
		}

		if _, err := io.Copy(tw, entry.stream); err != nil {
			cleanup(idx)
			return err
		}

		if err := entry.stream.Close(); err != nil {
			// This entry's stream is already closed; only close the rest.
			cleanup(idx + 1)
			return err
		}
	}

	return tw.Close()
}

// Cat will open a file read-only and expose it's underlying data as stream.
// If no such path is known or it was deleted, nil is returned as stream.
func (fs *FS) Cat(path string) (mio.Stream, error) {
	fs.mu.Lock()

	file, err := fs.lkr.LookupFile(path)
	if err == ie.ErrBadNode {
		fs.mu.Unlock()
		return nil, ie.NoSuchFile(path)
	}

	if err != nil {
		fs.mu.Unlock()
		return nil, err
	}

	// Copy all attributes, since accessing them beyond the lock might be racy.
	size := file.Size()
	backendHash := file.BackendHash().Clone()
	key := make([]byte, len(file.Key()))
	isRaw := file.IsRaw()
	copy(key, file.Key())

	fs.mu.Unlock()
	return fs.catHash(backendHash, key, size, isRaw)
}

// catHash opens the backend stream for `backendHash`, decodes it with `key`
// (unless `isRaw`) and limits it to `size` bytes.
// NOTE: This method can be called without locking fs.mu!
func (fs *FS) catHash(backendHash h.Hash, key []byte, size uint64, isRaw bool) (mio.Stream, error) {
	rawStream, err := fs.bk.Cat(backendHash)
	if err != nil {
		return nil, err
	}

	stream, err := mio.NewOutStream(rawStream, isRaw, key)
	if err != nil {
		return nil, err
	}

	// Truncate stream to file size. Data stream might be bigger
	// for example when fuse decided to truncate the file, but
	// did not flush it already.
	return mio.LimitStream(stream, size), nil
}

// Open returns a file like object that can be used for modifying a file in memory.
// If you want to have seekable read-only stream, use Cat(), it has less overhead.
func (fs *FS) Open(path string) (*Handle, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	nd, err := fs.lkr.LookupNode(path)
	if err != nil {
		return nil, err
	}

	file, ok := nd.(*n.File)
	if !ok {
		return nil, fmt.Errorf("Can only open files: %v", path)
	}

	return newHandle(fs, file, fs.readOnly), nil
}

////////////////////
// VCS OPERATIONS //
////////////////////

// MakeCommit bundles all staged changes into one commit described by `msg`.
// If no changes were made since the last call to MakeCommit(), ErrNoChange
// is returned.
func (fs *FS) MakeCommit(msg string) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	owner, err := fs.lkr.Owner()
	if err != nil {
		return err
	}

	return fs.lkr.MakeCommit(owner, msg)
}

// isMove reports whether the most recent effective change of `nd`
// was a move operation.
func (fs *FS) isMove(nd n.ModNode) (bool, error) {
	cmt, err := fs.lkr.Status()
	if err != nil {
		return false, err
	}

	walker := vcs.NewHistoryWalker(fs.lkr, cmt, nd)
	for walker.Next() {
		state := walker.State()
		if state.Mask == vcs.ChangeTypeNone {
			continue
		}

		if state.Mask&vcs.ChangeTypeMove != 0 {
			return true, nil
		}

		// First non-empty change was not a move:
		return false, nil
	}

	return false, nil
}

// DeletedNodes returns all nodes under `root` that were deleted.
// This does not include files that were moved. Note that you
// cannot pass the paths of those files to methods like Cat(),
// since they will refuse to work on deleted files.
func (fs *FS) DeletedNodes(root string) ([]*StatInfo, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	rootNd, err := fs.lkr.LookupNode(root)
	if err != nil {
		return nil, err
	}

	nodes := []*StatInfo{}
	err = n.Walk(fs.lkr, rootNd, false, func(child n.Node) error {
		// Only ghosts represent deleted nodes:
		if child.Type() != n.NodeTypeGhost {
			return nil
		}

		modNd, ok := child.(n.ModNode)
		if !ok {
			return ie.ErrBadNode
		}

		isMove, err := fs.isMove(modNd)
		if err != nil {
			return err
		}

		// Ghosts created by a move are not "deleted":
		if !isMove {
			nodes = append(nodes, fs.nodeToStat(modNd))
		}

		return nil
	})

	if err != nil {
		return nil, err
	}

	return nodes, nil
}

// Undelete tries to recover a file or directory that was previously deleted.
// This will fail when being called on a regular file or directory.
// You can obtain deleted paths by using DeletedNodes()
func (fs *FS) Undelete(root string) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	if fs.readOnly {
		return ErrReadOnly
	}

	if err := vcs.Undelete(fs.lkr, root); err != nil {
		return err
	}

	nd, err := fs.lkr.LookupModNode(root)
	if err != nil {
		return err
	}

	// Recovered content should be pinned again (implicitly):
	return fs.pinner.PinNode(nd, false)
}

// Head translates the "head" symbol to a ref.
func (fs *FS) Head() (string, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	head, err := fs.lkr.Head()
	if err != nil {
		return "", err
	}

	return head.TreeHash().B58String(), nil
}

// Curr translates the "curr" symbol to a ref.
func (fs *FS) Curr() (string, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	status, err := fs.lkr.Status()
	if err != nil {
		return "", err
	}

	return status.TreeHash().B58String(), nil
}

// commitToExternal converts an internal commit node to the external
// Commit view, attaching any tags found in `hashToRef` (may be nil).
func commitToExternal(cmt *n.Commit, hashToRef map[string][]string) *Commit {
	tags := []string{}
	if hashToRef != nil {
		tags = hashToRef[cmt.TreeHash().B58String()]
	}

	return &Commit{
		Hash:  cmt.TreeHash().Clone(),
		Msg:   cmt.Message(),
		Tags:  tags,
		Date:  cmt.ModTime(),
		Index: cmt.Index(),
	}
}

// History returns all modifications of a node with one entry per commit.
func (fs *FS) History(path string) ([]Change, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	nd, err := fs.lkr.LookupModNode(path)
	if err != nil {
		return nil, err
	}

	status, err := fs.lkr.Status()
	if err != nil {
		return nil, err
	}

	hist, err := vcs.History(fs.lkr, nd, status, nil)
	if err != nil {
		return nil, err
	}

	hashToRef, err := fs.buildCommitHashToRefTable()
	if err != nil {
		return nil, err
	}

	entries := []Change{}
	for _, change := range hist {
		head := commitToExternal(change.Head, hashToRef)

		var next *Commit
		if change.Next != nil {
			next = commitToExternal(change.Next, hashToRef)
		}

		isPinned, isExplicit, err := fs.pinner.IsNodePinned(change.Curr)
		if err != nil {
			return nil, err
		}

		entries = append(entries, Change{
			Path:            change.Curr.Path(),
			Change:          change.Mask.String(),
			IsPinned:        isPinned,
			IsExplicit:      isExplicit,
			Head:            head,
			Next:            next,
			MovedTo:         change.MovedTo,
			WasPreviouslyAt: change.WasPreviouslyAt,
		})
	}

	return entries, nil
}

// buildSyncCfg assembles the vcs.SyncOptions from the fs config,
// wiring up the pin-related callbacks for add/remove/merge/conflict.
func (fs *FS) buildSyncCfg() (*vcs.SyncOptions, error) {
	// Helper method to easily pin depending on a condition variable
	doPinOrUnpin := func(doPin, explicit bool, nd n.ModNode) {
		file, ok := nd.(*n.File)
		if !ok {
			// Non-files are simply ignored.
			return
		}

		op := fs.pinner.UnpinNode
		opName := "unpin"
		if doPin {
			op = fs.pinner.PinNode
			opName = "pin"
		}

		if err := op(file, explicit); err != nil {
			log.Warningf("Failed to %s (hash: %v)", opName, file.BackendHash())
		}
	}

	conflictStrategy := vcs.ConflictStrategyFromString(
		fs.cfg.String("sync.conflict_strategy"),
	)

	if conflictStrategy == vcs.ConflictStragetyUnknown {
		return nil, fmt.Errorf("unknown conflict strategy: %v", conflictStrategy)
	}

	return &vcs.SyncOptions{
		ConflictStrategy: conflictStrategy,
		IgnoreDeletes:    fs.cfg.Bool("sync.ignore_removed"),
		IgnoreMoves:      fs.cfg.Bool("sync.ignore_moved"),
		OnAdd: func(newNd n.ModNode) bool {
			if fs.cfg.Bool("sync.pin_added") {
				// do pinning and more importantly caching
				doPinOrUnpin(true, false, newNd)
			}
			return true
		},
		OnRemove: func(oldNd n.ModNode) bool {
			doPinOrUnpin(false, true, oldNd)
			return true
		},
		// OnMerge: func(newNd, oldNd n.ModNode) bool {
		OnMerge: func(nd n.ModNode, isGet bool, ndPinStats *vcs.PinStats) bool {
			// During merge we are acting on the same node
			// but we are modifying its hashes thus we would not be
			// able to get node pins by reusing node itself.
			// Our main goal is either get pin info from nd node
			// or to set it according to the previously obtained info.
			if isGet {
				isPinned, isExplicit, err := fs.pinner.IsNodePinned(nd)
				if err != nil {
					log.Warnf(
						"failed to check pin status of old node `%s` (%v)",
						nd.Path(),
						nd.BackendHash(),
					)

					// better don't change something.
					return false
				}

				ndPinStats.Pinned = isPinned
				ndPinStats.Explicit = isExplicit
				return true
			}

			// If we are not getting then we are setting.
			// Pin new node with old pin state:
			if ndPinStats.Pinned {
				// Preserving the state
				doPinOrUnpin(ndPinStats.Pinned, ndPinStats.Explicit, nd)
			} else {
				// if it was unpinned we are forcing it to be unpinned
				doPinOrUnpin(false, true, nd)
			}

			return true
		},
		OnConflict: func(src, dst n.ModNode) bool {
			// Don't need to do something,
			// conflict files will not get a pin by default.
			return true
		},
	}, nil
}

// SyncOption is a option that can be passed to Sync.
type SyncOption func(cfg *vcs.SyncOptions)

// SyncOptMessage sets the commit message that will be
// given to MakeCommit() on a sync commit.
func SyncOptMessage(msg string) SyncOption {
	return func(cfg *vcs.SyncOptions) {
		cfg.Message = msg
	}
}

// SyncOptConflictStrategy overwrites the conflict strategy
// (see also fs.sync.conflict_strategy which acts as default)
func SyncOptConflictStrategy(strategy string) SyncOption {
	return func(cfg *vcs.SyncOptions) {
		if strategy == "" {
			return
		}

		cfg.ConflictStrategy = vcs.ConflictStrategyFromString(strategy)
	}
}

// SyncOptReadOnlyFolders allows you to set a set of folders
// that will be protected from modifications by the sync.
func SyncOptReadOnlyFolders(folders []string) SyncOption {
	return func(cfg *vcs.SyncOptions) {
		if cfg.ReadOnlyFolders == nil {
			cfg.ReadOnlyFolders = make(map[string]bool)
		}

		for _, folder := range folders {
			cfg.ReadOnlyFolders[folder] = true
		}
	}
}

// SyncOptConflictgStrategyPerFolder allows you to set a specific conflict
// resolution strategy for specific folders. The key of the map is the folder,
// the value is the conflict strategy name. Unknown strategy names are skipped.
// (NOTE: the "Conflictg" typo in the name is kept for API compatibility.)
func SyncOptConflictgStrategyPerFolder(strategies map[string]string) SyncOption {
	return func(cfg *vcs.SyncOptions) {
		if cfg.ConflictStrategyPerFolder == nil {
			cfg.ConflictStrategyPerFolder = make(map[string]vcs.ConflictStrategy)
		}

		for folder, strategy := range strategies {
			cs := vcs.ConflictStrategyFromString(strategy)
			if cs == vcs.ConflictStragetyUnknown {
				continue
			}

			cfg.ConflictStrategyPerFolder[folder] = cs
		}
	}
}

// Sync will synchronize the state of two filesystems.
// If one of filesystems have unstaged changes, they will be committed first.
// If our filesystem was changed by Sync(), a new merge commit will also be created.
func (fs *FS) Sync(remote *FS, options ...SyncOption) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	if fs.readOnly {
		return ErrReadOnly
	}

	// build default config from the defaults/base config:
	syncCfg, err := fs.buildSyncCfg()
	if err != nil {
		return err
	}

	// Let the caller-supplied options override the defaults:
	for _, option := range options {
		option(syncCfg)
	}

	return vcs.Sync(remote.lkr, fs.lkr, syncCfg)
}

// MakeDiff will return a diff between `headRevOwn` and `headRevRemote`.
// `remote` is the filesystem `headRevRemote` belongs to and may be the same as `fs`.
func (fs *FS) MakeDiff(remote *FS, headRevOwn, headRevRemote string) (*Diff, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	srcHead, err := parseRev(remote.lkr, headRevRemote)
	if err != nil {
		return nil, e.Wrapf(err, "parse remote ref")
	}

	dstHead, err := parseRev(fs.lkr, headRevOwn)
	if err != nil {
		return nil, e.Wrapf(err, "parse own ref")
	}

	syncCfg, err := fs.buildSyncCfg()
	if err != nil {
		return nil, err
	}

	realDiff, err := vcs.MakeDiff(remote.lkr, fs.lkr, srcHead, dstHead, syncCfg)
	if err != nil {
		return nil, e.Wrapf(err, "make diff")
	}

	// "fake" is the diff that we give to the outside.
	// Internally we have a bit more knowledge.
	fakeDiff := &Diff{}

	// Convert the simple slice parts:
	for _, nd := range realDiff.Added {
		fakeDiff.Added = append(fakeDiff.Added, *fs.nodeToStat(nd))
	}

	for _, nd := range realDiff.Ignored {
		fakeDiff.Ignored = append(fakeDiff.Ignored, *fs.nodeToStat(nd))
	}

	for _, nd := range realDiff.Removed {
		fakeDiff.Removed = append(fakeDiff.Removed, *fs.nodeToStat(nd))
	}

	for _, nd := range realDiff.Missing {
		fakeDiff.Missing = append(fakeDiff.Missing, *fs.nodeToStat(nd))
	}

	// And also convert the slightly more complex pairs:
	for _, pair := range realDiff.Moved {
		fakeDiff.Moved = append(fakeDiff.Moved, DiffPair{
			Src: *fs.nodeToStat(pair.Src),
			Dst: *fs.nodeToStat(pair.Dst),
		})
	}

	for _, pair := range realDiff.Merged {
		fakeDiff.Merged = append(fakeDiff.Merged, DiffPair{
			Src: *fs.nodeToStat(pair.Src),
			Dst: *fs.nodeToStat(pair.Dst),
		})
	}

	for _, pair := range realDiff.Conflict {
		fakeDiff.Conflict = append(fakeDiff.Conflict, DiffPair{
			Src: *fs.nodeToStat(pair.Src),
			Dst: *fs.nodeToStat(pair.Dst),
		})
	}

	return fakeDiff, nil
}

// buildCommitHashToRefTable maps each commit tree hash (b58 encoded)
// to the list of ref names pointing at it.
func (fs *FS) buildCommitHashToRefTable() (map[string][]string, error) {
	names, err := fs.lkr.ListRefs()
	if err != nil {
		return nil, err
	}

	hashToRef := make(map[string][]string)
	for _, name := range names {
		cmt, err := fs.lkr.ResolveRef(name)
		if err != nil {
			return nil, err
		}

		if cmt != nil {
			key := cmt.TreeHash().B58String()
			hashToRef[key] = append(hashToRef[key], name)
		}
	}

	return hashToRef, nil
}

// Log returns a list of commits starting with the staging commit until the
// initial commit. For each commit, metadata is collected and fn is called.
// The log starts at the revision pointed to by `head`.
// If `head` is an empty string, "curr" is assumed.
func (fs *FS) Log(head string, fn func(c *Commit) error) error { fs.mu.Lock() defer fs.mu.Unlock() var ( headCmt *n.Commit err error ) if head == "" { headCmt, err = fs.lkr.Status() if err != nil { return err } } else { headCmt, err = parseRev(fs.lkr, head) if err != nil { return err } } hashToRef, err := fs.buildCommitHashToRefTable() if err != nil { return err } return c.Log(fs.lkr, headCmt, func(cmt *n.Commit) error { return fn(commitToExternal(cmt, hashToRef)) }) } // Reset restores the state of `path` to the state in `rev`. func (fs *FS) Reset(path, rev string) error { fs.mu.Lock() defer fs.mu.Unlock() if fs.readOnly { return ErrReadOnly } if path == "/" || path == "" { return fs.checkout(rev, false) } cmt, err := parseRev(fs.lkr, rev) if err != nil { return err } oldNode, err := vcs.ResetNode(fs.lkr, cmt, path) if err != nil { return err } // The old node does not necessarily exist: if oldNode != nil { if err := fs.pinner.UnpinNode(oldNode, false); err != nil { return err } } // Cannot (un)pin non-existing file anymore. newNode, err := fs.lkr.LookupNode(path) if ie.IsNoSuchFileError(err) { return nil } if err != nil { return err } return fs.pinner.PinNode(newNode, false) } // Checkout reverts all state to the commit referenced by `rev`. // If `force` is true a non-empty staging area will be overwritten. func (fs *FS) Checkout(rev string, force bool) error { fs.mu.Lock() defer fs.mu.Unlock() return fs.checkout(rev, force) } func (fs *FS) checkout(rev string, force bool) error { cmt, err := parseRev(fs.lkr, rev) if err != nil { return err } return fs.lkr.CheckoutCommit(cmt, force) } // Tag saves a human readable name for the revision pointed to by `rev`. // There are three pre-defined tags available: // // - HEAD: The last full commit. // - CURR: The current commit (== staging commit) // - INIT: the initial commit. // // The tagname is case-insensitive. 
func (fs *FS) Tag(rev, name string) error { fs.mu.Lock() defer fs.mu.Unlock() cmt, err := parseRev(fs.lkr, rev) if err != nil { return e.Wrap(err, "parse ref") } return fs.lkr.SaveRef(name, cmt) } // RemoveTag removes a previously created tag. func (fs *FS) RemoveTag(name string) error { fs.mu.Lock() defer fs.mu.Unlock() return fs.lkr.RemoveRef(name) } // FilesByContent returns all stat info for the content hashes referenced in // `contents`. The return value is a map with the content hash as key and a // StatInfo describing the exact file content. func (fs *FS) FilesByContent(contents []h.Hash) (map[string]StatInfo, error) { fs.mu.Lock() defer fs.mu.Unlock() files, err := fs.lkr.FilesByContents(contents) if err != nil { return nil, err } infos := make(map[string]StatInfo) for content, file := range files { infos[content] = *fs.nodeToStat(file) } return infos, nil } // ScheduleGCRun runs GC run at the next possible time. // This method does not block until the run is finished. func (fs *FS) ScheduleGCRun() { // Putting a value into gcControl might block, // so better do it in the background. go func() { fs.gcControl <- true }() } func (fs *FS) writeLastPatchIndex(index int64) error { fromIndexData := []byte(strconv.FormatInt(index, 10)) return fs.lkr.MetadataPut("fs.last-merge-index", fromIndexData) } func (fs *FS) autoCommitStagedChanges(remoteName string) error { haveStagedChanges, err := fs.lkr.HaveStagedChanges() if err != nil { return err } // Commit changes if there are any. // This is a little unfortunate implication on how the current // way of sending getting patches work. Creating a patch itself // works with a staging commit, but the versioning does not work // anymore then, since the same version might have a different // set of changes. 
if !haveStagedChanges { return nil } owner, err := fs.lkr.Owner() if err != nil { return err } msg := fmt.Sprintf("auto commit on metadata request from »%s«", remoteName) return fs.lkr.MakeCommit(owner, msg) } // MakePatch creates a binary patch with all file changes starting with // `fromRev`. Note that commit information is not exported, only individual // file and directory changes. // // The byte structured returned by this method may change at any point // and may not be relied upon. // // The `remoteName` is the name of the remote we're creating the patch for. // It's only used for display purpose in the commit message. func (fs *FS) MakePatch(fromRev string, folders []string, remoteName string) ([]byte, error) { fs.mu.Lock() defer fs.mu.Unlock() if err := fs.autoCommitStagedChanges(remoteName); err != nil { return nil, err } from, err := parseRev(fs.lkr, fromRev) if err != nil { return nil, err } patch, err := vcs.MakePatch(fs.lkr, from, folders) if err != nil { return nil, err } msg, err := patch.ToCapnp() if err != nil { return nil, err } return msg.Marshal() } // MakePatches works like MakePatch but produces individual patches for commit. // This allows to persist the history to some extent. func (fs *FS) MakePatches(fromRev string, folders []string, remoteName string) ([]byte, error) { fs.mu.Lock() defer fs.mu.Unlock() if err := fs.autoCommitStagedChanges(remoteName); err != nil { return nil, err } from, err := parseRev(fs.lkr, fromRev) if err != nil { return nil, err } patches, err := vcs.MakePatches(fs.lkr, from, folders) if err != nil { return nil, err } msg, err := patches.ToCapnp() if err != nil { return nil, err } return msg.Marshal() } // ApplyPatch reads the binary patch coming from MakePatch and tries to apply it. 
func (fs *FS) ApplyPatch(data []byte) error { fs.mu.Lock() defer fs.mu.Unlock() msg, err := capnp.Unmarshal(data) if err != nil { return err } patch := &vcs.Patch{} if err := patch.FromCapnp(msg); err != nil { return err } return fs.applyPatches(vcs.Patches{patch}) } // ApplyPatches reads the binary patch coming from MakePatches and tries to apply them. func (fs *FS) ApplyPatches(data []byte) error { fs.mu.Lock() defer fs.mu.Unlock() msg, err := capnp.Unmarshal(data) if err != nil { return err } patches := &vcs.Patches{} if err := patches.FromCapnp(msg); err != nil { return err } return fs.applyPatches(*patches) } func (fs *FS) applyPatches(patches vcs.Patches) error { owner, err := fs.lkr.Owner() if err != nil { return err } highestIndex := int64(-1) for _, patch := range patches { if err := vcs.ApplyPatch(fs.lkr, patch); err != nil { return err } if idx := patch.CurrIndex; highestIndex < idx { highestIndex = idx } cmtMsg := fmt.Sprintf("apply patch with %d changes", len(patch.Changes)) if err := fs.lkr.MakeCommit(owner, cmtMsg); err != nil { if err == ie.ErrNoChange { // Empty commits are totally possible. continue } return err } } // Remember what patch index we merged last. // This info can be read via LastPatchIndex() to determine // the next version to get from the remote. fromIndexData := []byte(strconv.FormatInt(highestIndex, 10)) return fs.lkr.MetadataPut("fs.last-merge-index", fromIndexData) } // LastPatchIndex will return the current version of this filesystem // regarding patch state. func (fs *FS) LastPatchIndex() (int64, error) { fs.mu.Lock() defer fs.mu.Unlock() fromIndexData, err := fs.lkr.MetadataGet("fs.last-merge-index") if err != nil && err != db.ErrNoSuchKey { return -1, err } // If we did not merge yet with anyone we have to // ask for a full fetch. if err == db.ErrNoSuchKey { return 0, nil } return strconv.ParseInt(string(fromIndexData), 10, 64) } // CommitInfo returns detailed info about a certain commit. 
func (fs *FS) CommitInfo(rev string) (*Commit, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	cmt, err := parseRev(fs.lkr, rev)
	// NOTE(review): any parseRev failure with a nil commit — not only
	// "no such ref" — is reported as (nil, nil) here; confirm that other
	// error kinds should not be returned to the caller.
	if cmt == nil || ie.IsErrNoSuchRef(err) {
		return nil, nil
	}

	hashToRef, err := fs.buildCommitHashToRefTable()
	if err != nil {
		return nil, err
	}

	return commitToExternal(cmt, hashToRef), nil
}

// HaveStagedChanges returns true if there are changes that were not committed yet.
func (fs *FS) HaveStagedChanges() (bool, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	return fs.lkr.HaveStagedChanges()
}

// IsCached will return true when the file is cached locally.
func (fs *FS) IsCached(path string) (bool, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	nd, err := fs.lkr.LookupNode(path)
	if err != nil {
		return false, err
	}

	// Empty directories count as cached.
	if nd.Type() == n.NodeTypeDirectory && nd.NChildren() == 0 {
		return true, nil
	}

	totalCount := 0
	cachedCount := 0

	// Sentinel used to abort the walk early on the first uncached file.
	errNotCachedSentinel := errors.New("not cached found")
	err = n.Walk(fs.lkr, nd, true, func(child n.Node) error {
		if child.Type() != n.NodeTypeFile {
			return nil
		}

		totalCount++

		isCached, err := fs.bk.IsCached(child.BackendHash())
		if err != nil {
			return err
		}

		if isCached {
			// Make sure that we do not count empty directories
			// as pinned nodes.
			cachedCount++
		} else {
			// Return a special error here to stop Walk() iterating.
			// One file is enough to stop IsCached() from being true.
			return errNotCachedSentinel
		}

		return nil
	})

	if err != nil && err != errNotCachedSentinel {
		return false, err
	}

	return cachedCount == totalCount, nil
}

// Hints returns the hint manager passed to NewFilesystem
func (fs *FS) Hints() HintManager {
	return fs.hintManager
}



================================================
FILE: catfs/fs_test.go
================================================
package catfs

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sort"
	"testing"
	"time"

	c "github.com/sahib/brig/catfs/core"
	ie "github.com/sahib/brig/catfs/errors"
	"github.com/sahib/brig/catfs/mio"
	"github.com/sahib/brig/catfs/mio/chunkbuf"
	"github.com/sahib/brig/catfs/mio/compress"
	"github.com/sahib/brig/catfs/mio/pagecache/mdcache"
	n "github.com/sahib/brig/catfs/nodes"
	"github.com/sahib/brig/defaults"
	"github.com/sahib/brig/repo/hints"
	h "github.com/sahib/brig/util/hashlib"
	"github.com/sahib/brig/util/testutil"
	"github.com/sahib/config"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
)

func init() {
	log.SetLevel(log.WarnLevel)
}

// withDummyFSReadOnly builds a throwaway FS (in-memory backend, temporary
// on-disk database), runs `fn` with it and cleans everything up afterwards.
func withDummyFSReadOnly(t *testing.T, readOnly bool, fn func(fs *FS)) {
	backend := NewMemFsBackend()
	owner := "alice"

	dbPath, err := ioutil.TempDir("", "brig-fs-test")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}

	defer func() {
		if err := os.RemoveAll(dbPath); err != nil {
			t.Fatalf("Failed to clean up %s: %v", dbPath, err)
		}
	}()

	cfg, err := config.Open(nil, defaults.Defaults, config.StrictnessPanic)
	require.Nil(t, err)

	fsCfg := cfg.Section("fs")
	mdc, err := mdcache.New(mdcache.Options{
		MaxMemoryUsage: 1024 * 1024,
	})
	require.NoError(t, err)

	fs, err := NewFilesystem(
		backend,
		dbPath,
		owner,
		readOnly,
		fsCfg,
		nil,
		mdc,
	)
	if err != nil {
		t.Fatalf("Failed to create filesystem: %v", err)
	}

	fn(fs)

	if err := fs.Close(); err != nil {
		t.Fatalf("Failed to close filesystem: %v", err)
	}
}

// withDummyFS is the writable variant of withDummyFSReadOnly.
func withDummyFS(t *testing.T, fn func(fs *FS)) {
	withDummyFSReadOnly(t, false, fn)
}

func TestStat(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		_, err := fs.Stat("/sub/x")
		require.True(t, ie.IsNoSuchFileError(err))

		c.MustMkdir(t, fs.lkr, "/sub")
		file := c.MustTouch(t, fs.lkr, "/sub/x", 1)

		info, err := fs.Stat("/sub/x")
		require.Nil(t, err)
		require.Equal(t, info.Path, "/sub/x")
		require.Equal(t, info.IsDir, false)
		require.Equal(t, info.Size, uint64(0))
		require.Equal(t, info.Inode, file.Inode())
		require.Equal(t, info.TreeHash, file.TreeHash())

		data := make([]byte, 42)
		require.Nil(t, fs.Stage("/sub/x", bytes.NewReader(data)))

		info, err = fs.Stat("/sub/x")
		require.Nil(t, err)
		require.Equal(t, info.Size, uint64(len(data)))
		require.Equal(t, info.TreeHash, file.TreeHash())

		info, err = fs.Stat("/sub")
		require.Nil(t, err)
		require.Equal(t, info.Path, "/sub")
		require.Equal(t, info.IsDir, true)
		require.Equal(t, uint64(len(data)), info.Size)
	})
}

func TestLogAndTag(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		cmts := []*n.Commit{}
		for idx := 0; idx < 10; idx++ {
			_, cmt := c.MustTouchAndCommit(t, fs.lkr, "/x", byte(idx))

			hash := cmt.TreeHash().B58String()
			if err := fs.Tag(hash, fmt.Sprintf("tag%d", idx)); err != nil {
				t.Fatalf("Failed to tag %v: %v", hash, err)
			}

			cmts = append(cmts, cmt)
		}

		status, err := fs.lkr.Status()
		require.Nil(t, err)

		cmts = append(cmts, status)
		log := []*Commit{}

		require.Nil(t, fs.Log("", func(c *Commit) error {
			log = append(log, c)
			return nil
		}))

		for idx, entry := range log {
			// Log iterates newest-first; map back to insertion order.
			ridx := len(cmts) - idx - 1
			cmt := cmts[ridx]

			require.Equal(t, entry.Hash, cmt.TreeHash())

			msg := fmt.Sprintf("cmt %d", ridx)
			tags := []string{fmt.Sprintf("tag%d", ridx)}

			// 0 is status, 1 is head, 10 is initial
			switch idx {
			case 0:
				tags = []string{"curr"}
				msg = ""
			case 1:
				tags = append(tags, "head")
			case 10:
				tags = append(tags, "init")
			}

			sort.Sort(sort.Reverse(sort.StringSlice(entry.Tags)))
			require.EqualValues(t, tags, entry.Tags)
			require.Equal(t, entry.Msg, msg)
		}
	})
}

// TestKey is a static 32 byte key used by the tests below.
var TestKey = []byte("01234567890ABCDE01234567890ABCDE")

func TestCat(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		raw := []byte{1, 2, 3}
		rinRaw := bytes.NewBuffer(raw)

		rin, isRaw, err := mio.NewInStream(
			rinRaw,
			"",
			TestKey,
			hints.Default(),
		)
		require.Nil(t, err)

		backendHash, err := fs.bk.Add(rin)
		require.Nil(t, err)

		contentHash := h.TestDummy(t, 23)

		// Stage the file manually (without fs.Stage)
		_, err = c.Stage(
			fs.lkr,
			"/x",
			contentHash,
			backendHash,
			uint64(len(raw)),
			int64(len(raw)),
			TestKey,
			time.Now(),
			isRaw,
		)
		require.Nil(t, err)

		// Cat the file again:
		stream, err := fs.Cat("/x")
		require.Nil(t, err)

		// Check if the returned stream really contains 1,2,3
		result := bytes.NewBuffer(nil)
		_, err = stream.WriteTo(result)
		require.Nil(t, err)
		require.Equal(t, result.Bytes(), raw)
	})
}

func TestStageBasic(t *testing.T) {
	t.Parallel()
	log.SetLevel(log.DebugLevel)

	tcs := []int64{
		0,
		1,
		3,
		8 * 1024,
		64*1024 + 1,
		4 * 1024 * 1024,
	}

	for _, size := range tcs {
		t.Run(fmt.Sprintf("size-%d", size), func(t *testing.T) {
			withDummyFS(t, func(fs *FS) {
				tc := testutil.CreateDummyBuf(size)
				buf := chunkbuf.NewChunkBuffer(tc)
				require.NoError(t, fs.Stage("/x", buf))

				stream, err := fs.Cat("/x")
				require.NoError(t, err)

				data, err := ioutil.ReadAll(stream)
				require.NoError(t, err)
				require.Equal(t, len(tc), len(data))
				require.Equal(t, tc, data)
				require.NoError(t, stream.Close())

				file, err := fs.lkr.LookupFile("/x")
				require.NoError(t, err)

				key := file.Key()
				oldKey := make([]byte, len(key))
				oldSize := file.Size()
				copy(oldKey, key)

				// Also insert some more data to modify an existing file.
				nextData := []byte{6, 6, 6, 6, 6, 6}
				require.NoError(t, fs.Stage("/x", chunkbuf.NewChunkBuffer((nextData))))
				stream, err = fs.Cat("/x")
				require.NoError(t, err)

				data, err = ioutil.ReadAll(stream)
				require.NoError(t, err)
				require.Equal(t, data, nextData)
				require.NoError(t, stream.Close())

				// Check that the key did not change during modifying an existing file.
				// This is only true if both of the sizes are not equal to zero
				// Recall that 0 sized file has emptyFileEncryptionKey
				file, err = fs.lkr.LookupFile("/x")
				require.NoError(t, err)
				if (oldSize != 0 && file.Size() != 0) || (oldSize == file.Size()) {
					require.Equal(t, file.Key(), oldKey)
				} else {
					require.NotEqual(t, file.Key(), oldKey)
				}
			})
		})
	}
}

func TestHistory(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.MakeCommit("hello"))

		require.Nil(t, fs.Stage("/x", chunkbuf.NewChunkBuffer([]byte{1})))
		require.Nil(t, fs.MakeCommit("1"))

		require.Nil(t, fs.Stage("/x", chunkbuf.NewChunkBuffer([]byte{2})))
		require.Nil(t, fs.MakeCommit("2"))

		require.Nil(t, fs.Stage("/x", chunkbuf.NewChunkBuffer([]byte{3})))
		require.Nil(t, fs.MakeCommit("3"))

		hist, err := fs.History("/x")
		require.Nil(t, err)

		log := []*Commit{}
		require.Nil(t, fs.Log("", func(c *Commit) error {
			log = append(log, c)
			return nil
		}))

		for idx, entry := range hist {
			require.Equal(t, entry.Path, "/x")

			change := "none"
			switch idx {
			case 1, 2:
				change = "modified"
			case 3:
				change = "added"
			}

			require.Equal(t, entry.Change, change)
			require.Equal(
				t,
				log[idx].Hash.B58String(),
				entry.Head.Hash.B58String(),
			)
		}
	})
}

// mustReadPath reads the whole content at `path`, failing the test on error.
func mustReadPath(t *testing.T, fs *FS, path string) []byte {
	stream, err := fs.Cat(path)
	require.Nil(t, err)

	data, err := ioutil.ReadAll(stream)
	require.Nil(t, err)

	return data
}

func TestReset(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.MakeCommit("hello"))

		require.Nil(t, fs.Stage("/x", chunkbuf.NewChunkBuffer([]byte{1})))
		require.Nil(t, fs.MakeCommit("1"))

		// Modify on stage:
		require.Nil(t, fs.Stage("/x", chunkbuf.NewChunkBuffer([]byte{2})))
		require.Nil(t, fs.Reset("/x", "HEAD"))

		data := mustReadPath(t, fs, "/x")
		require.Equal(t, data[0], byte(1))

		if err := fs.MakeCommit("2"); err != ie.ErrNoChange {
			t.Fatalf("Reset did clearly not reset stuff... (something changed)")
		}

		// Remove the file and then reset it (like git checkout -- file)
		require.Nil(t, fs.Remove("/x"))
		if _, err := fs.Cat("/x"); !ie.IsNoSuchFileError(err) {
			t.Fatalf("Something wrong with removed node")
		}

		// Check if we can recover the delete:
		require.Nil(t, fs.Reset("/x", "HEAD"))

		data = mustReadPath(t, fs, "/x")
		require.Equal(t, data[0], byte(1))

		// Reset to something non-existing -> error.
		require.NotNil(t, fs.Reset("/x", "DEADBEEF"))

		// Reset to the very first commit - node did not exist back then.
		require.Nil(t, fs.Reset("/x", "INIT"))

		// Should not exist anymore currently.
		_, err := fs.Stat("/x")
		require.True(t, ie.IsNoSuchFileError(err))
	})
}

func TestCheckout(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.MakeCommit("hello"))
		hello, err := fs.Head()
		require.Nil(t, err)

		require.Nil(t, fs.Touch("/x"))
		require.Nil(t, fs.Touch("/y"))
		require.Nil(t, fs.Touch("/z"))

		require.Nil(t, fs.Stage("/x", bytes.NewReader([]byte{1, 2, 3})))
		require.Nil(t, fs.Remove("/y"))
		require.Nil(t, fs.Move("/z", "/a"))
		require.Nil(t, fs.MakeCommit("world"))

		world, err := fs.Head()
		require.Nil(t, err)

		require.Nil(t, fs.Touch("/new"))
		require.Nil(t, fs.Stage("/x", bytes.NewReader([]byte{4, 5, 6})))

		// Non-forced checkout must refuse a dirty staging area.
		err = fs.Checkout(world, false)
		require.Equal(t, err, ie.ErrStageNotEmpty)

		err = fs.Checkout(world, true)
		require.Nil(t, err)

		_, err = fs.Stat("/new")
		require.True(t, ie.IsNoSuchFileError(err))

		xStream, err := fs.Cat("/x")
		require.Nil(t, err)

		data, err := ioutil.ReadAll(xStream)
		require.Nil(t, err)
		require.Equal(t, data, []byte{1, 2, 3})

		err = fs.Checkout(hello, true)
		require.Nil(t, err)

		_, err = fs.Stat("/x")
		require.True(t, ie.IsNoSuchFileError(err))
	})
}

func TestExportImport(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.MakeCommit("hello world"))

		// Add a single file:
		buf := chunkbuf.NewChunkBuffer([]byte{1, 2, 3})
		require.Nil(t, fs.Stage("/x", buf))
		require.Nil(t, fs.MakeCommit("touchy touchy"))

		// Stage something to see if this will also be exported
		// (it most definitely should)
		buf = chunkbuf.NewChunkBuffer([]byte{3, 2, 1})
		require.Nil(t, fs.Stage("/x", buf))

		mem := &bytes.Buffer{}
		require.Nil(t, fs.Export(mem))

		// Check if we can import all this data:
		// NOTE(review): the closure imports into and reads from `fs`, not
		// `newFs` — so the fresh filesystem is never exercised; confirm
		// whether `newFs` was intended here.
		withDummyFS(t, func(newFs *FS) {
			require.Nil(t, fs.Import(mem))

			stream, err := fs.Cat("/x")
			require.Nil(t, err)

			data, err := ioutil.ReadAll(stream)
			require.Nil(t, err)
			require.Equal(t, []byte{3, 2, 1}, data)
		})
	})
}

func TestSync(t *testing.T) {
	t.Parallel()

	// There are a lot more tests in vcs/*
	// This is only a test to see if the high-level api is working.
	withDummyFS(t, func(fsa *FS) {
		require.Nil(t, fsa.MakeCommit("hello a"))
		withDummyFS(t, func(fsb *FS) {
			require.Nil(t, fsb.MakeCommit("hello b"))

			require.Nil(t, fsa.Sync(fsb))

			require.Nil(t, fsb.Stage("/x", bytes.NewReader([]byte{1})))
			require.Nil(t, fsb.Stage("/y", bytes.NewReader([]byte{2})))
			require.Nil(t, fsb.Stage("/z", bytes.NewReader([]byte{3})))

			// Actually sync the results:
			require.Nil(t, fsa.Sync(fsb))

			info, err := fsa.Stat("/x")
			require.Nil(t, err)
			require.Equal(t, info.Path, "/x")

			info, err = fsa.Stat("/y")
			require.Nil(t, err)
			require.Equal(t, info.Path, "/y")

			info, err = fsa.Stat("/z")
			require.Nil(t, err)
			require.Equal(t, info.Path, "/z")
		})
	})
}

func TestMakeDiff(t *testing.T) {
	t.Parallel()

	// There are a lot more tests in vcs/*
	// This is only a test for the high-level api.
	withDummyFS(t, func(fsa *FS) {
		fsaX := c.MustTouch(t, fsa.lkr, "/x", 1)
		fsaY := c.MustTouch(t, fsa.lkr, "/y", 2)
		fsaZ := c.MustTouch(t, fsa.lkr, "/z", 3)

		require.Nil(t, fsa.MakeCommit("hello a"))
		withDummyFS(t, func(fsb *FS) {
			require.Nil(t, fsb.MakeCommit("hello b"))
			require.Nil(t, fsa.Sync(fsb))

			fsbX := c.MustTouch(t, fsb.lkr, "/x", 4)
			c.MustTouch(t, fsb.lkr, "/y", 5)
			fsbZ := c.MustTouch(t, fsb.lkr, "/z", 6)
			fsbA := c.MustTouch(t, fsb.lkr, "/a", 7)

			require.Nil(t, fsb.MakeCommit("stuff"))
			require.Nil(t, fsb.Remove("/y"))
			require.Nil(t, fsb.MakeCommit("before diff"))

			// Use the upwards notation:
			diff, err := fsa.MakeDiff(fsb, "head^^^", "curr")
			require.Nil(t, err)

			require.Equal(t, []StatInfo{*fsb.nodeToStat(fsbA)}, diff.Added)
			require.Equal(t, []StatInfo{*fsa.nodeToStat(fsaY)}, diff.Removed)
			require.Equal(t, []DiffPair{{
				Src: *fsb.nodeToStat(fsbX),
				Dst: *fsa.nodeToStat(fsaX),
			}, {
				Src: *fsb.nodeToStat(fsbZ),
				Dst: *fsa.nodeToStat(fsaZ),
			}}, diff.Conflict)
		})
	})
}

func TestPin(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		// NOTE: Both files have the same content.
		require.Nil(t, fs.Stage("/x", bytes.NewReader([]byte{1})))
		require.Nil(t, fs.Stage("/y", bytes.NewReader([]byte{1})))

		require.Nil(t, fs.Unpin("/x", "curr", true))
		require.Nil(t, fs.Unpin("/y", "curr", true))

		isPinned, isExplicit, err := fs.IsPinned("/x")
		require.Nil(t, err)
		require.False(t, isPinned)
		require.False(t, isExplicit)

		require.Nil(t, fs.Pin("/x", "curr", true))

		isPinned, isExplicit, err = fs.IsPinned("/x")
		require.Nil(t, err)
		require.True(t, isPinned)
		require.True(t, isExplicit)

		isPinned, isExplicit, err = fs.IsPinned("/")
		require.Nil(t, err)
		require.False(t, isPinned)
		require.False(t, isExplicit)

		require.Nil(t, fs.Pin("/", "curr", true))

		isPinned, isExplicit, err = fs.IsPinned("/")
		require.Nil(t, err)
		require.True(t, isPinned)
		require.True(t, isExplicit)

		require.Nil(t, fs.Unpin("/", "curr", true))

		isPinned, isExplicit, err = fs.IsPinned("/")
		require.Nil(t, err)
		require.False(t, isPinned)
		require.False(t, isExplicit)

		isPinned, isExplicit, err = fs.IsPinned("/x")
		require.Nil(t, err)
		require.False(t, isPinned)
		require.False(t, isExplicit)
	})
}

func TestMkdir(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		err := fs.Mkdir("/a/b/c/d", false)
		require.True(t, ie.IsNoSuchFileError(err))

		_, err = fs.Stat("/a")
		require.True(t, ie.IsNoSuchFileError(err))

		err = fs.Mkdir("/a/b/c/d", true)
		require.Nil(t, err)

		info, err := fs.Stat("/a")
		require.Nil(t, err)
		require.True(t, info.IsDir)

		// Check that it still works if the directory exists
		err = fs.Mkdir("/a/b/c/d", false)
		require.Nil(t, err)

		err = fs.Mkdir("/a/b/c/d", true)
		require.Nil(t, err)

		err = fs.Mkdir("/a/b/c", false)
		require.Nil(t, err)
	})
}

func TestMove(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Touch("/x"))
		require.Nil(t, fs.Move("/x", "/y"))

		_, err := fs.Stat("/x")
		require.True(t, ie.IsNoSuchFileError(err))

		info, err := fs.Stat("/y")
		require.Nil(t, err)
		require.Equal(t, info.Path, "/y")
		require.False(t, info.IsDir)
	})
}

func TestTouch(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.NoError(t, fs.Touch("/y"))
		yInfo, err := fs.Stat("/y")
		require.NoError(t, err)

		// Check that the empty file has emptyFileEncryptionKey
		require.Equal(t, yInfo.Key, emptyFileEncryptionKey())

		require.NoError(t, fs.Touch("/x"))
		oldInfo, err := fs.Stat("/x")
		require.NoError(t, err)

		// Double Check that the empty file has emptyFileEncryptionKey
		require.Equal(t, oldInfo.Key, emptyFileEncryptionKey())
		// Check that two empty files have same backend hash
		require.Equal(t, oldInfo.BackendHash, yInfo.BackendHash)
		// Check that two empty files have same content hash
		require.Equal(t, oldInfo.ContentHash, yInfo.ContentHash)

		require.NoError(t, fs.Stage("/x", bytes.NewReader([]byte{1, 2, 3})))
		require.NoError(t, fs.Touch("/x"))

		newInfo, err := fs.Stat("/x")
		require.NoError(t, err)

		// Check that the non empty file encryption key is different from emptyFileEncryptionKey
		require.NotEqual(t, newInfo.Key, emptyFileEncryptionKey())

		// Check that the timestamp advanced only.
		require.True(t, oldInfo.ModTime.Before(newInfo.ModTime))

		// Also check that the content was not deleted:
		stream, err := fs.Cat("/x")
		require.NoError(t, err)

		data, err := ioutil.ReadAll(stream)
		require.NoError(t, err)
		require.Equal(t, data, []byte{1, 2, 3})
		require.NoError(t, stream.Close())
	})
}

func TestHead(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		_, err := fs.Head()
		require.True(t, ie.IsErrNoSuchRef(err))

		require.Nil(t, fs.MakeCommit("init"))

		ref, err := fs.Head()
		require.Nil(t, err)

		headCmt, err := fs.lkr.ResolveRef("head")
		require.Nil(t, err)
		require.Equal(t, headCmt.TreeHash().B58String(), ref)
	})
}

func TestList(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Touch("/x"))
		require.Nil(t, fs.Mkdir("/1/2/3/", true))
		require.Nil(t, fs.Touch("/1/2/3/y"))

		entries, err := fs.List("/1/2", -1)
		require.Nil(t, err)

		require.Equal(t, len(entries), 3)
		require.Equal(t, entries[0].Path, "/1/2")
		require.Equal(t, entries[1].Path, "/1/2/3")
		require.Equal(t, entries[2].Path, "/1/2/3/y")

		entries, err = fs.List("/", 1)
		require.Nil(t, err)

		require.Equal(t, 2, len(entries))
		require.Equal(t, entries[0].Path, "/1")
		require.Equal(t, entries[1].Path, "/x")

		dir, err := fs.lkr.LookupDirectory("/1")
		require.Nil(t, err)

		// Check if ghosts are being treated as not existent:
		c.MustMove(t, fs.lkr, dir, "/666")

		_, err = fs.List("/1", -1)
		require.True(t, ie.IsNoSuchFileError(err))

		_, err = fs.List("/666", -1)
		require.Nil(t, err)
	})
}

func TestTag(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Touch("/x"))
		require.Nil(t, fs.MakeCommit("init"))

		head, err := fs.Head()
		require.Nil(t, err)

		// try with an abbreviated tag name.
		require.Nil(t, fs.Tag(head[:10], "xxx"))

		cmt, err := fs.lkr.ResolveRef("xxx")
		require.Nil(t, err)
		require.Equal(t, cmt.(*n.Commit).Message(), "init")

		require.Nil(t, fs.RemoveTag("xxx"))
		cmt, err = fs.lkr.ResolveRef("xxx")
		require.Nil(t, cmt)
		require.True(t, ie.IsErrNoSuchRef(err))
	})
}

func TestStageUnmodified(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Stage("/x", bytes.NewReader([]byte{1})))
		infoOld, err := fs.Stat("/x")
		require.Nil(t, err)

		// Just to be sure:
		time.Sleep(50 * time.Millisecond)

		require.Nil(t, fs.Stage("/x", bytes.NewReader([]byte{1})))
		infoNew, err := fs.Stat("/x")
		require.Nil(t, err)

		require.Equal(t, infoOld.ModTime, infoNew.ModTime)
	})
}

func TestTruncate(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		data := testutil.CreateDummyBuf(1024)
		require.Nil(t, fs.Stage("/x", bytes.NewReader(data)))

		for _, size := range []int{1025, 512, 1, 0, 1024} {
			t.Run(fmt.Sprintf("size-%d", size), func(t *testing.T) {
				require.Nil(t, fs.Truncate("/x", uint64(size)))

				// clamp to 1024 for assertion:
				readSize := size
				if size > 1024 {
					readSize = 1024
				}

				stream, err := fs.Cat("/x")
				require.Nil(t, err)

				readData, err := ioutil.ReadAll(stream)
				require.Nil(t, err)
				require.Equal(t, len(readData), readSize)
				require.Equal(t, readData, data[:readSize])
			})
		}

		require.NotNil(t, fs.Truncate("/", 0))
	})
}

func TestChangingCompressAlgos(t *testing.T) {
	t.Parallel()

	withDummyFS(t, func(fs *FS) {
		// Create a file which will not be compressed.
		oldData := testutil.CreateDummyBuf(compress.HeaderSizeThreshold - 1)
		require.Nil(t, fs.Stage("/a-text-file.go", bytes.NewReader(oldData)))

		// Second run will use another compress algorithm, since we're
		// over the header size limit in the compression guesser.
		newData := testutil.CreateDummyBuf(compress.HeaderSizeThreshold + 1)
		require.Nil(t, fs.Stage("/a-text-file.go", bytes.NewReader(newData)))

		stream, err := fs.Cat("/a-text-file.go")
		require.Nil(t, err)

		gotData, err := ioutil.ReadAll(stream)
		require.Nil(t, err)
		require.Equal(t, newData, gotData)
	})
}

func TestPatch(t *testing.T) {
	withDummyFS(t, func(srcFs *FS) {
		withDummyFS(t, func(dstFs *FS) {
			require.Nil(t, srcFs.MakeCommit("init"))
			require.Nil(t, srcFs.Touch("/x"))
			require.Nil(t, srcFs.MakeCommit("added x"))

			srcIndex, err := srcFs.LastPatchIndex()
			require.Nil(t, err)
			require.Equal(t, int64(0), srcIndex)

			dstIndex, err := dstFs.LastPatchIndex()
			require.Nil(t, err)
			require.Equal(t, int64(0), dstIndex)

			patch, err := srcFs.MakePatch("commit[0]", nil, "")
			require.Nil(t, err)
			require.Nil(t, dstFs.ApplyPatch(patch))

			srcX, err := srcFs.Stat("/x")
			require.Nil(t, err)

			srcIndex, err = srcFs.LastPatchIndex()
			require.Nil(t, err)
			require.Equal(t, int64(0), srcIndex)

			dstX, err := dstFs.Stat("/x")
			require.Nil(t, err)

			require.Equal(t, srcX.Path, dstX.Path)
			require.Equal(t, srcX.Size, dstX.Size)
			require.Equal(t, srcX.ContentHash, dstX.ContentHash)
			require.Equal(t, srcX.BackendHash, dstX.BackendHash)

			dstIndex, err = dstFs.LastPatchIndex()
			require.Nil(t, err)
			require.Equal(t, int64(2), dstIndex)
		})
	})
}

func TestTar(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Stage("/a/file.png", bytes.NewReader([]byte("hello"))))
		require.Nil(t, fs.Stage("/b/file.jpg", bytes.NewReader([]byte("world"))))
		require.Nil(t, fs.Stage("/c/file.gif", bytes.NewReader([]byte("!"))))

		buf := &bytes.Buffer{}
		require.Nil(t, fs.Tar("/", buf, func(info *StatInfo) bool {
			// Exclude the /c directory:
			return info.Path != "/c"
		}))

		r := tar.NewReader(buf)
		for idx := 0; ; idx++ {
			hdr, err := r.Next()
			if err == io.EOF {
				break
			}

			require.Nil(t, err)

			data, err := ioutil.ReadAll(r)
			switch idx {
			case 0:
				require.Equal(t, []byte("hello"), data)
				require.Equal(t, "a/file.png", hdr.Name)
				require.Equal(t, int64(5), hdr.Size)
			case 1:
				require.Equal(t, []byte("world"), data)
				require.Equal(t, "b/file.jpg", hdr.Name)
				require.Equal(t, int64(5), hdr.Size)
			default:
				require.True(t, false, "should not be reached")
			}
		}
	})
}

func TestReadOnly(t *testing.T) {
	withDummyFSReadOnly(t, true, func(fs *FS) {
		err := fs.Stage("/x", bytes.NewReader([]byte{1, 2, 3}))
		require.Equal(t, ErrReadOnly, err)
	})
}

func TestDeletedNodesDirectory(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Mkdir("/dir_a", true))
		require.Nil(t, fs.Mkdir("/dir_b", true))
		require.Nil(t, fs.MakeCommit("added"))

		require.Nil(t, fs.Remove("/dir_a"))
		require.Nil(t, fs.Move("/dir_b", "/dir_c"))
		require.Nil(t, fs.MakeCommit("{re,}move"))

		dels, err := fs.DeletedNodes("/")
		require.Nil(t, err)
		require.Len(t, dels, 1)

		require.Equal(t, "/dir_a", dels[0].Path)
		require.True(t, dels[0].IsDir)
	})
}

func TestDeletedNodesFile(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Stage("/a", bytes.NewReader([]byte("hello"))))
		require.Nil(t, fs.Stage("/b", bytes.NewReader([]byte("world"))))
		require.Nil(t, fs.MakeCommit("added"))

		require.Nil(t, fs.Remove("/a"))
		require.Nil(t, fs.Move("/b", "/c"))
		require.Nil(t, fs.MakeCommit("{re,}move"))

		dels, err := fs.DeletedNodes("/")
		require.Nil(t, err)
		require.Len(t, dels, 1)

		require.Equal(t, "/a", dels[0].Path)
		require.False(t, dels[0].IsDir)
	})
}

func TestUndeleteFile(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Stage("/a", bytes.NewReader([]byte("hello"))))
		require.Nil(t, fs.Stage("/b", bytes.NewReader([]byte("world"))))
		require.Nil(t, fs.MakeCommit("initial"))

		require.Nil(t, fs.Remove("/a"))
		require.Nil(t, fs.Move("/b", "/c"))
		require.Nil(t, fs.MakeCommit("{re,}move"))

		require.Nil(t, fs.Undelete("/a"))

		info, err := fs.Stat("/a")
		require.Nil(t, err)
		require.Equal(t, "/a", info.Path)
		require.False(t, info.IsDir)

		stream, err := fs.Cat("/a")
		require.Nil(t, err)

		data, err := ioutil.ReadAll(stream)
		require.Nil(t, err)
		require.Equal(t, []byte("hello"), data)

		// This file was moved -> Don't bring it back.
		require.NotNil(t, fs.Undelete("/b"))
	})
}

func TestUndeleteDirectory(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		require.Nil(t, fs.Stage("/dir/a", bytes.NewReader([]byte("hello"))))
		require.Nil(t, fs.Stage("/dir/sub/b", bytes.NewReader([]byte("world"))))
		require.Nil(t, fs.Mkdir("/dir/empty", true))
		require.Nil(t, fs.MakeCommit("initial"))

		require.Nil(t, fs.Remove("/dir"))
		require.Nil(t, fs.MakeCommit("remove"))

		_, err := fs.Stat("/dir/a")
		require.True(t, ie.IsNoSuchFileError(err))

		require.Nil(t, fs.Undelete("/dir"))

		info, err := fs.Stat("/dir")
		require.Nil(t, err)
		require.Equal(t, "/dir", info.Path)
		require.True(t, info.IsDir)

		entries, err := fs.List("/dir", -1)
		require.Nil(t, err)
		require.Equal(t, 5, len(entries))

		paths := []string{}
		for _, entry := range entries {
			paths = append(paths, entry.Path)
		}

		require.Equal(t, []string{
			"/dir",
			"/dir/a",
			"/dir/empty",
			"/dir/sub",
			"/dir/sub/b",
		}, paths)
	})
}



================================================
FILE: catfs/handle.go
================================================
package catfs

import (
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/sahib/brig/catfs/mio"
	"github.com/sahib/brig/catfs/mio/pagecache"
	n "github.com/sahib/brig/catfs/nodes"
)

var (
	// ErrIsClosed is returned when an operation is performed on an already
	// closed file.
	ErrIsClosed = errors.New("file handle is closed")
)

// Handle is a emulation of a os.File handle, as returned by os.Open()
// It supports the usual operations like open, read, write and seek.
// Take note that the flushing operation currently is quite expensive.
type Handle struct {
	fs   *FS
	file *n.File

	// lock guards all mutable fields below against concurrent calls.
	lock sync.Mutex

	// layer and stream are initialized lazily on first I/O
	// (see initStreamIfNeeded); flush() resets them to nil.
	layer  *pagecache.Layer
	stream mio.Stream

	// wasModified is set on any Write/WriteAt/Truncate;
	// flush() only stages if it is true.
	wasModified bool
	isClosed    bool
	readOnly    bool
}

// newHandle wraps `file` into a Handle. No backend I/O happens here;
// the stream is opened lazily on the first read/write/seek.
func newHandle(fs *FS, file *n.File, readOnly bool) *Handle {
	return &Handle{
		fs:       fs,
		file:     file,
		readOnly: readOnly,
	}
}

// initStreamIfNeeded opens the backend stream and the page-cache overlay
// on first use. It is idempotent: once hdl.stream is set it does nothing.
// Callers must hold hdl.lock.
func (hdl *Handle) initStreamIfNeeded() error {
	if hdl.fs.pageCache == nil {
		return errors.New("no page cache was initialized")
	}

	if hdl.stream != nil {
		return nil
	}

	// Initialize the stream lazily to avoid I/O on open()
	rawStream, err := hdl.fs.bk.Cat(hdl.file.BackendHash())
	if err != nil {
		return err
	}

	// Stack the mio stack on top:
	hdl.stream, err = mio.NewOutStream(
		rawStream,
		hdl.file.IsRaw(),
		hdl.file.Key(),
	)
	if err != nil {
		return err
	}

	hdl.layer, err = pagecache.NewLayer(
		hdl.stream,
		hdl.fs.pageCache,
		int64(hdl.file.Inode()),
		int64(hdl.file.Size()),
	)
	return err
}

// Read will try to fill `buf` as much as possible.
// The seek pointer will be advanced by the number of bytes written.
// Take care, `buf` might still have contents, even if io.EOF was returned.
func (hdl *Handle) Read(buf []byte) (int, error) {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.isClosed {
		return 0, ErrIsClosed
	}

	if err := hdl.initStreamIfNeeded(); err != nil {
		return 0, err
	}

	return hdl.layer.Read(buf)
}

// ReadAt reads from the overlay at `off` into `buf`.
func (hdl *Handle) ReadAt(buf []byte, off int64) (int, error) {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.isClosed {
		return 0, ErrIsClosed
	}

	if err := hdl.initStreamIfNeeded(); err != nil {
		return 0, err
	}

	return hdl.layer.ReadAt(buf, off)
}

// Write will write the contents of `buf` to the current position.
// It will return the number of currently written bytes.
func (hdl *Handle) Write(buf []byte) (int, error) {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.readOnly {
		return 0, ErrReadOnly
	}

	if hdl.isClosed {
		return 0, ErrIsClosed
	}

	if err := hdl.initStreamIfNeeded(); err != nil {
		return 0, err
	}

	// Mark dirty so the next Flush()/Close() actually stages the content.
	hdl.wasModified = true
	return hdl.layer.Write(buf)
}

// WriteAt writes data from `buf` at offset `off` counted from the start (0 offset).
// Mimics `WriteAt` from `io` package https://golang.org/pkg/io/#WriterAt
func (hdl *Handle) WriteAt(buf []byte, off int64) (n int, err error) {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.readOnly {
		return 0, ErrReadOnly
	}

	if hdl.isClosed {
		return 0, ErrIsClosed
	}

	if err := hdl.initStreamIfNeeded(); err != nil {
		return 0, err
	}

	hdl.wasModified = true
	return hdl.layer.WriteAt(buf, off)
}

// Seek will jump to the `offset` relative to `whence`.
// There next read and write operation will start from this point.
func (hdl *Handle) Seek(offset int64, whence int) (int64, error) {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.isClosed {
		return 0, ErrIsClosed
	}

	if err := hdl.initStreamIfNeeded(); err != nil {
		return 0, err
	}

	n, err := hdl.layer.Seek(offset, whence)
	if err != nil {
		return 0, err
	}

	return n, nil
}

// Truncate truncates the file to a specific length.
// It is rejected on read-only and closed handles.
func (hdl *Handle) Truncate(size uint64) error {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.readOnly {
		return ErrReadOnly
	}

	if hdl.isClosed {
		return ErrIsClosed
	}

	if err := hdl.initStreamIfNeeded(); err != nil {
		return err
	}

	// Update metadata size under the fs-wide lock; the overlay
	// truncation below is protected by hdl.lock.
	hdl.fs.mu.Lock()
	hdl.file.SetSize(size)
	hdl.fs.mu.Unlock()

	hdl.layer.Truncate(int64(size))
	return nil
}

// unlocked version of Flush()
func (hdl *Handle) flush() error {
	// flush unsets the layer, so we don't flush twice.
	if hdl.layer == nil {
		return nil
	}

	// No need to flush anything if no write calls happened.
	if !hdl.wasModified {
		return nil
	}

	// Make sure that hdl.layer is unset in any case.
	// but only do that in case of real writes.
	defer func() {
		hdl.layer = nil
		hdl.stream = nil
		hdl.wasModified = false
	}()

	// Jump back to the beginning of the file, since fs.Stage()
	// should read all content starting from there.
	n, err := hdl.layer.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}

	if n != 0 {
		return fmt.Errorf("seek offset is not 0")
	}

	path := hdl.file.Path()
	if err := hdl.fs.Stage(path, hdl.layer); err != nil {
		return err
	}

	return hdl.layer.Close()
}

// Flush makes sure to write the current state to the backend.
// Please remember that this method is currently pretty expensive.
func (hdl *Handle) Flush() error {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.readOnly {
		return ErrReadOnly
	}

	if hdl.isClosed {
		return ErrIsClosed
	}

	return hdl.flush()
}

// Close will finalize the file. It should not be used after.
// This will call flush if it did not happen yet.
func (hdl *Handle) Close() error {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	if hdl.isClosed {
		return ErrIsClosed
	}

	hdl.isClosed = true
	return hdl.flush()
}

// Path returns the absolute path of the file.
func (hdl *Handle) Path() string {
	hdl.lock.Lock()
	defer hdl.lock.Unlock()

	return hdl.file.Path()
}


================================================
FILE: catfs/handle_test.go
================================================
package catfs

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"testing"

	"github.com/sahib/brig/catfs/mio/compress"
	"github.com/sahib/brig/util/testutil"
	"github.com/stretchr/testify/require"
)

// TestOpenRead: a staged file can be read back fully through a handle.
func TestOpenRead(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		rawData := []byte{1, 2, 3}
		require.Nil(t, fs.Stage("/x", bytes.NewReader(rawData)))
		fd, err := fs.Open("/x")
		require.Nil(t, err)

		data, err := ioutil.ReadAll(fd)
		require.Nil(t, err)
		require.Equal(t, data, rawData)
		require.Nil(t, fd.Close())
	})
}

// TestOpenWrite: writes at the start of a handle overlay the old content
// and survive a seek back to the beginning.
func TestOpenWrite(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		rawData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
		require.Nil(t, fs.Stage("/x", bytes.NewReader(rawData)))
		fd, err := fs.Open("/x")
		require.Nil(t, err)

		n, err := fd.Write([]byte{3, 2, 1})
		require.Nil(t, err)
		require.Equal(t, n, 3)

		data, err := ioutil.ReadAll(fd)
		require.Nil(t, err)
		// require.Equal(t, rawData[3:], data)
		require.Equal(t, []byte{3, 2, 1, 4, 5, 6, 7, 8, 9, 10}, data)

		// Check that we can also seek back to start after reading to the end.
		// (and also check if the write overlay actually did work)
		pos, err := fd.Seek(0, io.SeekStart)
		require.Nil(t, err)
		require.Equal(t, pos, int64(0))

		data, err = ioutil.ReadAll(fd)
		require.Nil(t, err)
		require.Equal(t, []byte{3, 2, 1, 4, 5, 6, 7, 8, 9, 10}, data)

		require.Nil(t, fd.Close())
	})
}

// TestOpenTruncate truncates to every length from 0 to beyond EOF and
// checks both the in-handle view and the persisted content.
func TestOpenTruncate(t *testing.T) {
	rawData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	for idx := 0; idx < len(rawData)+5; idx++ {
		t.Run(fmt.Sprintf("truncate_%d", idx), func(t *testing.T) {
			withDummyFS(t, func(fs *FS) {
				require.Nil(t, fs.Stage("/x", bytes.NewReader(rawData)))
				fd, err := fs.Open("/x")
				require.Nil(t, err)
				require.Nil(t, fd.Truncate(uint64(idx)))

				data, err := ioutil.ReadAll(fd)
				require.Nil(t, err)

				// cap rawData index:
				rawIdx := idx
				if idx >= len(rawData) {
					rawIdx = len(rawData)
				}

				require.Equal(t, rawData[:rawIdx], data)
				require.Nil(t, fd.Close())

				// Check if the result was really written:
				stream, err := fs.Cat("/x")
				require.Nil(t, err)

				persistentData, err := ioutil.ReadAll(stream)
				require.Nil(t, err)
				require.Equal(t, rawData[:rawIdx], persistentData)
			})
		})
	}
}

// TestOpenOpAfterClose: any operation on a closed handle yields ErrIsClosed.
func TestOpenOpAfterClose(t *testing.T) {
	withDummyFS(t, func(fs *FS) {
		rawData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
		require.Nil(t, fs.Stage("/x", bytes.NewReader(rawData)))
		fd, err := fs.Open("/x")
		require.Nil(t, err)
		require.Nil(t, fd.Close())

		_, err = ioutil.ReadAll(fd)
		require.Equal(t, err, ErrIsClosed)
	})
}

// TODO: More tests. This still feels buggy.
// Cases needed for:
// - 0, SEEK_END
// - 9, SEEK_SET
// - ...
func TestOpenExtend(t *testing.T) { t.Run("start-10", func(t *testing.T) { testOpenExtend(t, 10, io.SeekStart) }) t.Run("curr-10", func(t *testing.T) { testOpenExtend(t, 10, io.SeekCurrent) }) t.Run("end-0", func(t *testing.T) { testOpenExtend(t, 0, io.SeekEnd) }) } func testOpenExtend(t *testing.T, pos int64, whence int) { withDummyFS(t, func(fs *FS) { rawData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} require.Nil(t, fs.Stage("/x", bytes.NewReader(rawData))) fd, err := fs.Open("/x") require.Nil(t, err) pos, err := fd.Seek(pos, whence) require.Nil(t, err) require.Equal(t, pos, int64(pos)) n, err := fd.Write([]byte{11, 12, 13}) require.Nil(t, err) require.Equal(t, n, 3) require.Nil(t, fd.Close()) stream, err := fs.Cat("/x") require.Nil(t, err) postData, err := ioutil.ReadAll(stream) require.Nil(t, err) expected := make([]byte, 13) copy(expected, rawData) copy(expected[10:], []byte{11, 12, 13}) require.Equal(t, expected, postData) }) } // Read data from the handle like fuse would: // Seek to an offset, read a chunk and then advance to next block. // block size and file size may var heavily here. func TestHandleFuseLikeRead(t *testing.T) { tcs := []struct { fileSize int blockSize int }{ {2048, 400}, } for _, tc := range tcs { testHandleFuseLikeRead(t, tc.fileSize, tc.blockSize) } } func testHandleFuseLikeRead(t *testing.T, fileSize, blockSize int) { // fuse reads data always with a prior seek. // try to emulate this behaviour here. 
withDummyFS(t, func(fs *FS) { rawData := testutil.CreateDummyBuf(int64(fileSize)) require.Nil(t, fs.Stage("/x", bytes.NewReader(rawData))) fd, err := fs.Open("/x") require.Nil(t, err) left := len(rawData) for left > 0 { toRead := blockSize if left < blockSize { toRead = left } offset := len(rawData) - left buf := make([]byte, toRead) if _, err = fd.Seek(int64(offset), io.SeekStart); err != nil { t.Fatalf("Seek to %d failed", offset) } n, err := fd.Read(buf) if err != nil { t.Fatalf("Read failed: %v", err) } if n != toRead { t.Fatalf("Handle read less than expected (wanted %d, got %d)", toRead, n) } if !bytes.Equal(buf, rawData[offset:offset+toRead]) { t.Fatalf("Block [%d:%d] differs from raw data", offset, offset+toRead) } left -= blockSize } require.Nil(t, fd.Close()) }) } func TestHandleChangeCompression(t *testing.T) { withDummyFS(t, func(fs *FS) { // Create a file which will not be compressed. size := int64(compress.HeaderSizeThreshold - 1) oldData := testutil.CreateDummyBuf(size) require.Nil(t, fs.Mkdir("/sub", false)) require.Nil(t, fs.Stage("/sub/a-text-file.go", bytes.NewReader(oldData))) // Second run will use another compress algorithm, since we're // over the header size limit in the compression guesser. fd, err := fs.Open("/sub/a-text-file.go") require.Nil(t, err) // "echo(1)" does a flush after open (for whatever reason) require.Nil(t, fd.Flush()) offset, err := fd.Seek(size, io.SeekStart) require.Nil(t, err) require.Equal(t, offset, size) expectedData := []byte("xxxxx") n, err := fd.Write(expectedData) require.Nil(t, err) require.Equal(t, n, len(expectedData)) require.Nil(t, fd.Flush()) require.Nil(t, fd.Close()) stream, err := fs.Cat("/sub/a-text-file.go") require.Nil(t, err) gotData, err := ioutil.ReadAll(stream) require.Nil(t, err) expectData := append(oldData, expectedData...) 
require.Equal(t, expectData, gotData) }) } ================================================ FILE: catfs/mio/chunkbuf/chunkbuf.go ================================================ package chunkbuf import ( "io" "github.com/sahib/brig/util" ) // ChunkBuffer represents a custom buffer struct with Read/Write and Seek support. type ChunkBuffer struct { buf []byte readOff int64 writeOff int64 size int64 } const ( maxChunkSize = 64 * 1024 ) func (c *ChunkBuffer) Write(p []byte) (int, error) { n := copy(c.buf[c.writeOff:c.size], p) c.writeOff += int64(n) c.size = util.Max64(c.size, c.writeOff) return n, nil } // Reset resets the buffer like bytes.Buffer func (c *ChunkBuffer) Reset(data []byte) { c.readOff = 0 c.writeOff = 0 c.size = int64(len(data)) c.buf = data } // Len tells you the current size of the buffer contents func (c *ChunkBuffer) Len() int { return int(c.size - c.readOff) } func (c *ChunkBuffer) Read(p []byte) (int, error) { n := copy(p, c.buf[c.readOff:c.size]) c.readOff += int64(n) if n < len(p) { return n, io.EOF } return n, nil } // Seek implements io.Seeker func (c *ChunkBuffer) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekCurrent: c.readOff += offset case io.SeekEnd: c.readOff = c.size + offset case io.SeekStart: c.readOff = offset } c.readOff = util.Min64(c.readOff, c.size) c.writeOff = c.readOff return c.readOff, nil } // Close is a no-op only existing to fulfill io.Closer func (c *ChunkBuffer) Close() error { return nil } // WriteTo implements the io.WriterTo interface func (c *ChunkBuffer) WriteTo(w io.Writer) (int64, error) { n, err := w.Write(c.buf[c.readOff:]) if err != nil { return 0, err } c.readOff += int64(n) return int64(n), nil } // NewChunkBuffer returns a ChunkBuffer with the given data. if data is nil a // ChunkBuffer with 64k is returned. // Note that chunkbuf will take over ownership over the buf. 
func NewChunkBuffer(data []byte) *ChunkBuffer { if data == nil { data = make([]byte, maxChunkSize) } return &ChunkBuffer{buf: data, size: int64(len(data))} } ================================================ FILE: catfs/mio/chunkbuf/chunkbuf_test.go ================================================ package chunkbuf import ( "bytes" "io" "io/ioutil" "testing" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func TestChunkBufBasic(t *testing.T) { data := testutil.CreateDummyBuf(1024) buf := NewChunkBuffer(data) copiedData, err := ioutil.ReadAll(buf) require.Nil(t, err) require.Equal(t, data, copiedData) } func TestChunkBufEOF(t *testing.T) { data := testutil.CreateDummyBuf(1024) buf := NewChunkBuffer(data) cache := make([]byte, 2048) n, err := buf.Read(cache) require.True(t, err == io.EOF) require.Equal(t, n, 1024) require.Nil(t, buf.Close()) } func TestChunkBufWriteTo(t *testing.T) { data := testutil.CreateDummyBuf(1024) buf := NewChunkBuffer(data) stdBuf := &bytes.Buffer{} n, err := buf.WriteTo(stdBuf) require.Nil(t, err) require.Equal(t, int64(n), int64(1024)) require.Equal(t, data, stdBuf.Bytes()) } func TestChunkBufSeek(t *testing.T) { data := testutil.CreateDummyBuf(1024) buf := NewChunkBuffer(data) var err error var n int cache := make([]byte, 128) n, err = buf.Read(cache) require.Nil(t, err) require.Equal(t, n, 128) require.Equal(t, cache[:n], data[:n]) jumpedTo, err := buf.Seek(256, io.SeekStart) require.Nil(t, err) require.Equal(t, int64(jumpedTo), int64(256)) cache = make([]byte, 128) n, err = buf.Read(cache) require.Nil(t, err) require.Equal(t, n, 128) require.Equal(t, cache[:n], data[256:n+256]) // read advanced by 128, add 128 to go to 512 jumpedTo, err = buf.Seek(128, io.SeekCurrent) require.Nil(t, err) require.Equal(t, int64(jumpedTo), int64(512)) cache = make([]byte, 128) n, err = buf.Read(cache) require.Nil(t, err) require.Equal(t, n, 128) require.Equal(t, cache[:n], data[512:n+512]) // read advanced by 128, add 128 to go 
to 512 jumpedTo, err = buf.Seek(-128, io.SeekEnd) require.Nil(t, err) require.Equal(t, int64(jumpedTo), int64(896)) cache = make([]byte, 128) n, err = buf.Read(cache) require.Nil(t, err) require.Equal(t, n, 128) require.Equal(t, cache[:n], data[896:n+896]) } func TestChunkBufWrite(t *testing.T) { data := testutil.CreateDummyBuf(1024) ref := testutil.CreateDummyBuf(1024) buf := NewChunkBuffer(data) ref[0] = 1 ref[1] = 2 ref[2] = 3 n, err := buf.Write([]byte{1, 2, 3}) require.Nil(t, err) require.Equal(t, n, 3) jumpedTo, err := buf.Seek(-1, io.SeekEnd) require.Nil(t, err) require.Equal(t, int64(jumpedTo), int64(1023)) ref[1023] = 255 n, err = buf.Write([]byte{255, 255, 255}) require.Nil(t, err) require.Equal(t, n, 1) jumpedTo, err = buf.Seek(0, io.SeekStart) require.Nil(t, err) require.Equal(t, int64(jumpedTo), int64(0)) stdBuf := &bytes.Buffer{} nWriteTo, err := buf.WriteTo(stdBuf) require.Nil(t, err) require.Equal(t, nWriteTo, int64(1024)) require.Equal(t, stdBuf.Bytes(), ref) } ================================================ FILE: catfs/mio/compress/algorithm.go ================================================ package compress import ( "errors" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/pierrec/lz4/v4" ) var ( // ErrBadAlgo is returned on a unsupported/unknown algorithm. ErrBadAlgo = errors.New("invalid algorithm type") ) const ( // AlgoUnknown represents an unknown algorithm. // When trying to use it an error will occur. AlgoUnknown = AlgorithmType(iota) // AlgoSnappy represents the snappy compression algorithm: // https://en.wikipedia.org/wiki/Snappy_(software) AlgoSnappy //AlgoLZ4 represents the lz4 compression algorithm: // https://en.wikipedia.org/wiki/LZ4_(compression_algorithm) AlgoLZ4 // AlgoZstd represents the zstd compression algorithm: // https://de.wikipedia.org/wiki/Zstandard AlgoZstd ) // AlgorithmType user defined type to store the algorithm type. 
type AlgorithmType byte // IsValid returns true if `at` is a valid algorithm type. func (at AlgorithmType) IsValid() bool { switch at { case AlgoSnappy, AlgoLZ4, AlgoZstd: return true } return false } func (at AlgorithmType) String() string { name, ok := algoToString[at] if !ok { return "unknown" } return name } // Algorithm is the common interface for all supported algorithms. type Algorithm interface { // Encode should encode `src` into the buffer provided by `dst`. // It should return a sub-slice of `dst`. `dst` should be big // enough to hold `src`. Use MaxEncodeBufferSize() to be sure. Encode(dst, src []byte) ([]byte, error) // Decode decodes the data in `src` to `dst`, returning a subslice // of `dst` to indicate the actual size. Decode(dst, src []byte) ([]byte, error) // MaxEncodeBufferSize should return the maximum size an encoded // (i.e. compressed) buffer of input size maxChunkSize may have. // This will be bigger than maxChunkSize since random data will // be inflated by almost all algorithms. MaxEncodeBufferSize() int } type snappyAlgo struct{} type lz4Algo struct { compressor *lz4.Compressor } type zstdAlgo struct{} var ( zstdWriter *zstd.Encoder zstdReader *zstd.Decoder ) func init() { var err error // NOTE: zstd package allows us to use the same writer and reader // stateless if we just use block encoding/decoding. // This saves us some extra allocations. // TODO: configure compression level? zstdWriter, err = zstd.NewWriter( nil, zstd.WithEncoderLevel(zstd.SpeedDefault), ) if err != nil { // configuring the writer wrong is a programmer error. panic(err) } // NOTE: reader should set max memory bound with WithDecoderMaxMemory. // we can deduce it from maxChunkSize and protect against // malicious inputs. zstdReader, err = zstd.NewReader( nil, zstd.WithDecoderMaxMemory(32*maxChunkSize), ) if err != nil { // configuring the reader wrong is a programmer error. panic(err) } } var ( // AlgoMap is a map of available algorithms. 
algoMap = map[AlgorithmType]func() Algorithm{ AlgoSnappy: func() Algorithm { return snappyAlgo{} }, AlgoLZ4: func() Algorithm { // TODO: we could configure compression level here. return &lz4Algo{ compressor: &lz4.Compressor{}, } }, AlgoZstd: func() Algorithm { return zstdAlgo{} }, } algoToString = map[AlgorithmType]string{ AlgoSnappy: "snappy", AlgoLZ4: "lz4", AlgoZstd: "zstd", } ) func (a snappyAlgo) Encode(dst, src []byte) ([]byte, error) { return snappy.Encode(dst, src), nil } func (a snappyAlgo) Decode(dst, src []byte) ([]byte, error) { return snappy.Decode(dst, src) } func (a snappyAlgo) MaxEncodeBufferSize() int { return snappy.MaxEncodedLen(maxChunkSize) } ///////////////////////// func (a *lz4Algo) Encode(dst, src []byte) ([]byte, error) { n, err := a.compressor.CompressBlock(src, dst) if err != nil { return dst[:n], err } // NOTE: n == 0 is returned when the data is not easy to compress // and the `dst` buf is too small to hold it. Since we always // supply a large enough buf this should not happen. return dst[:n], nil } func (a *lz4Algo) Decode(dst, src []byte) ([]byte, error) { n, err := lz4.UncompressBlock(src, dst) return dst[:n], err } func (a *lz4Algo) MaxEncodeBufferSize() int { return lz4.CompressBlockBound(maxChunkSize) } ///////////////////////// func (a zstdAlgo) Encode(dst, src []byte) ([]byte, error) { return zstdWriter.EncodeAll(src, dst[:0]), nil } func (a zstdAlgo) Decode(dst, src []byte) ([]byte, error) { return zstdReader.DecodeAll(src, dst[:0]) } func (a zstdAlgo) MaxEncodeBufferSize() int { // TODO: Is there a better way to estimate? 
return maxChunkSize * 2 } func algorithmFromType(a AlgorithmType) (Algorithm, error) { newAlgoFn, ok := algoMap[a] if !ok { return nil, ErrBadAlgo } return newAlgoFn(), nil } ================================================ FILE: catfs/mio/compress/compress_test.go ================================================ package compress import ( "bytes" "fmt" "io" "io/ioutil" "os" "testing" "github.com/sahib/brig/util" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) var ( TestOffsets = []int64{-1, -500, 0, 1, -C64K, -C32K, C64K - 1, C64K, C64K + 1, C32K - 1, C32K, C32K + 1, C64K - 5, C64K + 5, C32K - 5, C32K + 5} TestSizes = []int64{0, 1, 4096, C64K - 1, C64K, C64K + 1, C32K - 1, C32K, C32K + 1, C64K - 5, C64K + 5, C32K - 5, C32K + 5} CompressionAlgos = []AlgorithmType{AlgoLZ4, AlgoSnappy, AlgoZstd} ) func openDest(t *testing.T, dest string) *os.File { if _, err := os.Stat(dest); !os.IsNotExist(err) && err != nil { t.Fatalf("Opening destination %v failed: %v\n", dest, err) } fd, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755) if err != nil { t.Fatalf("Opening source %v failed: %v\n", dest, err) } return fd } func openSrc(t *testing.T, src string) *os.File { fd, err := os.Open(src) if err != nil { t.Fatalf("Opening source %v failed: %v\n", src, err) } return fd } func createTempFile(t *testing.T) string { fd, err := ioutil.TempFile("", "brig-mio-compress") path := fd.Name() require.Nil(t, err) require.Nil(t, fd.Close()) return path } const ( C64K = 64 * 1024 C32K = 32 * 1024 ) func TestCompressDecompress(t *testing.T) { t.Parallel() sizes := TestSizes algos := CompressionAlgos for _, algo := range algos { for _, size := range sizes { name := fmt.Sprintf("%s-size%d", algo, size) t.Run(name, func(t *testing.T) { t.Parallel() testCompressDecompress(t, size, algo, true, true) testCompressDecompress(t, size, algo, false, false) testCompressDecompress(t, size, algo, true, false) testCompressDecompress(t, size, algo, false, true) 
}) } } } func testCompressDecompress(t *testing.T, size int64, algo AlgorithmType, useReadFrom, useWriteTo bool) { // Fake data file is written to disk, // as compression reader has to be a ReadSeeker. zipPath := createTempFile(t) defer testutil.Remover(t, zipPath) data := testutil.CreateDummyBuf(size) zipFileDest := openDest(t, zipPath) // Compress. w, err := NewWriter(zipFileDest, algo) if err != nil { t.Errorf("Writer init failed %v", err) return } if _, err := testutil.DumbCopy(w, bytes.NewReader(data), useReadFrom, useWriteTo); err != nil { t.Errorf("Compress failed %v", err) return } if err := w.Close(); err != nil { t.Errorf("Compression writer close failed: %v", err) return } if err := zipFileDest.Close(); err != nil { t.Errorf("close(zipFileDest) failed: %v", err) return } // Read compressed file into buffer. dataUncomp := bytes.NewBuffer(nil) dataFromZip := openSrc(t, zipPath) // Uncompress. r := NewReader(dataFromZip) if _, err := testutil.DumbCopy(dataUncomp, r, useReadFrom, useWriteTo); err != nil { t.Errorf("Decompression failed: %v", err) return } if err := dataFromZip.Close(); err != nil { t.Errorf("Zip close failed: %v", err) return } // Compare. 
got, want := dataUncomp.Bytes(), data if !bytes.Equal(got, want) { t.Error("Uncompressed data and input data does not match.") t.Errorf("\tGOT: %v", util.OmitBytes(got, 10)) t.Errorf("\tWANT: %v", util.OmitBytes(want, 10)) return } } func TestSeek(t *testing.T) { t.Parallel() sizes := TestSizes offsets := TestOffsets algos := CompressionAlgos for _, algo := range algos { for _, size := range sizes { for _, off := range offsets { name := fmt.Sprintf("%s-size%d-off%d", algo, size, off) t.Run(name, func(t *testing.T) { t.Parallel() testSeek(t, size, off, algo, false, false) testSeek(t, size, off, algo, true, true) testSeek(t, size, off, algo, false, true) testSeek(t, size, off, algo, true, false) }) } } } } func testSeek(t *testing.T, size, offset int64, algo AlgorithmType, useReadFrom, useWriteTo bool) { // Fake data file is written to disk, // as compression reader has to be a ReadSeeker. zipPath := createTempFile(t) defer testutil.Remover(t, zipPath) data := testutil.CreateDummyBuf(size) zipFileDest := openDest(t, zipPath) // Compress. w, err := NewWriter(zipFileDest, algo) if err != nil { t.Errorf("Writer init failed %v", err) return } if _, err := testutil.DumbCopy(w, bytes.NewReader(data), useReadFrom, useWriteTo); err != nil { t.Errorf("Compress failed %v", err) return } defer testutil.Remover(t, zipPath) if err := w.Close(); err != nil { t.Errorf("Compression writer close failed: %v", err) return } if err := zipFileDest.Close(); err != nil { t.Errorf("close(zipFileDest) failed: %v", err) return } // Read compressed file into buffer. dataUncomp := bytes.NewBuffer(nil) dataFromZip := openSrc(t, zipPath) zr := NewReader(dataFromZip) // Set specific offset before read. _, err = zr.Seek(offset, io.SeekStart) if err == io.EOF && offset < size && offset > -1 { t.Errorf("Seek failed even with EOF: %d <= %d", offset, size) return } if err != io.EOF && err != nil { t.Errorf("Seek failed: %v", err) return } // Read starting at a specific offset. 
if _, err := testutil.DumbCopy(dataUncomp, zr, useReadFrom, useWriteTo); err != nil { t.Errorf("Decompression failed: %v", err) return } if err := dataFromZip.Close(); err != nil { t.Errorf("Zip close failed: %v", err) return } // Compare. maxOffset := offset if offset > size { maxOffset = size } if offset < 0 { maxOffset = 0 } got, want := dataUncomp.Bytes(), data[maxOffset:] if !bytes.Equal(got, want) { t.Error("Uncompressed data and input data does not match.") t.Errorf("\tGOT: %v", util.OmitBytes(got, 10)) t.Errorf("\tWANT: %v", util.OmitBytes(want, 10)) return } } func TestReadItAllTwice(t *testing.T) { for _, algo := range []AlgorithmType{AlgoLZ4, AlgoSnappy} { t.Run(fmt.Sprintf("%v", algo), func(t *testing.T) { data := testutil.CreateDummyBuf(2 * 4096) zipData, err := Pack(data, algo) require.Nil(t, err) r := bytes.NewReader(zipData) zr := NewReader(r) readData1, err := ioutil.ReadAll(zr) require.Nil(t, err) n, err := zr.Seek(0, io.SeekStart) require.Nil(t, err) require.Equal(t, n, int64(0)) readData2, err := ioutil.ReadAll(zr) require.Nil(t, err) require.Equal(t, readData1, readData2) }) } } // fuse will use Seek() to jump to each position. // when reading a complete file it will call seek before each read. 
// TestReadFuseLike mimics the access pattern of the FUSE layer:
// a Seek() to the current position before every single Read().
func TestReadFuseLike(t *testing.T) {
	algo := AlgorithmType(AlgoSnappy)
	for _, size := range TestSizes {
		t.Run(fmt.Sprintf("%v", size), func(t *testing.T) {
			data := testutil.CreateDummyBuf(size)
			compressedData, err := Pack(data, algo)
			require.Nil(t, err)

			r := NewReader(bytes.NewReader(compressedData))
			bufSize := 4096
			buf := make([]byte, bufSize)
			offset := int64(0)

			for {
				seekOffset, err := r.Seek(offset, io.SeekStart)
				if err != io.EOF {
					require.Nil(t, err)
					require.Equal(t, offset, seekOffset)
				}

				n, err := r.Read(buf)
				if err != io.EOF {
					require.Nil(t, err)
				}

				// check that n returns something that makes sense:
				require.Equal(t, util.Min(bufSize, len(data)-int(offset)), n)
				require.Equal(t, data[offset:offset+int64(n)], buf[:n])
				offset += int64(n)

				// If this test goes into an endless loop: that's why.
				if err == io.EOF {
					break
				}
			}
		})
	}
}

// TestCheckSize verifies that Seek(0, io.SeekEnd) reports the uncompressed
// stream size and that the reader can still be rewound and read completely.
func TestCheckSize(t *testing.T) {
	data := testutil.CreateDummyBuf(6041)
	packData, err := Pack(data, AlgoSnappy)
	require.Nil(t, err)

	r := NewReader(bytes.NewReader(packData))
	size, err := r.Seek(0, io.SeekEnd)
	require.Nil(t, err)
	require.Equal(t, int64(len(data)), size)

	off, err := r.Seek(0, io.SeekStart)
	require.Nil(t, err)
	require.Equal(t, int64(0), off)

	buf := &bytes.Buffer{}
	n, err := io.Copy(buf, r)
	require.Nil(t, err)
	require.Equal(t, int64(len(data)), n)
	require.Equal(t, data, buf.Bytes())
}



================================================
FILE: catfs/mio/compress/header.go
================================================
package compress

import (
	"bytes"
	"encoding/binary"
	"errors"
)

var (
	// ErrBadIndex is returned on invalid compression index.
	ErrBadIndex = errors.New("broken compression index")

	// ErrHeaderTooSmall is returned if the header is less than 10 bytes.
	// It usually indicates a broken file or a non-compressed file.
	ErrHeaderTooSmall = errors.New("header is less than 10 bytes")

	// ErrBadMagicNumber is returned if the first 8 bytes of the stream is not
	// the expected "elchwald".
	ErrBadMagicNumber = errors.New("bad magic number in compressed stream")

	// ErrBadAlgorithm is returned when the algorithm was either not present
	// or it had an invalid value
	ErrBadAlgorithm = errors.New("invalid algorithm")

	// ErrUnsupportedVersion is returned when we don't have a reader that
	// understands that format.
	ErrUnsupportedVersion = errors.New("version of this format is not supported")

	// MagicNumber is the magic number in front of a compressed stream
	MagicNumber = []byte("elchwald")
)

const (
	// maxChunkSize is the size of one chunk handled by the chunk buffer.
	maxChunkSize = 64 * 1024
	// indexChunkSize is the marshalled size of one index record (2x uint64).
	indexChunkSize = 16
	// trailerSize is the marshalled size of the trailer (uint32 + uint64).
	trailerSize = 12
	// headerSize is the size of the stream header
	// (8 byte magic + 2 byte version + 2 byte algorithm).
	headerSize = 12
	// currentVersion is the only format version this code reads and writes.
	currentVersion = 1
)

// record represents an offset mapping {uncompressed offset, compressed offset}.
// A chunk of maxChunkSize is delimited by two adjacent records; the size of a
// specific chunk can be determined by subtracting two record offsets.
type record struct {
	rawOff int64
	zipOff int64
}

// trailer holds basic information about the compressed file.
type trailer struct {
	chunksize uint32
	indexSize uint64
}

// marshal encodes the trailer as trailerSize little endian bytes into buf.
func (t *trailer) marshal(buf []byte) {
	binary.LittleEndian.PutUint32(buf[0:4], t.chunksize)
	binary.LittleEndian.PutUint64(buf[4:12], t.indexSize)
}

// unmarshal decodes the trailer from the first trailerSize bytes of buf.
func (t *trailer) unmarshal(buf []byte) {
	t.chunksize = binary.LittleEndian.Uint32(buf[0:4])
	t.indexSize = binary.LittleEndian.Uint64(buf[4:12])
}

// marshal encodes the record as indexChunkSize little endian bytes into buf.
func (rc *record) marshal(buf []byte) {
	binary.LittleEndian.PutUint64(buf[0:8], uint64(rc.rawOff))
	binary.LittleEndian.PutUint64(buf[8:16], uint64(rc.zipOff))
}

// unmarshal decodes the record from the first indexChunkSize bytes of buf.
func (rc *record) unmarshal(buf []byte) {
	rc.rawOff = int64(binary.LittleEndian.Uint64(buf[0:8]))
	rc.zipOff = int64(binary.LittleEndian.Uint64(buf[8:16]))
}

// header is the parsed form of the stream header.
type header struct {
	algo    AlgorithmType
	version uint16
}

// makeHeader builds the headerSize byte stream header:
// 8 bytes magic number, 2 bytes version, 2 bytes algorithm (little endian).
func makeHeader(algo AlgorithmType, version byte) []byte {
	algoField := make([]byte, 2)
	binary.LittleEndian.PutUint16(algoField, uint16(algo))

	versionField := make([]byte, 2)
	binary.LittleEndian.PutUint16(versionField, uint16(version))

	suffix := append(versionField, algoField...)
return append(MagicNumber, suffix...) } func readHeader(bheader []byte) (*header, error) { if len(bheader) < 10 { return nil, ErrHeaderTooSmall } if !bytes.Equal(bheader[:len(MagicNumber)], MagicNumber) { return nil, ErrBadMagicNumber } // This version only understands itself currently: version := binary.LittleEndian.Uint16(bheader[8:10]) if version != currentVersion { return nil, ErrUnsupportedVersion } if len(bheader) < 12 { return nil, ErrBadAlgorithm } algo := AlgorithmType(binary.LittleEndian.Uint16(bheader[10:12])) if !algo.IsValid() { return nil, ErrBadAlgorithm } return &header{ algo: algo, version: version, }, nil } // Pack compresses `data` with `algo` and returns the resulting data. // This is a convenience method meant to be used for small data packages. func Pack(data []byte, algo AlgorithmType) ([]byte, error) { zipBuf := &bytes.Buffer{} zipW, err := NewWriter(zipBuf, algo) if err != nil { return nil, err } if _, err := zipW.ReadFrom(bytes.NewReader(data)); err != nil { return nil, err } if err := zipW.Close(); err != nil { return nil, err } return zipBuf.Bytes(), nil } // Unpack unpacks `data` and returns the decompressed data. // The algorithm is read from the data itself. // This is a convinience method meant to be used for small data packages. func Unpack(data []byte) ([]byte, error) { buf := &bytes.Buffer{} if _, err := NewReader(bytes.NewReader(data)).WriteTo(buf); err != nil { return nil, err } return buf.Bytes(), nil } ================================================ FILE: catfs/mio/compress/heuristic.go ================================================ package compress import ( "mime" "net/http" "path/filepath" "strings" "github.com/sdemontfort/go-mimemagic" ) var ( // TextFileExtensions not covered by mime.TypeByExtension TextFileExtensions = map[string]bool{ ".go": true, ".json": true, ".yaml": true, ".xml": true, ".txt": true, } ) const ( // HeaderSizeThreshold is the number of bytes needed to enable compression at all. 
HeaderSizeThreshold = 2048 ) func guessMime(path string, buf []byte) string { httpMatch := http.DetectContentType(buf) if httpMatch != "application/octet-stream" { return httpMatch } // try to guess it from the buffer we pass: match := mimemagic.Match("", buf) if match == "" { // try to guess it from the file path: match = mime.TypeByExtension(filepath.Ext(path)) } // handle a few edge cases: if TextFileExtensions[filepath.Ext(path)] { return "text/plain" } return match } func isCompressible(mimetype string) bool { if strings.HasPrefix(mimetype, "text/") { return true } return CompressibleMapping[mimetype] } // GuessAlgorithm takes the path name and the header data of it // and tries to guess a suitable compression algorithm. func GuessAlgorithm(path string, header []byte) (AlgorithmType, error) { if len(header) < HeaderSizeThreshold { return AlgoUnknown, nil } mime := guessMime(path, header) if mime == "" { // the guesses below work only when mime is known return AlgoSnappy, nil } compressible := isCompressible(mime) if !compressible { return AlgoUnknown, nil } // text like files probably deserve some thorough compression: if strings.HasPrefix(mime, "text/") { return AlgoLZ4, nil } // fallback to snappy for generic files: return AlgoSnappy, nil } ================================================ FILE: catfs/mio/compress/heuristic_test.go ================================================ package compress import ( "testing" "github.com/sahib/brig/util/testutil" ) type testCase struct { path string header []byte expectedAlgo AlgorithmType } var ( testCases = []testCase{ { "1.txt", testutil.CreateDummyBuf(HeaderSizeThreshold - 1), AlgoUnknown, }, { "2.txt", testutil.CreateDummyBuf(HeaderSizeThreshold), AlgoLZ4, }, { "3.opus", append( []byte{0x4f, 0x67, 0x67, 0x53}, testutil.CreateDummyBuf(HeaderSizeThreshold)..., ), AlgoUnknown, }, { "4.zip", append( []byte{0x50, 0x4b, 0x3, 0x4}, testutil.CreateDummyBuf(HeaderSizeThreshold)..., ), AlgoUnknown, }, } ) func 
TestChooseCompressAlgo(t *testing.T) { t.Parallel() for _, tc := range testCases { t.Run(tc.path, func(t *testing.T) { if algo, err := GuessAlgorithm(tc.path, tc.header); err != nil { t.Errorf("Error: %v", err) } else if algo != tc.expectedAlgo { t.Errorf( "For path '%s' expected '%s', got '%s'", tc.path, algoToString[tc.expectedAlgo], algoToString[algo], ) } }) } } ================================================ FILE: catfs/mio/compress/mime_db.go ================================================ package compress // CompressibleMapping maps between mime types and a bool indicating // if they're compressible. Choice of Algorithm comes later. // // This was converted from this mime db: // https://cdn.rawgit.com/jshttp/mime-db/master/db.json var CompressibleMapping = map[string]bool{ "application/3gpdash-qoe-report+xml": true, "application/3gpp-ims+xml": true, "application/activity+json": true, "application/alto-costmap+json": true, "application/alto-costmapfilter+json": true, "application/alto-directory+json": true, "application/alto-endpointcost+json": true, "application/alto-endpointcostparams+json": true, "application/alto-endpointprop+json": true, "application/alto-endpointpropparams+json": true, "application/alto-error+json": true, "application/alto-networkmap+json": true, "application/alto-networkmapfilter+json": true, "application/atom+xml": true, "application/atomcat+xml": true, "application/atomdeleted+xml": true, "application/atomsvc+xml": true, "application/auth-policy+xml": true, "application/bdoc": false, "application/beep+xml": true, "application/calendar+json": true, "application/calendar+xml": true, "application/ccmp+xml": true, "application/ccxml+xml": true, "application/cdfx+xml": true, "application/cea-2018+xml": true, "application/cellml+xml": true, "application/clue_info+xml": true, "application/cnrp+xml": true, "application/coap-group+json": true, "application/conference-info+xml": true, "application/cpl+xml": true, "application/csta+xml": 
true, "application/cstadata+xml": true, "application/csvm+json": true, "application/dart": true, "application/dash+xml": true, "application/davmount+xml": true, "application/dialog-info+xml": true, "application/dicom+json": true, "application/dicom+xml": true, "application/docbook+xml": true, "application/dskpp+xml": true, "application/dssc+xml": true, "application/ecmascript": true, "application/edi-x12": false, "application/edifact": false, "application/emergencycalldata.comment+xml": true, "application/emergencycalldata.control+xml": true, "application/emergencycalldata.deviceinfo+xml": true, "application/emergencycalldata.providerinfo+xml": true, "application/emergencycalldata.serviceinfo+xml": true, "application/emergencycalldata.subscriberinfo+xml": true, "application/emergencycalldata.veds+xml": true, "application/emma+xml": true, "application/emotionml+xml": true, "application/epp+xml": true, "application/fdt+xml": true, "application/fhir+json": true, "application/fhir+xml": true, "application/fido.trusted-apps+json": true, "application/font-woff": false, "application/framework-attributes+xml": true, "application/geo+json": true, "application/geoxacml+xml": true, "application/gml+xml": true, "application/gpx+xml": true, "application/gzip": false, "application/held+xml": true, "application/ibe-key-request+xml": true, "application/ibe-pkg-reply+xml": true, "application/im-iscomposing+xml": true, "application/inkml+xml": true, "application/its+xml": true, "application/java-archive": false, "application/java-serialized-object": false, "application/java-vm": false, "application/javascript": true, "application/jf2feed+json": true, "application/jose+json": true, "application/jrd+json": true, "application/json": true, "application/json-patch+json": true, "application/jsonml+json": true, "application/jwk+json": true, "application/jwk-set+json": true, "application/kpml-request+xml": true, "application/kpml-response+xml": true, "application/ld+json": true, 
"application/lgr+xml": true, "application/load-control+xml": true, "application/lost+xml": true, "application/lostsync+xml": true, "application/mads+xml": true, "application/manifest+json": true, "application/marcxml+xml": true, "application/mathml+xml": true, "application/mathml-content+xml": true, "application/mathml-presentation+xml": true, "application/mbms-associated-procedure-description+xml": true, "application/mbms-deregister+xml": true, "application/mbms-envelope+xml": true, "application/mbms-msk+xml": true, "application/mbms-msk-response+xml": true, "application/mbms-protection-description+xml": true, "application/mbms-reception-report+xml": true, "application/mbms-register+xml": true, "application/mbms-register-response+xml": true, "application/mbms-schedule+xml": true, "application/mbms-user-service-description+xml": true, "application/media-policy-dataset+xml": true, "application/media_control+xml": true, "application/mediaservercontrol+xml": true, "application/merge-patch+json": true, "application/metalink+xml": true, "application/metalink4+xml": true, "application/mets+xml": true, "application/mmt-usd+xml": true, "application/mods+xml": true, "application/mrb-consumer+xml": true, "application/mrb-publish+xml": true, "application/msc-ivr+xml": true, "application/msc-mixer+xml": true, "application/msword": false, "application/mud+json": true, "application/nlsml+xml": true, "application/octet-stream": false, "application/oebps-package+xml": true, "application/ogg": false, "application/omdoc+xml": true, "application/p2p-overlay+xml": true, "application/patch-ops-error+xml": true, "application/pdf": false, "application/pgp-encrypted": false, "application/pidf+xml": true, "application/pidf-diff+xml": true, "application/pls+xml": true, "application/poc-settings+xml": true, "application/postscript": true, "application/ppsp-tracker+json": true, "application/problem+json": true, "application/problem+xml": true, "application/provenance+xml": true, 
"application/prs.xsf+xml": true, "application/pskc+xml": true, "application/raml+yaml": true, "application/rdap+json": true, "application/rdf+xml": true, "application/reginfo+xml": true, "application/reputon+json": true, "application/resource-lists+xml": true, "application/resource-lists-diff+xml": true, "application/rfc+xml": true, "application/rlmi+xml": true, "application/rls-services+xml": true, "application/route-apd+xml": true, "application/route-s-tsid+xml": true, "application/route-usd+xml": true, "application/rsd+xml": true, "application/rss+xml": true, "application/rtf": true, "application/samlassertion+xml": true, "application/samlmetadata+xml": true, "application/sbml+xml": true, "application/scaip+xml": true, "application/scim+json": true, "application/sep+xml": true, "application/shf+xml": true, "application/simple-filter+xml": true, "application/smil+xml": true, "application/soap+xml": true, "application/sparql-results+xml": true, "application/spirits-event+xml": true, "application/srgs+xml": true, "application/sru+xml": true, "application/ssdl+xml": true, "application/ssml+xml": true, "application/tar": true, "application/tei+xml": true, "application/thraud+xml": true, "application/ttml+xml": true, "application/urc-grpsheet+xml": true, "application/urc-ressheet+xml": true, "application/urc-targetdesc+xml": true, "application/urc-uisocketdesc+xml": true, "application/vcard+json": true, "application/vcard+xml": true, "application/vnd.1000minds.decision-model+xml": true, "application/vnd.3gpp-prose+xml": true, "application/vnd.3gpp-prose-pc3ch+xml": true, "application/vnd.3gpp.access-transfer-events+xml": true, "application/vnd.3gpp.bsf+xml": true, "application/vnd.3gpp.gmop+xml": true, "application/vnd.3gpp.mcptt-affiliation-command+xml": true, "application/vnd.3gpp.mcptt-floor-request+xml": true, "application/vnd.3gpp.mcptt-info+xml": true, "application/vnd.3gpp.mcptt-location-info+xml": true, "application/vnd.3gpp.mcptt-mbms-usage-info+xml": true, 
"application/vnd.3gpp.mcptt-signed+xml": true, "application/vnd.3gpp.mid-call+xml": true, "application/vnd.3gpp.sms+xml": true, "application/vnd.3gpp.srvcc-ext+xml": true, "application/vnd.3gpp.srvcc-info+xml": true, "application/vnd.3gpp.state-and-event-info+xml": true, "application/vnd.3gpp.ussd+xml": true, "application/vnd.3gpp2.bcmcsinfo+xml": true, "application/vnd.adobe.xdp+xml": true, "application/vnd.amadeus+json": true, "application/vnd.amundsen.maze+xml": true, "application/vnd.android.package-archive": false, "application/vnd.api+json": true, "application/vnd.apothekende.reservation+json": true, "application/vnd.apple.installer+xml": true, "application/vnd.apple.pkpass": false, "application/vnd.avalon+json": true, "application/vnd.avistar+xml": true, "application/vnd.balsamiq.bmml+xml": true, "application/vnd.bbf.usp.msg+json": true, "application/vnd.bekitzur-stech+json": true, "application/vnd.biopax.rdf+xml": true, "application/vnd.capasystems-pg+json": true, "application/vnd.chemdraw+xml": true, "application/vnd.citationstyles.style+xml": true, "application/vnd.collection+json": true, "application/vnd.collection.doc+json": true, "application/vnd.collection.next+json": true, "application/vnd.coreos.ignition+json": true, "application/vnd.criticaltools.wbs+xml": true, "application/vnd.ctct.ws+xml": true, "application/vnd.cyan.dean.root+xml": true, "application/vnd.dart": true, "application/vnd.datapackage+json": true, "application/vnd.dataresource+json": true, "application/vnd.dece.ttml+xml": true, "application/vnd.dm.delegation+xml": true, "application/vnd.document+json": true, "application/vnd.drive+json": true, "application/vnd.dvb.notif-aggregate-root+xml": true, "application/vnd.dvb.notif-container+xml": true, "application/vnd.dvb.notif-generic+xml": true, "application/vnd.dvb.notif-ia-msglist+xml": true, "application/vnd.dvb.notif-ia-registration-request+xml": true, "application/vnd.dvb.notif-ia-registration-response+xml": true, 
"application/vnd.dvb.notif-init+xml": true, "application/vnd.emclient.accessrequest+xml": true, "application/vnd.eprints.data+xml": true, "application/vnd.eszigno3+xml": true, "application/vnd.etsi.aoc+xml": true, "application/vnd.etsi.cug+xml": true, "application/vnd.etsi.iptvcommand+xml": true, "application/vnd.etsi.iptvdiscovery+xml": true, "application/vnd.etsi.iptvprofile+xml": true, "application/vnd.etsi.iptvsad-bc+xml": true, "application/vnd.etsi.iptvsad-cod+xml": true, "application/vnd.etsi.iptvsad-npvr+xml": true, "application/vnd.etsi.iptvservice+xml": true, "application/vnd.etsi.iptvsync+xml": true, "application/vnd.etsi.iptvueprofile+xml": true, "application/vnd.etsi.mcid+xml": true, "application/vnd.etsi.overload-control-policy-dataset+xml": true, "application/vnd.etsi.pstn+xml": true, "application/vnd.etsi.sci+xml": true, "application/vnd.etsi.simservs+xml": true, "application/vnd.etsi.tsl+xml": true, "application/vnd.geo+json": true, "application/vnd.geocube+xml": true, "application/vnd.google-apps.document": false, "application/vnd.google-apps.presentation": false, "application/vnd.google-apps.spreadsheet": false, "application/vnd.google-earth.kml+xml": true, "application/vnd.google-earth.kmz": false, "application/vnd.gov.sk.e-form+xml": true, "application/vnd.gov.sk.xmldatacontainer+xml": true, "application/vnd.hal+json": true, "application/vnd.hal+xml": true, "application/vnd.handheld-entertainment+xml": true, "application/vnd.hc+json": true, "application/vnd.heroku+json": true, "application/vnd.hyper+json": true, "application/vnd.hyper-item+json": true, "application/vnd.hyperdrive+json": true, "application/vnd.ims.lis.v2.result+json": true, "application/vnd.ims.lti.v2.toolconsumerprofile+json": true, "application/vnd.ims.lti.v2.toolproxy+json": true, "application/vnd.ims.lti.v2.toolproxy.id+json": true, "application/vnd.ims.lti.v2.toolsettings+json": true, "application/vnd.ims.lti.v2.toolsettings.simple+json": true, 
"application/vnd.informedcontrol.rms+xml": true, "application/vnd.infotech.project+xml": true, "application/vnd.iptc.g2.catalogitem+xml": true, "application/vnd.iptc.g2.conceptitem+xml": true, "application/vnd.iptc.g2.knowledgeitem+xml": true, "application/vnd.iptc.g2.newsitem+xml": true, "application/vnd.iptc.g2.newsmessage+xml": true, "application/vnd.iptc.g2.packageitem+xml": true, "application/vnd.iptc.g2.planningitem+xml": true, "application/vnd.irepository.package+xml": true, "application/vnd.las.las+json": true, "application/vnd.las.las+xml": true, "application/vnd.liberty-request+xml": true, "application/vnd.llamagraphics.life-balance.exchange+xml": true, "application/vnd.marlin.drm.actiontoken+xml": true, "application/vnd.marlin.drm.conftoken+xml": true, "application/vnd.marlin.drm.license+xml": true, "application/vnd.mason+json": true, "application/vnd.micro+json": true, "application/vnd.miele+json": true, "application/vnd.mozilla.xul+xml": true, "application/vnd.ms-excel": false, "application/vnd.ms-fontobject": true, "application/vnd.ms-office.activex+xml": true, "application/vnd.ms-opentype": true, "application/vnd.ms-outlook": false, "application/vnd.ms-playready.initiator+xml": true, "application/vnd.ms-powerpoint": false, "application/vnd.ms-printdevicecapabilities+xml": true, "application/vnd.ms-printing.printticket+xml": true, "application/vnd.ms-printschematicket+xml": true, "application/vnd.ms-xpsdocument": false, "application/vnd.nearst.inv+json": true, "application/vnd.nokia.conml+xml": true, "application/vnd.nokia.iptv.config+xml": true, "application/vnd.nokia.landmark+xml": true, "application/vnd.nokia.landmarkcollection+xml": true, "application/vnd.nokia.n-gage.ac+xml": true, "application/vnd.nokia.pcd+xml": true, "application/vnd.oasis.opendocument.graphics": false, "application/vnd.oasis.opendocument.presentation": false, "application/vnd.oasis.opendocument.spreadsheet": false, "application/vnd.oasis.opendocument.text": false, 
"application/vnd.oftn.l10n+json": true, "application/vnd.oipf.contentaccessdownload+xml": true, "application/vnd.oipf.contentaccessstreaming+xml": true, "application/vnd.oipf.dae.svg+xml": true, "application/vnd.oipf.dae.xhtml+xml": true, "application/vnd.oipf.mippvcontrolmessage+xml": true, "application/vnd.oipf.spdiscovery+xml": true, "application/vnd.oipf.spdlist+xml": true, "application/vnd.oipf.ueprofile+xml": true, "application/vnd.oipf.userprofile+xml": true, "application/vnd.oma.bcast.associated-procedure-parameter+xml": true, "application/vnd.oma.bcast.drm-trigger+xml": true, "application/vnd.oma.bcast.imd+xml": true, "application/vnd.oma.bcast.notification+xml": true, "application/vnd.oma.bcast.sgdd+xml": true, "application/vnd.oma.bcast.smartcard-trigger+xml": true, "application/vnd.oma.bcast.sprov+xml": true, "application/vnd.oma.cab-address-book+xml": true, "application/vnd.oma.cab-feature-handler+xml": true, "application/vnd.oma.cab-pcc+xml": true, "application/vnd.oma.cab-subs-invite+xml": true, "application/vnd.oma.cab-user-prefs+xml": true, "application/vnd.oma.dd2+xml": true, "application/vnd.oma.drm.risd+xml": true, "application/vnd.oma.group-usage-list+xml": true, "application/vnd.oma.lwm2m+json": true, "application/vnd.oma.pal+xml": true, "application/vnd.oma.poc.detailed-progress-report+xml": true, "application/vnd.oma.poc.final-report+xml": true, "application/vnd.oma.poc.groups+xml": true, "application/vnd.oma.poc.invocation-descriptor+xml": true, "application/vnd.oma.poc.optimized-progress-report+xml": true, "application/vnd.oma.scidm.messages+xml": true, "application/vnd.oma.xcap-directory+xml": true, "application/vnd.omads-email+xml": true, "application/vnd.omads-file+xml": true, "application/vnd.omads-folder+xml": true, "application/vnd.openblox.game+xml": true, "application/vnd.openstreetmap.data+xml": true, "application/vnd.openxmlformats-officedocument.custom-properties+xml": true, 
"application/vnd.openxmlformats-officedocument.customxmlproperties+xml": true, "application/vnd.openxmlformats-officedocument.drawing+xml": true, "application/vnd.openxmlformats-officedocument.drawingml.chart+xml": true, "application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml": true, "application/vnd.openxmlformats-officedocument.drawingml.diagramcolors+xml": true, "application/vnd.openxmlformats-officedocument.drawingml.diagramdata+xml": true, "application/vnd.openxmlformats-officedocument.drawingml.diagramlayout+xml": true, "application/vnd.openxmlformats-officedocument.drawingml.diagramstyle+xml": true, "application/vnd.openxmlformats-officedocument.extended-properties+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.commentauthors+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.comments+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.handoutmaster+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.notesmaster+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.notesslide+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.presentation": false, "application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.presprops+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.slide+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.slidelayout+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.slidemaster+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.slideupdateinfo+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.tablestyles+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.tags+xml": true, 
"application/vnd.openxmlformats-officedocument.presentationml.template.main+xml": true, "application/vnd.openxmlformats-officedocument.presentationml.viewprops+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.calcchain+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.externallink+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcachedefinition+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcacherecords+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.pivottable+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.querytable+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.revisionheaders+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.revisionlog+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.sharedstrings+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": false, "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.sheetmetadata+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.tablesinglecells+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.usernames+xml": true, 
"application/vnd.openxmlformats-officedocument.spreadsheetml.volatiledependencies+xml": true, "application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml": true, "application/vnd.openxmlformats-officedocument.theme+xml": true, "application/vnd.openxmlformats-officedocument.themeoverride+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.document": false, "application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.fonttable+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml": true, "application/vnd.openxmlformats-officedocument.wordprocessingml.websettings+xml": true, "application/vnd.openxmlformats-package.core-properties+xml": true, "application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml": true, "application/vnd.openxmlformats-package.relationships+xml": true, "application/vnd.oracle.resource+json": true, "application/vnd.otps.ct-kip+xml": true, "application/vnd.pagerduty+json": true, "application/vnd.paos+xml": true, "application/vnd.poc.group-advertisement+xml": true, "application/vnd.pwg-xhtml-print+xml": true, "application/vnd.radisys.moml+xml": true, "application/vnd.radisys.msml+xml": true, 
"application/vnd.radisys.msml-audit+xml": true, "application/vnd.radisys.msml-audit-conf+xml": true, "application/vnd.radisys.msml-audit-conn+xml": true, "application/vnd.radisys.msml-audit-dialog+xml": true, "application/vnd.radisys.msml-audit-stream+xml": true, "application/vnd.radisys.msml-conf+xml": true, "application/vnd.radisys.msml-dialog+xml": true, "application/vnd.radisys.msml-dialog-base+xml": true, "application/vnd.radisys.msml-dialog-fax-detect+xml": true, "application/vnd.radisys.msml-dialog-fax-sendrecv+xml": true, "application/vnd.radisys.msml-dialog-group+xml": true, "application/vnd.radisys.msml-dialog-speech+xml": true, "application/vnd.radisys.msml-dialog-transform+xml": true, "application/vnd.recordare.musicxml+xml": true, "application/vnd.restful+json": true, "application/vnd.route66.link66+xml": true, "application/vnd.shootproof+json": true, "application/vnd.siren+json": true, "application/vnd.software602.filler.form+xml": true, "application/vnd.solent.sdkm+xml": true, "application/vnd.sun.wadl+xml": true, "application/vnd.syncml+xml": true, "application/vnd.syncml.dm+xml": true, "application/vnd.syncml.dmddf+xml": true, "application/vnd.syncml.dmtnds+xml": true, "application/vnd.tableschema+json": true, "application/vnd.think-cell.ppttc+json": true, "application/vnd.tmd.mediaflex.api+xml": true, "application/vnd.uoml+xml": true, "application/vnd.vel+json": true, "application/vnd.wv.csp+xml": true, "application/vnd.wv.ssp+xml": true, "application/vnd.xacml+json": true, "application/vnd.xmi+xml": true, "application/vnd.yamaha.openscoreformat.osfpvg+xml": true, "application/vnd.zzazz.deck+xml": true, "application/voicexml+xml": true, "application/voucher-cms+json": true, "application/wasm": true, "application/watcherinfo+xml": true, "application/webpush-options+json": true, "application/wsdl+xml": true, "application/wspolicy+xml": true, "application/x-7z-compressed": false, "application/x-arj": false, "application/x-bdoc": false, 
"application/x-bzip": false, "application/x-bzip2": false, "application/x-deb": false, "application/x-dtbncx+xml": true, "application/x-dtbook+xml": true, "application/x-dtbresource+xml": true, "application/x-dvi": false, "application/x-httpd-php": true, "application/x-java-jnlp-file": false, "application/x-javascript": true, "application/x-latex": false, "application/x-mpegurl": false, "application/x-ns-proxy-autoconfig": true, "application/x-pkcs12": false, "application/x-rar-compressed": false, "application/x-sh": true, "application/x-shockwave-flash": false, "application/x-stuffit": false, "application/x-tar": true, "application/x-virtualbox-hdd": true, "application/x-virtualbox-ova": true, "application/x-virtualbox-ovf": true, "application/x-virtualbox-vbox": true, "application/x-virtualbox-vbox-extpack": false, "application/x-virtualbox-vdi": true, "application/x-virtualbox-vhd": true, "application/x-virtualbox-vmdk": true, "application/x-web-app-manifest+json": true, "application/x-www-form-urlencoded": true, "application/x-xliff+xml": true, "application/x-xpinstall": false, "application/xacml+xml": true, "application/xaml+xml": true, "application/xcap-att+xml": true, "application/xcap-caps+xml": true, "application/xcap-diff+xml": true, "application/xcap-el+xml": true, "application/xcap-error+xml": true, "application/xcap-ns+xml": true, "application/xcon-conference-info+xml": true, "application/xcon-conference-info-diff+xml": true, "application/xenc+xml": true, "application/xhtml+xml": true, "application/xhtml-voice+xml": true, "application/xliff+xml": true, "application/xml": true, "application/xml-dtd": true, "application/xml-patch+xml": true, "application/xmpp+xml": true, "application/xop+xml": true, "application/xproc+xml": true, "application/xslt+xml": true, "application/xspf+xml": true, "application/xv+xml": true, "application/yang-data+json": true, "application/yang-data+xml": true, "application/yang-patch+json": true, "application/yang-patch+xml": 
true, "application/yin+xml": true, "application/zip": false, "audio/3gpp": false, "audio/basic": false, "audio/l24": false, "audio/mp3": false, "audio/mp4": false, "audio/mpeg": false, "audio/ogg": false, "audio/vnd.rn-realaudio": false, "audio/vnd.wave": false, "audio/vorbis": false, "audio/wav": false, "audio/wave": false, "audio/webm": false, "audio/x-aac": false, "audio/x-caf": false, "font/otf": true, "image/apng": false, "image/bmp": true, "image/gif": false, "image/jp2": false, "image/jpeg": false, "image/jpm": false, "image/jpx": false, "image/pjpeg": false, "image/png": false, "image/svg+xml": true, "image/tiff": false, "image/vnd.adobe.photoshop": true, "image/x-icon": true, "image/x-ms-bmp": true, "image/x-xcf": false, "message/http": false, "message/imdn+xml": true, "message/partial": false, "message/rfc822": true, "model/gltf+json": true, "model/gltf-binary": true, "model/iges": false, "model/mesh": false, "model/vnd.collada+xml": true, "model/vnd.moml+xml": true, "model/vrml": false, "model/x3d+binary": false, "model/x3d+vrml": false, "model/x3d+xml": true, "multipart/alternative": false, "multipart/encrypted": false, "multipart/form-data": false, "multipart/mixed": false, "multipart/related": false, "multipart/signed": false, "text/cache-manifest": true, "text/calender": true, "text/cmd": true, "text/css": true, "text/csv": true, "text/html": true, "text/javascript": true, "text/jsx": true, "text/markdown": true, "text/n3": true, "text/plain": true, "text/richtext": true, "text/rtf": true, "text/tab-separated-values": true, "text/uri-list": true, "text/vcard": true, "text/vtt": true, "text/x-gwt-rpc": true, "text/x-jquery-tmpl": true, "text/x-markdown": true, "text/x-org": true, "text/x-processing": true, "text/x-suse-ymp": true, "text/xml": true, "video/mp4": false, "video/mpeg": false, "video/ogg": false, "video/quicktime": false, "video/webm": false, "video/x-flv": false, "video/x-matroska": false, "video/x-ms-wmv": false, "x-shader/x-fragment": 
true, "x-shader/x-vertex": true,
}

================================================ FILE: catfs/mio/compress/reader.go ================================================

package compress

import (
	"fmt"
	"io"
	"sort"

	"github.com/sahib/brig/catfs/mio/chunkbuf"
)

// Reader implements a decompressing reader.
// It decodes the chunked stream produced by Writer and supports random
// access via the chunk index stored in the stream's trailer.
type Reader struct {
	// Underlying raw, compressed datastream.
	rawR io.ReadSeeker

	// Index with records which contain chunk offsets.
	index []record

	// Buffer holds currently read data; maxChunkSize.
	chunkBuf *chunkbuf.ChunkBuffer

	// Structure with parsed trailer.
	trailer *trailer

	// Current seek offset in the compressed stream.
	rawSeekOffset int64

	// Current seek offset in the uncompressed stream.
	zipSeekOffset int64

	// Marker to identify initial read.
	isInitialRead bool

	// Holds algorithm interface.
	algo Algorithm

	// buffer for reading in the raw stream for decoding.
	rawBuf []byte

	// buffer to hold one chunks so Read() can take chunks of it.
	decBuf []byte
}

// Seek implements io.Seeker.
// SeekEnd and SeekCurrent are rewritten as SeekStart offsets;
// seeking past the end (or before the start) yields io.EOF.
func (r *Reader) Seek(destOff int64, whence int) (int64, error) {
	switch whence {
	case io.SeekEnd:
		if destOff > 0 {
			return 0, io.EOF
		}

		if err := r.parseTrailerIfNeeded(); err != nil {
			return 0, err
		}

		// The last index record's rawOff is the uncompressed stream size.
		return r.Seek(r.index[len(r.index)-1].rawOff+destOff, io.SeekStart)
	case io.SeekCurrent:
		return r.Seek(r.zipSeekOffset+destOff, io.SeekStart)
	}

	if err := r.parseTrailerIfNeeded(); err != nil {
		return 0, err
	}

	if destOff < 0 {
		return 0, io.EOF
	}

	// Find the chunk the destination falls into, and the chunk we are in now.
	destRecord, _ := r.chunkLookup(destOff, true)
	currRecord, _ := r.chunkLookup(r.zipSeekOffset, true)
	r.rawSeekOffset = destRecord.zipOff
	r.zipSeekOffset = destOff

	// Don't re-read if offset is in current chunk.
if currRecord.rawOff != destRecord.rawOff || !r.isInitialRead {
	if _, err := r.readZipChunk(); err != nil && err != io.EOF {
		return 0, err
	}
}

// Position inside the freshly decoded chunk buffer.
toRead := destOff - destRecord.rawOff
if _, err := r.chunkBuf.Seek(toRead, io.SeekStart); err != nil {
	return 0, err
}

return destOff, nil
}

// Return start (prevRecord) and end (currRecord) of a chunk currOff is located
// in. If currOff is 0, the first and second record is returned. If currOff is
// at the end of file the end record (currRecord) is returned twice. The offset
// difference (chunksize) between prevRecord and currRecord is then equal to 0.
func (r *Reader) chunkLookup(currOff int64, isRawOff bool) (*record, *record) {
	// Get smallest index that is before given currOff.
	// isRawOff selects whether currOff is an uncompressed (raw) or
	// compressed (zip) offset.
	i := sort.Search(len(r.index), func(i int) bool {
		if isRawOff {
			return r.index[i].rawOff > currOff
		}

		return r.index[i].zipOff > currOff
	})

	// Beginning of the file, first chunk: prev offset is 0, curr offset is 1.
	if i == 0 {
		return &r.index[i], &r.index[i+1]
	}

	// End of the file, last chunk: prev and curr offset is the last index.
	if i == len(r.index) {
		return &r.index[i-1], &r.index[i-1]
	}

	return &r.index[i-1], &r.index[i]
}

// parseTrailerIfNeeded lazily parses header, trailer and chunk index of the
// underlying stream exactly once; subsequent calls are no-ops.
func (r *Reader) parseTrailerIfNeeded() error {
	if r.trailer != nil {
		// Trailer was already parsed.
		return nil
	}

	// Attempt to read the front header:
	headerBuf := [headerSize]byte{}
	if _, err := io.ReadFull(r.rawR, headerBuf[:]); err != nil {
		return err
	}

	header, err := readHeader(headerBuf[:])
	if err != nil {
		return err
	}

	// Goto end of file and read trailer buffer.
	if _, err := r.rawR.Seek(-trailerSize, io.SeekEnd); err != nil {
		return err
	}

	buf := [trailerSize]byte{}
	n, err := io.ReadFull(r.rawR, buf[:])
	if err != nil {
		return err
	}

	if n != trailerSize {
		return fmt.Errorf("read trailer was too small: %d bytes", n)
	}

	r.trailer = &trailer{}
	r.trailer.unmarshal(buf[:])

	algo, err := algorithmFromType(header.algo)
	if err != nil {
		return err
	}

	// Allocate the rawBuf depending on the algorithm that was used.
// Every compression algorithm might produce data that is bigger // than the original data in edge cases. `rawBuf` has to be big // enough to account for this edge case. We double check this // during Read() to avoid overflows. r.algo = algo r.rawBuf = make([]byte, algo.MaxEncodeBufferSize()) // Seek and read index into buffer. seekIdx := -(int64(r.trailer.indexSize) + trailerSize) if _, err := r.rawR.Seek(seekIdx, io.SeekEnd); err != nil { return err } indexBuf := make([]byte, r.trailer.indexSize) if _, err := io.ReadFull(r.rawR, indexBuf); err != nil { return err } // Build index with records. A record encapsulates a raw offset and the // compressed offset it is mapped to. prevRecord := record{-1, -1} for i := uint64(0); i < (r.trailer.indexSize / indexChunkSize); i++ { currRecord := record{} currRecord.unmarshal(indexBuf) if prevRecord.rawOff >= currRecord.rawOff { return ErrBadIndex } if prevRecord.zipOff >= currRecord.zipOff { return ErrBadIndex } r.index = append(r.index, currRecord) indexBuf = indexBuf[indexChunkSize:] } // Set Reader to beginning of file if _, err := r.rawR.Seek(headerSize, io.SeekStart); err != nil { return err } r.rawSeekOffset = headerSize r.zipSeekOffset = 0 return nil } // WriteTo implements io.WriterTo func (r *Reader) WriteTo(w io.Writer) (int64, error) { if err := r.parseTrailerIfNeeded(); err != nil { return 0, err } written := int64(0) n, cerr := io.Copy(w, r.chunkBuf) if cerr != nil { return n, cerr } written += n for { decData, rerr := r.readZipChunk() if rerr == io.EOF { return written, nil } if rerr != nil { return written, rerr } n, werr := w.Write(decData) written += int64(n) if werr != nil { return written, werr } } } // Read reads len(p) bytes from the compressed stream into p. 
func (r *Reader) Read(p []byte) (int, error) {
	if err := r.parseTrailerIfNeeded(); err != nil {
		return 0, err
	}

	read := 0

	// Serve p from the decoded chunk buffer, decoding further chunks
	// until p is filled or the stream ends.
	for {
		if r.chunkBuf.Len() != 0 {
			n, err := r.chunkBuf.Read(p)

			// NOTE: Read() might return io.EOF to indicate that the
			// chunk is exhausted. We should look at the next chunk
			// (readZipChunk will figure out if there are any)
			if err != nil && err != io.EOF {
				return n, err
			}

			r.zipSeekOffset += int64(n)
			read += n
			p = p[n:]
		}

		if len(p) == 0 {
			break
		}

		if _, err := r.readZipChunk(); err != nil {
			return read, err
		}
	}

	return read, nil
}

// fixZipChunk positions the underlying reader at the start of the chunk
// containing the current compressed seek offset and returns that chunk's
// compressed size. Returns io.EOF when there is no further chunk.
func (r *Reader) fixZipChunk() (int64, error) {
	// Get the start and end record of the chunk currOff is located in.
	prevRecord, currRecord := r.chunkLookup(r.rawSeekOffset, false)
	if currRecord == nil || prevRecord == nil {
		return 0, ErrBadIndex
	}

	// Determinate uncompressed chunksize; should only be 0 on empty file or at the end of file.
	chunkSize := currRecord.zipOff - prevRecord.zipOff
	if chunkSize == 0 {
		return 0, io.EOF
	}

	// Set Reader to compressed offset.
	if _, err := r.rawR.Seek(prevRecord.zipOff, io.SeekStart); err != nil {
		return 0, err
	}

	r.rawSeekOffset = currRecord.zipOff
	r.zipSeekOffset = prevRecord.rawOff
	r.isInitialRead = false
	return chunkSize, nil
}

// readZipChunk reads the next compressed chunk, decodes it into decBuf
// and resets chunkBuf to the decoded data.
func (r *Reader) readZipChunk() ([]byte, error) {
	// Get current position of the Reader; offset of the compressed file.
	r.chunkBuf.Reset(nil)

	chunkSize, err := r.fixZipChunk()
	if err != nil {
		return nil, err
	}

	if len(r.rawBuf) < int(chunkSize) {
		// NOTE: When len(r.rawBuf) is < chunkSize ErrShortBuffer is returned.
		// This should only happen on malicious input with far too high chunkSize.
		// r.rawBuf should be able to hold any compressed stream derived from any
		// block with size maxChunkSize.
		return nil, io.ErrShortBuffer
	}

	n, err := io.ReadFull(r.rawR, r.rawBuf[:int(chunkSize)])
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return nil, err
	}

	// decData should be a slice of `r.decBuf` to avoid allocations.
decData, err := r.algo.Decode(r.decBuf, r.rawBuf[:n])
if err != nil {
	return nil, err
}

r.chunkBuf.Reset(decData)
return decData, nil
}

// NewReader returns a new ReadSeeker with compression support. As random access
// is the purpose of this layer, a ReadSeeker is required as parameter. The used
// compression algorithm is chosen based on trailer information.
func NewReader(r io.ReadSeeker) *Reader {
	return &Reader{
		rawR:     r,
		chunkBuf: chunkbuf.NewChunkBuffer([]byte{}),
		decBuf:   make([]byte, maxChunkSize),
	}
}

================================================ FILE: catfs/mio/compress/writer.go ================================================

package compress

import (
	"bytes"
	"io"

	"github.com/sahib/brig/util"
)

// Writer implements a compression writer.
// Data is buffered into maxChunkSize chunks; each chunk is compressed
// individually and an index record of its offsets is kept for the trailer.
type Writer struct {
	// Underlying raw, uncompressed data stream.
	rawW io.Writer

	// Buffers data into maxChunkSize chunks.
	chunkBuf *bytes.Buffer

	// Index with records which contain chunk offsets.
	index []record

	// Accumulator representing uncompressed offset.
	rawOff int64

	// Accumulator representing compressed offset.
	zipOff int64

	// Holds trailer data.
	trailer *trailer

	// Holds algorithm interface.
	algo Algorithm

	// Type of the algorithm
	algoType AlgorithmType

	// Becomes true after the first write.
	headerWritten bool

	// encBuf is the scratch buffer handed to algo.Encode in flushBuffer.
	encBuf []byte
}

// addRecordToIndex records the current (raw, zip) offset pair.
func (w *Writer) addRecordToIndex() {
	w.index = append(w.index, record{w.rawOff, w.zipOff})
}

// flushBuffer compresses data as one chunk and writes it to the
// underlying stream. Empty input is a no-op.
func (w *Writer) flushBuffer(data []byte) error {
	if len(data) <= 0 {
		return nil
	}

	// Add record with start offset of the current chunk.
	w.addRecordToIndex()

	// Compress and flush the current chunk.
	// encData should be a slice of `w.encBuf`
	encData, err := w.algo.Encode(w.encBuf, data)
	if err != nil {
		return err
	}

	n, err := w.rawW.Write(encData)
	if err != nil {
		return err
	}

	// Update offset for the current chunk.
w.rawOff += int64(len(data))
w.zipOff += int64(n)
return nil
}

// writeHeaderIfNeeded writes the stream header exactly once, before any
// chunk data. zipOff is advanced so index records account for it.
func (w *Writer) writeHeaderIfNeeded() error {
	if w.headerWritten {
		return nil
	}

	if _, err := w.rawW.Write(makeHeader(w.algoType, currentVersion)); err != nil {
		return err
	}

	w.headerWritten = true
	w.zipOff += headerSize
	return nil
}

// ReadFrom implements io.ReaderFrom
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	read := 0
	buf := make([]byte, maxChunkSize)

	if err := w.writeHeaderIfNeeded(); err != nil {
		return 0, err
	}

	for {
		// Only last block may be < maxChunkSize.
		// So we need to make sure to fill the buffer as best we can.
		n, rerr := io.ReadFull(r, buf)
		read += n

		if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
			return int64(read), rerr
		}

		werr := w.flushBuffer(buf[:n])
		if werr != nil && werr != io.EOF {
			return int64(read), werr
		}

		if werr == io.EOF || rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			return int64(read), nil
		}
	}
}

// Write buffers p and flushes full maxChunkSize chunks; a trailing
// partial chunk stays buffered until the next Write or Close.
func (w *Writer) Write(p []byte) (n int, err error) {
	if err := w.writeHeaderIfNeeded(); err != nil {
		return 0, err
	}

	written := len(p)

	// Compress only maxChunkSize equal chunks.
	for {
		n, _ := w.chunkBuf.Write(p[:util.Min(len(p), maxChunkSize)])

		if w.chunkBuf.Len() < maxChunkSize {
			break
		}

		if err := w.flushBuffer(w.chunkBuf.Next(maxChunkSize)); err != nil {
			return 0, err
		}

		p = p[n:]
	}

	return written, nil
}

// NewWriter returns a WriteCloser with compression support.
func NewWriter(w io.Writer, algoType AlgorithmType) (*Writer, error) {
	algo, err := algorithmFromType(algoType)
	if err != nil {
		return nil, err
	}

	return &Writer{
		rawW:     w,
		algo:     algo,
		algoType: algoType,
		encBuf:   make([]byte, algo.MaxEncodeBufferSize()),
		chunkBuf: &bytes.Buffer{},
		trailer:  &trailer{},
	}, nil
}

// Close cleans up internal resources.
// Make sure to call close always since it might write data.
func (w *Writer) Close() error {
	if err := w.writeHeaderIfNeeded(); err != nil {
		return err
	}

	// Write remaining bytes left in buffer and update index.
if err := w.flushBuffer(w.chunkBuf.Bytes()); err != nil { return err } w.addRecordToIndex() // Handle trailer of uncompressed file. // Write compression index trailer and close stream. w.trailer.indexSize = uint64(indexChunkSize * len(w.index)) indexBuf := make([]byte, w.trailer.indexSize) indexBufStartOff := indexBuf for _, record := range w.index { record.marshal(indexBuf) indexBuf = indexBuf[indexChunkSize:] } if n, err := w.rawW.Write(indexBufStartOff); err != nil || uint64(n) != w.trailer.indexSize { return err } // Write trailer buffer (algo, chunksize, indexsize) // at the end of file and close the stream. trailerSizeBuf := make([]byte, trailerSize) w.trailer.marshal(trailerSizeBuf) if _, err := w.rawW.Write(trailerSizeBuf); err != nil { return err } return nil } ================================================ FILE: catfs/mio/doc.go ================================================ // Package mio (short for memory input/output) implements the layered io stack // of brig. This includes currently three major parts: // // - encrypt - Encryption and Decryption layer with seeking support. // - compress - Seekable Compression and Decompression with exchangable algorithms. // - overlay - In-Memory write overlay over a io.Reader with seek support. // // This package itself contains utils that stack those on top of each of other // in an already usable fashion. package mio ================================================ FILE: catfs/mio/encrypt/format.go ================================================ // Package encrypt implements the encryption layer of brig. // The file format used looks something like this: // // [HEADER][[BLOCKHEADER][PAYLOAD]...] // // HEADER is 20+16 bytes big and contains the following fields: // - 8 Byte: Magic number (to identify non-brig files quickly) // - 4 Byte: Flags (describing the stream) // - 2 Byte: Key length in bytes // - 2 Byte: Reserved for future use. 
// - 4 Byte: Maximum size of each block (last may be less) // - 16 Byte: MAC protecting the header from forgery // // BLOCKHEADER contains the following fields: // - 8 Byte: Nonce: Derived from the current block number. // The block number is checked to be correct on decryption. // // PAYLOAD contains the actual encrypted data, which includes a MAC at the end. // The size of the MAC depends on the algorithm, for poly1305 it's 16 bytes. // // All header metadata is encoded in little endian. // // Reader/Writer are capable or reading/writing this format. Additionally, // Reader supports efficient seeking into the encrypted data, provided the // underlying datastream supports seeking. SEEK_END is only supported when the // number of encrypted blocks is present in the header. package encrypt import ( "bytes" "crypto/aes" "crypto/cipher" "crypto/hmac" "encoding/binary" "errors" "fmt" "io" chacha "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/sha3" ) // Flags indicate with what options a stream was encoded. // Some flags are not compatible to each other, see below. type Flags int32 // Possible ciphers in Counter mode: const ( // FlagEmpty is invalid FlagEmpty = Flags(0) // FlagEncryptAES256GCM indicates the stream was encrypted with AES256 in GCM mode. // This should be fast on modern CPUs. FlagEncryptAES256GCM = Flags(1) << iota // FlagEncryptChaCha20 incidate that the stream was encrypted with ChaCha20. // This can be a good choice if your CPU does not support the AES-NI instruction set. FlagEncryptChaCha20 // reserve some flags for more encryption types. // no particular reason, just want to have enc-type flags to be in line. flagReserved1 flagReserved2 flagReserved3 flagReserved4 flagReserved5 flagReserved6 // FlagCompressedInside indicates that the encrypted data was also compressed. // This can be used to decide at runtime what streaming is needed. 
FlagCompressedInside
)

// Other constants:
const (
	// Size of the header mac:
	macSize = 16

	// current file format version, increment on incompatible changes.
	version = 1

	// Size of the initial header:
	headerSize = 20 + macSize

	// Default maxBlockSize if not set
	defaultMaxBlockSize = 64 * 1024

	// Default copy buffer sizes for Decrypt()/Encrypt() below.
	defaultDecBufferSize = defaultMaxBlockSize
	defaultEncBufferSize = defaultMaxBlockSize + 40
)

var (
	// MagicNumber contains the first 8 byte of every brig header.
	// For various reasons, it is the ascii string "moosecat".
	MagicNumber = []byte{
		0x6d, 0x6f, 0x6f, 0x73,
		0x65, 0x63, 0x61, 0x74,
	}
)

////////////////////
// Header Parsing //
////////////////////

// GenerateHeader creates a valid header for the format file
func GenerateHeader(key []byte, maxBlockSize int64, flags Flags) []byte {
	// All multi-byte fields are encoded in little endian (the original
	// comment here said "big endian", contradicting the PutUint32 calls
	// below and the package documentation).
	header := []byte{
		// magic number (8 Byte):
		0, 0, 0, 0, 0, 0, 0, 0,
		// Flags (4 byte):
		0, 0, 0, 0,
		// Key length (4 Byte):
		0, 0, 0, 0,
		// Block length (4 Byte):
		0, 0, 0, 0,
		// MAC Header (16 Byte):
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	}

	// Magic number:
	copy(header[:len(MagicNumber)], MagicNumber)
	binary.LittleEndian.PutUint32(header[8:12], uint32(flags))

	// Encode key size (static at the moment):
	binary.LittleEndian.PutUint32(header[12:16], uint32(32))

	// Encode max block size:
	binary.LittleEndian.PutUint32(header[16:20], uint32(maxBlockSize))

	// Calculate a MAC of the header; this needs to be done last:
	headerMac := hmac.New(sha3.New224, key)
	if _, err := headerMac.Write(header[:headerSize-macSize]); err != nil {
		// NOTE(review): a nil return signals failure to the caller here;
		// hmac writes do not fail in practice — confirm callers handle nil.
		return nil
	}

	// Copy the MAC to the output:
	shortHeaderMac := headerMac.Sum(nil)[:macSize]
	copy(header[headerSize-macSize:headerSize], shortHeaderMac)
	return header
}

// HeaderInfo represents a parsed header.
type HeaderInfo struct {
	// Version of the file format. Currently always 1.
	Version uint16

	// Cipher type used in the file.
	CipherBit Flags

	// KeyLen is the number of bytes in the encryption key.
	KeyLen uint32

	// BlockLen is the max.
number of bytes in a block.
	// The last block may be smaller.
	BlockLen uint32

	// Flags control the encryption algorithm and other things.
	Flags Flags
}

var (
	// ErrSmallHeader is returned when the header is too small to parse.
	// Usually happens when trying to decrypt a raw stream.
	ErrSmallHeader = errors.New("header is too small")

	// ErrBadMagic is returned when the stream does not start with the magic number.
	// Usually happens when trying to decrypt a raw or compressed stream.
	ErrBadMagic = errors.New("magic number missing")

	// ErrBadFlags means that you passed an invalid flags combination
	// or the stream was modified to have wrong flags.
	ErrBadFlags = errors.New("inconsistent header flags")

	// ErrBadHeaderMAC means that the header is not what the writer originally
	// put into the stream. Usually means somebody or something changed it.
	ErrBadHeaderMAC = errors.New("header mac differs from expected")
)

// cipherTypeBitFromFlags extracts the single cipher bit out of flags.
// Exactly one cipher bit must be set; otherwise ErrBadFlags is returned.
func cipherTypeBitFromFlags(flags Flags) (Flags, error) {
	var cipherBit Flags
	var bits = []Flags{
		FlagEncryptAES256GCM,
		FlagEncryptChaCha20,
	}

	for _, bit := range bits {
		if flags&bit == 0 {
			continue
		}

		if cipherBit != 0 {
			// only one bit at the same time allowed.
			return 0, ErrBadFlags
		}

		cipherBit = bit
	}

	if cipherBit == 0 {
		// no algorithm set: also error out.
		return 0, ErrBadFlags
	}

	return cipherBit, nil
}

// ParseHeader parses the header of the format file. Returns the flags, key
// and block length. If parsing fails, an error is returned.
func ParseHeader(header, key []byte) (*HeaderInfo, error) { if len(header) < len(MagicNumber) { return nil, ErrSmallHeader } if bytes.Compare(header[:len(MagicNumber)], MagicNumber) != 0 { return nil, ErrBadMagic } if len(header) < headerSize { return nil, ErrSmallHeader } flags := Flags(binary.LittleEndian.Uint32(header[8:12])) keyLen := binary.LittleEndian.Uint32(header[12:16]) blockLen := binary.LittleEndian.Uint32(header[16:20]) cipherBit, err := cipherTypeBitFromFlags(flags) if err != nil { return nil, err } // Check the header mac: headerMac := hmac.New(sha3.New224, key) if _, err := headerMac.Write(header[:headerSize-macSize]); err != nil { return nil, err } storedMac := header[headerSize-macSize : headerSize] shortHeaderMac := headerMac.Sum(nil)[:macSize] if !hmac.Equal(shortHeaderMac, storedMac) { return nil, ErrBadHeaderMAC } return &HeaderInfo{ Version: version, CipherBit: cipherBit, KeyLen: keyLen, BlockLen: blockLen, Flags: flags, }, nil } ////////////////////// // Common Utilities // ////////////////////// func createAEADWorker(cipherType Flags, key []byte) (cipher.AEAD, error) { switch cipherType { case FlagEncryptAES256GCM: block, err := aes.NewCipher(key) if err != nil { return nil, err } return cipher.NewGCM(block) case FlagEncryptChaCha20: return chacha.New(key) default: return nil, fmt.Errorf("no such cipher type: %d", cipherType) } } type aeadCommon struct { // Nonce that form the first aead.NonceSize() bytes // of the output nonce []byte // Key used for encryption/decryption key []byte // For more information, see: // https://en.wikipedia.org/wiki/Authenticated_encryption aead cipher.AEAD // Buffer for encrypted data (maxBlockSize + overhead) encBuf []byte } func (c *aeadCommon) initAeadCommon(key []byte, cipherBit Flags, maxBlockSize int64) error { aead, err := createAEADWorker(cipherBit, key) if err != nil { return err } c.encBuf = make([]byte, 0, maxBlockSize+int64(aead.Overhead())) c.nonce = make([]byte, aead.NonceSize()) c.aead = aead 
c.key = key
return nil
}

// Encrypt is a utility function which encrypts the data from source with key
// and writes the resulting encrypted data to dest.
func Encrypt(key []byte, source io.Reader, dest io.Writer, flags Flags) (int64, error) {
	layer, err := NewWriter(dest, key, flags)
	if err != nil {
		return 0, err
	}

	n, err := io.CopyBuffer(layer, source, make([]byte, defaultEncBufferSize))
	if err != nil {
		return n, err
	}

	// Close flushes any remaining buffered block.
	return n, layer.Close()
}

// Decrypt is a utility function which decrypts the data from source with key
// and writes the resulting decrypted data to dest.
// (comment fix: the original said "encrypted data", but this writes plaintext)
func Decrypt(key []byte, source io.Reader, dest io.Writer) (int64, error) {
	layer, err := NewReader(source, key)
	if err != nil {
		return 0, err
	}

	return io.CopyBuffer(dest, layer, make([]byte, defaultDecBufferSize))
}

================================================ FILE: catfs/mio/encrypt/format_test.go ================================================

package encrypt

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"testing"

	"github.com/sahib/brig/util"
	"github.com/sahib/brig/util/testutil"
	"github.com/stretchr/testify/require"
)

// TestKey is a static 32 byte key shared by the tests in this file.
var TestKey = []byte("01234567890ABCDE01234567890ABCDE")

const ExtraDebug = false

// openFiles opens `from` for reading and creates/truncates `to` for writing.
// On error both descriptors are left closed.
func openFiles(from, to string) (*os.File, *os.File, error) {
	fdFrom, err := os.Open(from)
	if err != nil {
		return nil, nil, err
	}

	fdTo, err := os.OpenFile(to, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755)
	if err != nil {
		fdFrom.Close()
		return nil, nil, err
	}

	return fdFrom, fdTo, nil
}

// encryptFile encrypts `from` into `to` with key.
func encryptFile(key []byte, from, to string) (n int64, outErr error) {
	fdFrom, fdTo, err := openFiles(from, to)
	if err != nil {
		return 0, err
	}

	defer func() {
		// Close both descriptors; a close error is reported via outErr.
		// (comment fix: the original claimed only fdTo needs closing,
		// but the code below closes both)
if err := fdFrom.Close(); err != nil { outErr = err } if err := fdTo.Close(); err != nil { outErr = err } }() return Encrypt(key, fdFrom, fdTo, FlagEncryptAES256GCM) } func decryptFile(key []byte, from, to string) (n int64, outErr error) { fdFrom, fdTo, err := openFiles(from, to) if err != nil { return 0, err } defer func() { if err := fdTo.Close(); err != nil { outErr = err return } if err := fdFrom.Close(); err != nil { outErr = err return } }() return Decrypt(key, fdFrom, fdTo) } func remover(t *testing.T, path string) { if err := os.Remove(path); err != nil { t.Errorf("Could not remove temp file: %v", err) } } func testSimpleEncDec(t *testing.T, size int64) { path := testutil.CreateFile(int64(size)) defer remover(t, path) encPath := path + "_enc" decPath := path + "_dec" _, err := encryptFile(TestKey, path, encPath) defer remover(t, encPath) if err != nil { t.Errorf("Encrypt failed: %v", err) } _, err = decryptFile(TestKey, encPath, decPath) defer remover(t, decPath) if err != nil { t.Errorf("Decrypt failed: %v", err) } a, _ := ioutil.ReadFile(path) b, _ := ioutil.ReadFile(decPath) c, _ := ioutil.ReadFile(encPath) if !bytes.Equal(a, b) { t.Errorf("Source and decrypted not equal") } if bytes.Equal(a, c) && size != 0 { t.Errorf("Source was not encrypted (same as source)") t.Errorf("%v|||%v|||%v", a, b, c) } } func TestSimpleEncDec(t *testing.T) { t.Parallel() for _, size := range SizeTests { if ExtraDebug { t.Logf("Testing SimpleEncDec for size %d", size) } t.Run(fmt.Sprintf("size-%d", size), func(t *testing.T) { testSimpleEncDec(t, size) }) } } var SizeTests = []int64{ 0, 1, defaultMaxBlockSize - 1, defaultMaxBlockSize, defaultMaxBlockSize + 10, defaultDecBufferSize - 1, defaultDecBufferSize, defaultDecBufferSize + 1, defaultEncBufferSize - 1, defaultEncBufferSize, 7 * defaultEncBufferSize, 7*defaultEncBufferSize - 1, defaultEncBufferSize + 1, } type seekTest struct { Whence int Offset float64 Error error } var SeekTests = []seekTest{ // Jump to the mid: 
{io.SeekStart, 0.5, nil}, // Should stay the same: {io.SeekCurrent, 0, nil}, // Jump a quarter forth: {io.SeekCurrent, 0.25, nil}, // Jump a half back: {io.SeekCurrent, -0.5, nil}, // Jump back to the half: {io.SeekCurrent, 0.25, nil}, // See if SEEK_END works: {io.SeekEnd, -0.5, nil}, // This triggered a crash earlier: {io.SeekEnd, -2, io.EOF}, // Im guessing now: {io.SeekEnd, -1.0 / 4096, nil}, } func BenchmarkEncDec(b *testing.B) { for n := 0; n < b.N; n++ { testSimpleEncDec(nil, defaultMaxBlockSize*100) } } func TestSeek(t *testing.T) { for _, size := range SizeTests { testSeek(t, int64(size), false, false) testSeek(t, int64(size), false, true) testSeek(t, int64(size), true, false) testSeek(t, int64(size), true, true) if t.Failed() { break } } } func testSeek(t *testing.T, N int64, readFrom, writeTo bool) { sourceData := testutil.CreateDummyBuf(N) source := bytes.NewBuffer(sourceData) shared := &bytes.Buffer{} if ExtraDebug { t.Logf("Testing seek for size %d", N) } enc, err := NewWriter(shared, TestKey, FlagEncryptAES256GCM) if err != nil { t.Errorf("Creating an encrypted writer failed: %v", err) return } // Encrypt: if _, err = testutil.DumbCopy(enc, source, readFrom, writeTo); err != nil { t.Errorf("copy(enc, source) failed %v", err) return } // This needs to be here, since close writes // left over data to the write stream if err = enc.Close(); err != nil { t.Errorf("close(enc): %v", err) return } sharedReader := bytes.NewReader(shared.Bytes()) decLayer, err := NewReader(sharedReader, TestKey) if err != nil { t.Errorf("creating new reader failed: %v", err) return } lastJump := int64(0) for _, test := range SeekTests { lastJump = testSeekOneWhence( t, N, readFrom, writeTo, lastJump, test, decLayer, sourceData, ) } } func testSeekOneWhence( t *testing.T, N int64, readFrom, writeTo bool, lastJump int64, test seekTest, decLayer *Reader, sourceData []byte, ) int64 { realOffset := int64(math.Floor(.5 + test.Offset*float64(N))) whence := map[int]string{ 0: 
"SEEK_SET", 1: "SEEK_CUR", 2: "SEEK_END", }[test.Whence] exptOffset := int64(0) switch test.Whence { case io.SeekStart: exptOffset = realOffset case io.SeekCurrent: exptOffset = lastJump + realOffset case io.SeekEnd: exptOffset = N + realOffset default: panic("Bad whence") } if ExtraDebug { t.Logf( " => Seek(%v, %v) -> %v (size: %v)", realOffset, whence, exptOffset, N, ) } jumpedTo, err := decLayer.Seek(realOffset, test.Whence) if err != test.Error { if err != io.EOF && N != 0 { t.Fatalf( "Seek(%v, %v) produced an error: %v (should be %v)", realOffset, whence, err, test.Error, ) } } if test.Error != nil { return lastJump } if jumpedTo != exptOffset { t.Errorf( "Seek(%v, %v) jumped badly. Should be %v, was %v", realOffset, whence, exptOffset, jumpedTo, ) return -1 } // Decrypt and check if the contents are okay: dest := bytes.NewBuffer(nil) copiedBytes, err := testutil.DumbCopy(dest, decLayer, readFrom, writeTo) if err != nil { t.Errorf("Decrypt failed: %v", err) return jumpedTo } if copiedBytes != N-jumpedTo { t.Errorf("Copied different amount of decrypted data than expected.") t.Errorf("Should be %v, was %v bytes.", N-jumpedTo, copiedBytes) } // Check the data actually matches the source data. 
if !bytes.Equal(sourceData[jumpedTo:], dest.Bytes()) { t.Errorf("Seeked data does not match expectations.") t.Errorf("\tEXPECTED: %v", util.OmitBytes(sourceData[jumpedTo:], 10)) t.Errorf("\tGOT: %v", util.OmitBytes(dest.Bytes(), 10)) return jumpedTo } // Jump back, so the other tests continue to work: jumpedAgain, err := decLayer.Seek(jumpedTo, io.SeekStart) if err != nil { t.Errorf("Seeking not possible after reading: %v", err) return jumpedTo } if jumpedTo != jumpedAgain { t.Errorf("Jumping back to original pos failed.") t.Errorf("Should be %v, was %v.", jumpedTo, jumpedAgain) return jumpedTo } return jumpedTo } func TestEmptyFile(t *testing.T) { srcBuf := []byte{} dstBuf := []byte{} tmpBuf := &bytes.Buffer{} src := bytes.NewReader(srcBuf) dst := bytes.NewBuffer(dstBuf) enc, err := NewWriter(tmpBuf, TestKey, FlagEncryptAES256GCM) if err != nil { t.Errorf("TestEmpyFile: creating writer failed: %v", err) return } if _, err = io.Copy(enc, src); err != nil { t.Errorf("TestEmpyFile: copy(enc, src) failed: %v", err) return } if err = enc.Close(); err != nil { t.Errorf("TestEmpyFile: close(enc) failed: %v", err) return } dec, err := NewReader(bytes.NewReader(tmpBuf.Bytes()), TestKey) if err != nil { t.Errorf("TestEmpyFile: creating reader failed: %v", err) return } if _, err = dec.Seek(10, io.SeekStart); err != io.EOF { t.Errorf("Seek failed: %v", err) return } if _, err = io.Copy(dst, dec); err != nil { t.Errorf("TestEmpyFile: copy(dst, dec) failed: %v", err) return } if !bytes.Equal(srcBuf, dstBuf) { t.Errorf("TestEmpyFile: Not empty: src=%v dst=%v", srcBuf, dstBuf) return } } // Test if encrypting the same plaintext twice yields // the same ciphertext. This is a crucial property for brig, although it // has some security implications (i.e. no real random etc.) 
func TestEncryptedTheSame(t *testing.T) { sourceData := testutil.CreateDummyBuf(3 * defaultMaxBlockSize) encOne := &bytes.Buffer{} encTwo := &bytes.Buffer{} n1, err := Encrypt( TestKey, bytes.NewReader(sourceData), encOne, FlagEncryptAES256GCM, ) if err != nil { t.Errorf("TestEncryptedTheSame: Encrypting first failed: %v", err) return } n2, err := Encrypt( TestKey, bytes.NewReader(sourceData), encTwo, FlagEncryptAES256GCM, ) if err != nil { t.Errorf("TestEncryptedTheSame: Encrypting second failed: %v", err) return } if n1 != n2 { t.Errorf("TestEncryptedTheSame: Ciphertexts differ in length.") return } if !bytes.Equal(encOne.Bytes(), encTwo.Bytes()) { t.Errorf("TestEncryptedTheSame: Ciphertext differ, you failed.") t.Errorf("\tOne: %v", encOne.Bytes()) t.Errorf("\tTwo: %v", encTwo.Bytes()) return } } // Test if swapping small parts of the output func TestEncryptedByteSwaps(t *testing.T) { data1 := testutil.CreateDummyBuf(2 * defaultMaxBlockSize) data2 := testutil.CreateDummyBuf(2 * defaultMaxBlockSize) data3 := testutil.CreateDummyBuf(2 * defaultMaxBlockSize) // Do a small modification in the beginning. data2[0]++ // Do a small modification in the end. data3[2*defaultMaxBlockSize-1]++ // Encrypt all data samples: encBuf1 := &bytes.Buffer{} encBuf2 := &bytes.Buffer{} encBuf3 := &bytes.Buffer{} var err error _, err = Encrypt( TestKey, bytes.NewReader(data1), encBuf1, FlagEncryptChaCha20, ) require.Nil(t, err) _, err = Encrypt( TestKey, bytes.NewReader(data2), encBuf2, FlagEncryptChaCha20, ) require.Nil(t, err) _, err = Encrypt( TestKey, bytes.NewReader(data3), encBuf3, FlagEncryptChaCha20, ) require.Nil(t, err) encData1 := encBuf1.Bytes() encData2 := encBuf2.Bytes() encData3 := encBuf3.Bytes() // It should be all the same with a one-byte change. 
require.Equal(t, len(encData1), len(encData2)) require.Equal(t, len(encData2), len(encData3)) // s = full size; m = start of second block s := len(encData1) m := len(encData1)/2 + headerSize // Require that only the first block was tainted, other block should be same. require.False(t, bytes.Equal(encData1[0:m], encData2[0:m])) require.True(t, bytes.Equal(encData1[m:s], encData2[m:s])) // Require that the last block was tainted, first block should be same require.True(t, bytes.Equal(encData1[0:m], encData3[0:m])) require.False(t, bytes.Equal(encData1[m:s], encData3[m:s])) } ================================================ FILE: catfs/mio/encrypt/reader.go ================================================ package encrypt import ( "bytes" "encoding/binary" "fmt" "io" ) // Reader decrypts and encrypted stream from Reader. type Reader struct { // Underlying reader io.Reader aeadCommon // Caches leftovers from unread blocks backlog *bytes.Reader // Last index of the byte the user visited. // (Used to avoid re-reads in Seek()) // This does *not* equal the seek offset of the underlying stream. lastDecSeekPos int64 // lastEncSeekPos saves the current position of the underlying stream. // it is used mostly for ensuring SEEK_END works. lastEncSeekPos int64 // Parsed header info info *HeaderInfo // true once readHeader() was called parsedHeader bool // Buffer for decrypted data (MaxBlockSize big) decBuf []byte // true as long readBlock was not successful isInitialRead bool // Total size of the underlying stream in bytes. // This is only set when SEEK_END was used. 
endOffsetEnc int64 } func (r *Reader) readHeaderIfNotDone() error { if r.parsedHeader { return nil } r.parsedHeader = true header := make([]byte, headerSize) n, err := io.ReadFull(r.Reader, header) if err != nil { return err } if n != headerSize { return fmt.Errorf("No valid header found, damaged file?") } info, err := ParseHeader(header, r.key) if err != nil { return err } if info.Version != 1 { return fmt.Errorf("this implementation does not support versions != 1") } if uint32(len(r.key)) != info.KeyLen { return fmt.Errorf( "key length differs: file=%d, user=%d", info.KeyLen, len(r.key), ) } r.info = info if err := r.initAeadCommon( r.key, info.CipherBit, int64(r.info.BlockLen), ); err != nil { return err } r.lastEncSeekPos += headerSize r.decBuf = make([]byte, 0, r.info.BlockLen) return nil } // Flags will return the flags stored in the header of the encrypted stream. // If the header was not read yet, it will attempt to read it. func (r *Reader) Flags() (Flags, error) { // Make sure we have the info needed to parse the header: if err := r.readHeaderIfNotDone(); err != nil { return 0, err } return r.info.Flags, nil } // Read from source and decrypt. // // This method always decrypts one block to optimize for continuous reads. If // dest is too small to hold the block, the decrypted text is cached for the // next read. 
func (r *Reader) Read(dest []byte) (int, error) { // Make sure we have the info needed to parse the header: if err := r.readHeaderIfNotDone(); err != nil { return 0, err } readBytes := 0 // Try our best to fill len(dest) for readBytes < len(dest) { if r.backlog.Len() == 0 { if _, rerr := r.readBlock(); rerr != nil && rerr != io.EOF { return readBytes, rerr } } n, berr := r.backlog.Read(dest[readBytes:]) r.lastDecSeekPos += int64(n) readBytes += n if berr == io.EOF { return readBytes, io.EOF } } return readBytes, nil } // Fill internal buffer with current block func (r *Reader) readBlock() (int, error) { if r.info == nil { return 0, fmt.Errorf("Invalid header data") } // Read nonce: if n, err := io.ReadFull(r.Reader, r.nonce); err != nil { return 0, err } else if n != r.aead.NonceSize() { return 0, fmt.Errorf( "nonce size mismatch; should: %d - have: %d (err: %v)", r.aead.NonceSize(), n, err, ) } // Convert to block number: readBlockNum := binary.LittleEndian.Uint64(r.nonce) // Check the block number: currBlockNum := uint64(r.lastDecSeekPos / int64(r.info.BlockLen)) if currBlockNum != readBlockNum { return 0, fmt.Errorf( "bad block number; as %d, should be %d", readBlockNum, currBlockNum, ) } // Read the *whole* block from the raw stream N := int(r.info.BlockLen) + r.aead.Overhead() n, err := io.ReadAtLeast(r.Reader, r.encBuf[:N], N) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return 0, err } r.lastEncSeekPos += int64(n) + int64(len(r.nonce)) r.decBuf, err = r.aead.Open(r.decBuf[:0], r.nonce, r.encBuf[:n], nil) if err != nil { return 0, err } r.backlog.Reset(r.decBuf) r.isInitialRead = false return len(r.decBuf), nil } // Seek into the encrypted stream. // // Note that the seek offset is relative to the decrypted data, // not to the underlying, encrypted stream. 
func (r *Reader) Seek(offset int64, whence int) (int64, error) { // Check if seeking is supported: seeker, ok := r.Reader.(io.Seeker) if !ok { return 0, fmt.Errorf("seek is not supported by underlying stream") } if err := r.readHeaderIfNotDone(); err != nil { return 0, err } // set to true when an actual call to seeker.Seek() was made. wasMoved := false // Constants and assumption on the stream below: blockHeaderSize := int64(r.aead.NonceSize()) blockOverhead := blockHeaderSize + int64(r.aead.Overhead()) totalBlockSize := blockOverhead + int64(r.info.BlockLen) // absolute Offset in the decrypted stream absOffsetDec := int64(0) // Convert possibly relative offset to absolute offset: switch whence { case io.SeekCurrent: absOffsetDec = r.lastDecSeekPos + offset case io.SeekStart: absOffsetDec = offset case io.SeekEnd: // Try to figure out the end of the stream. // This might be inefficient for some underlying readers, // but is probably okay for ipfs. // // NOTE: We cache this not only for performance, but also // as a workaround for a bug in ipfs. // See: https://github.com/ipfs/go-ipfs/issues/2567 if r.endOffsetEnc < 0 { endOffsetEnc, err := seeker.Seek(0, io.SeekEnd) if err != nil && err != io.EOF { return 0, err } r.endOffsetEnc = endOffsetEnc } // This computation is verbose on purporse, // since the details might be confusing. encLen := (r.endOffsetEnc - headerSize) encRest := encLen % totalBlockSize decBlocks := encLen / totalBlockSize endOffsetDec := decBlocks * int64(r.info.BlockLen) if encRest > 0 { endOffsetDec += encRest - blockOverhead } absOffsetDec = endOffsetDec + offset if absOffsetDec < 0 { // That's the wrong end of file... return 0, io.EOF } // For SEEK_END we need to make sure that we move the seek pointer // back to a sensible position when we decide that no actual move // is necessary further down this function. 
defer func() { if !wasMoved { seeker.Seek(r.lastEncSeekPos, io.SeekStart) } }() } if absOffsetDec < 0 { return 0, fmt.Errorf("Negative seek index: %d", absOffsetDec) } // Caller wanted to know only the current stream pos: if absOffsetDec == r.lastDecSeekPos { return absOffsetDec, nil } // Convert decrypted offset to encrypted offset absOffsetEnc := headerSize + ((absOffsetDec / int64(r.info.BlockLen)) * totalBlockSize) // Check if we're still in the same block as last time: blockNum := absOffsetEnc / totalBlockSize lastBlockNum := r.lastDecSeekPos / int64(r.info.BlockLen) r.lastDecSeekPos = absOffsetDec if lastBlockNum != blockNum || r.isInitialRead || whence == io.SeekEnd { r.lastEncSeekPos = absOffsetEnc // Seek to the beginning of the encrypted block: wasMoved = true if _, err := seeker.Seek(absOffsetEnc, io.SeekStart); err != nil { return 0, err } // Make read consume the current block: if _, err := r.readBlock(); err != nil { return 0, err } } // reslice the backlog, so Read() does not return skipped data. if _, err := r.backlog.Seek( absOffsetDec%int64(r.info.BlockLen), io.SeekStart, ); err != nil { return 0, err } return absOffsetDec, nil } // WriteTo copies all data from `r` to `w`. // // It is intended to avoid unneeded copying by choosing a suitable buffer size // and by directly reading block after block. io.Copy will use it automatically. 
// // It returns the number of written bytes and possible errors (but no io.EOF) func (r *Reader) WriteTo(w io.Writer) (int64, error) { // Make sure we have the info needed to parse the header: if err := r.readHeaderIfNotDone(); err != nil { return 0, err } n := int64(0) // Backlog might be still filled if Read() or Seek() was done before: if r.backlog.Len() > 0 { bn, err := r.backlog.WriteTo(w) if err != nil { return bn, err } n += bn r.lastDecSeekPos += bn } for { nread, rerr := r.readBlock() if rerr != nil && rerr != io.EOF { return n, rerr } r.lastDecSeekPos += int64(nread) nwrite, werr := w.Write(r.decBuf[:nread]) if werr != nil { return n, werr } n += int64(nwrite) if nwrite != nread { return n, io.ErrShortWrite } if rerr == io.EOF { break } } return n, nil } // NewReader creates a new encrypted reader and validates the file header. // The key is required to be KeySize bytes long. func NewReader(r io.Reader, key []byte) (*Reader, error) { reader := &Reader{ Reader: r, backlog: bytes.NewReader([]byte{}), parsedHeader: false, isInitialRead: true, endOffsetEnc: -1, aeadCommon: aeadCommon{ key: key, }, } return reader, nil } ================================================ FILE: catfs/mio/encrypt/writer.go ================================================ package encrypt import ( "bytes" "encoding/binary" "errors" "io" ) var ( // ErrBadBlockSize is returned when the data is damaged and has an invalid block size ErrBadBlockSize = errors.New("underlying reader failed to read full w.maxBlockSize") // ErrMixedMethods is returned when calling Write() with ReadFrom() together. ErrMixedMethods = errors.New("mixing Write() and ReadFrom() is not allowed") ) // Writer encrypts the data stream before writing to Writer. type Writer struct { // Internal Writer we would write to. io.Writer // Common fields with Reader aeadCommon // A buffer that is max. w.maxBlockSize big. // Used for caching leftover data between writes. 
rbuf *bytes.Buffer // Index of the currently written block. blockCount uint64 // True after the first write. headerWritten bool // w.maxBlockSize is the maximum number of bytes a single payload may have maxBlockSize int64 // Used encryption algorithm flags Flags } // GoodDecBufferSize returns a buffer size that is suitable for decryption. func (w *Writer) GoodDecBufferSize() int64 { return w.maxBlockSize } // GoodEncBufferSize returns a buffer size that is suitable for encryption. func (w *Writer) GoodEncBufferSize() int64 { return w.maxBlockSize + 40 } func (w *Writer) emitHeaderIfNeeded() error { if w.headerWritten { return nil } w.headerWritten = true header := GenerateHeader(w.key, w.maxBlockSize, w.flags) _, err := w.Writer.Write(header) return err } func (w *Writer) Write(p []byte) (int, error) { if err := w.emitHeaderIfNeeded(); err != nil { return 0, err } for int64(w.rbuf.Len()) >= w.maxBlockSize { if _, err := w.flushPack(w.rbuf.Next(int(w.maxBlockSize))); err != nil { return 0, err } } // Remember left-overs for next write: if _, err := w.rbuf.Write(p); err != nil { return 0, nil } // Fake the amount of data we've written: return len(p), nil } func (w *Writer) flushPack(pack []byte) (int, error) { // Create a new Nonce for this block: binary.LittleEndian.PutUint64(w.nonce, w.blockCount) // Encrypt the text: w.encBuf = w.aead.Seal(w.encBuf[:0], w.nonce, pack, nil) // Pass it to the underlying writer: nNonce, err := w.Writer.Write(w.nonce) if err != nil { return nNonce, err } w.blockCount++ nBuf, err := w.Writer.Write(w.encBuf) return nNonce + nBuf, err } // Close the Writer and write any left-over blocks // This does not close the underlying data stream. 
func (w *Writer) Close() error { if err := w.emitHeaderIfNeeded(); err != nil { return err } // Flush last block of data if any: for w.rbuf.Len() > 0 { n := int64(w.rbuf.Len()) if n > w.maxBlockSize { n = w.maxBlockSize } if _, err := w.flushPack(w.rbuf.Next(int(n))); err != nil { return err } } return nil } // ReadFrom writes all readable from `r` into `w`. // // It is intentend as optimized way to copy the whole stream without // unneeded copying in between. io.Copy() will use this function automatically. // // It returns the number of read bytes and any encountered error (no io.EOF) func (w *Writer) ReadFrom(r io.Reader) (int64, error) { if err := w.emitHeaderIfNeeded(); err != nil { return 0, err } n, nprev := int64(0), -1 buf := make([]byte, defaultDecBufferSize) // Check if a previous Write() wrote to rbuf. if w.rbuf.Len() > 0 { return 0, ErrMixedMethods } for { nread, rerr := io.ReadFull(r, buf) if rerr != nil && rerr != io.EOF && rerr != io.ErrUnexpectedEOF { return n, rerr } n += int64(nread) // Sanity check: check if previous block was properly aligned: if nprev >= 0 && int64(nprev) != w.maxBlockSize && rerr != io.EOF && rerr != io.ErrUnexpectedEOF { return n, ErrBadBlockSize } if nread > 0 { _, werr := w.flushPack(buf[:nread]) w.rbuf.Reset() if werr != nil { return n, werr } } nprev = nread if rerr == io.EOF || rerr == io.ErrUnexpectedEOF { break } } return n, nil } // NewWriter calls NewWriterWithFlagsAndBlockSize with a sane default cipher type // and a sane default max block size. func NewWriter(w io.Writer, key []byte, flags Flags) (*Writer, error) { return NewWriterWithBlockSize(w, key, flags, defaultMaxBlockSize) } // NewWriterWithBlockSize returns a new Writer which encrypts data with a // certain key. If `compressionFlag` is true, the compression // flag in the file header will also be true. Otherwise no compression is done. 
func NewWriterWithBlockSize(w io.Writer, key []byte, flags Flags, maxBlockSize int64) (*Writer, error) { ew := &Writer{ Writer: w, rbuf: &bytes.Buffer{}, maxBlockSize: maxBlockSize, flags: flags, } cipherBit, err := cipherTypeBitFromFlags(flags) if err != nil { return nil, err } if err := ew.initAeadCommon(key, cipherBit, ew.maxBlockSize); err != nil { return nil, err } return ew, nil } ================================================ FILE: catfs/mio/pagecache/cache.go ================================================ package pagecache import ( "github.com/sahib/brig/catfs/mio/pagecache/page" ) // Cache is the backing layer that stores pages in memory // or whatever medium it choses to use. type Cache interface { // Lookup returns a cached page, identified by `inode` and `page`. // If there is no such page page.ErrCacheMiss is returned. Lookup(inode int64, page uint32) (*page.Page, error) // Merge the existing cache contents with the new write // to `pageID`, starting at `pageOff` and with the contents of `buf`. Merge(inode int64, pageID, pageOff uint32, buf []byte) error // Evict clears cached pages for `inode`. `size` can be used // to clear only up to a certain size. Evict(inode, size int64) error // Close the cache and free up all resources. Close() error } ================================================ FILE: catfs/mio/pagecache/doc.go ================================================ // Package pagecache implements a io.ReaderAt and io.WriterAt that is similar in // function to the OverlayFS of Linux. It overlays a read-only stream and // enables write support. The writes will take priority on the data in stream // and will therefore be visible when calling ReadAt() of the overlay. // Read() and Write() are currently not supported, since they would not be used // by brig. // // Note that the normal POSIX file operations are supported. This includes // truncating a file to a certain length and also extending it to a certain // length. 
If length of the overlay is greater than the size of the underlying // stream we pad it with zeros - just like the kernel would do. Files can be // also extended by writing new blocks to the end of the overlay. // // Seeking will be done when necessary. WriteAt() has to do no seeking at all, // while ReadAt() will only seek if it has to (i.e. not reading from cache // alone, or if we're not if the right offset already). // // Implementation detail: The stream is divided into same-sized pages. Each // page can be retrieved as whole from the cache. If a page for a certain read // offset is found, then ReadAt() will overlay it with the underlying stream or // even read from memory if the stream completely occludes the underlying // stream. In general, care was taken to optimize a bit more for Write() since // pages delivered by ReadAt() can be cached by the FUSE filesystem. // // You can choose the page cache when creating the overlay. Depending on the // page cache implementation it's also possible to edit large files and // make edits persistent. // // NOTE: Whenever uint32 is used in this code, it refers to per-page offsets or // size. When int64 is used the content is an offset of the underlying offset. package pagecache ================================================ FILE: catfs/mio/pagecache/mdcache/l1.go ================================================ package mdcache import ( "container/list" "fmt" "github.com/sahib/brig/catfs/mio/pagecache/page" ) // L1 is a pure in-memory LRU cache which does no copying. // I did go for LRU because it's insanely simple and easy to implement // while still being quite effective. // // NOTE: We do not use one of the popular caching library here, since // none of them seem to fit our use-case. We require the following properties: // // 1. We must notice when items get evicted (in order to write to l2) // 2. We must be able to set a max memory bound. // 3. We must avoid copying of pages due to performance reasons. 
// // The most popular libraries fail always one of the criteria: // // - fastcache: fails 1 and 3. // - ristretto: fails 1. // - bigcache: fails 3. // // Since we know what kind of data we cache, it is reasonable to implement // a very basic LRU cache for L1. Therefore we just use sync.Map here. // Oh, and the l1cache is not thread safe, but dircache.go does locking. type l1item struct { Page *page.Page Link *list.Element } type l1cache struct { m map[pageKey]l1item k *list.List l2 cacheLayer maxMemory int64 } func newL1Cache(l2 cacheLayer, maxMemory int64) (*l1cache, error) { return &l1cache{ maxMemory: maxMemory, l2: l2, k: list.New(), m: make(map[pageKey]l1item), }, nil } func (c *l1cache) Set(pk pageKey, p *page.Page) error { existingItem, ok := c.m[pk] if !ok { // new content: c.m[pk] = l1item{ Page: p, Link: c.k.PushBack(pk), } } else { // do not push another page key, // c.k needs to have unique keys only. c.m[pk] = l1item{ Page: p, Link: existingItem.Link, } // prioritize this one more. c.k.MoveToBack(existingItem.Link) } maxPages := c.maxMemory / (page.Size + page.Meta) if int64(len(c.m)) > maxPages { oldPkIface := c.k.Remove(c.k.Front()) oldPk, ok := oldPkIface.(pageKey) if !ok { return fmt.Errorf("non-pagekey type stored in l1 keys: %T", oldPkIface) } oldItem, ok := c.m[oldPk] delete(c.m, oldPk) if !ok { // c.m and c.k got out of sync. // this is very likely a bug. return fmt.Errorf("l1: key in key list, but not in map: %v", oldPk) } if c.l2 == nil { // nil-interface for l2: loose pages in that case. // that may be valid if no disk can be used. 
return nil } // move old page to more persistent cache layer: return c.l2.Set(oldPk, oldItem.Page) } return nil } func (c *l1cache) Get(pk pageKey) (*page.Page, error) { item, ok := c.m[pk] if !ok { return nil, page.ErrCacheMiss } // Sort recently fetched item to end of list: c.k.MoveToBack(item.Link) return item.Page, nil } func (c *l1cache) Del(pks []pageKey) { for _, pk := range pks { delItem, ok := c.m[pk] if ok { c.k.Remove(delItem.Link) delete(c.m, pk) } } } func (c *l1cache) Close() error { // help GC if caller somehow still retains a reference: c.m = nil c.k = nil return nil } ================================================ FILE: catfs/mio/pagecache/mdcache/l1_test.go ================================================ package mdcache import ( "testing" "github.com/sahib/brig/catfs/mio/pagecache/page" "github.com/stretchr/testify/require" ) func withL1Cache(t *testing.T, fn func(l1, backing *l1cache)) { // some fake in-mem cache that stores everything that got removed // out of l1 due to size restrictions. backing, err := newL1Cache(nil, int64(^uint64(0)>>1)) require.NoError(t, err) l1, err := newL1Cache(backing, 4*(page.Size+page.Meta)) require.NoError(t, err) fn(l1, backing) require.NoError(t, l1.Close()) } func TestL1GetSetDel(t *testing.T) { // NOTE: Only covers the very basic usage. withL1Cache(t, func(l1, _ *l1cache) { pk := pageKey{1, 0} _, err := l1.Get(pk) require.Error(t, page.ErrCacheMiss) pset := dummyPage(0, 1024) require.NoError(t, l1.Set(pk, pset)) pgot, err := l1.Get(pk) require.NoError(t, err) require.Equal(t, pset.Data, pgot.Data) require.Equal(t, pset.Extents, pgot.Extents) l1.Del([]pageKey{pk}) _, err = l1.Get(pk) require.Error(t, page.ErrCacheMiss) }) } func TestL1SwapPriority(t *testing.T) { withL1Cache(t, func(l1, backing *l1cache) { // Insert 8 pages, only 4 can stay in l1. 
for idx := 0; idx < 8; idx++ { pk := pageKey{1, uint32(idx)} require.NoError(t, l1.Set(pk, dummyPage(0, uint32((idx+1)*100)))) } for idx := 0; idx < 4; idx++ { pk := pageKey{1, uint32(idx)} _, err := l1.Get(pk) require.Error(t, err, page.ErrCacheMiss) // should be in backing store, check: p, err := backing.Get(pk) require.NoError(t, err) expected := dummyPage(0, uint32((idx+1)*100)) require.Equal(t, expected, p) } for idx := 4; idx < 8; idx++ { pk := pageKey{1, uint32(idx)} p, err := l1.Get(pk) require.NoError(t, err) expected := dummyPage(0, uint32((idx+1)*100)) require.Equal(t, expected, p) } }) } ================================================ FILE: catfs/mio/pagecache/mdcache/l2.go ================================================ package mdcache import ( "io/ioutil" "os" "path/filepath" "sync" "github.com/golang/snappy" "github.com/sahib/brig/catfs/mio/pagecache/page" ) // NOTE: Room for improvement: // Introduce heuristic for snappy compression: // When an inode does not yield any noticeable compression, // disable it for this specific inode. Protects against // wasting CPU on already compressed data. type l2cache struct { mu sync.Mutex dir string compress bool zipBuf []byte } // NOTE: an empty (nil) l2cache is valid, but will not do anything. If an // empty string for `dir` is given, such an empty l2cache will be returned. 
func newL2Cache(dir string, compress bool) (*l2cache, error) { if dir == "" { return nil, nil } var zipBuf []byte if compress { zipBuf = make([]byte, snappy.MaxEncodedLen(page.Size)) } return &l2cache{ dir: dir, compress: compress, zipBuf: zipBuf, }, nil } func (c *l2cache) Set(pk pageKey, p *page.Page) error { if c == nil { return nil } c.mu.Lock() defer c.mu.Unlock() data := p.AsBytes() if c.compress { data = snappy.Encode(c.zipBuf, p.AsBytes()) } path := filepath.Join(c.dir, pk.String()) return ioutil.WriteFile(path, data, 0600) } func (c *l2cache) Get(pk pageKey) (*page.Page, error) { if c == nil { return nil, page.ErrCacheMiss } c.mu.Lock() defer c.mu.Unlock() path := filepath.Join(c.dir, pk.String()) pdata, err := ioutil.ReadFile(path) if err != nil { return nil, page.ErrCacheMiss } if c.compress { pdata, err = snappy.Decode(c.zipBuf, pdata) if err != nil { return nil, err } } return page.FromBytes(pdata) } func (c *l2cache) Del(pks []pageKey) { if c == nil { return } c.mu.Lock() defer c.mu.Unlock() for _, pk := range pks { path := filepath.Join(c.dir, pk.String()) // no error handling, just get rid of things. // we can't do anything if it could not be deleted. 
os.Remove(path) } } func (c *l2cache) Close() error { if c == nil { return nil } c.mu.Lock() defer c.mu.Unlock() return os.RemoveAll(c.dir) } ================================================ FILE: catfs/mio/pagecache/mdcache/l2_test.go ================================================ package mdcache import ( "io/ioutil" "os" "testing" "github.com/sahib/brig/catfs/mio/pagecache/page" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func dummyPage(off, length uint32) *page.Page { buf := testutil.CreateDummyBuf(int64(length)) return page.New(off, buf) } func withL2Cache(t *testing.T, fn func(l2 *l2cache)) { for _, compress := range []bool{false, true} { tmpDir, err := ioutil.TempDir("", "brig-page-l2") require.NoError(t, err) defer os.RemoveAll(tmpDir) l2, err := newL2Cache(tmpDir, compress) require.NoError(t, err) tname := "no-compress" if compress { tname = "compress" } t.Run(tname, func(t *testing.T) { fn(l2) }) // double check we do not waste any storage: require.NoError(t, l2.Close()) _, err = os.Stat(tmpDir) require.True(t, os.IsNotExist(err)) } } func TestL2GetSetDel(t *testing.T) { withL2Cache(t, func(l2 *l2cache) { pk := pageKey{1, 0} _, err := l2.Get(pk) require.Error(t, page.ErrCacheMiss) pset := dummyPage(0, 1024) require.NoError(t, l2.Set(pk, pset)) pgot, err := l2.Get(pk) require.NoError(t, err) require.Equal(t, pset.Data, pgot.Data) require.Equal(t, pset.Extents, pgot.Extents) l2.Del([]pageKey{pk}) _, err = l2.Get(pk) require.Error(t, page.ErrCacheMiss) }) } func TestL2Nil(t *testing.T) { // l2 is optional, so a nil l2 cache should "work": l2, err := newL2Cache("", false) require.NoError(t, err) _, err = l2.Get(pageKey{0, 1}) require.Error(t, page.ErrCacheMiss) require.NoError(t, l2.Set(pageKey{0, 1}, dummyPage(0, 1024))) l2.Del([]pageKey{{0, 1}}) } ================================================ FILE: catfs/mio/pagecache/mdcache/mdcache.go ================================================ // Package mdcache implements a 
leveled memory/disk cache combination. package mdcache import ( "fmt" "sync" "github.com/sahib/brig/catfs/mio/pagecache/page" log "github.com/sirupsen/logrus" ) // Options give room for finetuning the behavior of Memory/Disk cache. type Options struct { // MaxMemoryUsage of L1 in bytes MaxMemoryUsage int64 // SwapDirectory specifies where L2 pages are stored. // If empty, no l2 cache is used. Instead another l1 cache // is used in its place, rendering MaxMemoryUsage useless. // You have to set both for an effect. SwapDirectory string // L1CacheMissRefill will propagate // data from L2 to L1 if it could be found // successfully. L1CacheMissRefill bool // L2Compress will compress on-disk pages with snappy and decompress them // on load. Reduces storage, but increases CPU usage if you're swapping. // Since swapping is slow anyways this is recommended. L2Compress bool } type cacheLayer interface { Get(pk pageKey) (*page.Page, error) Set(pk pageKey, p *page.Page) error Del(pks []pageKey) Close() error } // MDCache is a leveled Memory/Disk cache combination. type MDCache struct { mu sync.Mutex l1 cacheLayer l2 cacheLayer opts Options } type pageKey struct { inode int64 pageIdx uint32 } func (pk pageKey) String() string { return fmt.Sprintf("%08x-%08x", pk.inode, pk.pageIdx) } // New returns a new Memory/Disk cache func New(opts Options) (*MDCache, error) { l2, err := newL2Cache(opts.SwapDirectory, opts.L2Compress) if err != nil { return nil, err } var l2Iface cacheLayer = l2 if l2 == nil { // special case: when we don't have a l2 cache // then use another memory cache as backing, // with infinite memory. 
maxMemory := int64(^uint64(0) >> 1) l2Iface, _ = newL1Cache(nil, maxMemory) } l1, err := newL1Cache(l2Iface, opts.MaxMemoryUsage) if err != nil { return nil, err } return &MDCache{ l1: l1, l2: l2Iface, opts: opts, }, nil } // Lookup implements pagecache.Cache func (dc *MDCache) Lookup(inode int64, pageIdx uint32) (*page.Page, error) { dc.mu.Lock() defer dc.mu.Unlock() return dc.get(pageKey{inode: inode, pageIdx: pageIdx}) } func (dc *MDCache) get(pk pageKey) (*page.Page, error) { p, err := dc.l1.Get(pk) switch err { case nil: return p, nil case page.ErrCacheMiss: p, err = dc.l2.Get(pk) if err != nil { return p, err } if dc.opts.L1CacheMissRefill { // propagate back to l1 cache: if err := dc.l1.Set(pk, p); err != nil { return p, err } } return p, err default: return nil, err } } // Merge implements pagecache.Cache func (dc *MDCache) Merge(inode int64, pageIdx, off uint32, write []byte) error { dc.mu.Lock() defer dc.mu.Unlock() if len(write) == 0 { // empty write deserves no extra computation. return nil } if off+uint32(len(write)) > page.Size { return fmt.Errorf("merge: write overflows page bounds") } pk := pageKey{inode: inode, pageIdx: pageIdx} p, err := dc.get(pk) if err != nil && err != page.ErrCacheMiss { return err } if p == nil { // Page was not cached yet. // Create an almost empty page. p = page.New(off, write) } p.Overlay(off, write) return dc.l1.Set(pk, p) } // Evict implements pagecache.Cache func (dc *MDCache) Evict(inode, size int64) error { dc.mu.Lock() defer dc.mu.Unlock() // Figure out all possible indices from size: pks := []pageKey{} pageHi := uint32(size / page.Size) if size%page.Size > 0 { pageHi++ } for pageIdx := uint32(0); pageIdx < pageHi; pageIdx++ { pks = append(pks, pageKey{inode: inode, pageIdx: pageIdx}) } dc.l1.Del(pks) dc.l2.Del(pks) return nil } // Close closes the cache contents and cleans up resources. 
func (dc *MDCache) Close() error { dc.mu.Lock() defer dc.mu.Unlock() if err := dc.l1.Close(); err != nil { log.WithError(err).Warnf("failed to reset l1 cache") } return dc.l2.Close() } ================================================ FILE: catfs/mio/pagecache/mdcache/mdcache_test.go ================================================ package mdcache import ( "io/ioutil" "os" "testing" "github.com/sahib/brig/catfs/mio/pagecache/page" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func withMDCache(t *testing.T, fn func(mdc *MDCache)) { tmpDir, err := ioutil.TempDir("", "brig-page-l2") require.NoError(t, err) defer os.RemoveAll(tmpDir) md, err := New(Options{ MaxMemoryUsage: 4 * page.Size, SwapDirectory: tmpDir, L1CacheMissRefill: true, }) require.NoError(t, err) fn(md) require.NoError(t, md.Close()) } func TestMDBasic(t *testing.T) { withMDCache(t, func(mdc *MDCache) { for idx := 0; idx < 8; idx++ { err := mdc.Merge(1, uint32(idx), 0, testutil.CreateDummyBuf(page.Size)) require.NoError(t, err) } for idx := 0; idx < 8; idx++ { p, err := mdc.Lookup(1, uint32(idx)) require.NoError(t, err) require.Equal(t, testutil.CreateDummyBuf(page.Size), p.Data) require.Equal(t, []page.Extent{{ OffLo: 0, OffHi: page.Size, }}, p.Extents) } require.NoError(t, mdc.Evict(1, 8*page.Size)) }) } ================================================ FILE: catfs/mio/pagecache/overlay.go ================================================ package pagecache import ( "bytes" "fmt" "io" "sync" "github.com/sahib/brig/catfs/mio/pagecache/page" "github.com/sahib/brig/util" ) // Layer is a layer above a read-only stream with write support. type Layer struct { // underlying stream rs io.ReadSeeker // inode is a unique identifier for the stream. // it is used as identifier in the page cache. inode int64 // cache gives access to cached pages cache Cache // size is the number of bytes that can be read from // `rs` from start to end. It represents the "old" file size. 
// It's only used to decide when to stop reading from the
	// underlying stream. For deciding where EOF is, `length` is used.
	size int64

	// length starts out the same as size, but might change due to
	// calls to Truncate(). Truncate is a terrible name since it
	// can also be used to extend a file's length. But that's
	// how the underlying syscall is named, so we follow that.
	length int64

	// overlayOffset is the last known offset in the stream,
	// including reads from the cache. It is the position in the
	// overlayed stream.
	overlayOffset int64

	// streamOffset indicates the offset in the underlying stream `rs`.
	// It can be the same as `overlayOffset` but is not most of the time.
	// Not counted in `streamOffset` are bytes that were read from
	// the cache exclusively, with no need to read from `rs`.
	// It's not updated when data is purely read from the cache.
	streamOffset int64
}

// NewLayer returns a paged overlay for `rs`, reading and storing data from
// `cache`. `inode` will be used as cache identifier for this file. The only
// requirement is that it is unique to this file; otherwise it does not need any
// inode-like semantics. `size` must be known in advance and reflects the size
// of `rs`. This cannot be used for pure streaming. `rs` is assumed to be positioned
// at the zero offset. If not, subtract the offset from `size`.
func NewLayer(rs io.ReadSeeker, cache Cache, inode, size int64) (*Layer, error) {
	// Evict any stale pages a previous stream with the
	// same inode might have left behind:
	if err := cache.Evict(inode, size); err != nil {
		return nil, err
	}

	return &Layer{
		rs:     rs,
		inode:  inode,
		size:   size,
		length: size,
		cache:  cache,
	}, nil
}

// ensureOffset synchronizes the position of the underlying stream with the
// overlay position. It also re-bases `zpr` so its zero padding starts at the
// right offset. It is a no-op when both offsets already agree.
func (l *Layer) ensureOffset(zpr *zeroPadReader) error {
	if l.overlayOffset == l.streamOffset {
		return nil
	}

	zpr.off = l.overlayOffset
	newOffset, err := l.rs.Seek(l.overlayOffset, io.SeekStart)
	if err != nil {
		return err
	}

	l.streamOffset = newOffset
	if newOffset != l.overlayOffset {
		return fmt.Errorf("page: ensure offset failed (want: %d, got %d)", l.overlayOffset, newOffset)
	}

	return nil
}

// WriteAt writes `buf` to `off`. It will appear on the next
// read operation. The write is split into per-page chunks that
// are merged into the page cache.
func (l *Layer) WriteAt(buf []byte, off int64) (n int, err error) {
	// If `buf` is large enough to span over several writes then we
	// have to calculate the offset of the first page, so that new
	// data is written to the correct place.
	pageOff := off % page.Size
	pageBuf := buf

	// Go over all pages this write affects.
	newOff := off + int64(len(buf))
	pageLo := off / page.Size
	pageHi := newOff / page.Size
	if newOff%page.Size == 0 {
		// write ends exactly on a page boundary;
		// the last page is not actually touched.
		pageHi--
	}

	for pageIdx := pageLo; pageIdx <= pageHi; pageIdx++ {
		// Divide `buf` into small portions that will be copied
		// to the individual pages.
		mayWrite := page.Size - pageOff
		if mayWrite > int64(len(pageBuf)) {
			mayWrite = int64(len(pageBuf))
		}

		if mayWrite == 0 {
			break
		}

		// Overlay the part of `buf` that affects this page
		// and merge with any pre-existing writes.
		if err := l.cache.Merge(
			l.inode,
			uint32(pageIdx),
			uint32(pageOff),
			pageBuf[:mayWrite],
		); err != nil {
			// NOTE(review): -1 is an unusual byte count for an io-style
			// write; callers should rely on err only.
			return -1, err
		}

		// starting from the second block the page offset will
		// be always zero. That's only relevant for len(buf) > page.Size.
		pageOff = 0
		pageBuf = pageBuf[mayWrite:]
	}

	// check if this write extended the full buffer.
	// If so we need to remember the new length.
	if newOff := off + int64(len(buf)); newOff > l.length {
		l.length = newOff
	}

	// We always write the full buffer or fail in prior.
	return len(buf), nil
}

var (
	// copyBufPool recycles page-sized scratch buffers
	// used by ReadAt() and WriteTo().
	copyBufPool = &sync.Pool{
		New: func() interface{} {
			return make([]byte, page.Size)
		},
	}
)

// ReadAt reads into `buf` from the position `off`.
//
// NOTE: There are two things that are not implemented
// according to the io.ReaderAt docs:
//
//   - ReadAt() should not modify the seek offset.
//     This implementation however does this.
//   - ReadAt() must be allowed to call in parallel.
//     We cannot guarantee that at the moment since sometimes
//     we have to seek the underlying stream - mutex?
func (l *Layer) ReadAt(buf []byte, off int64) (int, error) {
	// when requesting reads beyond the size of the overlay,
	// we should immediately cancel the request.
	if off >= l.length {
		return 0, io.EOF
	}

	// set the desired offset
	l.overlayOffset = off

	// small helper for copying data to buf.
	// we will never copy more than page.Size to buf.
	ib := &iobuf{dst: buf}

	// l.rs might not be as long as l.length.
	// We need to pad the rest of the stream with zeros.
	// This reader does this.
	zpr := &zeroPadReader{
		r:      l.rs,
		off:    off,
		size:   l.size,
		length: l.length,
	}

	pageOff := uint32(off % page.Size)

	// keep the copy buf around between GC runs.
	copyBuf := copyBufPool.Get().([]byte)
	defer copyBufPool.Put(copyBuf)

	// Go over all pages this read may affect.
	// We might return early due to io.EOF though.
	newOff := off + int64(len(buf))
	pageLo := off / page.Size
	pageHi := newOff / page.Size
	if newOff%page.Size == 0 {
		pageHi--
	}

	for pageIdx := pageLo; pageIdx <= pageHi && ib.Left() > 0; pageIdx++ {
		// pageMax: how many bytes of this page lie inside l.length.
		pageMax := uint32(util.Min64(l.length, l.overlayOffset+page.Size) - l.overlayOffset)
		if pageMax+pageOff > page.Size {
			pageMax = page.Size - pageOff
		}

		p, err := l.cache.Lookup(l.inode, uint32(pageIdx))
		switch err {
		case page.ErrCacheMiss:
			// we don't have this page cached.
			// need to read it from zpr directly.
			if err := l.ensureOffset(zpr); err != nil {
				return ib.Len(), err
			}

			n, err := copyNBuffer(ib, zpr, util.Min64(int64(ib.Left()), int64(pageMax)), copyBuf)
			l.overlayOffset += n
			l.streamOffset += n
			if err != nil {
				return ib.Len(), err
			}

			// NOTE: we could be clever here and cache pages that have
			// been read often. We could even hook in things like
			// fadvise() into this layer.
		case nil:
			// In this case we know that the page is cached.
			// We can fill `buf` with the page of the data,
			// (provided by page.Reader()).
			occludesStream := p.OccludesStream(pageOff, pageMax)
			if !occludesStream {
				// The cached extents do not fully cover this range;
				// read the underlying data and merge it below the
				// cached writes.
				// only seek if we have to.
				if err := l.ensureOffset(zpr); err != nil {
					return ib.Len(), err
				}

				pageN, err := io.ReadFull(zpr, copyBuf[pageOff:])

				// Still handle the data even in case of errors.
				p.Underlay(pageOff, copyBuf[pageOff:pageOff+uint32(pageN)])
				l.streamOffset += int64(pageN)
				if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
					return ib.Len(), err
				}
			}

			r := bytes.NewReader(p.Data[pageOff : pageOff+pageMax])
			n, err := copyNBuffer(ib, r, int64(ib.Left()), copyBuf)
			if err != nil && err != io.EOF {
				return ib.Len(), err
			}
			l.overlayOffset += n
		default:
			// some other error during cache lookup.
			return ib.Len(), err
		}

		// If read spans over several pages, the second
		// page has to start at zero.
		pageOff = 0
	}

	return ib.Len(), nil
}

// Truncate sets the size of the stream.
// There are three cases:
//
// - `size` is equal to Length(): Nothing happens.
// - `size` is less than Length(): The stream will return io.EOF earlier.
// - `size` is more than Length(): The stream will be padded with zeros.
//
// This matches the behavior of the equally confusingly named POSIX
// ftruncate() function. Note that Truncate() is a very fast operation.
func (l *Layer) Truncate(size int64) {
	l.length = size
}

// Length is the current truncated length of the overlay.
// When you did not call Truncate() it will be the size you
// passed to NewLayer().
// Otherwise it is what you passed to the last call of Truncate().
func (l *Layer) Length() int64 {
	return l.length
}

/////////////////////////////////////
// FILE I/O COMPATIBILITY METHODS  //
//                                 //
// Do not use, unless you have to. //
// Prefer WriteAt() & ReadAt()     //
/////////////////////////////////////

// Read implements io.Reader by calling ReadAt()
// with the current offset.
func (l *Layer) Read(buf []byte) (int, error) {
	return l.ReadAt(buf, l.overlayOffset)
}

// Write writes `buf` at the current offset.
// It does NOT modify the read position.
func (l *Layer) Write(buf []byte) (int, error) {
	n, err := l.WriteAt(buf, l.overlayOffset)
	return n, err
}

// Seek changes the current offset for Write and Read.
// Note that in this implementation calling ReadAt()
// *does* change the seek offset. Use Seek() to make
// sure you're reading from the right spot.
func (l *Layer) Seek(off int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		l.overlayOffset = off
	case io.SeekCurrent:
		l.overlayOffset += off
	case io.SeekEnd:
		l.overlayOffset = l.length + off
	default:
		return 0, fmt.Errorf("invalid whence %d", whence)
	}

	return l.overlayOffset, nil
}

// Close will close the overlay and free up all resources,
// including pages in the cache.
func (l *Layer) Close() error {
	return l.cache.Evict(l.inode, l.length)
}

// WriteTo implements io.WriterTo
func (l *Layer) WriteTo(w io.Writer) (int64, error) {
	// NOTE: This method is mostly used in tests.
	// but can be also used by io.Copy() internally.
	// There is room for optimizations here:
	// Avoid one copy by directly writing to copyBuf.
copyBuf := copyBufPool.Get().([]byte) defer copyBufPool.Put(copyBuf) wsum := int64(0) for { rn, rerr := l.ReadAt(copyBuf, l.overlayOffset) if rerr != nil && rerr != io.EOF { return wsum, rerr } wn, werr := w.Write(copyBuf[:rn]) wsum += int64(wn) if werr != nil { return wsum, werr } if wn < rn { return wsum, io.ErrShortWrite } if rerr == io.EOF { return wsum, nil } if rn == 0 { return wsum, fmt.Errorf("nothing read, but no EOF") } } } ================================================ FILE: catfs/mio/pagecache/overlay_test.go ================================================ package pagecache import ( "bytes" "fmt" "io" "math/rand" "testing" "github.com/sahib/brig/catfs/mio/pagecache/mdcache" "github.com/sahib/brig/catfs/mio/pagecache/page" "github.com/sahib/brig/util" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func withLayer(t *testing.T, size int64, fn func(expected []byte, p *Layer)) { md, err := mdcache.New(mdcache.Options{ MaxMemoryUsage: 4 * page.Size, SwapDirectory: "", }) require.NoError(t, err) data := testutil.CreateDummyBuf(size) p, err := NewLayer(bytes.NewReader(data), md, 42, size) require.NoError(t, err) expected := make([]byte, size) copy(expected, data) fn(expected, p) require.NoError(t, md.Close()) } var ( testSizes = []int64{ 16*page.Size + 0, 16*page.Size - 1, 16*page.Size + 1, page.Size + 0, page.Size - 1, page.Size + 1, 0, 1, } ) func TestReadOnly(t *testing.T) { for _, testSize := range testSizes { t.Run(fmt.Sprintf("%d", testSize), func(t *testing.T) { withLayer(t, testSize, func(expected []byte, p *Layer) { got := bytes.NewBuffer([]byte{}) n, err := p.WriteTo(got) require.NoError(t, err) require.Equal(t, testSize, n) require.Equal(t, expected, got.Bytes()) }) }) } } func padOrCutToLength(buf []byte, length int64) []byte { if int64(len(buf)) >= length { return buf[:length] } c := make([]byte, length) copy(c, buf) return c } func TestReadOnlyTruncate(t *testing.T) { truncOffsets := []int64{ -2*page.Size + 0, 
-2*page.Size - 1, -2*page.Size + 1, +2*page.Size + 0, +2*page.Size - 1, +2*page.Size + 1, +page.Size + 0, +page.Size - 1, +page.Size + 1, -page.Size + 0, -page.Size - 1, -page.Size + 1, +0, +1, -1, } for _, testSize := range testSizes { t.Run(fmt.Sprintf("%d", testSize), func(t *testing.T) { for _, truncOff := range truncOffsets { length := util.Max64(0, testSize+truncOff) if length == testSize { // no need to run test with no truncation. // already covered by TestReadOnly() continue } t.Run(fmt.Sprintf("trunc-to-%d", length), func(t *testing.T) { withLayer(t, testSize, func(expected []byte, p *Layer) { got := bytes.NewBuffer([]byte{}) p.Truncate(length) n, err := p.WriteTo(got) require.NoError(t, err) require.Equal(t, length, n) res := padOrCutToLength(got.Bytes(), length) require.Equal( t, padOrCutToLength(expected, length), res, ) }) }) } }) } } func TestWriteSingle(t *testing.T) { for _, testReadSize := range testSizes { t.Run(fmt.Sprintf("read-%d", testReadSize), func(t *testing.T) { for _, testWriteSize := range testSizes { t.Run(fmt.Sprintf("write-%d", testWriteSize), func(t *testing.T) { withLayer(t, testReadSize, func(expected []byte, p *Layer) { expected = testutil.CreateRandomDummyBuf(testWriteSize, 23) wn, err := p.WriteAt(expected, 0) require.NoError(t, err) require.Equal(t, int64(wn), testWriteSize) got := make([]byte, testWriteSize) rn, err := p.Read(got) if testReadSize == 0 { // special case: that will immediately return EOF. require.Error(t, io.EOF, err) return } require.NoError(t, err) require.Equal(t, wn, rn) }) }) } }) } } func TestWriteRandomOffset(t *testing.T) { // Randomly generate writes and write them to the layer. // The randomness is controlled by seed to be reproducible. // The generated data is also copy()'d to a slice which // serves as way to check the overlay on the final read. 
for seed := 0; seed < 40; seed++ { t.Run(fmt.Sprintf("seed-%d", seed), func(t *testing.T) { for _, testReadSize := range testSizes { if testReadSize == 0 { continue } t.Run(fmt.Sprintf("size-%d", testReadSize*2), func(t *testing.T) { withLayer(t, testReadSize, func(expected []byte, p *Layer) { // NOTE: We do not write beyond p.Length() // to make this test easier to check. p.Truncate(testReadSize * 2) expected = padOrCutToLength(expected, p.Length()) require.Equal(t, testReadSize*2, p.Length()) rand.Seed(int64(seed)) for nwrites := 0; nwrites < seed; nwrites++ { writeOff := rand.Int63n(p.Length()) writeLen := rand.Int63n(p.Length() - writeOff + 1) // stream contains 0-254 data, overwrite with random: buf := testutil.CreateRandomDummyBuf(writeLen, int64(seed)) copy(expected[writeOff:writeOff+writeLen], buf) wn, err := p.WriteAt(buf, writeOff) require.NoError(t, err) require.Equal(t, int(writeLen), wn) } got := &bytes.Buffer{} rn, err := io.Copy(got, p) require.NoError(t, err) require.Equal(t, p.Length(), int64(rn)) require.Equal(t, p.Length(), int64(len(expected))) require.Equal(t, p.Length(), int64(got.Len())) // This for loop here is just for easier digest // debug output. require.Equal() outputs huge // diffs that are seldom helpful. for idx := 0; idx < got.Len(); idx++ { if expected[idx] != got.Bytes()[idx] { require.Equal( t, expected[idx:idx+256], got.Bytes()[idx:idx+256], ) return } } // This assert is just here in case the for loop // above has a bug or gets lost somehow. 
require.Equal(t, expected, got.Bytes()) }) }) } }) } } func TestReadRandomOffset(t *testing.T) { tcs := []struct { size int64 length int64 nops int }{ {page.Size, page.Size, 10}, {2 * page.Size, page.Size, 20}, {page.Size, 2 * page.Size, 30}, {16 * page.Size, 16 * page.Size, 30}, {16 * page.Size, 16*page.Size + 1, 20}, {16*page.Size + 1, 16 * page.Size, 10}, } // 16*page.Size + 0, // 16*page.Size - 1, // 16*page.Size + 1, // page.Size + 0, // page.Size - 1, // page.Size + 1, // 0, // 1, for _, tc := range tcs { name := fmt.Sprintf("s%d-l%d-o%d", tc.size, tc.length, tc.nops) t.Run(name, func(t *testing.T) { // always use the same writing distribution, // just increasingly more writes: withLayer(t, tc.size, func(expected []byte, p *Layer) { p.Truncate(tc.length) expected = padOrCutToLength(expected, p.Length()) require.Equal(t, tc.length, p.Length()) rand.Seed(42) overlayed := make([]byte, len(expected)) copy(overlayed, expected) for idx := 0; idx < tc.nops; idx++ { writeOff := rand.Int63n(p.Length()) writeLen := rand.Int63n(p.Length() - writeOff + 1) // stream contains 0-254 data, overwrite with random: buf := testutil.CreateRandomDummyBuf(writeLen, int64(42)) copy(overlayed[writeOff:writeOff+writeLen], buf) wn, err := p.WriteAt(buf, writeOff) require.NoError(t, err) require.Equal(t, int(writeLen), wn) } for seed := 0; seed < 40; seed++ { rand.Seed(int64(seed)) t.Run(fmt.Sprintf("seed-%d", seed), func(t *testing.T) { for idx := 0; idx < tc.nops; idx++ { readOff := rand.Int63n(p.Length()) readLen := rand.Int63n(p.Length() - readOff + 1) readBuf := make([]byte, readLen) rn, err := p.ReadAt(readBuf, readOff) require.NoError(t, err) require.Equal(t, readLen, int64(rn)) for idx := int64(0); idx < readLen; idx++ { if overlayed[readOff+idx] != readBuf[idx] { require.Failf(t, "bad data read", "first wrong offset: %d", readOff+idx) return } } require.Equal(t, overlayed[readOff:readOff+readLen], readBuf) } }) } }) }) } } ================================================ FILE: 
catfs/mio/pagecache/page/page.go ================================================ package page // NOTE: I had quite often brain freeze while figuring out the indexing. // If you do too, take a piece of paper and draw it. // If you don't, congratulations. You're smarter than me. import ( "bytes" "encoding/binary" "errors" "fmt" "sort" log "github.com/sirupsen/logrus" ) const ( // Size is the default size for a page. // Last page might be smaller. Size = 64 * 1024 // Meta is the number of bytes we use // to store the extents of the page. // (4k is the typical page size on linux) Meta = 4 * 1024 // ExtentSize needed to store a single extent. ExtentSize = 8 ) var ( // ErrCacheMiss indicates that a page is missing from the cache. // Not a real error, but a sentinel to indicate this state. ErrCacheMiss = errors.New("cache miss") ) // Extent marks a single write or // several writes that were joined to one. // // The written data is in the range [lo, hi) // where hi is not part of the write! // // In other words, when writing 16384 bytes // at OffLo=0, then OffHi=16384, but the last // valid bytes is at p.Data[OffHi-1]! // // This was chosen so you could say p.Data[OffLo:OffHi] // and it would do what you would guess it would do. type Extent struct { OffLo, OffHi uint32 } func (e Extent) String() string { return fmt.Sprintf("[%d-%d)", e.OffLo, e.OffHi) } // Page is a single cached page type Page struct { // Extents is a list describing where // `Data` contains valid data. Extents []Extent // Data is the data hold by the page. // It is allocated to Size+Meta bytes, // even when no data was used. Data []byte } func (p *Page) String() string { buf := &bytes.Buffer{} for idx, extent := range p.Extents { buf.WriteString(extent.String()) if idx+1 != len(p.Extents) { buf.WriteString(", ") } } return fmt.Sprintf("", p.Data, buf.String()) } // New allocates a new page with an initial extent at `off` // and with `write` as data. 
// See also Overlay().
func New(off uint32, write []byte) *Page {
	// NOTE: We allocate more than we actually need in order to implement
	// AsBytes and FromBytes efficiently without further allocations.
	backing := make([]byte, Size+Meta)
	p := &Page{Data: backing[:Size]}
	p.Overlay(off, write)
	return p
}

// FromBytes reconstructs a page from the given data.
// Note that ownership over the data is taken, do not write
// to it anymore while using it as a page.
func FromBytes(data []byte) (*Page, error) {
	if len(data) < Size {
		return nil, fmt.Errorf("page data smaller than mandatory size")
	}

	p := Page{Data: data[:Size]}

	// The extents live in the meta area after the payload:
	extents := data[Size:cap(data)]
	for idx := 0; idx < len(extents); idx += ExtentSize {
		if idx+ExtentSize > len(extents) {
			// sanity check: do not read after extents.
			continue
		}

		offLo := binary.LittleEndian.Uint32(extents[idx+0:])
		offHi := binary.LittleEndian.Uint32(extents[idx+4:])
		if offLo == 0 && offHi == 0 {
			// empty writes are invalid and serve as sentinel value
			// to tell us we read too far. No other extents to expect.
			break
		}

		if offLo == offHi {
			log.Warnf("page cache: loaded empty extent")
			continue
		}

		if offLo > offHi {
			log.Warnf("page cache: loaded invalid extent")
			continue
		}

		p.Extents = append(p.Extents, Extent{
			OffLo: offLo,
			OffHi: offHi,
		})
	}

	return &p, nil
}

// AsBytes encodes the extents at the end of the page data
// and returns the full sized page array.
func (p *Page) AsBytes() []byte {
	if cap(p.Data) < Size+Meta {
		// this is a programming error:
		panic(fmt.Sprintf("bug: page memory was allocated too small %d", cap(p.Data)))
	}

	pdata := p.Data[:Size+Meta]
	pmeta := pdata[Size:]
	for idx, extent := range p.Extents {
		off := idx * ExtentSize
		if off+ExtentSize >= cap(p.Data)-Size {
			// NOTE: This is an inefficient allocation/copy. It will occur only
			// when there are more than $(Meta/ExtentSize) distinct writes
			// without a single read of this page (a non-occluding read will
			// unify all extents). This is pretty unlikely to happen in normal
			// circumstances. If that happens it's a weird use case, so
			// allocate another 64 extents.
			pdata = append(pdata, make([]byte, ExtentSize*64)...)
			// re-base the views after a possible reallocation by append:
			p.Data = pdata[:Size]
			pmeta = pdata[Size:cap(pdata)]
		}

		binary.LittleEndian.PutUint32(pmeta[off+0:], extent.OffLo)
		binary.LittleEndian.PutUint32(pmeta[off+4:], extent.OffHi)
	}

	return pdata
}

// affectedExtentIdxs() returns the indices of extents
// that would be affected when writing a new extent with
// the offsets [lo, hi].
//
// Consider the following cases, where "-" are areas
// with existing extents, "_" without and "|" denotes
// the area where we want to write newly. First extent
// is called E1, second E2 and so on.
//
// Case 1: => min=E2, max=E2 (does not hit any extent)
//
//	------__|--|___-------
//
// Case 2: => min=E2, max=E3 (partially hits an extent)
//
//	------__|-------|-----
//
// Case 3: => min=E2, max=E3 (fully inside one extent)
//
//	------________--|---|-
//
// Case 4: => min=len(extents), max=len(extents) (outside any extent)
//
//	------________--------   |-----|
func (p *Page) affectedExtentIdxs(lo, hi uint32) (int, int) {
	// First extent whose end lies behind `lo`:
	minExIdx := sort.Search(len(p.Extents), func(i int) bool {
		return lo < p.Extents[i].OffHi
	})

	// First extent that starts at or after `hi`:
	maxExIdx := sort.Search(len(p.Extents), func(i int) bool {
		return hi <= p.Extents[i].OffLo
	})

	if minExIdx > maxExIdx {
		// this can happen if lo > hi.
		// (basically a programmer error)
		maxExIdx = minExIdx
	}

	return minExIdx, maxExIdx
}

// OccludesStream will tell you if the page's cached contents
// fully occlude the underlying stream. Or in other words:
// If true, we do not need to read from the underlying stream.
func (p *Page) OccludesStream(pageOff, length uint32) bool { l := int64(length) minExIdx, maxExIdx := p.affectedExtentIdxs(pageOff, pageOff+length) for idx := minExIdx; idx < maxExIdx && l > 0; idx++ { ex := p.Extents[idx] if ex.OffHi < pageOff { continue } if ex.OffLo < pageOff { l -= int64(ex.OffHi - pageOff) continue } l -= int64(ex.OffHi - ex.OffLo) } return l <= 0 } // Overlay adds newly written data in `write` to the page // at `off` (relative to the page start!). off + len(write) may not // exceed the page size! This is a programmer error. // // Internally, the data is copied to the page buffer and we keep // note of the new data in an extent, possibly merging with existing // ones. This is a relatively fast operation. func (p *Page) Overlay(off uint32, write []byte) { if len(write) == 0 { return } offPlusWrite := off + uint32(len(write)) if offPlusWrite > uint32(len(p.Data)) { // this is a programmer error: panic(fmt.Sprintf("extent with write over page bound: %d", offPlusWrite)) } // Copy the data to the requested part of the page. // Everything after is maintaining the extents. copy(p.Data[off:offPlusWrite], write) p.updateExtents(off, offPlusWrite) } func (p *Page) updateExtents(off, offPlusWrite uint32) { // base case: no extents yet: if len(p.Extents) == 0 { p.Extents = append(p.Extents, Extent{ OffLo: off, OffHi: offPlusWrite, }) return } // Find out where to insert the new extent. // Use binary search to find a range of extents // that are affected by this write. minExIdx, maxExIdx := p.affectedExtentIdxs(off, offPlusWrite) if minExIdx >= len(p.Extents) { // This means that no extent was affected because we wrote beyond any // existing extent. Append a new extent to the end of the list. p.Extents = append(p.Extents, Extent{ OffLo: off, OffHi: offPlusWrite, }) return } if minExIdx == maxExIdx { // write happens in "free space". No extent hit. 
if minExIdx > 0 && p.Extents[minExIdx-1].OffHi == off { // If the write happens to be right after another existing extent // then merge with it. Otherwise insert below. p.Extents[minExIdx-1].OffHi = offPlusWrite return } if maxExIdx < len(p.Extents) && p.Extents[maxExIdx].OffLo == offPlusWrite { // If the write happens to be right before another existing extent // then merge with it. Otherwise insert below. p.Extents[maxExIdx].OffLo = off return } // insert new extent in the middle of the slice. p.Extents = append(p.Extents, Extent{}) copy(p.Extents[minExIdx+1:], p.Extents[minExIdx:]) p.Extents[minExIdx] = Extent{ OffLo: off, OffHi: offPlusWrite, } return } // Join all affected in the range to one single extent, // and move rest of extents further and cut to new size: newHi := p.Extents[maxExIdx-1].OffHi newLo := p.Extents[minExIdx].OffLo if newHi < offPlusWrite { newHi = offPlusWrite } if newLo > off { newLo = off } p.Extents[minExIdx].OffLo = newLo p.Extents[minExIdx].OffHi = newHi copy(p.Extents[minExIdx+1:], p.Extents[maxExIdx:]) p.Extents = p.Extents[:len(p.Extents)-(maxExIdx-minExIdx)+1] } func minUint32(a, b uint32) uint32 { if a < b { return a } return b } // Underlay is like the "negative" of Overlay. It writes the data of `write` // (starting at pageOff) to the underlying buffer where *no* extent is. // It can be used to "cache" data from the underlying stream, but not // overwriting any overlay. If OccludesStream() returns true for the same // offsets, then Underlay() will be an (expensive) no-op. func (p *Page) Underlay(pageOff uint32, write []byte) { pageOffPlusWrite := pageOff + uint32(len(write)) if pageOff == pageOffPlusWrite { // zero underlay. return } cursor := write prevOff := pageOff for _, ex := range p.Extents { if ex.OffHi < pageOff { // Extent was before the desired write. // No need to consider this one. continue } if ex.OffLo < pageOff { // Extent started before pageOff, // but goes over it. We should not copy. 
// Instead "loose" the data of that extent. cutoff := minUint32(ex.OffHi-pageOff, uint32(len(cursor))) cursor = cursor[cutoff:] prevOff = ex.OffHi continue } toCopy := ex.OffLo - prevOff if toCopy > 0 { // Copy everything since last copy // to p.Data and jump over the data in cursor. copy(p.Data[prevOff:prevOff+toCopy], cursor) } cursor = cursor[minUint32(toCopy+ex.OffHi-ex.OffLo, uint32(len(cursor))):] prevOff = ex.OffHi } if prevOff < pageOffPlusWrite && len(cursor) > 0 { // Handle the case when the underlying write // goes beyond all extents or when there are // no extents at all. toCopy := pageOffPlusWrite - prevOff copy(p.Data[prevOff:prevOff+toCopy], cursor) } p.updateExtents(pageOff, pageOffPlusWrite) } ================================================ FILE: catfs/mio/pagecache/page/page_test.go ================================================ package page import ( "testing" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func TestPageAffectedIndices(t *testing.T) { e1 := Extent{ OffLo: 0, OffHi: Size / 4, } e2 := Extent{ OffLo: 2 * Size / 4, OffHi: 3 * Size / 4, } e3 := Extent{ OffLo: 3 * Size / 4, OffHi: 4 * Size / 4, } p := Page{ Data: make([]byte, Size+Meta), Extents: []Extent{e1, e2, e3}, } for idx := uint32(1); idx < e1.OffHi; idx++ { l, h := p.affectedExtentIdxs(0, uint32(idx)) // result means: affects extents[0] until // (excluding) extents[1] require.Equal(t, 0, l, idx) require.Equal(t, 1, h, idx) } for idx := e1.OffHi; idx < e2.OffLo; idx++ { l, h := p.affectedExtentIdxs(e1.OffHi, uint32(idx)) // result means: no extent was found, the array // of affected extents is empty. require.Equal(t, 1, l, idx) require.Equal(t, 1, h, idx) } for idx := e2.OffLo + 1; idx < e3.OffLo; idx++ { l, h := p.affectedExtentIdxs(idx, idx+1) // result means: no extent was found, the array // of affected extents is empty. 
require.Equal(t, 1, l, idx) require.Equal(t, 2, h, idx) } // No for loop needed for last case: l, h := p.affectedExtentIdxs(Size, Size+1) require.Equal(t, 3, l) require.Equal(t, 3, h) } func TestPageSerializeDeserialize(t *testing.T) { expected := New(0, testutil.CreateDummyBuf(Size)) data := expected.AsBytes() got, err := FromBytes(data) require.NoError(t, err) require.Equal(t, expected.Data, got.Data) require.Equal(t, expected.Extents, got.Extents) } func TestPageSerializeWithManyWrites(t *testing.T) { // Simulate a really pathological case where we have tons of small writes. // They will cause AsBytes() to increase the backing buffer when // serializing. This will be a performance hit, but is at least correct. expected := New(0, testutil.CreateDummyBuf(10)) for idx := 0; idx < 1024; idx++ { off := uint32(11 + (idx * 11)) expected.Overlay(off, testutil.CreateDummyBuf(10)) } // 1025: initial extent existed: require.Len(t, expected.Extents, 1025) data := expected.AsBytes() got, err := FromBytes(data) require.NoError(t, err) require.Equal(t, expected.Data, got.Data) require.Equal(t, expected.Extents, got.Extents) } func TestPageOccludeStreamBasic(t *testing.T) { // page with one extent: p := New(0, testutil.CreateDummyBuf(Size/4)) require.False(t, p.OccludesStream(0, Size)) require.False(t, p.OccludesStream(0, Size/4+1)) require.True(t, p.OccludesStream(0, Size/4)) require.True(t, p.OccludesStream(0, Size/4-1)) p.Overlay(2*Size/4, testutil.CreateDummyBuf(Size/4)) require.False(t, p.OccludesStream(2*Size/4, Size/4+1)) require.False(t, p.OccludesStream(0, 3*Size/4+1)) require.True(t, p.OccludesStream(2*Size/4, Size/4)) require.True(t, p.OccludesStream(2*Size/4, Size/4-1)) } func TestPageOccludeStreamInExtent(t *testing.T) { p := New(0, testutil.CreateDummyBuf(Size/4)) p.Overlay(2*Size/4, testutil.CreateDummyBuf(Size/4)) require.False(t, p.OccludesStream(Size/8, 5*Size/8)) require.False(t, p.OccludesStream(5*Size/8, Size/8+1)) require.True(t, p.OccludesStream(5*Size/8, 
Size/8)) require.True(t, p.OccludesStream(Size/8, Size/8)) } func TestPageAddExtent(t *testing.T) { // page with one extent: p := New(0, testutil.CreateDummyBuf(Size/4)) // This matches the extents in TestPageAffectedIndices: // (first extent touches existing one!) p.Overlay(100, testutil.CreateDummyBuf(Size/4)) p.Overlay(2*Size/4, testutil.CreateDummyBuf(Size/4)) p.Overlay(3*Size/4, testutil.CreateDummyBuf(Size/4)) require.Len(t, p.Extents, 3) require.Equal( t, []Extent{{0, Size/4 + 100}, {2 * Size / 4, 3 * Size / 4}, {3 * Size / 4, Size}}, p.Extents, ) require.Panics(t, func() { // Write beyond the extents: p.Overlay(Size, testutil.CreateDummyBuf(1)) }) // Write an extent in free space, right after another one: // It should detect this and merge with it. p.Overlay(Size/4+100, testutil.CreateDummyBuf(20)) require.Len(t, p.Extents, 3) require.Equal(t, Extent{0, Size/4 + 120}, p.Extents[0]) // Write an extent in free space (not adjacent): p.Overlay(Size/4+200, testutil.CreateDummyBuf(30)) require.Len(t, p.Extents, 4) require.Equal(t, Extent{Size/4 + 200, Size/4 + 230}, p.Extents[1]) // Write an extent that covers everything, // should reduce to a single one: p.Overlay(0, testutil.CreateDummyBuf(Size)) require.Len(t, p.Extents, 1) require.Equal(t, Extent{0, Size}, p.Extents[0]) require.Equal(t, testutil.CreateDummyBuf(Size), p.Data) // Try to add an empty extent, it should not do anything. 
p.Overlay(0, []byte{}) require.Len(t, p.Extents, 1) } func TestPageAddExtentRegression(t *testing.T) { // I forgot to adjust the lower extent bound: p := New(3*Size/4, testutil.CreateDummyBuf(Size/4)) p.Overlay(0, testutil.CreateDummyBuf(Size)) require.Len(t, p.Extents, 1) require.Equal(t, uint32(0), p.Extents[0].OffLo) require.Equal(t, uint32(Size), p.Extents[0].OffHi) } func TestPageUnderlayFull(t *testing.T) { underlay := testutil.CreateRandomDummyBuf(Size, 23) overlay := testutil.CreateDummyBuf(Size / 4) p := New(2*Size/4, overlay) p.Underlay(0, underlay) copy(underlay[2*Size/4:], overlay) require.Equal(t, len(underlay), len(p.Data)) require.Equal(t, underlay, p.Data) } func TestPageUnderlayPartial(t *testing.T) { underlay := testutil.CreateRandomDummyBuf(2*Size/4, 23) p := New(2*Size/4, testutil.CreateDummyBuf(Size/4)) // That overlay should be ignored: p.Overlay(Size/16, testutil.CreateDummyBuf(Size/32)) // This overlay shadows the underlay: p.Overlay(Size/8, testutil.CreateDummyBuf(Size/4)) // Now underlay it. Should only write things // between 3*Size/8 and Size/2, everything else is shadowed. p.Underlay(Size/4, underlay) // Construct our expectation: expected := make([]byte, Size) copy(expected[Size/4:], underlay) copy(expected[Size/16:], testutil.CreateDummyBuf(Size/32)) copy(expected[Size/8:], testutil.CreateDummyBuf(Size/4)) copy(expected[2*Size/4:], testutil.CreateDummyBuf(Size/4)) require.Equal(t, expected, p.Data) } func TestPageUnderlayLeftover(t *testing.T) { underlay := testutil.CreateRandomDummyBuf(1*Size/4, 23) overlay := testutil.CreateDummyBuf(3 * Size / 4) p := New(0, overlay) // should do nothing! 
p.Underlay(0, underlay) // Construct our expectation: expected := make([]byte, Size) copy(expected, overlay) require.Equal(t, expected, p.Data) } ================================================ FILE: catfs/mio/pagecache/util.go ================================================ package pagecache import ( "io" ) // small util to wrap a buffer we want to write to. Tells you easily how much // data you can still write to it. type iobuf struct { dst []byte off int } func (ib *iobuf) Write(src []byte) (int, error) { n := copy(ib.dst[ib.off:ib.off+ib.Left()], src) ib.off += n return n, nil } func (ib *iobuf) Len() int { return ib.off } func (ib *iobuf) Left() int { return len(ib.dst) - ib.off } // zeroPadReader wraps another reader which has data // until `size`. If `length` > `size` than it pads the // gap with zero reads. type zeroPadReader struct { r io.Reader off, size, length int64 } func memzero(buf []byte) { // NOTE: This for-loop is optimized by the compiler: // https://github.com/golang/go/issues/5373 // // (copy with a pre-allocated zero page is 2x slower than this!) for idx := range buf { buf[idx] = 0 } } func (zpr *zeroPadReader) Read(buf []byte) (int, error) { if zpr.size >= zpr.length { // sanity check. zpr.length might be also shorter. // then we don't do any padding but work like // io.LimitReader(). zpr.size = zpr.length } diff := zpr.length - zpr.off bufLen := int64(len(buf)) if diff < bufLen { // clamp buf to zpr.length bufLen = diff } if zpr.off < zpr.size { // below underlying stream size: n, err := zpr.r.Read(buf[:bufLen]) zpr.off += int64(n) return n, err } if diff > 0 { // above underlying stream size, // but below padded length. memzero(buf[:bufLen]) zpr.off += bufLen return int(bufLen), nil } return 0, io.EOF } ///////// // copyNBuffer is golang's io.CopyN with added param for the buffer, // like in io.CopyBuffer. Saves precious allocations. 
func copyNBuffer(dst io.Writer, src io.Reader, n int64, buf []byte) (written int64, err error) { written, err = io.CopyBuffer(dst, io.LimitReader(src, n), buf) if written == n { return n, nil } if written < n && err == nil { // src stopped early; must have been EOF. err = io.EOF } return } ================================================ FILE: catfs/mio/pagecache/util_test.go ================================================ package pagecache import ( "bytes" "io" "testing" "github.com/sahib/brig/util" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) func TestZeroPaddedReader(t *testing.T) { tcs := []struct { name string off, length, size int64 }{ { name: "usual-case", off: 0, length: 1024, size: 512, }, { name: "truncate-short", off: 0, length: 512, size: 1024, }, { name: "equal", off: 0, length: 1024, size: 1024, }, { name: "zero", off: 0, length: 0, size: 0, }, } const maxSize = 4 * 1024 data := testutil.CreateDummyBuf(maxSize) zero := make([]byte, maxSize) for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { zpr := &zeroPadReader{ r: bytes.NewReader(data), off: tc.off, size: tc.size, length: tc.length, } out := &bytes.Buffer{} n, err := io.Copy(out, zpr) require.NoError(t, err) require.Equal(t, tc.length, n) a := util.Min64(tc.size, tc.length) b := util.Max64(tc.size, tc.length) outData := out.Bytes() require.Equal(t, data[0:a], outData[0:a]) require.Equal(t, zero[a:b], outData[a:b]) }) } } func TestIOBuf(t *testing.T) { tcs := []struct { name string srcSize int dstSize int }{ { name: "src=dst", srcSize: 1024, dstSize: 1024, }, { name: "srcdst", srcSize: 2048, dstSize: 512, }, { name: "zero", srcSize: 0, dstSize: 0, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { src := testutil.CreateDummyBuf(int64(tc.srcSize)) ib := &iobuf{ dst: make([]byte, tc.dstSize), } n, err := ib.Write(src) require.NoError(t, err) require.Equal(t, n, ib.Len()) min := tc.srcSize if min > tc.dstSize { min = tc.dstSize } 
require.Equal(t, n, min) }) } } func BenchmarkZeroing(b *testing.B) { b.Run("memzero", func(b *testing.B) { buf := testutil.CreateDummyBuf(16 * 1024) for idx := 0; idx < b.N; idx++ { memzero(buf) } }) b.Run("zerocopy", func(b *testing.B) { zero := make([]byte, 16*1024) buf := testutil.CreateDummyBuf(16 * 1024) for idx := 0; idx < b.N; idx++ { copy(buf, zero) } }) } ================================================ FILE: catfs/mio/stream.go ================================================ package mio import ( "fmt" "io" "io/ioutil" "github.com/sahib/brig/catfs/mio/compress" "github.com/sahib/brig/catfs/mio/encrypt" "github.com/sahib/brig/repo/hints" "github.com/sahib/brig/util" log "github.com/sirupsen/logrus" ) // Stream is a stream coming from the backend. type Stream interface { io.Reader io.Seeker io.Closer io.WriterTo } type stream struct { io.Reader io.Seeker io.Closer io.WriterTo } type dumbWriterTo struct { r io.Reader } func (d dumbWriterTo) WriteTo(w io.Writer) (n int64, err error) { return io.Copy(w, d.r) } // NewOutStream creates an OutStream piping data from brig to the outside. // `key` is used to decrypt the data. The compression algorithm is read // from the stream header. func NewOutStream(r io.ReadSeeker, isRaw bool, key []byte) (Stream, error) { s := stream{ Reader: r, Seeker: r, WriterTo: dumbWriterTo{r: r}, Closer: ioutil.NopCloser(r), } if isRaw { // directly return stream. return s, nil } // At this point we're sure that there must be a magic number. // We can use it to decide what readers we should build. magicNumber, headerReader, err := util.PeekHeader(r, 8) if err != nil { // First read on the stream, errors will bubble up here. return nil, err } // make sure that the header is prefixed to the stream again: // compress + encrypt reader expect the magic number there. s.Reader = headerReader s.Seeker = headerReader s.WriterTo = dumbWriterTo{r: headerReader} // NOTE: Assumption here is that our own magic numbers // are always 8 bytes long. 
Since we control it, // that's reasonable. if len(magicNumber) != 8 { return nil, fmt.Errorf("bad magic number") } var isEncrypted bool switch mn := string(magicNumber); mn { case string(encrypt.MagicNumber): isEncrypted = true case string(compress.MagicNumber): // Not encrypted, but decompress needed. default: return nil, fmt.Errorf("unknown magic number '%s'", mn) } if isEncrypted { rEnc, err := encrypt.NewReader(s, key) if err != nil { return nil, err } flags, err := rEnc.Flags() if err != nil { return nil, err } s.Reader = rEnc s.Seeker = rEnc s.WriterTo = rEnc // The encryption header stores if we encoded the stream // with another stream inside (matroska like). If not, // we can return early. if flags&encrypt.FlagCompressedInside == 0 { return s, nil } } // if compression is used inside, than wrap in decompressor: // (s might contain decryptor or is raw stream) rZip := compress.NewReader(s) s.Reader = rZip s.Seeker = rZip s.WriterTo = rZip return s, nil } func guessCompression(path string, r io.Reader, hint *hints.Hint) (io.Reader, error) { // Keep the header of the file in memory, so we can do some guessing // of e.g. the compression algorithm we should use. headerReader := util.NewHeaderReader(r, 2048) headerBuf, err := headerReader.Peek() if err != nil { log.WithError(err).Warnf("failed to peek stream header") return nil, err } compressAlgo, err := compress.GuessAlgorithm(path, headerBuf) if err != nil { // NOTE: don't error out here. That just means we don't // guessed the perfect settings. log.WithError(err). WithField("path", path). Warnf("failed to guess suitable zip algorithm") } log.Debugf("guessed '%s' compression for file %s", compressAlgo, path) hint.CompressionAlgo = hints.CompressAlgorithmTypeToCompressionHint(compressAlgo) return headerReader, nil } // NewInStream creates a new stream that pipes data into ipfs. // The data is read from `r`, encrypted with `key` and encoded based on the // settings given by `hint`. 
`path` is only used to better guess the compression // algorithm - if desired by `hint`. `path` can be empty. // // It returns a reader that will produce the encoded stream. // If no actual encoding will be done, the second return param will be true func NewInStream(r io.Reader, path string, key []byte, hint hints.Hint) (io.ReadCloser, bool, error) { var err error if hint.CompressionAlgo == hints.CompressionGuess { // replace "guess" to an actual compression algorithm. r, err = guessCompression(path, r, &hint) if err != nil { return nil, false, err } } // use a pipe to redirect `r` to encoding writers without copying: pr, pw := io.Pipe() // Writing to pw will be matched by a read on the other side. // If there is no read we will block. var w io.Writer = pw var closers = []io.Closer{pw} // Only add encryption if desired by hints: if hint.EncryptionAlgo != hints.EncryptionNone { wEnc, err := encrypt.NewWriter(w, key, hint.EncryptFlags()) if err != nil { return nil, false, err } closers = append(closers, wEnc) w = wEnc } // Only add compression if desired or mime type is suitable: if hint.CompressionAlgo != hints.CompressionNone { wZip, err := compress.NewWriter(w, hint.CompressionAlgo.ToCompressAlgorithmType()) if err != nil { return nil, false, err } closers = append(closers, wZip) w = wZip } // Suck the reader empty and move it to `w`. go func() { if _, err := io.Copy(w, r); err != nil { // Continue closing the fds; no return. log.WithError(err).Warnf("internal write error") } // NOTE: closers must be closed in inverse order. // pipe writer should come last. Each Close() // might still write out data. for idx := len(closers) - 1; idx >= 0; idx-- { if err := closers[idx].Close(); err != nil { log.WithError(err).Warnf("internal close error") } } }() return pr, hint.IsRaw(), nil } // limitedStream is a small wrapper around Stream, // which allows truncating the stream at a certain size. 
type limitedStream struct { stream Stream pos uint64 size uint64 } func (ls *limitedStream) Read(buf []byte) (int, error) { isEOF := false if ls.pos+uint64(len(buf)) >= ls.size { buf = buf[:ls.size-ls.pos] isEOF = true } n, err := ls.stream.Read(buf) if err != nil { return n, err } if isEOF { err = io.EOF } return n, err } func (ls *limitedStream) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekCurrent: return ls.Seek(int64(ls.pos)+offset, io.SeekStart) case io.SeekEnd: ls.pos = 0 return ls.Seek(int64(ls.size)+offset, io.SeekStart) case io.SeekStart: ls.pos = 0 } newPos := int64(ls.pos) + offset if newPos < 0 { return -1, io.EOF } if newPos > int64(ls.size) { return int64(ls.size), io.EOF } ls.pos = uint64(newPos) return ls.stream.Seek(newPos, io.SeekStart) } func (ls *limitedStream) WriteTo(w io.Writer) (int64, error) { // We do not want to defeat the purpose of WriteTo here. // That's why we do the limit check in the writer part. return ls.stream.WriteTo(util.LimitWriter(w, int64(ls.size-ls.pos))) } func (ls *limitedStream) Close() error { return ls.stream.Close() } // LimitStream is like io.LimitReader, but works for mio.Stream. // It will not allow reading/seeking after the specified size. 
func LimitStream(stream Stream, size uint64) Stream { return &limitedStream{ stream: stream, pos: 0, size: size, } } ================================================ FILE: catfs/mio/stream_test.go ================================================ package mio import ( "bytes" "fmt" "io" "io/ioutil" "testing" "github.com/brianvoe/gofakeit/v6" "github.com/sahib/brig/catfs/mio/compress" "github.com/sahib/brig/repo/hints" "github.com/sahib/brig/util/testutil" "github.com/stretchr/testify/require" ) var testKey = []byte("01234567890ABCDE01234567890ABCDE") func testWriteAndRead( t *testing.T, raw []byte, hint hints.Hint, ) { rawBuf := &bytes.Buffer{} if _, err := rawBuf.Write(raw); err != nil { t.Errorf("Huh, buf-write failed?") return } encStream, isRaw, err := NewInStream( rawBuf, gofakeit.Name(), testKey, hint, ) if err != nil { t.Errorf("creating encryption stream failed: %v", err) return } encrypted := &bytes.Buffer{} if _, err = io.Copy(encrypted, encStream); err != nil { t.Errorf("reading encrypted data failed: %v", err) return } // Fake a close method: br := bytes.NewReader(encrypted.Bytes()) r := stream{ Reader: br, Seeker: br, WriterTo: br, Closer: ioutil.NopCloser(nil), } decStream, err := NewOutStream(r, isRaw, testKey) if err != nil { t.Errorf("creating decryption stream failed: %v", err) return } decrypted := &bytes.Buffer{} if _, err = io.Copy(decrypted, decStream); err != nil { t.Errorf("reading decrypted data failed: %v", err) return } if !bytes.Equal(decrypted.Bytes(), raw) { t.Errorf("Raw and decrypted is not equal => BUG.") t.Errorf("RAW:\n %v", raw) t.Errorf("DEC:\n %v", decrypted.Bytes()) return } } func TestWriteAndRead(t *testing.T) { t.Parallel() s64k := int64(64 * 1024) sizes := []int64{ 0, 1, 10, s64k, s64k - 1, s64k + 1, s64k * 2, s64k * 1024, } for _, size := range sizes { regularData := testutil.CreateDummyBuf(size) randomData := testutil.CreateRandomDummyBuf(size, 42) for _, hint := range hints.AllPossibleHints() { prefix := 
fmt.Sprintf("%s-size-%d-", hint, size) t.Run(prefix+"regular", func(t *testing.T) { t.Parallel() testWriteAndRead(t, regularData, hint) }) t.Run(prefix+"random", func(t *testing.T) { t.Parallel() testWriteAndRead(t, randomData, hint) }) } } } func TestLimitedStream(t *testing.T) { t.Parallel() testData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} r := bytes.NewReader(testData) // Fake a stream without all the encryption/compression fuzz. stream := struct { io.Reader io.Seeker io.Closer io.WriterTo }{ Reader: r, Seeker: r, WriterTo: r, Closer: ioutil.NopCloser(r), } for idx := 0; idx <= 10; idx++ { // Seek back to beginning: _, err := stream.Seek(0, io.SeekStart) require.Nil(t, err) smallStream := LimitStream(stream, uint64(idx)) data, err := ioutil.ReadAll(smallStream) require.Nil(t, err) require.Equal(t, testData[:idx], data) } var err error // Reset and do some special torturing: _, err = stream.Seek(0, io.SeekStart) require.Nil(t, err) limitStream := LimitStream(stream, 5) n, err := limitStream.Seek(4, io.SeekStart) require.Nil(t, err) require.Equal(t, int64(4), n) n, err = limitStream.Seek(6, io.SeekStart) require.True(t, err == io.EOF) n, err = limitStream.Seek(-5, io.SeekEnd) require.Nil(t, err) require.Equal(t, int64(0), n) _, err = limitStream.Seek(-6, io.SeekEnd) require.True(t, err == io.EOF) _, err = stream.Seek(0, io.SeekStart) require.Nil(t, err) limitStream = LimitStream(stream, 5) buf := &bytes.Buffer{} n, err = limitStream.WriteTo(buf) require.Nil(t, err) require.Equal(t, n, int64(10)) require.Equal(t, buf.Bytes(), testData[:5]) buf.Reset() _, err = stream.Seek(0, io.SeekStart) require.Nil(t, err) limitStream = LimitStream(stream, 11) n, err = limitStream.WriteTo(buf) require.Nil(t, err) require.Equal(t, n, int64(10)) require.Equal(t, buf.Bytes(), testData) } func TestLimitStreamSize(t *testing.T) { // Size taken from a dummy file that showed this bug: data := testutil.CreateDummyBuf(6041) packData, err := compress.Pack(data, compress.AlgoSnappy) 
require.Nil(t, err) rZip := compress.NewReader(bytes.NewReader(packData)) stream := struct { io.Reader io.Seeker io.Closer io.WriterTo }{ Reader: rZip, Seeker: rZip, WriterTo: rZip, Closer: ioutil.NopCloser(rZip), } r := LimitStream(stream, uint64(len(data))) size, err := r.Seek(0, io.SeekEnd) require.Nil(t, err) require.Equal(t, int64(len(data)), size) off, err := r.Seek(0, io.SeekStart) require.Nil(t, err) require.Equal(t, int64(0), off) buf := &bytes.Buffer{} n, err := io.Copy(buf, r) require.Nil(t, err) require.Equal(t, int64(len(data)), n) require.Equal(t, data, buf.Bytes()) } func TestStreamSizeBySeek(t *testing.T) { buf := &bytes.Buffer{} data := testutil.CreateDummyBuf(6041 * 1024) encStream, isRaw, err := NewInStream( bytes.NewReader(data), "", testKey, hints.Default(), ) require.Nil(t, err) _, err = io.Copy(buf, encStream) require.Nil(t, err) stream, err := NewOutStream( bytes.NewReader(buf.Bytes()), isRaw, testKey, ) require.Nil(t, err) n, err := stream.Seek(0, io.SeekEnd) require.Nil(t, err) require.Equal(t, int64(len(data)), n) n, err = stream.Seek(0, io.SeekStart) require.Nil(t, err) require.Equal(t, int64(0), n) outBuf := &bytes.Buffer{} n, err = io.Copy(outBuf, stream) require.Nil(t, err) require.Equal(t, int64(len(data)), n) require.Equal(t, outBuf.Bytes(), data) } ================================================ FILE: catfs/nodes/base.go ================================================ package nodes import ( "fmt" "strings" "time" e "github.com/pkg/errors" ie "github.com/sahib/brig/catfs/errors" capnp_model "github.com/sahib/brig/catfs/nodes/capnp" h "github.com/sahib/brig/util/hashlib" capnp "zombiezen.com/go/capnproto2" ) // Base is a place that holds all common attributes of all Nodes. // It also defines some utility function that will be mixed into real nodes. 
type Base struct {
	// Basename of this node
	name string

	// name of the user that last modified this node
	user string

	// Hash of this node (might be empty)
	tree h.Hash

	// Pointer hash to the content in the backend
	backend h.Hash

	// Content hash of this node
	content h.Hash

	// Last modification time of this node.
	modTime time.Time

	// Type of this node
	nodeType NodeType

	// Unique identifier for this node
	inode uint64
}

// copyBase will copy all attributes from the base.
// The tree, content and backend hashes are cloned so the copy cannot
// alias the original's hash buffers; the copy takes the inode that is
// passed in via `inode` instead of the original one.
func (b *Base) copyBase(inode uint64) Base {
	return Base{
		name:     b.name,
		user:     b.user,
		tree:     b.tree.Clone(),
		content:  b.content.Clone(),
		backend:  b.backend.Clone(),
		modTime:  b.modTime,
		nodeType: b.nodeType,
		inode:    inode,
	}
}

// User returns the user that last modified this node.
func (b *Base) User() string {
	return b.user
}

// Name returns the name of this node (e.g. /a/b/c -> c)
// The root directory will have the name empty string.
func (b *Base) Name() string {
	return b.name
}

// TreeHash returns the hash of this node.
func (b *Base) TreeHash() h.Hash {
	return b.tree
}

// ContentHash returns the content hash of this node.
func (b *Base) ContentHash() h.Hash {
	return b.content
}

// BackendHash returns the backend hash of this node.
func (b *Base) BackendHash() h.Hash {
	return b.backend
}

// Type returns the type of this node.
func (b *Base) Type() NodeType {
	return b.nodeType
}

// ModTime will return the last time this node's content
// was modified. Metadata changes are not recorded.
func (b *Base) ModTime() time.Time {
	return b.modTime
}

// Inode will return a unique ID that is different for each node.
func (b *Base) Inode() uint64 {
	return b.inode
}

/////// UTILS /////////

// setBaseAttrsToNode serializes all attributes common to every node
// type into the given capnp node. The modification time is stored as
// the binary marshaling of time.Time, wrapped into a string.
func (b *Base) setBaseAttrsToNode(capnode capnp_model.Node) error {
	modTimeBin, err := b.modTime.MarshalBinary()
	if err != nil {
		return err
	}

	if err := capnode.SetModTime(string(modTimeBin)); err != nil {
		return err
	}

	if err := capnode.SetTreeHash(b.tree); err != nil {
		return err
	}

	if err := capnode.SetContentHash(b.content); err != nil {
		return err
	}

	if err := capnode.SetBackendHash(b.backend); err != nil {
		return err
	}

	if err := capnode.SetName(b.name); err != nil {
		return err
	}

	if err := capnode.SetUser(b.user); err != nil {
		return err
	}

	capnode.SetInode(b.inode)
	return nil
}

// parseBaseAttrsFromNode is the inverse of setBaseAttrsToNode: it
// restores all common attributes of `b` from the given capnp node.
func (b *Base) parseBaseAttrsFromNode(capnode capnp_model.Node) error {
	var err error
	b.name, err = capnode.Name()
	if err != nil {
		return err
	}

	b.user, err = capnode.User()
	if err != nil {
		return err
	}

	b.tree, err = capnode.TreeHash()
	if err != nil {
		return err
	}

	b.content, err = capnode.ContentHash()
	if err != nil {
		return err
	}

	b.backend, err = capnode.BackendHash()
	if err != nil {
		return err
	}

	unparsedModTime, err := capnode.ModTime()
	if err != nil {
		return err
	}

	// modTime was stored via MarshalBinary in setBaseAttrsToNode:
	if err := b.modTime.UnmarshalBinary([]byte(unparsedModTime)); err != nil {
		return err
	}

	// Derive the node type from the capnp union discriminator:
	switch typ := capnode.Which(); typ {
	case capnp_model.Node_Which_file:
		b.nodeType = NodeTypeFile
	case capnp_model.Node_Which_directory:
		b.nodeType = NodeTypeDirectory
	case capnp_model.Node_Which_commit:
		b.nodeType = NodeTypeCommit
	case capnp_model.Node_Which_ghost:
		// Ghost set the nodeType themselves.
		// Ignore them here.
	default:
		return fmt.Errorf("Bad capnp node type `%d`", typ)
	}

	b.inode = capnode.Inode()
	return nil
}

// prefixSlash makes sure `s` starts with a "/".
func prefixSlash(s string) string {
	if !strings.HasPrefix(s, "/") {
		return "/" + s
	}

	return s
}

/////////////////////////////////////////
// MARSHAL HELPERS FOR ARBITRARY NODES //
/////////////////////////////////////////

// MarshalNode will convert any Node to a byte string
// Use UnmarshalNode to load a Node from it again.
func MarshalNode(nd Node) ([]byte, error) { msg, err := nd.ToCapnp() if err != nil { return nil, err } return msg.Marshal() } // UnmarshalNode will try to interpret data as a Node func UnmarshalNode(data []byte) (Node, error) { msg, err := capnp.Unmarshal(data) if err != nil { return nil, err } capNd, err := capnp_model.ReadRootNode(msg) if err != nil { return nil, err } return CapNodeToNode(capNd) } // CapNodeToNode converts a capnproto `capNd` to a normal `Node`. func CapNodeToNode(capNd capnp_model.Node) (Node, error) { // Find out the correct node struct to initialize. var node Node switch typ := capNd.Which(); typ { case capnp_model.Node_Which_ghost: node = &Ghost{} case capnp_model.Node_Which_file: node = &File{} case capnp_model.Node_Which_directory: node = &Directory{} case capnp_model.Node_Which_commit: node = &Commit{} default: return nil, fmt.Errorf("Bad capnp node type `%d`", typ) } if err := node.FromCapnpNode(capNd); err != nil { return nil, err } return node, nil } ////////////////////////// // GENERAL NODE HELPERS // ////////////////////////// // Depth returns the depth of the node. // It does this by looking at the path separators. // The depth of "/" is defined as 0. func Depth(nd Node) int { path := nd.Path() if path == "/" { return 0 } depth := 0 for _, rn := range path { if rn == '/' { depth++ } } return depth } // RemoveNode removes `nd` from it's parent directory using `lkr`. // Removing the root is a no-op. func RemoveNode(lkr Linker, nd Node) error { parDir, err := ParentDirectory(lkr, nd) if err != nil { return err } // Cannot remove root: if parDir == nil { return nil } return parDir.RemoveChild(lkr, nd) } // ParentDirectory returns the parent directory of `nd`. // For the root it will return nil. 
func ParentDirectory(lkr Linker, nd Node) (*Directory, error) { par, err := nd.Parent(lkr) if err != nil { return nil, err } if par == nil { return nil, nil } parDir, ok := par.(*Directory) if !ok { return nil, ie.ErrBadNode } return parDir, nil } // ContentHash returns the correct content hash for `nd`. // This also works for ghosts where the content hash is taken from the // underlying node (ghosts themselve have no content). func ContentHash(nd Node) (h.Hash, error) { switch nd.Type() { case NodeTypeDirectory, NodeTypeCommit, NodeTypeFile: return nd.ContentHash(), nil case NodeTypeGhost: ghost, ok := nd.(*Ghost) if !ok { return nil, e.Wrapf(ie.ErrBadNode, "cannot convert to ghost") } switch ghost.OldNode().Type() { case NodeTypeFile: oldFile, err := ghost.OldFile() if err != nil { return nil, err } return oldFile.ContentHash(), nil case NodeTypeDirectory: oldDirectory, err := ghost.OldDirectory() if err != nil { return nil, err } return oldDirectory.ContentHash(), nil } } return nil, ie.ErrBadNode } ================================================ FILE: catfs/nodes/capnp/nodes.capnp ================================================ using Go = import "/go.capnp"; @0x9195d073cb5c5953; $Go.package("capnp"); $Go.import("github.com/sahib/brig/catfs/nodes/capnp"); struct Commit $Go.doc("Commit is a set of changes to nodes") { # Following attributes will be part of the hash: message @0 :Text; author @1 :Text; parent @2 :Data; # Hash to parent. root @3 :Data; # Hash to root directory. index @4 :Int64; # Total number of commits. 
# Attributes not being part of the hash: merge :group { with @5 :Text; head @6 :Data; } } struct DirEntry $Go.doc("A single directory entry") { name @0 :Text; hash @1 :Data; } struct Directory $Go.doc("Directory contains one or more directories or files") { size @0 :UInt64; cachedSize @1 :Int64; parent @2 :Text; children @3 :List(DirEntry); contents @4 :List(DirEntry); } struct File $Go.doc("A leaf node in the MDAG") { size @0 :UInt64; cachedSize @1 :Int64; parent @2 :Text; key @3 :Data; # This indicates that the stream associated with this # file is not encoded by brig, but raw. We should not # attempt to decode it. isRaw @4 :Bool; } struct Ghost $Go.doc("Ghost indicates that a certain node was at this path once") { ghostInode @0 :UInt64; ghostPath @1 :Text; union { commit @2 :Commit; directory @3 :Directory; file @4 :File; } } struct Node $Go.doc("Node is a node in the merkle dag of brig") { name @0 :Text; treeHash @1 :Data; modTime @2 :Text; # Time as ISO8601 inode @3 :UInt64; contentHash @4 :Data; user @5 :Text; union { commit @6 :Commit; directory @7 :Directory; file @8 :File; ghost @9 :Ghost; } backendHash @10 :Data; } ================================================ FILE: catfs/nodes/capnp/nodes.capnp.go ================================================ // Code generated by capnpc-go. DO NOT EDIT. package capnp import ( strconv "strconv" capnp "zombiezen.com/go/capnproto2" text "zombiezen.com/go/capnproto2/encoding/text" schemas "zombiezen.com/go/capnproto2/schemas" ) // Commit is a set of changes to nodes type Commit struct{ capnp.Struct } type Commit_merge Commit // Commit_TypeID is the unique identifier for the type Commit. 
const Commit_TypeID = 0x8da013c66e545daf func NewCommit(s *capnp.Segment) (Commit, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 6}) return Commit{st}, err } func NewRootCommit(s *capnp.Segment) (Commit, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 6}) return Commit{st}, err } func ReadRootCommit(msg *capnp.Message) (Commit, error) { root, err := msg.RootPtr() return Commit{root.Struct()}, err } func (s Commit) String() string { str, _ := text.Marshal(0x8da013c66e545daf, s.Struct) return str } func (s Commit) Message() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s Commit) HasMessage() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Commit) MessageBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s Commit) SetMessage(v string) error { return s.Struct.SetText(0, v) } func (s Commit) Author() (string, error) { p, err := s.Struct.Ptr(1) return p.Text(), err } func (s Commit) HasAuthor() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Commit) AuthorBytes() ([]byte, error) { p, err := s.Struct.Ptr(1) return p.TextBytes(), err } func (s Commit) SetAuthor(v string) error { return s.Struct.SetText(1, v) } func (s Commit) Parent() ([]byte, error) { p, err := s.Struct.Ptr(2) return []byte(p.Data()), err } func (s Commit) HasParent() bool { p, err := s.Struct.Ptr(2) return p.IsValid() || err != nil } func (s Commit) SetParent(v []byte) error { return s.Struct.SetData(2, v) } func (s Commit) Root() ([]byte, error) { p, err := s.Struct.Ptr(3) return []byte(p.Data()), err } func (s Commit) HasRoot() bool { p, err := s.Struct.Ptr(3) return p.IsValid() || err != nil } func (s Commit) SetRoot(v []byte) error { return s.Struct.SetData(3, v) } func (s Commit) Index() int64 { return int64(s.Struct.Uint64(0)) } func (s Commit) SetIndex(v int64) { s.Struct.SetUint64(0, uint64(v)) } func (s 
Commit) Merge() Commit_merge { return Commit_merge(s) } func (s Commit_merge) With() (string, error) { p, err := s.Struct.Ptr(4) return p.Text(), err } func (s Commit_merge) HasWith() bool { p, err := s.Struct.Ptr(4) return p.IsValid() || err != nil } func (s Commit_merge) WithBytes() ([]byte, error) { p, err := s.Struct.Ptr(4) return p.TextBytes(), err } func (s Commit_merge) SetWith(v string) error { return s.Struct.SetText(4, v) } func (s Commit_merge) Head() ([]byte, error) { p, err := s.Struct.Ptr(5) return []byte(p.Data()), err } func (s Commit_merge) HasHead() bool { p, err := s.Struct.Ptr(5) return p.IsValid() || err != nil } func (s Commit_merge) SetHead(v []byte) error { return s.Struct.SetData(5, v) } // Commit_List is a list of Commit. type Commit_List struct{ capnp.List } // NewCommit creates a new list of Commit. func NewCommit_List(s *capnp.Segment, sz int32) (Commit_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 6}, sz) return Commit_List{l}, err } func (s Commit_List) At(i int) Commit { return Commit{s.List.Struct(i)} } func (s Commit_List) Set(i int, v Commit) error { return s.List.SetStruct(i, v.Struct) } func (s Commit_List) String() string { str, _ := text.MarshalList(0x8da013c66e545daf, s.List) return str } // Commit_Promise is a wrapper for a Commit promised by a client call. type Commit_Promise struct{ *capnp.Pipeline } func (p Commit_Promise) Struct() (Commit, error) { s, err := p.Pipeline.Struct() return Commit{s}, err } func (p Commit_Promise) Merge() Commit_merge_Promise { return Commit_merge_Promise{p.Pipeline} } // Commit_merge_Promise is a wrapper for a Commit_merge promised by a client call. 
type Commit_merge_Promise struct{ *capnp.Pipeline } func (p Commit_merge_Promise) Struct() (Commit_merge, error) { s, err := p.Pipeline.Struct() return Commit_merge{s}, err } // A single directory entry type DirEntry struct{ capnp.Struct } // DirEntry_TypeID is the unique identifier for the type DirEntry. const DirEntry_TypeID = 0x8b15ee76774b1f9d func NewDirEntry(s *capnp.Segment) (DirEntry, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) return DirEntry{st}, err } func NewRootDirEntry(s *capnp.Segment) (DirEntry, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) return DirEntry{st}, err } func ReadRootDirEntry(msg *capnp.Message) (DirEntry, error) { root, err := msg.RootPtr() return DirEntry{root.Struct()}, err } func (s DirEntry) String() string { str, _ := text.Marshal(0x8b15ee76774b1f9d, s.Struct) return str } func (s DirEntry) Name() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s DirEntry) HasName() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s DirEntry) NameBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s DirEntry) SetName(v string) error { return s.Struct.SetText(0, v) } func (s DirEntry) Hash() ([]byte, error) { p, err := s.Struct.Ptr(1) return []byte(p.Data()), err } func (s DirEntry) HasHash() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s DirEntry) SetHash(v []byte) error { return s.Struct.SetData(1, v) } // DirEntry_List is a list of DirEntry. type DirEntry_List struct{ capnp.List } // NewDirEntry creates a new list of DirEntry. 
func NewDirEntry_List(s *capnp.Segment, sz int32) (DirEntry_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) return DirEntry_List{l}, err } func (s DirEntry_List) At(i int) DirEntry { return DirEntry{s.List.Struct(i)} } func (s DirEntry_List) Set(i int, v DirEntry) error { return s.List.SetStruct(i, v.Struct) } func (s DirEntry_List) String() string { str, _ := text.MarshalList(0x8b15ee76774b1f9d, s.List) return str } // DirEntry_Promise is a wrapper for a DirEntry promised by a client call. type DirEntry_Promise struct{ *capnp.Pipeline } func (p DirEntry_Promise) Struct() (DirEntry, error) { s, err := p.Pipeline.Struct() return DirEntry{s}, err } // Directory contains one or more directories or files type Directory struct{ capnp.Struct } // Directory_TypeID is the unique identifier for the type Directory. const Directory_TypeID = 0xe24c59306c829c01 func NewDirectory(s *capnp.Segment) (Directory, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 3}) return Directory{st}, err } func NewRootDirectory(s *capnp.Segment) (Directory, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 3}) return Directory{st}, err } func ReadRootDirectory(msg *capnp.Message) (Directory, error) { root, err := msg.RootPtr() return Directory{root.Struct()}, err } func (s Directory) String() string { str, _ := text.Marshal(0xe24c59306c829c01, s.Struct) return str } func (s Directory) Size() uint64 { return s.Struct.Uint64(0) } func (s Directory) SetSize(v uint64) { s.Struct.SetUint64(0, v) } func (s Directory) CachedSize() int64 { return int64(s.Struct.Uint64(8)) } func (s Directory) SetCachedSize(v int64) { s.Struct.SetUint64(8, uint64(v)) } func (s Directory) Parent() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s Directory) HasParent() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Directory) ParentBytes() 
([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s Directory) SetParent(v string) error { return s.Struct.SetText(0, v) } func (s Directory) Children() (DirEntry_List, error) { p, err := s.Struct.Ptr(1) return DirEntry_List{List: p.List()}, err } func (s Directory) HasChildren() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Directory) SetChildren(v DirEntry_List) error { return s.Struct.SetPtr(1, v.List.ToPtr()) } // NewChildren sets the children field to a newly // allocated DirEntry_List, preferring placement in s's segment. func (s Directory) NewChildren(n int32) (DirEntry_List, error) { l, err := NewDirEntry_List(s.Struct.Segment(), n) if err != nil { return DirEntry_List{}, err } err = s.Struct.SetPtr(1, l.List.ToPtr()) return l, err } func (s Directory) Contents() (DirEntry_List, error) { p, err := s.Struct.Ptr(2) return DirEntry_List{List: p.List()}, err } func (s Directory) HasContents() bool { p, err := s.Struct.Ptr(2) return p.IsValid() || err != nil } func (s Directory) SetContents(v DirEntry_List) error { return s.Struct.SetPtr(2, v.List.ToPtr()) } // NewContents sets the contents field to a newly // allocated DirEntry_List, preferring placement in s's segment. func (s Directory) NewContents(n int32) (DirEntry_List, error) { l, err := NewDirEntry_List(s.Struct.Segment(), n) if err != nil { return DirEntry_List{}, err } err = s.Struct.SetPtr(2, l.List.ToPtr()) return l, err } // Directory_List is a list of Directory. type Directory_List struct{ capnp.List } // NewDirectory creates a new list of Directory. 
func NewDirectory_List(s *capnp.Segment, sz int32) (Directory_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 3}, sz) return Directory_List{l}, err } func (s Directory_List) At(i int) Directory { return Directory{s.List.Struct(i)} } func (s Directory_List) Set(i int, v Directory) error { return s.List.SetStruct(i, v.Struct) } func (s Directory_List) String() string { str, _ := text.MarshalList(0xe24c59306c829c01, s.List) return str } // Directory_Promise is a wrapper for a Directory promised by a client call. type Directory_Promise struct{ *capnp.Pipeline } func (p Directory_Promise) Struct() (Directory, error) { s, err := p.Pipeline.Struct() return Directory{s}, err } // A leaf node in the MDAG type File struct{ capnp.Struct } // File_TypeID is the unique identifier for the type File. const File_TypeID = 0x8ea7393d37893155 func NewFile(s *capnp.Segment) (File, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 2}) return File{st}, err } func NewRootFile(s *capnp.Segment) (File, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 2}) return File{st}, err } func ReadRootFile(msg *capnp.Message) (File, error) { root, err := msg.RootPtr() return File{root.Struct()}, err } func (s File) String() string { str, _ := text.Marshal(0x8ea7393d37893155, s.Struct) return str } func (s File) Size() uint64 { return s.Struct.Uint64(0) } func (s File) SetSize(v uint64) { s.Struct.SetUint64(0, v) } func (s File) CachedSize() int64 { return int64(s.Struct.Uint64(8)) } func (s File) SetCachedSize(v int64) { s.Struct.SetUint64(8, uint64(v)) } func (s File) Parent() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s File) HasParent() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s File) ParentBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s File) SetParent(v string) error { return 
s.Struct.SetText(0, v) } func (s File) Key() ([]byte, error) { p, err := s.Struct.Ptr(1) return []byte(p.Data()), err } func (s File) HasKey() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s File) SetKey(v []byte) error { return s.Struct.SetData(1, v) } func (s File) IsRaw() bool { return s.Struct.Bit(128) } func (s File) SetIsRaw(v bool) { s.Struct.SetBit(128, v) } // File_List is a list of File. type File_List struct{ capnp.List } // NewFile creates a new list of File. func NewFile_List(s *capnp.Segment, sz int32) (File_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 2}, sz) return File_List{l}, err } func (s File_List) At(i int) File { return File{s.List.Struct(i)} } func (s File_List) Set(i int, v File) error { return s.List.SetStruct(i, v.Struct) } func (s File_List) String() string { str, _ := text.MarshalList(0x8ea7393d37893155, s.List) return str } // File_Promise is a wrapper for a File promised by a client call. type File_Promise struct{ *capnp.Pipeline } func (p File_Promise) Struct() (File, error) { s, err := p.Pipeline.Struct() return File{s}, err } // Ghost indicates that a certain node was at this path once type Ghost struct{ capnp.Struct } type Ghost_Which uint16 const ( Ghost_Which_commit Ghost_Which = 0 Ghost_Which_directory Ghost_Which = 1 Ghost_Which_file Ghost_Which = 2 ) func (w Ghost_Which) String() string { const s = "commitdirectoryfile" switch w { case Ghost_Which_commit: return s[0:6] case Ghost_Which_directory: return s[6:15] case Ghost_Which_file: return s[15:19] } return "Ghost_Which(" + strconv.FormatUint(uint64(w), 10) + ")" } // Ghost_TypeID is the unique identifier for the type Ghost. 
const Ghost_TypeID = 0x80c828d7e89c12ea func NewGhost(s *capnp.Segment) (Ghost, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 2}) return Ghost{st}, err } func NewRootGhost(s *capnp.Segment) (Ghost, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 2}) return Ghost{st}, err } func ReadRootGhost(msg *capnp.Message) (Ghost, error) { root, err := msg.RootPtr() return Ghost{root.Struct()}, err } func (s Ghost) String() string { str, _ := text.Marshal(0x80c828d7e89c12ea, s.Struct) return str } func (s Ghost) Which() Ghost_Which { return Ghost_Which(s.Struct.Uint16(8)) } func (s Ghost) GhostInode() uint64 { return s.Struct.Uint64(0) } func (s Ghost) SetGhostInode(v uint64) { s.Struct.SetUint64(0, v) } func (s Ghost) GhostPath() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s Ghost) HasGhostPath() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Ghost) GhostPathBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s Ghost) SetGhostPath(v string) error { return s.Struct.SetText(0, v) } func (s Ghost) Commit() (Commit, error) { if s.Struct.Uint16(8) != 0 { panic("Which() != commit") } p, err := s.Struct.Ptr(1) return Commit{Struct: p.Struct()}, err } func (s Ghost) HasCommit() bool { if s.Struct.Uint16(8) != 0 { return false } p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Ghost) SetCommit(v Commit) error { s.Struct.SetUint16(8, 0) return s.Struct.SetPtr(1, v.Struct.ToPtr()) } // NewCommit sets the commit field to a newly // allocated Commit struct, preferring placement in s's segment. 
func (s Ghost) NewCommit() (Commit, error) { s.Struct.SetUint16(8, 0) ss, err := NewCommit(s.Struct.Segment()) if err != nil { return Commit{}, err } err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) return ss, err } func (s Ghost) Directory() (Directory, error) { if s.Struct.Uint16(8) != 1 { panic("Which() != directory") } p, err := s.Struct.Ptr(1) return Directory{Struct: p.Struct()}, err } func (s Ghost) HasDirectory() bool { if s.Struct.Uint16(8) != 1 { return false } p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Ghost) SetDirectory(v Directory) error { s.Struct.SetUint16(8, 1) return s.Struct.SetPtr(1, v.Struct.ToPtr()) } // NewDirectory sets the directory field to a newly // allocated Directory struct, preferring placement in s's segment. func (s Ghost) NewDirectory() (Directory, error) { s.Struct.SetUint16(8, 1) ss, err := NewDirectory(s.Struct.Segment()) if err != nil { return Directory{}, err } err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) return ss, err } func (s Ghost) File() (File, error) { if s.Struct.Uint16(8) != 2 { panic("Which() != file") } p, err := s.Struct.Ptr(1) return File{Struct: p.Struct()}, err } func (s Ghost) HasFile() bool { if s.Struct.Uint16(8) != 2 { return false } p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Ghost) SetFile(v File) error { s.Struct.SetUint16(8, 2) return s.Struct.SetPtr(1, v.Struct.ToPtr()) } // NewFile sets the file field to a newly // allocated File struct, preferring placement in s's segment. func (s Ghost) NewFile() (File, error) { s.Struct.SetUint16(8, 2) ss, err := NewFile(s.Struct.Segment()) if err != nil { return File{}, err } err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) return ss, err } // Ghost_List is a list of Ghost. type Ghost_List struct{ capnp.List } // NewGhost creates a new list of Ghost. 
func NewGhost_List(s *capnp.Segment, sz int32) (Ghost_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 2}, sz) return Ghost_List{l}, err } func (s Ghost_List) At(i int) Ghost { return Ghost{s.List.Struct(i)} } func (s Ghost_List) Set(i int, v Ghost) error { return s.List.SetStruct(i, v.Struct) } func (s Ghost_List) String() string { str, _ := text.MarshalList(0x80c828d7e89c12ea, s.List) return str } // Ghost_Promise is a wrapper for a Ghost promised by a client call. type Ghost_Promise struct{ *capnp.Pipeline } func (p Ghost_Promise) Struct() (Ghost, error) { s, err := p.Pipeline.Struct() return Ghost{s}, err } func (p Ghost_Promise) Commit() Commit_Promise { return Commit_Promise{Pipeline: p.Pipeline.GetPipeline(1)} } func (p Ghost_Promise) Directory() Directory_Promise { return Directory_Promise{Pipeline: p.Pipeline.GetPipeline(1)} } func (p Ghost_Promise) File() File_Promise { return File_Promise{Pipeline: p.Pipeline.GetPipeline(1)} } // Node is a node in the merkle dag of brig type Node struct{ capnp.Struct } type Node_Which uint16 const ( Node_Which_commit Node_Which = 0 Node_Which_directory Node_Which = 1 Node_Which_file Node_Which = 2 Node_Which_ghost Node_Which = 3 ) func (w Node_Which) String() string { const s = "commitdirectoryfileghost" switch w { case Node_Which_commit: return s[0:6] case Node_Which_directory: return s[6:15] case Node_Which_file: return s[15:19] case Node_Which_ghost: return s[19:24] } return "Node_Which(" + strconv.FormatUint(uint64(w), 10) + ")" } // Node_TypeID is the unique identifier for the type Node. 
const Node_TypeID = 0xa629eb7f7066fae3 func NewNode(s *capnp.Segment) (Node, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 7}) return Node{st}, err } func NewRootNode(s *capnp.Segment) (Node, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 7}) return Node{st}, err } func ReadRootNode(msg *capnp.Message) (Node, error) { root, err := msg.RootPtr() return Node{root.Struct()}, err } func (s Node) String() string { str, _ := text.Marshal(0xa629eb7f7066fae3, s.Struct) return str } func (s Node) Which() Node_Which { return Node_Which(s.Struct.Uint16(8)) } func (s Node) Name() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s Node) HasName() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Node) NameBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s Node) SetName(v string) error { return s.Struct.SetText(0, v) } func (s Node) TreeHash() ([]byte, error) { p, err := s.Struct.Ptr(1) return []byte(p.Data()), err } func (s Node) HasTreeHash() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Node) SetTreeHash(v []byte) error { return s.Struct.SetData(1, v) } func (s Node) ModTime() (string, error) { p, err := s.Struct.Ptr(2) return p.Text(), err } func (s Node) HasModTime() bool { p, err := s.Struct.Ptr(2) return p.IsValid() || err != nil } func (s Node) ModTimeBytes() ([]byte, error) { p, err := s.Struct.Ptr(2) return p.TextBytes(), err } func (s Node) SetModTime(v string) error { return s.Struct.SetText(2, v) } func (s Node) Inode() uint64 { return s.Struct.Uint64(0) } func (s Node) SetInode(v uint64) { s.Struct.SetUint64(0, v) } func (s Node) ContentHash() ([]byte, error) { p, err := s.Struct.Ptr(3) return []byte(p.Data()), err } func (s Node) HasContentHash() bool { p, err := s.Struct.Ptr(3) return p.IsValid() || err != nil } func (s Node) SetContentHash(v []byte) error { return 
s.Struct.SetData(3, v) } func (s Node) User() (string, error) { p, err := s.Struct.Ptr(4) return p.Text(), err } func (s Node) HasUser() bool { p, err := s.Struct.Ptr(4) return p.IsValid() || err != nil } func (s Node) UserBytes() ([]byte, error) { p, err := s.Struct.Ptr(4) return p.TextBytes(), err } func (s Node) SetUser(v string) error { return s.Struct.SetText(4, v) } func (s Node) Commit() (Commit, error) { if s.Struct.Uint16(8) != 0 { panic("Which() != commit") } p, err := s.Struct.Ptr(5) return Commit{Struct: p.Struct()}, err } func (s Node) HasCommit() bool { if s.Struct.Uint16(8) != 0 { return false } p, err := s.Struct.Ptr(5) return p.IsValid() || err != nil } func (s Node) SetCommit(v Commit) error { s.Struct.SetUint16(8, 0) return s.Struct.SetPtr(5, v.Struct.ToPtr()) } // NewCommit sets the commit field to a newly // allocated Commit struct, preferring placement in s's segment. func (s Node) NewCommit() (Commit, error) { s.Struct.SetUint16(8, 0) ss, err := NewCommit(s.Struct.Segment()) if err != nil { return Commit{}, err } err = s.Struct.SetPtr(5, ss.Struct.ToPtr()) return ss, err } func (s Node) Directory() (Directory, error) { if s.Struct.Uint16(8) != 1 { panic("Which() != directory") } p, err := s.Struct.Ptr(5) return Directory{Struct: p.Struct()}, err } func (s Node) HasDirectory() bool { if s.Struct.Uint16(8) != 1 { return false } p, err := s.Struct.Ptr(5) return p.IsValid() || err != nil } func (s Node) SetDirectory(v Directory) error { s.Struct.SetUint16(8, 1) return s.Struct.SetPtr(5, v.Struct.ToPtr()) } // NewDirectory sets the directory field to a newly // allocated Directory struct, preferring placement in s's segment. 
func (s Node) NewDirectory() (Directory, error) { s.Struct.SetUint16(8, 1) ss, err := NewDirectory(s.Struct.Segment()) if err != nil { return Directory{}, err } err = s.Struct.SetPtr(5, ss.Struct.ToPtr()) return ss, err } func (s Node) File() (File, error) { if s.Struct.Uint16(8) != 2 { panic("Which() != file") } p, err := s.Struct.Ptr(5) return File{Struct: p.Struct()}, err } func (s Node) HasFile() bool { if s.Struct.Uint16(8) != 2 { return false } p, err := s.Struct.Ptr(5) return p.IsValid() || err != nil } func (s Node) SetFile(v File) error { s.Struct.SetUint16(8, 2) return s.Struct.SetPtr(5, v.Struct.ToPtr()) } // NewFile sets the file field to a newly // allocated File struct, preferring placement in s's segment. func (s Node) NewFile() (File, error) { s.Struct.SetUint16(8, 2) ss, err := NewFile(s.Struct.Segment()) if err != nil { return File{}, err } err = s.Struct.SetPtr(5, ss.Struct.ToPtr()) return ss, err } func (s Node) Ghost() (Ghost, error) { if s.Struct.Uint16(8) != 3 { panic("Which() != ghost") } p, err := s.Struct.Ptr(5) return Ghost{Struct: p.Struct()}, err } func (s Node) HasGhost() bool { if s.Struct.Uint16(8) != 3 { return false } p, err := s.Struct.Ptr(5) return p.IsValid() || err != nil } func (s Node) SetGhost(v Ghost) error { s.Struct.SetUint16(8, 3) return s.Struct.SetPtr(5, v.Struct.ToPtr()) } // NewGhost sets the ghost field to a newly // allocated Ghost struct, preferring placement in s's segment. func (s Node) NewGhost() (Ghost, error) { s.Struct.SetUint16(8, 3) ss, err := NewGhost(s.Struct.Segment()) if err != nil { return Ghost{}, err } err = s.Struct.SetPtr(5, ss.Struct.ToPtr()) return ss, err } func (s Node) BackendHash() ([]byte, error) { p, err := s.Struct.Ptr(6) return []byte(p.Data()), err } func (s Node) HasBackendHash() bool { p, err := s.Struct.Ptr(6) return p.IsValid() || err != nil } func (s Node) SetBackendHash(v []byte) error { return s.Struct.SetData(6, v) } // Node_List is a list of Node. 
type Node_List struct{ capnp.List } // NewNode creates a new list of Node. func NewNode_List(s *capnp.Segment, sz int32) (Node_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 7}, sz) return Node_List{l}, err } func (s Node_List) At(i int) Node { return Node{s.List.Struct(i)} } func (s Node_List) Set(i int, v Node) error { return s.List.SetStruct(i, v.Struct) } func (s Node_List) String() string { str, _ := text.MarshalList(0xa629eb7f7066fae3, s.List) return str } // Node_Promise is a wrapper for a Node promised by a client call. type Node_Promise struct{ *capnp.Pipeline } func (p Node_Promise) Struct() (Node, error) { s, err := p.Pipeline.Struct() return Node{s}, err } func (p Node_Promise) Commit() Commit_Promise { return Commit_Promise{Pipeline: p.Pipeline.GetPipeline(5)} } func (p Node_Promise) Directory() Directory_Promise { return Directory_Promise{Pipeline: p.Pipeline.GetPipeline(5)} } func (p Node_Promise) File() File_Promise { return File_Promise{Pipeline: p.Pipeline.GetPipeline(5)} } func (p Node_Promise) Ghost() Ghost_Promise { return Ghost_Promise{Pipeline: p.Pipeline.GetPipeline(5)} } const schema_9195d073cb5c5953 = "x\xda\xb4\x96\xefk\x14\xd7\x1a\xc7\x9f\xef9\xb3;Y" + "\xd1\xbb\xd9;\x11D\\\xf7 \x0aQ\x12M\\/W" + "\x83\x17\x8d\xc6k\xcc\xcd\x95\x1cW\xb9*za\xdc=" + "\xd9\x1d\xdc\x9d\x093cc\xa4E[\x14\xb45E\xa9" + "\x82BBm\x89\xfd\x01-m\xff\x80\xd2\x1f`)\xed" + "\x9b\xd2\x17-\xed\xcb\xb6/j\x0b}]\xda\xea\x94\xb3" + "\xbf&\x86\xb4Ji\xdfe\x9e\xe7\xfc\xf8\x9e\xcf\xf3<" + "\xdfl\xdf\xcfl'\xebOx\x8cH\xaeJ$\xa3\xef" + "\xfe>\xf3\xed\xe7\xdd\x1f\x9e#\xb9\x1a,*\x1c9\xf6" + "q\xf0\xc9\xf5\xab\xb4\x87\x99\x1cF>\x8b5\xb0za" + "Z\xbd\xc8\xe5\x1d\xe4@\x88fs\xff\x99|\xec\x87\xe5" + "\xcfPf5\xe2\x0d\x09f\x12\xe5/\xb0\x01X\xd7\x99" + "i]g9\xeb=6I\x88\xde8~\xd0\xfd\xc0\xba" + "5\xad/\x98\xbf>\xa9\xd7g\xf9\x06X\xbd\xdc\xb4z" + "y.\x7f\x9c\xffO\x9f\x7f\xa8\xff\xd2?\xff\xb5\xed\xe5" + "g\xf5\x06\xbe\xf0\x82ic%\xacY\xc3\xb4f\x8d\\" 
+ "\xfe\x8eQ\x17\xf4\xf5O\xe3\x13g\xbf_\xff\xd2\xc2'" + "\x98\xa6\x01#\xffEb%\xac\xbb\x09\xd3\xba\x9b\xc8\xe5" + "\xb3I\x8f\x11\xa2\xb9oF\xbfL\xcf\xfd\xf8\x0e\xc9u" + "\x98\xa7py\xd2\x04Q\xfev\xc7Q\x10\xac\xb7:\xb4" + "|\xcc\x15V" + "<\xbf\xfd9a\xfb\xca\x0d[d\xd2\xbe\xe7\xb5?r" + "\x8e[R\xa7\x91 \x86\x04!WS~Y=\x0c\xdd" + "\xbf\x1d^U\x8b\x83[\xd1,\xf0\xfb\xd1\xa0\xa8*{" + "\\\xb8LO\x8d\xe3\x8a\xb0\xa2\xc4\x7f\x87\x06\xf7\xd2\x83" + "\xb3\xa2\x1f{\x9aC\x9e\x9f\xc7\xeaI=@\xe78\xe4" + "e\x064Q]\x1a\x88\x07(\xc3\x9bs2\xbd&\x1e" + "\x9f\x8cq\xae\xc1\xea\xaafu\x99C\xde`H\x07\xce" + "\x99x:\x8av\xb1\xa2J\x05\x87\xf8\x19\xd5zq\x8b" + "M\x13\x95yRM\xc5h\x82\x03\xf6$@\x0c\x8f`" + "2\xfbubq(k\x9b\xdd4\x82h\x7f\x9dF " + "\x0c\xbba'M05\xe5\x9f\xac*Q\xb2\xcb\xba\xbd" + "N\xf8N\x99 {Z\x94\xacu\xd8@T\x10\xe0(" + "\xf4 \x06e\xad\xc7\x08Q\xa1[\xc7\xb7 \xee+\xab" + "\x1f\xbb\x88\x0a=:\xbe\x15\x0cht\x96\xf5\x0fl&" + "*\xf4\xe9\xf0v\xbd\xdc\xe0ub\xd66\x9c *l" + "\xd5\xf1!\x1dO\x18]H\x10Y\x83\xf5k\xb7\xeb\xf8" + "0\x18\xb2\xc9(Jt!Id\xed\xc1\x00Qa\xa7" + "\xce\x8c\xea\x8cy_gL\"k\x1f\x0e\x10\x15\x86u" + "\xe6\xa0\xcet\xdc\xd3\x99\x0e\"K\xd6O\x1b\xd5\x99\xc3" + ":\x93\xfaEgRD\xd6\xa1\xba\xae1\x9d9\xa6\xef" + "_\x92\xec\xc2\x12\"\xebH]\xd7a\x1d/a\xc1\xac" + "G\xa1\xaf\xd4\xb0\x1dT\x88\xa8U\xb0\xb35\xaft\xd0" + "\x89\xd7\xe4\x1c\xcd8.\xbf\xe7\x86\xca\x0d\x87\xc9\x9cg" + "\x13\xe9S\x81\xf2\xff\x1a\xaf\xcc\xd5\xdd\x18\x9d\xf1\xff\xfb" + "\xe6a'\xec\xe2I\xe5\x96\x1e\x14\xd2\xee/\xe3\xb7\xfc" + "JK\xdbXS>/+m\x91\x9d\x8d*-\xf0\xc8" + "F\x81\x1e\xf4\xc8I'\xac\xc4\x1e\xa9\xec\xd2\xa3\xde9" + "\xd4\xb2fZ\xbc\xb1\xbb\x9b\x8d\xfd\"\xa2\xd6\xd2\xc4\x94" + "\xd0\x9cm\xc7\x0d\x84\xe7*\xe1\xf9\xa2\xe6\xf9\xaa\xed\xf2" + "\x8e\x0atl\xdc1\xabu\xdb\xfc\xd3\xac`\xa49\xf5" + "\xdaJ\x0d\xd6\xb0\x82\xdb#M\xdb|\xf7\x0fXAT" + "\xac8\xd5\x92\xaf\\\xdda\x7f#\x8cq\xa03\xfe%" + "F\xd0\xc1VS\x05\xbf\xb7\xe8\xd7\x00\x00\x00\xff\xff*" + "\xb6Z]" func init() { schemas.Register(schema_9195d073cb5c5953, 0x80c828d7e89c12ea, 
0x8b15ee76774b1f9d, 0x8da013c66e545daf, 0x8ea7393d37893155, 0xa629eb7f7066fae3, 0xbff8a40fda4ce4a4, 0xe24c59306c829c01)
}

================================================
FILE: catfs/nodes/commit.go
================================================

package nodes

import (
	"bytes"
	"fmt"
	"path"
	"time"

	capnp_model "github.com/sahib/brig/catfs/nodes/capnp"
	h "github.com/sahib/brig/util/hashlib"
	capnp "zombiezen.com/go/capnproto2"
)

const (
	// AuthorOfStage is the Person that is displayed for the stage commit.
	// Currently this is just an empty hash Person that will be set later.
	AuthorOfStage = "unknown"
)

// Commit groups a set of changes
type Commit struct {
	Base

	// Commit message (might be auto-generated)
	message string

	// Author is the id of the committer.
	author string

	// root is the tree hash of the root directory
	root h.Hash

	// Parent hash (only nil for initial commit)
	parent h.Hash

	// Index of the commit (first is 0, second 1 and so on)
	index int64

	// merge remembers if this commit was the result of a merge.
	merge struct {
		// With indicates with which person we merged.
		with string

		// head is a reference to the commit we merged with on
		// the remote side.
		head h.Hash
	}
}

// NewEmptyCommit creates a new, unboxed commit with the given inode and
// commit index. The author defaults to AuthorOfStage; the parent is not
// set here (use SetParent for that, it may stay nil for the very first
// commit). The returned error is currently always nil.
func NewEmptyCommit(inode uint64, index int64) (*Commit, error) {
	return &Commit{
		Base: Base{
			nodeType: NodeTypeCommit,
			inode:    inode,
			modTime:  time.Now(),
		},
		index:  index,
		author: AuthorOfStage,
	}, nil
}

// ToCapnp will convert all commit internals to a capnp message.
func (c *Commit) ToCapnp() (*capnp.Message, error) {
	msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}

	capNd, err := capnp_model.NewRootNode(seg)
	if err != nil {
		return nil, err
	}

	return msg, c.ToCapnpNode(seg, capNd)
}

// ToCapnpNode converts this node to a serializable capnp proto node.
func (c *Commit) ToCapnpNode(seg *capnp.Segment, capNd capnp_model.Node) error {
	// Base attributes (name, hashes, modtime, ...) go in first,
	// then the commit-specific union member is attached.
	if err := c.setBaseAttrsToNode(capNd); err != nil {
		return err
	}

	capCmt, err := c.setCommitAttrs(seg)
	if err != nil {
		return err
	}

	return capNd.SetCommit(*capCmt)
}

// setCommitAttrs allocates a capnp Commit in `seg` and copies message,
// root, author, parent, index and the merge marker into it.
func (c *Commit) setCommitAttrs(seg *capnp.Segment) (*capnp_model.Commit, error) {
	capCmt, err := capnp_model.NewCommit(seg)
	if err != nil {
		return nil, err
	}

	if err := capCmt.SetMessage(c.message); err != nil {
		return nil, err
	}

	if err := capCmt.SetRoot(c.root); err != nil {
		return nil, err
	}

	if err := capCmt.SetAuthor(c.author); err != nil {
		return nil, err
	}

	if err := capCmt.SetParent(c.parent); err != nil {
		return nil, err
	}

	capCmt.SetIndex(c.index)

	// Store merge infos:
	capmerge := capCmt.Merge()
	if err := capmerge.SetWith(c.merge.with); err != nil {
		return nil, err
	}

	if err := capmerge.SetHead(c.merge.head); err != nil {
		return nil, err
	}

	return &capCmt, nil
}

// FromCapnp will set the content of `msg` into the commit,
// overwriting any previous state.
func (c *Commit) FromCapnp(msg *capnp.Message) error {
	capNd, err := capnp_model.ReadRootNode(msg)
	if err != nil {
		return err
	}

	return c.FromCapnpNode(capNd)
}

// FromCapnpNode converts a serialized node to a normal node.
func (c *Commit) FromCapnpNode(capNd capnp_model.Node) error { if err := c.parseBaseAttrsFromNode(capNd); err != nil { return err } c.nodeType = NodeTypeCommit capCmt, err := capNd.Commit() if err != nil { return err } return c.readCommitAttrs(capCmt) } func (c *Commit) readCommitAttrs(capCmt capnp_model.Commit) error { var err error c.author, err = capCmt.Author() if err != nil { return err } c.message, err = capCmt.Message() if err != nil { return err } c.root, err = capCmt.Root() if err != nil { return err } c.parent, err = capCmt.Parent() if err != nil { return err } c.index = capCmt.Index() capMerge := capCmt.Merge() c.merge.head, err = capMerge.Head() if err != nil { return err } c.merge.with, err = capMerge.With() return err } // IsBoxed will return True if the ommit was already boxed // (i.e. is a finished commit and no staging commit) func (c *Commit) IsBoxed() bool { return c.tree != nil } // padHash will take a Hash and pad it's representation to 2048 bytes. // This is done so we can support different hash sizes later on. // We need fixed lengths for the hash calculation of a commit. func padHash(hash h.Hash) []byte { padded := make([]byte, 2048) copy(padded, hash.Bytes()) return padded } // Root returns the current root hash // You shall not modify the returned hash. func (c *Commit) Root() h.Hash { return c.root } // SetRoot sets the root directory of this commit. func (c *Commit) SetRoot(hash h.Hash) { c.root = hash.Clone() } // BoxCommit takes all currently filled data and calculates the final hash. // It also will update the modification time. // Only a boxed commit should be func (c *Commit) BoxCommit(author string, message string) error { if c.root == nil { return fmt.Errorf("Cannot box commit: root directory is empty") } c.author = author buf := &bytes.Buffer{} // If parent == nil, this will be EmptyBackendHash. buf.Write(padHash(c.parent)) // Write the root hash. buf.Write(padHash(c.root)) // Write the author hash. 
Different author -> different content. buf.Write(padHash(h.Sum([]byte(c.author)))) // Write the message last, it may be arbitrary length. buf.Write([]byte(message)) mh := h.Sum(buf.Bytes()) c.message = message c.tree = h.Hash(mh) return nil } // String will return a nice representation of a commit. func (c *Commit) String() string { return fmt.Sprintf( "", c.tree.B58String(), c.message, ) } // SetMergeMarker remembers that we merged with the user `with` // at this commit at `remoteHead`. func (c *Commit) SetMergeMarker(with string, remoteHead h.Hash) { c.merge.with = with c.merge.head = remoteHead.Clone() } // MergeMarker returns the merge info for this commit, if any. func (c *Commit) MergeMarker() (string, h.Hash) { return c.merge.with, c.merge.head } // /////////////////// METADATA INTERFACE /////////////////// // Name will return the hash of the commit. func (c *Commit) Name() string { return c.tree.B58String() } // Message will return the commit message of this commit func (c *Commit) Message() string { return c.message } // Path will return the path of the commit, which will func (c *Commit) Path() string { return prefixSlash(path.Join(".snapshots", c.Name())) } // Size will always return 0 since a commit has no defined size. // If you're interested in the size of the snapshot, check the size // of the root directory. func (c *Commit) Size() uint64 { return 0 } // CachedSize returns zero. // Same reasons as for Size() above. func (c *Commit) CachedSize() int64 { return 0 } // Index of the commit. First commit has the index 0, // next commit has the index 1 and so on. func (c *Commit) Index() int64 { return c.index } /////////////// HIERARCHY INTERFACE /////////////// // NChildren will always return 1, since a commit has always exactly one // root dir attached. func (c *Commit) NChildren() int { return 1 } // Child will return the root directory, no matter what name is given. 
func (c *Commit) Child(lkr Linker, _ string) (Node, error) { // Return the root directory, no matter what name was passed. return lkr.NodeByHash(c.root) } // Parent will return the parent commit of this node or nil // if it is the first commit ever made. func (c *Commit) Parent(lkr Linker) (Node, error) { if c.parent == nil { return nil, nil } return lkr.NodeByHash(c.parent) } // SetParent sets the parent of the commit to `nd`. func (c *Commit) SetParent(lkr Linker, nd Node) error { c.parent = nd.TreeHash().Clone() return nil } // SetModTime sets the commits modtime to `t`. // This should only be used for the most recent commit. func (c *Commit) SetModTime(t time.Time) { c.modTime = t } // Assert that Commit follows the Node interface: var _ Node = &Commit{} ================================================ FILE: catfs/nodes/commit_test.go ================================================ package nodes import ( "testing" h "github.com/sahib/brig/util/hashlib" "github.com/stretchr/testify/require" capnp "zombiezen.com/go/capnproto2" ) func TestCommit(t *testing.T) { cmt, err := NewEmptyCommit(0, 42) if err != nil { t.Fatalf("Failed to create commit: %v", err) } cmt.root = h.EmptyBackendHash cmt.parent = h.EmptyBackendHash cmt.Base.name = "some commit" cmt.SetMergeMarker(AuthorOfStage, h.TestDummy(t, 42)) if err := cmt.BoxCommit(AuthorOfStage, "Hello"); err != nil { t.Fatalf("Failed to box commit: %v", err) } msg, err := cmt.ToCapnp() if err != nil { t.Fatalf("Failed to convert commit to capnp: %v", err) } data, err := msg.Marshal() if err != nil { t.Fatalf("Failed to marshal message: %v", err) } newMsg, err := capnp.Unmarshal(data) if err != nil { t.Fatalf("Unmarshal failed: %v", err) } empty := &Commit{} if err := empty.FromCapnp(newMsg); err != nil { t.Fatalf("From failed: %v", err) } if empty.message != "Hello" { t.Fatalf("Bad message unmarshaled: %v", empty.message) } if !empty.root.Equal(h.EmptyBackendHash) { t.Fatalf("Bad root unmarshaled: %v", empty.root) } if 
!empty.parent.Equal(h.EmptyBackendHash) { t.Fatalf("Bad parent unmarshaled: %v", empty.root) } if empty.author != AuthorOfStage { t.Fatalf("Bad author unmarshaled: %v", empty.root) } if empty.index != 42 { t.Fatalf("Index did not match in umarshalled: %v", empty.index) } person, remoteHead := empty.MergeMarker() if !remoteHead.Equal(h.TestDummy(t, 42)) { t.Fatalf("Remote head was not loaded correctly: %v", remoteHead.Bytes()) } if person != AuthorOfStage { t.Fatalf("Person from unmarshaled commit does not equal staging author: %v", person) } empty.modTime = cmt.modTime require.Equal(t, empty, cmt) } ================================================ FILE: catfs/nodes/directory.go ================================================ package nodes import ( "errors" "fmt" "path" "sort" "strings" "time" ie "github.com/sahib/brig/catfs/errors" capnp_model "github.com/sahib/brig/catfs/nodes/capnp" h "github.com/sahib/brig/util/hashlib" capnp "zombiezen.com/go/capnproto2" ) // Directory is a typical directory that may contain // several other directories or files. type Directory struct { Base size uint64 cachedSize int64 // Negative indicates that it is unknown parentName string children map[string]h.Hash contents map[string]h.Hash order []string } // NewEmptyDirectory creates a new empty directory that does not exist yet. 
func NewEmptyDirectory( lkr Linker, parent *Directory, name string, user string, inode uint64, ) (*Directory, error) { absPath := "" if parent != nil { absPath = path.Join(parent.Path(), name) } newDir := &Directory{ Base: Base{ inode: inode, user: user, tree: h.Sum([]byte(absPath)), content: h.EmptyInternalHash.Clone(), backend: h.EmptyBackendHash.Clone(), name: name, nodeType: NodeTypeDirectory, modTime: time.Now().Truncate(time.Microsecond), }, children: make(map[string]h.Hash), contents: make(map[string]h.Hash), order: []string{}, } if parent != nil { // parentName is set by Add: if err := parent.Add(lkr, newDir); err != nil { return nil, err } } return newDir, nil } func (d *Directory) String() string { return fmt.Sprintf("", d.Path(), d.TreeHash(), d.Inode()) } // ToCapnp converts the directory to an easily serializable capnp message. func (d *Directory) ToCapnp() (*capnp.Message, error) { msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) if err != nil { return nil, err } capNd, err := capnp_model.NewRootNode(seg) if err != nil { return nil, err } return msg, d.ToCapnpNode(seg, capNd) } // ToCapnpNode converts this node to a serializable capnp proto node. 
func (d *Directory) ToCapnpNode(seg *capnp.Segment, capNd capnp_model.Node) error { if err := d.setBaseAttrsToNode(capNd); err != nil { return err } capDir, err := d.setDirectoryAttrs(seg) if err != nil { return err } return capNd.SetDirectory(*capDir) } func (d *Directory) setDirectoryAttrs(seg *capnp.Segment) (*capnp_model.Directory, error) { capDir, err := capnp_model.NewDirectory(seg) if err != nil { return nil, err } children, err := capnp_model.NewDirEntry_List(seg, int32(len(d.children))) if err != nil { return nil, err } entryIdx := 0 for name, hash := range d.children { entry, err := capnp_model.NewDirEntry(seg) if err != nil { return nil, err } if err := entry.SetName(name); err != nil { return nil, err } if err := entry.SetHash(hash); err != nil { return nil, err } if err := children.Set(entryIdx, entry); err != nil { return nil, err } entryIdx++ } if err := capDir.SetChildren(children); err != nil { return nil, err } contents, err := capnp_model.NewDirEntry_List(seg, int32(len(d.contents))) if err != nil { return nil, err } entryIdx = 0 for name, hash := range d.contents { entry, err := capnp_model.NewDirEntry(seg) if err != nil { return nil, err } if err := entry.SetName(name); err != nil { return nil, err } if err := entry.SetHash(hash); err != nil { return nil, err } if err := contents.Set(entryIdx, entry); err != nil { return nil, err } entryIdx++ } if err := capDir.SetContents(contents); err != nil { return nil, err } if err := capDir.SetParent(d.parentName); err != nil { return nil, err } capDir.SetSize(d.size) capDir.SetCachedSize(int64(d.size)) return &capDir, nil } // FromCapnp will take the result of ToCapnp and set all of it's attributes. func (d *Directory) FromCapnp(msg *capnp.Message) error { capNd, err := capnp_model.ReadRootNode(msg) if err != nil { return err } return d.FromCapnpNode(capNd) } // FromCapnpNode converts a serialized node to a normal node. 
func (d *Directory) FromCapnpNode(capNd capnp_model.Node) error {
	if err := d.parseBaseAttrsFromNode(capNd); err != nil {
		return err
	}

	capDir, err := capNd.Directory()
	if err != nil {
		return err
	}

	return d.readDirectoryAttr(capDir)
}

// readDirectoryAttr copies size, cachedSize, parent name and the
// children/contents entry lists from the capnp directory into `d`,
// rebuilding the lexicographic `order` slice from the children names.
func (d *Directory) readDirectoryAttr(capDir capnp_model.Directory) error {
	var err error

	d.size = capDir.Size()
	d.cachedSize = capDir.CachedSize()
	d.parentName, err = capDir.Parent()
	if err != nil {
		return err
	}

	childList, err := capDir.Children()
	if err != nil {
		return err
	}

	// NOTE(review): d.order is appended to without being reset first, so
	// loading into an already-populated Directory would duplicate entries.
	// Fine for freshly-constructed receivers; verify no caller reuses one.
	d.children = make(map[string]h.Hash)
	for i := 0; i < childList.Len(); i++ {
		entry := childList.At(i)
		name, err := entry.Name()
		if err != nil {
			return err
		}

		hash, err := entry.Hash()
		if err != nil {
			return err
		}

		d.children[name] = hash
		d.order = append(d.order, name)
	}

	contentList, err := capDir.Contents()
	if err != nil {
		return err
	}

	// Contents do not participate in `order`; only children do.
	d.contents = make(map[string]h.Hash)
	for i := 0; i < contentList.Len(); i++ {
		entry := contentList.At(i)
		name, err := entry.Name()
		if err != nil {
			return err
		}

		hash, err := entry.Hash()
		if err != nil {
			return err
		}

		d.contents[name] = hash
	}

	// Sort for a deterministic traversal order.
	sort.Strings(d.order)
	d.nodeType = NodeTypeDirectory
	return nil
}

////////////// NODE INTERFACE /////////////////

// Name returns the dirname of this directory.
func (d *Directory) Name() string {
	return d.name
}

// Size returns the accumulated size of the directory
// (i.e. the sum of a files in it, excluding ghosts)
func (d *Directory) Size() uint64 {
	return d.size
}

// CachedSize is similar to Size() above but for accumulated backends storage
func (d *Directory) CachedSize() int64 {
	return d.cachedSize
}

// Path returns the full path of this node.
func (d *Directory) Path() string {
	return prefixSlash(path.Join(d.parentName, d.Base.name))
}

// NChildren returns the number of children the directory has.
func (d *Directory) NChildren() int {
	return len(d.children)
}

// Child returns a specific child with `name` or nil, if it was not found.
func (d *Directory) Child(lkr Linker, name string) (Node, error) { childHash, ok := d.children[name] if !ok { return nil, nil } return lkr.NodeByHash(childHash) } // Parent will return the parent of this directory or nil, // if this directory is already the root directory. func (d *Directory) Parent(lkr Linker) (Node, error) { if d.parentName == "" { return nil, nil } return lkr.LookupNode(d.parentName) } // SetParent will set the parent of this directory to `nd`. func (d *Directory) SetParent(lkr Linker, nd Node) error { if d.Path() == "/" { return nil } if nd == nil { d.parentName = "" } else { d.parentName = nd.Path() } return nil } // ////////////// TREE MOVEMENT ///////////////// // VisitChildren will call `fn` for each of it's direct children. // The order of visits is lexicographical based on the child name. func (d *Directory) VisitChildren(lkr Linker, fn func(nd Node) error) error { for _, name := range d.order { hash := d.children[name] child, err := lkr.NodeByHash(hash) if err != nil { return err } if child == nil { return fmt.Errorf("BUG: dead link in tree: %s => %s", name, hash.B58String()) } if err := fn(child); err != nil { return err } } return nil } // ChildrenSorted returns a list of children node objects, sorted lexically by // their path. Use this whenever you want to have a defined order of nodes, // but do not really care what order. func (d *Directory) ChildrenSorted(lkr Linker) ([]Node, error) { children := []Node{} err := d.VisitChildren(lkr, func(nd Node) error { children = append(children, nd) return nil }) if err != nil { return nil, err } return children, nil } // Up will call `visit` for each node onto the way top to the root node, // including this directory. 
// Up resolves every directory on the path from the root down to `d` by
// following the child hash links, then calls `visit` on each of them in
// reverse (deepest first, root last).
func (d *Directory) Up(lkr Linker, visit func(par *Directory) error) error {
	root, err := lkr.Root()
	if err != nil {
		return err
	}

	elems := strings.Split(d.Path(), "/")
	dirs := []*Directory{root}
	curr := root

	for _, elem := range elems {
		// Splitting an absolute path yields empty fields (leading slash).
		if elem == "" {
			continue
		}

		childHash, ok := curr.children[elem]
		if !ok {
			// This usually means that some link is missing.
			return fmt.Errorf("bug: cannot reach self from root in up()")
		}

		childNode, err := lkr.NodeByHash(childHash)
		if err != nil {
			return err
		}

		child, ok := childNode.(*Directory)
		if !ok {
			return fmt.Errorf("bug: non-directory in up(): %v", childHash)
		}

		dirs = append(dirs, child)
		curr = child
	}

	// Visit the nodes in reverse order, self first, root last:
	for idx := len(dirs) - 1; idx >= 0; idx-- {
		if err := visit(dirs[idx]); err != nil {
			return err
		}
	}

	return nil
}

// IsRoot returns true if this directory is the root directory.
func (d *Directory) IsRoot() bool {
	return d.parentName == ""
}

// ErrSkipChild can be returned inside a Walk() closure to stop descending
// recursively into a directory.
var ErrSkipChild = errors.New("skip sub directory")

// Walk calls `visit` for each node below `node`, including `node`.
// If `dfs` is true, depth first search will be used.
// If `dfs` is false, breadth first search will be used.
// It is valid to pass a File to Walk(), then visit will be called exactly once.
//
// It is possible to return the special error value ErrSkipChild in the callback.
// In this case, the children of this node are skipped.
// For this to work, `dfs` has to be false.
func Walk(lkr Linker, node Node, dfs bool, visit func(child Node) error) error {
	if node == nil {
		return nil
	}

	// Non-directories (files, ghosts) have no children: visit once.
	if node.Type() != NodeTypeDirectory {
		err := visit(node)
		if err == ErrSkipChild {
			return nil
		}
		return err
	}

	d, ok := node.(*Directory)
	if !ok {
		return ie.ErrBadNode
	}

	// BFS: visit the directory before its children, so ErrSkipChild
	// can still prune the subtree below.
	if !dfs {
		if err := visit(node); err != nil {
			if err == ErrSkipChild {
				return nil
			}
			return err
		}
	}

	for _, name := range d.order {
		hash := d.children[name]
		child, err := lkr.NodeByHash(hash)
		if err != nil {
			return err
		}

		if child == nil {
			return fmt.Errorf("walk: could not resolve %s (%s)", name, hash.B58String())
		}

		if err := Walk(lkr, child, dfs, visit); err != nil {
			return err
		}
	}

	// DFS: the children were already visited, so skipping them
	// is no longer possible at this point.
	if dfs {
		if err := visit(node); err != nil {
			if err == ErrSkipChild {
				panic("bug: you cannot use dfs=true and ErrSkipChild together")
			}
			return err
		}
	}

	return nil
}

// Lookup will lookup `repoPath` relative to this directory.
func (d *Directory) Lookup(lkr Linker, repoPath string) (Node, error) {
	repoPath = prefixSlash(path.Clean(repoPath))
	elems := strings.Split(repoPath, "/")

	// Strip off the first empty field:
	elems = elems[1:]

	// A cleaned absolute path of "/" splits to a single empty element.
	if len(elems) == 1 && elems[0] == "" {
		return d, nil
	}

	var curr Node = d
	var err error

	for idx, elem := range elems {
		curr, err = curr.Child(lkr, elem)
		if err != nil {
			return nil, err
		}

		if curr == nil {
			return nil, ie.NoSuchFile(repoPath)
		}

		// If the child is a ghost and we did not fully resolve the path
		// yet we stop here. If it's the ghost of a directory we could
		// resolve its children, but that would be confusing.
		if curr.Type() == NodeTypeGhost && idx != len(elems)-1 {
			return nil, ie.NoSuchFile(repoPath)
		}
	}

	return curr, nil
}

//////////// STATE ALTERING METHODS //////////////

// SetSize sets the size of this directory.
func (d *Directory) SetSize(size uint64) {
	d.size = size
}

// SetCachedSize sets the cached size of the directory.
func (d *Directory) SetCachedSize(cachedSize int64) {
	d.cachedSize = cachedSize
}

// SetName will set the name of this directory.
func (d *Directory) SetName(name string) { d.name = name } // SetModTime will set a new mod time to this directory (i.e. "touch" it) func (d *Directory) SetModTime(modTime time.Time) { d.Base.modTime = modTime.Truncate(time.Microsecond) } // Copy returns a copy of the directory with `inode` changed. func (d *Directory) Copy(inode uint64) ModNode { children := make(map[string]h.Hash) contents := make(map[string]h.Hash) for name, hash := range d.children { children[name] = hash.Clone() } for name, hash := range d.contents { contents[name] = hash.Clone() } order := make([]string, len(d.order)) copy(order, d.order) return &Directory{ Base: d.Base.copyBase(inode), size: d.size, parentName: d.parentName, children: children, contents: contents, order: order, } } func (d *Directory) rehash(lkr Linker, updateContentHash bool) error { newTreeHash := h.Sum([]byte(path.Join(d.parentName, d.name))) newContentHash := h.EmptyInternalHash.Clone() for _, name := range d.order { newTreeHash = newTreeHash.Mix(d.children[name]) if childContent := d.contents[name]; updateContentHash && childContent != nil { // The child content might be nil in case of ghost. // Those should not add to the content calculation. newContentHash = newContentHash.Mix(childContent) } } oldHash := d.tree.Clone() d.tree = newTreeHash if updateContentHash { d.content = newContentHash } lkr.MemIndexSwap(d, oldHash, true) return nil } // Add `nd` to this directory using `lkr`. func (d *Directory) Add(lkr Linker, nd Node) error { if nd == d { return fmt.Errorf("bug: attempting to add `%s` to itself", nd.Path()) } if _, ok := d.children[nd.Name()]; ok { twin, err := d.Child(lkr, nd.Name()) if err != nil { return ie.ErrExists } if twin.Type() != NodeTypeGhost { return ie.ErrExists } // Twin is a ghost. We delete it to clear space for a new (added) node. 
err = d.RemoveChild(lkr, twin) if err != nil { // the ghost twin stays and we report it as existing return ie.ErrExists } } nodeSize := nd.Size() nodeCachedSize := nd.CachedSize() nodeHash := nd.TreeHash() nodeContent := nd.ContentHash() d.children[nd.Name()] = nodeHash if nd.Type() != NodeTypeGhost { d.contents[nd.Name()] = nodeContent } nameIdx := sort.SearchStrings(d.order, nd.Name()) suffix := append([]string{nd.Name()}, d.order[nameIdx:]...) d.order = append(d.order[:nameIdx], suffix...) var lastNd Node err := d.Up(lkr, func(parent *Directory) error { if nd.Type() != NodeTypeGhost { // Only add to the size if it's not a ghost. // They do not really count as size. // Same goes for the node content. parent.size += nodeSize parent.cachedSize += nodeCachedSize } if lastNd != nil { parent.children[lastNd.Name()] = lastNd.TreeHash() if nd.Type() != NodeTypeGhost { parent.contents[lastNd.Name()] = lastNd.ContentHash() } } if err := parent.rehash(lkr, true); err != nil { return err } lastNd = parent return nil }) if err != nil { return err } // Establish the link between parent and child: return nd.SetParent(lkr, d) } // RemoveChild removes the child named `name` from it's children. // There is no way to remove the root node. func (d *Directory) RemoveChild(lkr Linker, nd Node) error { name := nd.Name() if _, ok := d.children[name]; !ok { return ie.NoSuchFile(name) } // Unset parent from child: if err := nd.SetParent(lkr, nil); err != nil { return err } // Delete it from orders and children. // This assumes that it definitely was part of orders before. delete(d.children, name) delete(d.contents, name) nameIdx := sort.SearchStrings(d.order, name) d.order = append(d.order[:nameIdx], d.order[nameIdx+1:]...) 
var lastNd Node nodeSize := nd.Size() nodeCachedSize := nd.CachedSize() return d.Up(lkr, func(parent *Directory) error { if nd.Type() != NodeTypeGhost { parent.size -= nodeSize parent.cachedSize -= nodeCachedSize } if lastNd != nil { parent.children[lastNd.Name()] = lastNd.TreeHash() if nd.Type() != NodeTypeGhost { parent.contents[lastNd.Name()] = lastNd.ContentHash() } } if err := parent.rehash(lkr, true); err != nil { return err } lastNd = parent return nil }) } func (d *Directory) rebuildOrderCache() { d.order = []string{} for name := range d.children { d.order = append(d.order, name) } sort.Strings(d.order) } // NotifyMove should be called whenever a node is being moved. func (d *Directory) NotifyMove(lkr Linker, newParent *Directory, newPath string) error { visited := map[string]Node{} oldRootPath := d.Path() err := Walk(lkr, d, true, func(child Node) error { oldChildPath := child.Path() newChildPath := path.Join(newPath, oldChildPath[len(oldRootPath):]) visited[newChildPath] = child switch child.Type() { case NodeTypeDirectory: childDir, ok := child.(*Directory) if !ok { return ie.ErrBadNode } for name := range childDir.children { movedChildPath := path.Join(newChildPath, name) childDir.children[name] = visited[movedChildPath].TreeHash() } if err := childDir.rehash(lkr, false); err != nil { return err } dirname, basename := path.Split(newChildPath) childDir.parentName = dirname childDir.SetName(basename) return nil case NodeTypeFile: childFile, ok := child.(*File) if !ok { return ie.ErrBadNode } if err := childFile.NotifyMove(lkr, nil, newChildPath); err != nil { return err } case NodeTypeGhost: childGhost, ok := child.(*Ghost) if !ok { return ie.ErrBadNode } childGhost.SetGhostPath(newChildPath) default: return fmt.Errorf("bad node type in NotifyMove(): %d", child.Type()) } return nil }) if err != nil { return err } // Fixup the links from the parents to the children: for nodePath, node := range visited { if parent, ok := visited[path.Dir(nodePath)]; ok { 
parentDir := parent.(*Directory) parentDir.children[path.Base(nodePath)] = node.TreeHash() parentDir.rebuildOrderCache() } } if err := newParent.Add(lkr, d); err != nil { return err } newParent.rebuildOrderCache() return nil } // SetUser sets the user that last modified the directory. func (d *Directory) SetUser(user string) { d.Base.user = user } // Assert that Directory follows the Node interface: var _ ModNode = &Directory{} ================================================ FILE: catfs/nodes/directory_test.go ================================================ package nodes import ( "testing" ie "github.com/sahib/brig/catfs/errors" "github.com/stretchr/testify/require" capnp "zombiezen.com/go/capnproto2" ) func TestDirectoryBasics(t *testing.T) { lkr := NewMockLinker() repoDir, err := NewEmptyDirectory(lkr, nil, "", "a", 1) lkr.MemSetRoot(repoDir) lkr.AddNode(repoDir, true) if err != nil { t.Fatalf("Failed to create empty dir: %v", err) } subDir, err := NewEmptyDirectory(lkr, repoDir, "sub", "b", 2) if err != nil { t.Fatalf("Failed to create empty sub dir: %v", err) } lkr.AddNode(subDir, true) if err := repoDir.Add(lkr, subDir); err != ie.ErrExists { t.Fatalf("Adding sub/ to repo/ worked twice: %v", err) } // Fake size here. 
repoDir.size = 3 repoDir.cachedSize = 3 msg, err := repoDir.ToCapnp() if err != nil { t.Fatalf("Failed to convert repo dir to capnp: %v", err) } data, err := msg.Marshal() if err != nil { t.Fatalf("Failed to marshal message: %v", err) } newMsg, err := capnp.Unmarshal(data) if err != nil { t.Fatalf("Unmarshal failed: %v", err) } empty := &Directory{} if err := empty.FromCapnp(newMsg); err != nil { t.Fatalf("From capnp failed: %v", err) } if empty.size != 3 { t.Fatalf("Root size was not loaded correctly: %v", err) } if empty.parentName != "" { t.Fatalf("Root parentName as not loaded correctly: %v", err) } if empty.Inode() != 1 { t.Fatalf("Inode was not loaded correctly: %v != 1", empty.Inode()) } if subHash, ok := empty.children["sub"]; ok { if !subHash.Equal(subDir.TreeHash()) { t.Fatalf("Unmarshaled hash differs (!= sub): %v", subDir.TreeHash()) } } else { t.Fatalf("Root children do not contain sub") } empty.modTime = repoDir.modTime require.Equal(t, empty, repoDir) } ================================================ FILE: catfs/nodes/doc.go ================================================ // Package nodes implements all nodes and defines basic operations on it. // // It however does not implement any specific database scheme, nor // are any operations implemented that require knowledge of other nodes. // If knowledge about other nodes is required, the Linker interface needs // to be fulfilled by a higher level. // // The actual core of brig is built upon this package. // Any changes here should thus be well thought through. package nodes ================================================ FILE: catfs/nodes/file.go ================================================ package nodes import ( "fmt" "path" "time" capnp_model "github.com/sahib/brig/catfs/nodes/capnp" h "github.com/sahib/brig/util/hashlib" capnp "zombiezen.com/go/capnproto2" ) // File represents a single file in the repository. // It stores all metadata about it and links to the actual data. 
type File struct {
	Base

	// size is the plain content size in bytes.
	size uint64
	// cachedSize is the size in the backend storage.
	cachedSize int64 // Negative indicates that it is unknown
	// parent is the path of the parent directory.
	parent string
	// key is the encryption key for the file's stream.
	key []byte
	// isRaw marks streams that must not be decoded.
	isRaw bool
}

// NewEmptyFile returns a newly created file under `parent`, named `name`.
func NewEmptyFile(parent *Directory, name string, user string, inode uint64) *File {
	return &File{
		Base: Base{
			name: name,
			user: user,
			inode: inode,
			// Truncate to microseconds to survive serialization round trips.
			modTime: time.Now().Truncate(time.Microsecond),
			nodeType: NodeTypeFile,
		},
		parent: parent.Path(),
	}
}

// ToCapnp converts a file to a capnp message.
func (f *File) ToCapnp() (*capnp.Message, error) {
	msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}

	capNd, err := capnp_model.NewRootNode(seg)
	if err != nil {
		return nil, err
	}

	return msg, f.ToCapnpNode(seg, capNd)
}

// ToCapnpNode converts this node to a serializable capnp proto node.
func (f *File) ToCapnpNode(seg *capnp.Segment, capNd capnp_model.Node) error {
	if err := f.setBaseAttrsToNode(capNd); err != nil {
		return err
	}

	capFile, err := f.setFileAttrs(seg)
	if err != nil {
		return err
	}

	return capNd.SetFile(*capFile)
}

// setFileAttrs fills a fresh capnp File struct with the file specific
// attributes (parent, key, sizes, raw flag).
func (f *File) setFileAttrs(seg *capnp.Segment) (*capnp_model.File, error) {
	capFile, err := capnp_model.NewFile(seg)
	if err != nil {
		return nil, err
	}

	if err := capFile.SetParent(f.parent); err != nil {
		return nil, err
	}

	if err := capFile.SetKey(f.key); err != nil {
		return nil, err
	}

	capFile.SetSize(f.size)
	capFile.SetCachedSize(f.cachedSize)
	capFile.SetIsRaw(f.isRaw)
	return &capFile, nil
}

// FromCapnp sets all state of `msg` into the file.
func (f *File) FromCapnp(msg *capnp.Message) error {
	capNd, err := capnp_model.ReadRootNode(msg)
	if err != nil {
		return err
	}

	return f.FromCapnpNode(capNd)
}

// FromCapnpNode converts a serialized node to a normal node.
func (f *File) FromCapnpNode(capNd capnp_model.Node) error {
	if err := f.parseBaseAttrsFromNode(capNd); err != nil {
		return err
	}

	capFile, err := capNd.File()
	if err != nil {
		return err
	}

	return f.readFileAttrs(capFile)
}

// readFileAttrs restores the file specific attributes from a capnp File
// struct. It is the inverse of setFileAttrs.
func (f *File) readFileAttrs(capFile capnp_model.File) error {
	var err error

	f.parent, err = capFile.Parent()
	if err != nil {
		return err
	}

	f.isRaw = capFile.IsRaw()
	f.nodeType = NodeTypeFile
	f.size = capFile.Size()
	f.cachedSize = capFile.CachedSize()
	f.key, err = capFile.Key()
	return err
}

////////////////// METADATA INTERFACE //////////////////

// Size returns the number of bytes in the file's content.
func (f *File) Size() uint64 {
	return f.size
}

// CachedSize returns the number of bytes in the file's backend storage.
func (f *File) CachedSize() int64 {
	return f.cachedSize
}

////////////////// ATTRIBUTE SETTERS //////////////////

// SetModTime udates the mod time of the file (i.e. "touch"es it)
func (f *File) SetModTime(t time.Time) {
	// Truncate to microseconds to survive serialization round trips.
	f.modTime = t.Truncate(time.Microsecond)
}

// SetName set the name of the file.
func (f *File) SetName(n string) {
	f.name = n
}

// SetIsRaw sets the isRaw attribute
func (f *File) SetIsRaw(isRaw bool) {
	f.isRaw = isRaw
}

// SetKey updates the key to a new value, taking ownership of the value.
func (f *File) SetKey(k []byte) {
	f.key = k
}

// SetSize will update the size of the file and update it's mod time.
func (f *File) SetSize(s uint64) {
	f.size = s
	f.SetModTime(time.Now())
}

// SetCachedSize will update the cached size of the file and update it's mod time.
func (f *File) SetCachedSize(s int64) {
	f.cachedSize = s
	f.SetModTime(time.Now())
}

// Copy copies the contents of the file, except `inode`.
func (f *File) Copy(inode uint64) ModNode { if f == nil { return nil } var copyKey []byte if f.key != nil { copyKey = make([]byte, len(f.key)) copy(copyKey, f.key) } return &File{ Base: f.Base.copyBase(inode), size: f.size, cachedSize: f.cachedSize, parent: f.parent, key: copyKey, } } func (f *File) rehash(lkr Linker, newPath string) { oldHash := f.tree.Clone() var contentHash h.Hash if f.Base.content != nil { contentHash = f.Base.content.Clone() } else { contentHash = h.EmptyInternalHash.Clone() } f.tree = h.Sum([]byte(fmt.Sprintf("%s|%s", newPath, contentHash))) lkr.MemIndexSwap(f, oldHash, true) } // NotifyMove should be called when the node moved parents. func (f *File) NotifyMove(lkr Linker, newParent *Directory, newPath string) error { dirname, basename := path.Split(newPath) f.SetName(basename) f.parent = dirname f.rehash(lkr, newPath) if newParent != nil { if err := newParent.Add(lkr, f); err != nil { return err } newParent.rebuildOrderCache() } return nil } // SetContent will update the hash of the file (and also the mod time) func (f *File) SetContent(lkr Linker, content h.Hash) { f.Base.content = content f.rehash(lkr, f.Path()) f.SetModTime(time.Now()) } // SetBackend will update the hash of the file (and also the mod time) func (f *File) SetBackend(lkr Linker, backend h.Hash) { f.Base.backend = backend f.SetModTime(time.Now()) } func (f *File) String() string { return fmt.Sprintf( "", f.Path(), f.TreeHash(), f.Inode(), f.IsRaw(), ) } // Path will return the absolute path of the file. func (f *File) Path() string { return prefixSlash(path.Join(f.parent, f.name)) } // IsRaw returns if the file is associated with a raw stream. // raw streams should not be decoded. func (f *File) IsRaw() bool { return f.isRaw } ////////////////// HIERARCHY INTERFACE ////////////////// // NChildren returns the number of children this file node has. func (f *File) NChildren() int { return 0 } // Child will return always nil, since files don't have children. 
func (f *File) Child(_ Linker, name string) (Node, error) {
	// A file never has a child. Sad but true.
	return nil, nil
}

// Parent returns the parent directory of File.
// If `f` is already the root, it will return itself (and never nil).
func (f *File) Parent(lkr Linker) (Node, error) {
	return lkr.LookupNode(f.parent)
}

// SetParent will set the parent of the file to `parent`.
// A nil parent is ignored (the old parent path is kept).
func (f *File) SetParent(_ Linker, parent Node) error {
	if parent == nil {
		return nil
	}

	f.parent = parent.Path()
	return nil
}

// Key returns the current key of the file.
func (f *File) Key() []byte {
	return f.key
}

// SetUser sets the user that last modified the file.
func (f *File) SetUser(user string) {
	f.Base.user = user
}

// Interface check for debugging:
var _ ModNode = &File{}
var _ Streamable = &File{}



================================================
FILE: catfs/nodes/file_test.go
================================================
package nodes

import (
	"bytes"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	capnp "zombiezen.com/go/capnproto2"
)

// TestFile checks that a File survives a capnp marshal/unmarshal round trip.
func TestFile(t *testing.T) {
	lkr := NewMockLinker()
	root, err := NewEmptyDirectory(lkr, nil, "", "a", 2)
	if err != nil {
		t.Fatalf("Failed to create root dir: %v", err)
	}

	lkr.AddNode(root, true)
	lkr.MemSetRoot(root)

	file := NewEmptyFile(root, "some file", "a", 3)
	lkr.AddNode(file, true)
	if err != nil {
		t.Fatalf("Failed to create empty file: %v", err)
	}

	file.SetName("new_name")
	file.SetKey([]byte{1, 2, 3})
	file.SetSize(42)
	file.SetContent(lkr, []byte{4, 5, 6})
	file.SetBackend(lkr, []byte{7, 8, 9})
	hashBeforeUnmarshal := file.TreeHash().Clone()

	now := time.Now()
	file.SetModTime(now)

	msg, err := file.ToCapnp()
	if err != nil {
		t.Fatalf("Failed to convert repo dir to capnp: %v", err)
	}

	data, err := msg.Marshal()
	if err != nil {
		t.Fatalf("Failed to marshal message: %v", err)
	}

	newMsg, err := capnp.Unmarshal(data)
	if err != nil {
		t.Fatalf("Unmarshal failed: %v", err)
	}

	empty := &File{}
	if err := empty.FromCapnp(newMsg); err != nil {
		t.Fatalf("From capnp failed: %v", err)
	}

	if empty.Name() != "new_name" {
		t.Fatalf("Name differs after unmarshal: %v", empty.Name())
	}

	if empty.ModTime() != now.Truncate(time.Microsecond) {
		t.Fatalf("modtime differs after unmarshal: %v Want: %v", now, empty.ModTime())
	}

	if empty.Size() != 42 {
		t.Fatalf("size differs after unmarshal: %v", empty.Size())
	}

	if !bytes.Equal(empty.Key(), []byte{1, 2, 3}) {
		t.Fatalf("key differs after unmarshal: %v", empty.Key())
	}

	if !bytes.Equal(empty.TreeHash(), hashBeforeUnmarshal) {
		t.Fatalf("tree hash differs after unmarshal: %v", empty.TreeHash())
	}

	if !bytes.Equal(empty.BackendHash(), []byte{7, 8, 9}) {
		t.Fatalf("backend hash differs after unmarshal: %v", empty.BackendHash())
	}

	if !bytes.Equal(empty.ContentHash(), []byte{4, 5, 6}) {
		t.Fatalf("content hash differs after unmarshal: %v", empty.ContentHash())
	}

	empty.modTime = file.modTime
	require.Equal(t, empty, file)
}



================================================
FILE: catfs/nodes/ghost.go
================================================
package nodes

import (
	"fmt"

	ie "github.com/sahib/brig/catfs/errors"
	capnp_model "github.com/sahib/brig/catfs/nodes/capnp"
	h "github.com/sahib/brig/util/hashlib"
	capnp "zombiezen.com/go/capnproto2"
)

// Ghost is a special kind of Node that marks a moved node.
// If a file was moved, a ghost will be created for the old place.
// If another file is moved to the new place, the ghost will be "resurrected"
// with the new content.
type Ghost struct {
	// ModNode embeds the old node the ghost replaced.
	ModNode

	// ghostPath is the path of the ghost itself (may differ from ModNode's).
	ghostPath string
	// ghostInode is the ghost's own inode; it differs from ModNode's.
	ghostInode uint64
	// oldType remembers whether the old node was a file or directory.
	oldType NodeType
}

// MakeGhost takes an existing node and converts it to a ghost.
// In the ghost form no metadata is lost, but the node should
// not show up. `inode` will be the new inode of the ghost.
// It should differ to the previous node.
func MakeGhost(nd ModNode, inode uint64) (*Ghost, error) { if nd.Type() == NodeTypeGhost { panic("cannot put a ghost in a ghost") } return &Ghost{ ModNode: nd.Copy(nd.Inode()), oldType: nd.Type(), ghostInode: inode, ghostPath: nd.Path(), }, nil } // Type always returns NodeTypeGhost func (g *Ghost) Type() NodeType { return NodeTypeGhost } // OldNode returns the node the ghost was when it still was alive. func (g *Ghost) OldNode() ModNode { return g.ModNode } // OldFile returns the file the ghost was when it still was alive. // Returns ErrBadNode when it wasn't a file. func (g *Ghost) OldFile() (*File, error) { file, ok := g.ModNode.(*File) if !ok { return nil, ie.ErrBadNode } return file, nil } // OldDirectory returns the old directory that the node was in lifetime // If the ghost was not a directory, ErrBadNode is returned. func (g *Ghost) OldDirectory() (*Directory, error) { directory, ok := g.ModNode.(*Directory) if !ok { return nil, ie.ErrBadNode } return directory, nil } func (g *Ghost) String() string { return fmt.Sprintf("", g.TreeHash(), g.ModNode) } // Path returns the path of the node. func (g *Ghost) Path() string { return g.ghostPath } // TreeHash returns the hash of the node. func (g *Ghost) TreeHash() h.Hash { return h.Sum([]byte(fmt.Sprintf("ghost:%s", g.ModNode.TreeHash()))) } // Inode returns the inode func (g *Ghost) Inode() uint64 { return g.ghostInode } // SetGhostPath sets the path of the ghost. func (g *Ghost) SetGhostPath(newPath string) { g.ghostPath = newPath } // ToCapnp serializes the underlying node func (g *Ghost) ToCapnp() (*capnp.Message, error) { msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) if err != nil { return nil, err } capNd, err := capnp_model.NewRootNode(seg) if err != nil { return nil, err } return msg, g.ToCapnpNode(seg, capNd) } // ToCapnpNode converts this node to a serializable capnp proto node. 
func (g *Ghost) ToCapnpNode(seg *capnp.Segment, capNd capnp_model.Node) error {
	// base points at the Base of the embedded old node; its attributes
	// are written to the outer capnp node after the switch.
	var base *Base

	capghost, err := capNd.NewGhost()
	if err != nil {
		return err
	}

	capghost.SetGhostInode(g.ghostInode)
	if err = capghost.SetGhostPath(g.ghostPath); err != nil {
		return err
	}

	// Serialize the old node according to its former type:
	switch g.oldType {
	case NodeTypeFile:
		file, ok := g.ModNode.(*File)
		if !ok {
			return ie.ErrBadNode
		}

		capfile, err := file.setFileAttrs(seg)
		if err != nil {
			return err
		}

		base = &file.Base
		if err = capghost.SetFile(*capfile); err != nil {
			return err
		}
	case NodeTypeDirectory:
		dir, ok := g.ModNode.(*Directory)
		if !ok {
			return ie.ErrBadNode
		}

		capdir, err := dir.setDirectoryAttrs(seg)
		if err != nil {
			return err
		}

		base = &dir.Base
		if err = capghost.SetDirectory(*capdir); err != nil {
			return err
		}
	case NodeTypeGhost:
		panic("Recursive ghosts are not possible")
	default:
		panic(fmt.Sprintf("Unknown node type: %d", g.oldType))
	}

	if err != nil {
		return err
	}

	if err := base.setBaseAttrsToNode(capNd); err != nil {
		return err
	}

	return capNd.SetGhost(capghost)
}

// FromCapnp reads all attributes from a previously marshaled ghost.
func (g *Ghost) FromCapnp(msg *capnp.Message) error {
	capNd, err := capnp_model.ReadRootNode(msg)
	if err != nil {
		return err
	}

	return g.FromCapnpNode(capNd)
}

// FromCapnpNode converts a serialized node to a normal node.
func (g *Ghost) FromCapnpNode(capNd capnp_model.Node) error {
	if typ := capNd.Which(); typ != capnp_model.Node_Which_ghost {
		return fmt.Errorf("BUG: ghost unmarshal with non ghost type: %d", typ)
	}

	capghost, err := capNd.Ghost()
	if err != nil {
		return err
	}

	g.ghostInode = capghost.GhostInode()
	g.ghostPath, err = capghost.GhostPath()
	if err != nil {
		return err
	}

	// base points at the Base of the restored old node; its attributes
	// are filled from the outer capnp node after the switch.
	var base *Base

	switch typ := capghost.Which(); typ {
	case capnp_model.Ghost_Which_directory:
		capdir, err := capghost.Directory()
		if err != nil {
			return err
		}

		dir := &Directory{}
		if err := dir.readDirectoryAttr(capdir); err != nil {
			return err
		}

		g.ModNode = dir
		g.oldType = NodeTypeDirectory
		base = &dir.Base
	case capnp_model.Ghost_Which_file:
		capfile, err := capghost.File()
		if err != nil {
			return err
		}

		file := &File{}
		if err := file.readFileAttrs(capfile); err != nil {
			return err
		}

		g.ModNode = file
		g.oldType = NodeTypeFile
		base = &file.Base
	default:
		return ie.ErrBadNode
	}

	return base.parseBaseAttrsFromNode(capNd)
}



================================================
FILE: catfs/nodes/ghost_test.go
================================================
package nodes

import (
	"bytes"
	"testing"

	h "github.com/sahib/brig/util/hashlib"
	capnp "zombiezen.com/go/capnproto2"
)

// TestGhost checks that a Ghost survives a capnp marshal/unmarshal round
// trip without losing the embedded old node's metadata.
func TestGhost(t *testing.T) {
	lkr := NewMockLinker()
	root, err := NewEmptyDirectory(lkr, nil, "", "a", 1)
	if err != nil {
		t.Fatalf("Failed to create root dir: %v", err)
	}

	lkr.AddNode(root, true)
	lkr.MemSetRoot(root)

	file := NewEmptyFile(root, "x.png", "a", 42)
	file.backend = h.TestDummy(t, 2)
	file.tree = h.TestDummy(t, 3)
	file.size = 13
	if err != nil {
		t.Fatalf("Failed to create empty file: %v", err)
	}

	ghost, err := MakeGhost(file, 666)
	if err != nil {
		t.Fatalf("Failed to make root dir a ghost: %v", err)
	}

	ghost.SetGhostPath("/other")

	if ghost.Type() != NodeTypeGhost {
		t.Fatalf("Ghost does not identify itself as ghost: %d", ghost.Type())
	}

	if !bytes.Equal(ghost.OldNode().TreeHash(), file.TreeHash()) {
		t.Fatalf("Ghost and real hash differ (%v - %v)",
			ghost.TreeHash(), root.TreeHash())
	}

	msg, err := ghost.ToCapnp()
	if err != nil {
		t.Fatalf("Ghost ToCapnp failed: %v", err)
	}

	data, err := msg.Marshal()
	if err != nil {
		t.Fatalf("Ghost marshal failed: %v", err)
	}

	newMsg, err := capnp.Unmarshal(data)
	if err != nil {
		t.Fatalf("Ghost unmarshal failed: %v", err)
	}

	empty := &Ghost{}
	if err := empty.FromCapnp(newMsg); err != nil {
		t.Fatalf("Ghost FromCapnp failed: %v", err)
	}

	if empty.Path() != "/other" {
		t.Fatalf("Ghost path was not unmarshaled: %v", empty.Path())
	}

	if !bytes.Equal(ghost.OldNode().TreeHash(), file.TreeHash()) {
		t.Fatalf("Ghost and real hash differ (%v - %v)",
			ghost.TreeHash(), root.TreeHash())
	}

	unmarshaledFile, err := ghost.OldFile()
	if err != nil {
		t.Fatalf("Failed to cast ghost to old file: %v", err)
	}

	if !unmarshaledFile.BackendHash().Equal(file.BackendHash()) {
		t.Fatalf("Hash content differs after unmarshal: %v", unmarshaledFile.BackendHash())
	}

	if !unmarshaledFile.TreeHash().Equal(file.TreeHash()) {
		t.Fatalf("Hash itself differs after unmarshal: %v", unmarshaledFile.TreeHash())
	}

	if unmarshaledFile.Inode() != file.Inode() {
		t.Fatalf("Inodes differ after unmarshal: %d != %d", unmarshaledFile.Inode(), file.Inode())
	}

	if unmarshaledFile.Path() != "/x.png" {
		t.Fatalf("Path differs after unmarshal: %v", unmarshaledFile.Path())
	}

	if empty.Inode() != ghost.Inode() {
		t.Fatalf("Inodes differ after unmarshal: %d != %d", unmarshaledFile.Inode(), file.Inode())
	}
}



================================================
FILE: catfs/nodes/linker.go
================================================
package nodes

import (
	"fmt"

	ie "github.com/sahib/brig/catfs/errors"
	h "github.com/sahib/brig/util/hashlib"
)

// Linker will tell a node how it relates to other nodes
// and gives it the ability to resolve other nodes by hash.
// Apart from that it gives the underlying linker implementation
// the possibility to be notified when a hash changes.
type Linker interface {
	// Root should return the current root directory.
	Root() (*Directory, error)

	// LookupNode should resolve `path` starting from the root directory.
	// If the path does not exist an error is returned and can be checked
	// with IsNoSuchFileError()
	LookupNode(path string) (Node, error)

	// NodeByHash resolves the hash to a specific node.
	// If the node does not exist, nil is returned.
	NodeByHash(hash h.Hash) (Node, error)

	// MemIndexSwap should be called when
	// the hash of a node changes.
	MemIndexSwap(nd Node, oldHash h.Hash, updatePathIndex bool)

	// MemSetRoot should be called when the current root directory changed.
	MemSetRoot(root *Directory)
}

////////////////////////////
// MOCKING IMPLEMENTATION //
////////////////////////////

// MockLinker is supposed to be used for testing.
// It simply holds all nodes in memory. New nodes should be added via AddNode.
type MockLinker struct {
	// root is the current root directory (lazily created by Root()).
	root *Directory
	// paths maps node paths to nodes.
	paths map[string]Node
	// hashes maps B58-encoded tree hashes to nodes.
	hashes map[string]Node
}

// NewMockLinker returns a Linker that can be easily used for testing.
func NewMockLinker() *MockLinker {
	return &MockLinker{
		paths: make(map[string]Node),
		hashes: make(map[string]Node),
	}
}

// Root returns the currently set root.
// If none was created yet, an empty directory is returned.
func (ml *MockLinker) Root() (*Directory, error) {
	if ml.root != nil {
		return ml.root, nil
	}

	root, err := NewEmptyDirectory(ml, nil, "", "", 0)
	if err != nil {
		return nil, err
	}

	ml.root = root
	return root, nil
}

// LookupNode tries to lookup if there is already a node with this path.
func (ml *MockLinker) LookupNode(path string) (Node, error) {
	if node, ok := ml.paths[path]; ok {
		return node, nil
	}

	return nil, ie.NoSuchFile(path)
}

// NodeByHash will return a previously added node (via AddNode) by it's hash.
func (ml *MockLinker) NodeByHash(hash h.Hash) (Node, error) {
	if node, ok := ml.hashes[hash.B58String()]; ok {
		return node, nil
	}

	return nil, fmt.Errorf("No such hash")
}

// MemSetRoot sets the current root to be `root`.
func (ml *MockLinker) MemSetRoot(root *Directory) {
	ml.root = root
}

// MemIndexSwap will replace a node (referenced by `oldHash`) with `nd`.
// The path does not change.
func (ml *MockLinker) MemIndexSwap(nd Node, oldHash h.Hash, updatePathIndex bool) {
	delete(ml.hashes, oldHash.B58String())
	ml.AddNode(nd, updatePathIndex)
}

// AddNode will add a node to the memory index.
// This is not part of the linker interface.
func (ml *MockLinker) AddNode(nd Node, updatePathIndex bool) {
	ml.hashes[nd.TreeHash().B58String()] = nd

	if updatePathIndex {
		ml.paths[nd.Path()] = nd
	}
}



================================================
FILE: catfs/nodes/node.go
================================================
package nodes

import (
	"time"

	capnp_model "github.com/sahib/brig/catfs/nodes/capnp"
	h "github.com/sahib/brig/util/hashlib"
	capnp "zombiezen.com/go/capnproto2"
)

// NodeType defines the type of a specific node.
type NodeType uint8

const (
	// NodeTypeUnknown should not happen in real programs
	NodeTypeUnknown = NodeType(iota)

	// NodeTypeFile indicates a regular file
	NodeTypeFile

	// NodeTypeDirectory indicates a directory
	NodeTypeDirectory

	// NodeTypeCommit indicates a commit
	NodeTypeCommit

	// NodeTypeGhost indicates a moved node
	NodeTypeGhost
)

var nodeTypeToString = map[NodeType]string{
	NodeTypeCommit:    "commit",
	NodeTypeGhost:     "ghost",
	NodeTypeFile:      "file",
	NodeTypeDirectory: "directory",
}

// String returns a human readable name of the node type;
// "unknown" is returned for values not covered by nodeTypeToString.
func (n NodeType) String() string {
	if name, ok := nodeTypeToString[n]; ok {
		return name
	}

	return "unknown"
}

// Metadatable is a thing that accumulates certain common node attributes.
type Metadatable interface {
	// Name returns the name of the object, i.e. the last part of the path,
	// which is also commonly called 'basename' in unix filesystems.
	Name() string

	// User returns the id of the user that last modified this file.
	// (There is no real ownership)
	User() string

	// Size returns the size of the node in bytes.
	Size() uint64

	// CachedSize returns the size of the node at the backend in bytes.
	// Negative indicates unknown status
	CachedSize() int64

	// ModTime returns the time when the last modification to the node happened.
	ModTime() time.Time

	// Path of this node.
	Path() string

	// Type returns the type of the node.
	Type() NodeType

	// Inode shall return a unique identifier for this node that does
	// not change, even when the content of the node changes.
	Inode() uint64

	// TreeHash returns the hash value of the node.
	//
	// It is an error to modify the hash value.
	// If you need to modify it, you have to make an own copy via .Clone().
	TreeHash() h.Hash

	// ContentHash is the actual plain text hash of the node.
	// This is used for comparing file and directory equality.
	ContentHash() h.Hash

	// BackendHash returns the hash under which the stored content
	// can be read from the backend.
	// It is valid to return nil if the file is empty.
	BackendHash() h.Hash
}

// Serializable is a thing that can be converted to a capnproto message.
type Serializable interface {
	ToCapnp() (*capnp.Message, error)
	FromCapnp(*capnp.Message) error

	ToCapnpNode(seg *capnp.Segment, capNd capnp_model.Node) error
	FromCapnpNode(capNd capnp_model.Node) error
}

// HierarchyEntry represents a thing that is placed in
// a file hierarchy and may have other children beneath it.
type HierarchyEntry interface {
	// NChildren returns the total number of children to a node.
	NChildren() int

	// Child returns a named child.
	Child(lkr Linker, name string) (Node, error)

	// Parent returns the parent node or nil if there is none.
	Parent(lkr Linker) (Node, error)

	// SetParent sets the parent new. Care must be taken to remove old
	// references to the node to avoid loops.
	SetParent(lkr Linker, nd Node) error
}

// Streamable represents a thing that can be streamed,
// given a cryptographic key.
type Streamable interface {
	Key() []byte
}

// Node is a single node in brig's MDAG.
// It is currently either a Commit, a File or a Directory.
type Node interface {
	Metadatable
	Serializable
	HierarchyEntry
}

// ModNode is a node that supports modification of
// its core attributes. File and Directory are settable,
// but a commit is not.
type ModNode interface {
	Node

	// SetSize sets the size of the node in bytes
	SetSize(size uint64)

	// SetModTime updates the modtime timestamp
	SetModTime(modTime time.Time)

	// SetName sets the name (i.e. basename) of the node.
	SetName(name string)

	// SetUser sets the user that last modified the file
	SetUser(user string)

	// NotifyMove tells the node that it was moved.
	// It should be called whenever the path of the node changed.
	// (i.e. not only the name, but parts of the parent path)
	NotifyMove(lkr Linker, parent *Directory, newPath string) error

	// Copy creates a copy of this node with the inode `inode`.
	Copy(inode uint64) ModNode
}



================================================
FILE: catfs/pinner.go
================================================
package catfs

import (
	"errors"

	capnp "github.com/sahib/brig/catfs/capnp"
	c "github.com/sahib/brig/catfs/core"
	"github.com/sahib/brig/catfs/db"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	h "github.com/sahib/brig/util/hashlib"
	capnp_lib "zombiezen.com/go/capnproto2"
)

// errNotPinnedSentinel is returned to signal an early exit in Walk()
var errNotPinnedSentinel = errors.New("not pinned")

// pinCacheEntry is one entry in the pin cache.
type pinCacheEntry struct {
	// Inodes maps inode -> explicit flag.
	// Presence of a key means the inode is pinned at all;
	// the value tells whether the pin was made explicitly.
	Inodes map[uint64]bool
}

// capnpToPinCacheEntry deserializes a pinCacheEntry from its capnp
// wire representation (the inverse of pinEnryToCapnpData).
func capnpToPinCacheEntry(data []byte) (*pinCacheEntry, error) {
	msg, err := capnp_lib.Unmarshal(data)
	if err != nil {
		return nil, err
	}

	capEntry, err := capnp.ReadRootPinEntry(msg)
	if err != nil {
		return nil, err
	}

	capPins, err := capEntry.Pins()
	if err != nil {
		return nil, err
	}

	entry := &pinCacheEntry{
		Inodes: make(map[uint64]bool),
	}

	for idx := 0; idx < capPins.Len(); idx++ {
		capPin := capPins.At(idx)
		// NOTE(review): the capnp field is named "isPinned", but the value
		// stored in Inodes is the explicit flag (see remember()) — the wire
		// name is misleading; confirm against the .capnp schema docs.
		entry.Inodes[capPin.Inode()] = capPin.IsPinned()
	}

	return entry, nil
}

// pinEnryToCapnpData serializes `entry` into capnp wire data.
// NOTE(review): "Enry" is a typo for "Entry"; kept as-is since
// other code in this package refers to this name.
func pinEnryToCapnpData(entry *pinCacheEntry) ([]byte, error) {
	msg, seg, err := capnp_lib.NewMessage(capnp_lib.SingleSegment(nil))
	if err != nil {
		return nil, err
	}

	capEntry, err := capnp.NewRootPinEntry(seg)
	if err != nil {
		return nil, err
	}

	capPinList, err := capnp.NewPin_List(seg, int32(len(entry.Inodes)))
	if err != nil {
		return nil, err
	}

	idx := 0
	for inode, isPinned := range entry.Inodes {
		capPin, err := capnp.NewPin(seg)
		if err != nil {
			return nil, err
		}

		capPin.SetInode(inode)
		capPin.SetIsPinned(isPinned)

		if err := capPinList.Set(idx, capPin); err != nil {
			return nil, err
		}

		idx++
	}

	if err := capEntry.SetPins(capPinList); err != nil {
		return nil, err
	}

	return msg.Marshal()
}

// Pinner remembers which hashes are pinned and if they are pinned explicitly.
// Its API can be used to safely change the pinning state. It assumes that it
// is the only entity that pins & unpins nodes.
type Pinner struct {
	bk  FsBackend
	lkr *c.Linker
}

// NewPinner creates a new pin cache backed by the linker's key/value store.
// `lkr` and `bk` are used to make PinNode() and UnpinNode() work.
func NewPinner(lkr *c.Linker, bk FsBackend) (*Pinner, error) {
	return &Pinner{lkr: lkr, bk: bk}, nil
}

// Close the pinning cache.
func (pc *Pinner) Close() error {
	// currently a no-op
	return nil
}

// getEntry fetches and decodes the cached pin entry for `hash` from `kv`.
// A missing key is not an error; (nil, nil) is returned in that case.
func getEntry(kv db.Database, hash h.Hash) (*pinCacheEntry, error) {
	data, err := kv.Get("pins", hash.B58String())
	if err != nil {
		if err == db.ErrNoSuchKey {
			return nil, nil
		}

		return nil, err
	}

	return capnpToPinCacheEntry(data)
}

// remember the pin state of a certain hash.
// This does not change anything in the backend but only changes the caching structure.
// Use with care to avoid data inconsistencies.
func (pc *Pinner) remember(inode uint64, hash h.Hash, isPinned, isExplicit bool) error {
	return pc.lkr.AtomicWithBatch(func(batch db.Batch) (bool, error) {
		oldEntry, err := getEntry(pc.lkr.KV(), hash)
		if err != nil {
			return true, err
		}

		var inodes map[uint64]bool
		if oldEntry != nil {
			inodes = oldEntry.Inodes
		} else {
			inodes = make(map[uint64]bool)
		}

		// Unpinning removes the inode entirely;
		// pinning stores the explicit flag as value.
		if !isPinned {
			delete(inodes, inode)
		} else {
			inodes[inode] = isExplicit
		}

		entry := pinCacheEntry{
			Inodes: inodes,
		}

		data, err := pinEnryToCapnpData(&entry)
		if err != nil {
			return true, err
		}

		batch.Put(data, "pins", hash.B58String())
		return false, nil
	})
}

// IsPinned returns two boolean values indicating the pin status of `inode` and
// `hash`. If the first value is true, the content is pinned, if the second is
// true it is pinned explicitly.
func (pc *Pinner) IsPinned(inode uint64, hash h.Hash) (bool, bool, error) {
	data, err := pc.lkr.KV().Get("pins", hash.B58String())
	if err != nil && err != db.ErrNoSuchKey {
		return false, false, err
	}

	if err == nil {
		// cache hit
		entry, err := capnpToPinCacheEntry(data)
		if err != nil {
			return false, false, err
		}

		isExplicit, ok := entry.Inodes[inode]
		return ok, isExplicit, nil
	}

	// We do not have this information yet.
	// Create a new entry based on the backend information.

	// silence a key error, ok will be false then.
	isPinned, err := pc.bk.IsPinned(hash)
	if err != nil {
		return false, false, err
	}

	// remember the file to be pinned non-explicitly:
	if err := pc.remember(inode, hash, isPinned, false); err != nil {
		return false, false, err
	}

	return isPinned, false, nil
}

////////////////////////////

// Pin will remember the node at `inode` with hash `hash` as `explicit`ly pinned.
func (pc *Pinner) Pin(inode uint64, hash h.Hash, explicit bool) error {
	isPinned, isExplicit, err := pc.IsPinned(inode, hash)
	if err != nil {
		return err
	}

	if isPinned {
		if isExplicit && !explicit {
			// will not "downgrade" an existing pin.
			return nil
		}
	} else {
		// Not pinned yet: actually pin it at the backend.
		if err := pc.bk.Pin(hash); err != nil {
			return err
		}
	}

	return pc.remember(inode, hash, true, explicit)
}

// Unpin unpins the content at `inode` and `hash`. If the pin was explicit,
// `explicit` must be true to make this work.
func (pc *Pinner) Unpin(inode uint64, hash h.Hash, explicit bool) error {
	isPinned, isExplicit, err := pc.IsPinned(inode, hash)
	if err != nil {
		return err
	}

	if isPinned {
		if isExplicit && !explicit {
			// An explicit pin is only removed by an explicit unpin.
			return nil
		}

		if err := pc.bk.Unpin(hash); err != nil {
			return err
		}
	}

	return pc.remember(inode, hash, false, explicit)
}

////////////////////////////

// doPinOp recursively walks over all children of a node and pins or unpins them.
func (pc *Pinner) doPinOp(op func(uint64, h.Hash, bool) error, nd n.Node, explicit bool) error {
	return n.Walk(pc.lkr, nd, true, func(child n.Node) error {
		// Only leaf files carry actual content to (un)pin.
		if child.Type() != n.NodeTypeFile {
			return nil
		}

		file, ok := child.(*n.File)
		if !ok {
			return ie.ErrBadNode
		}

		return op(file.Inode(), file.BackendHash(), explicit)
	})
}

// PinNode tries to pin the node referenced by `nd`.
// The difference to calling Pin(nd.BackendHash()) is,
// that this method will pin directories recursively, if given.
//
// If the file is already pinned exclusively and you want
// to pin it non-exclusive, this will be a no-op.
// In this case you have to unpin it first exclusively.
func (pc *Pinner) PinNode(nd n.Node, explicit bool) error {
	return pc.doPinOp(pc.Pin, nd, explicit)
}

// UnpinNode is the exact opposite of PinNode.
func (pc *Pinner) UnpinNode(nd n.Node, explicit bool) error {
	return pc.doPinOp(pc.Unpin, nd, explicit)
}

// IsNodePinned checks if `nd` is pinned and if so, exclusively.
// If `nd` is a directory, it will only return true if all children
// are also pinned (same for second return value).
func (pc *Pinner) IsNodePinned(nd n.Node) (bool, bool, error) {
	// Handle special case:
	// empty directories should count as pinned.
	// (for the sake of the definition that a directory is pinned,
	// if all children are also pinned)
	if nd.Type() == n.NodeTypeDirectory && nd.NChildren() == 0 {
		return true, true, nil
	}

	pinCount := 0
	explicitCount := 0
	totalCount := 0

	err := n.Walk(pc.lkr, nd, true, func(child n.Node) error {
		// Only files are counted; directories and ghosts are skipped.
		// NOTE(review): if the walk visits no files at all, totalCount
		// stays 0 and the second return value is vacuously true — confirm
		// callers expect that.
		if child.Type() != n.NodeTypeFile {
			return nil
		}

		file, ok := child.(*n.File)
		if !ok {
			return ie.ErrBadNode
		}

		totalCount++

		isPinned, isExplicit, err := pc.IsPinned(file.Inode(), file.BackendHash())
		if err != nil {
			return err
		}

		if isExplicit {
			explicitCount++
		}

		if isPinned {
			// Make sure that we do not count empty directories
			// as pinned nodes.
			pinCount++
		} else {
			// Return a special error here to stop Walk() iterating.
			// One file is enough to stop IsPinned() from being true.
return errNotPinnedSentinel } return nil }) if err != nil && err != errNotPinnedSentinel { return false, false, err } if err == errNotPinnedSentinel { return false, false, nil } return pinCount > 0, explicitCount == totalCount, nil } ================================================ FILE: catfs/pinner_test.go ================================================ package catfs import ( "bytes" "testing" c "github.com/sahib/brig/catfs/core" h "github.com/sahib/brig/util/hashlib" "github.com/stretchr/testify/require" ) func TestPinMemCache(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { backend := NewMemFsBackend() pinner, err := NewPinner(lkr, backend) require.Nil(t, err) content := h.TestDummy(t, 1) require.Nil(t, pinner.remember(1, content, true, false)) isPinned, isExplicit, err := pinner.IsPinned(1, content) require.Nil(t, err) require.True(t, isPinned) require.False(t, isExplicit) require.Nil(t, pinner.remember(1, content, true, true)) isPinned, isExplicit, err = pinner.IsPinned(1, content) require.Nil(t, err) require.True(t, isPinned) require.True(t, isExplicit) require.Nil(t, pinner.Close()) }) } func TestPinRememberHashTwice(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { backend := NewMemFsBackend() pinner, err := NewPinner(lkr, backend) require.Nil(t, err) content := h.TestDummy(t, 1) require.Nil(t, pinner.remember(1, content, true, false)) isPinned, isExplicit, err := pinner.IsPinned(1, content) require.Nil(t, err) require.True(t, isPinned) require.False(t, isExplicit) require.Nil(t, pinner.remember(2, content, true, true)) isPinned, isExplicit, err = pinner.IsPinned(2, content) require.Nil(t, err) require.True(t, isPinned) require.True(t, isExplicit) require.Nil(t, pinner.remember(2, content, false, true)) isPinned, isExplicit, err = pinner.IsPinned(2, content) require.Nil(t, err) require.False(t, isPinned) require.False(t, isExplicit) // old inode is still counted as pinned. 
isPinned, isExplicit, err = pinner.IsPinned(1, content) require.Nil(t, err) require.True(t, isPinned) require.False(t, isExplicit) require.Nil(t, pinner.Close()) }) } func TestPinNode(t *testing.T) { withDummyFS(t, func(fs *FS) { require.Nil(t, fs.Stage("/x", bytes.NewReader([]byte{1}))) x, err := fs.lkr.LookupFile("/x") require.Nil(t, err) require.Nil(t, fs.pinner.PinNode(x, false)) isPinned, isExplicit, err := fs.pinner.IsNodePinned(x) require.Nil(t, err) require.True(t, isPinned) require.False(t, isExplicit) require.Nil(t, fs.pinner.PinNode(x, true)) isPinned, isExplicit, err = fs.pinner.IsNodePinned(x) require.Nil(t, err) require.True(t, isPinned) require.True(t, isExplicit) // Downgrade unpin(false) when explicit => no change. require.Nil(t, fs.pinner.UnpinNode(x, false)) isPinned, isExplicit, err = fs.pinner.IsNodePinned(x) require.Nil(t, err) require.True(t, isPinned) require.True(t, isExplicit) require.Nil(t, fs.pinner.UnpinNode(x, true)) isPinned, isExplicit, err = fs.pinner.IsNodePinned(x) require.Nil(t, err) require.False(t, isPinned) require.False(t, isExplicit) }) } func TestPinEntryMarshal(t *testing.T) { pinEntry := &pinCacheEntry{ Inodes: map[uint64]bool{ 10: true, 15: false, 20: true, }, } data, err := pinEnryToCapnpData(pinEntry) require.Nil(t, err) loadedPinEntry, err := capnpToPinCacheEntry(data) require.Nil(t, err) require.Equal(t, pinEntry, loadedPinEntry) } func TestPinEmptyDir(t *testing.T) { withDummyFS(t, func(fs *FS) { require.Nil(t, fs.Mkdir("/empty", true)) dir, err := fs.lkr.LookupDirectory("/empty") require.Nil(t, err) isPinned, isExplicit, err := fs.pinner.IsNodePinned(dir) require.Nil(t, err) require.True(t, isPinned) require.True(t, isExplicit) }) } ================================================ FILE: catfs/repin.go ================================================ package catfs import ( "sort" "github.com/dustin/go-humanize" e "github.com/pkg/errors" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" 
	"github.com/sahib/brig/catfs/vcs"
	"github.com/sahib/brig/util"
	log "github.com/sirupsen/logrus"
)

type partition struct {
	// PinSize is the accumulated byte size of the pinned entries.
	PinSize uint64

	// nodes that are within min_depth and should stay pinned
	// (or are even re-pinned if needed)
	ShouldPin []n.ModNode

	// nodes that are between min_depth and max_depth.
	// they might be unpinned if they exceed the quota.
	QuotaCandidates []n.ModNode

	// nodes that are behind max_depth.
	// all of them are unpinned for sure.
	DepthCandidates []n.ModNode
}

// partitionNodeHashes takes all hashes of a node and sorts them into the
// buckets described in the partition docs.
func (fs *FS) partitionNodeHashes(nd n.ModNode, minDepth, maxDepth int64) (*partition, error) {
	currDepth := int64(0)
	part := &partition{}

	curr, err := fs.lkr.Status()
	if err != nil {
		return nil, err
	}

	seen := make(map[string]bool)
	walker := vcs.NewHistoryWalker(fs.lkr, curr, nd)
	for walker.Next() {
		state := walker.State()
		// Shadows the outer `curr` (the status commit) on purpose:
		// from here on `curr` is the version of `nd` at this history step.
		curr := state.Curr

		if curr.Type() == n.NodeTypeGhost {
			// ghosts nodes are always unpinned
			continue
		}

		if seen[curr.BackendHash().B58String()] {
			// We only want to have the first $n distinct versions.
			// Sometimes the versions is duplicated though (removed, readded, moved)
			// so we don't want to include them since the docs say "first 10 versions".
			continue
		}

		// Sort the entry into the right bucket:
		if currDepth < minDepth {
			part.ShouldPin = append(part.ShouldPin, curr)
			// NOTE(review): this adds nd.Size() (the head version's size),
			// not curr.Size() (this version's size) — looks like a bug,
			// confirm against upstream intent.
			part.PinSize += nd.Size()
		} else if currDepth >= minDepth && currDepth < maxDepth {
			part.QuotaCandidates = append(part.QuotaCandidates, curr)
			// NOTE(review): checks the head node `nd`, not the historic
			// version `curr` — so every quota candidate gets the same
			// answer; confirm this is intended.
			isPinned, isExplicit, err := fs.pinner.IsNodePinned(nd)
			if err != nil {
				return nil, err
			}

			if isPinned && !isExplicit {
				part.PinSize += nd.Size()
			}
		} else {
			part.DepthCandidates = append(part.DepthCandidates, curr)
		}

		seen[curr.BackendHash().B58String()] = true
		currDepth++

		// TODO: Optimization: Save depth of last run and abort early if we know
		// that we unpinned everything at this level already.
	}

	if err := walker.Err(); err != nil {
		return nil, err
	}

	return part, nil
}

// ensurePin makes sure every entry in `entries` is actually pinned and
// cached at the backend. It returns the number of bytes newly pinned.
func (fs *FS) ensurePin(entries []n.ModNode) (uint64, error) {
	newlyPinned := uint64(0)
	isPinUnpinned := fs.cfg.Bool("repin.pin_unpinned")

	for _, nd := range entries {
		isPinned, _, err := fs.pinner.IsNodePinned(nd)
		if err != nil {
			return newlyPinned, err
		}

		if nd.Type() == n.NodeTypeFile && isPinned {
			// let's make sure that this file node is pinned at backend as well
			isCached, err := fs.bk.IsCached(nd.BackendHash())
			if err != nil {
				return newlyPinned, err
			}

			if !isCached {
				log.Warningf("The %+v should be cached, but it is not. Recaching", nd)
				err := fs.bk.Pin(nd.BackendHash())
				if err != nil {
					return newlyPinned, err
				}
			}
		}

		if !isPinned && isPinUnpinned {
			if nd.Type() == n.NodeTypeGhost {
				// ghosts cannot be pinned
				continue
			}

			if err := fs.pinner.PinNode(nd, false); err != nil {
				return newlyPinned, err
			}

			newlyPinned += nd.Size()
		}
	}

	return newlyPinned, nil
}

// ensureUnpin unpins every pinned entry in `entries` (including explicit
// pins) and returns the number of bytes freed that way.
func (fs *FS) ensureUnpin(entries []n.ModNode) (uint64, error) {
	savedStorage := uint64(0)

	for _, nd := range entries {
		isPinned, _, err := fs.pinner.IsNodePinned(nd)
		if err != nil {
			return 0, err
		}

		if isPinned {
			explicit := true // we are unpinning even explicitly pinned
			if err := fs.pinner.UnpinNode(nd, explicit); err != nil {
				return 0, err
			}

			savedStorage += nd.Size()
		}
	}

	return savedStorage, nil
}

// findLastPinnedIdx returns the highest index in `nds` (i.e. the earliest
// version) that is pinned, but not explicitly so. -1 means "none found".
func findLastPinnedIdx(pinner *Pinner, nds []n.ModNode) (int, error) {
	for idx := len(nds) - 1; idx >= 0; idx-- {
		isPinned, isExplicit, err := pinner.IsNodePinned(nds[idx])
		if err != nil {
			return -1, err
		}

		if isPinned && !isExplicit {
			return idx, nil
		}
	}

	return -1, nil
}

// balanceQuota unpins quota candidates until `totalStorage` drops below
// `quota` (or no candidates remain), returning the number of bytes freed.
func (fs *FS) balanceQuota(ps []*partition, totalStorage, quota uint64) (uint64, error) {
	// Smallest partitions first.
	sort.Slice(ps, func(i, j int) bool {
		return ps[i].PinSize < ps[j].PinSize
	})

	idx, empties := 0, 0
	savedStorage := uint64(0)

	// Try to reduce the pinned storage amount until
	// we stay below the determined quota.
	// BUG(review): `idx` is never incremented in this loop, so
	// ps[idx%len(ps)] is always ps[0] — only the smallest partition is
	// ever drained, and `empties` is bumped repeatedly by the same
	// exhausted partition. A round-robin `idx++` was presumably intended;
	// confirm against upstream before changing.
	for totalStorage >= quota && empties < len(ps) {
		cnds := ps[idx%len(ps)].QuotaCandidates
		if len(cnds) == 0 {
			empties++
			continue
		}

		// Find the last index (i.e. earliest version) that is pinned.
		lastPinIdx, err := findLastPinnedIdx(fs.pinner, cnds)
		if err != nil {
			return 0, err
		}

		if lastPinIdx < 0 {
			// Only explicit pins (or nothing) left: drop the candidates.
			empties++
			ps[idx%len(ps)].QuotaCandidates = cnds[:0]
			continue
		}

		cnd := cnds[lastPinIdx]
		totalStorage -= cnd.Size()
		savedStorage += cnd.Size()

		explicit := true // we are unpinning even explicitly pinned
		if err := fs.pinner.UnpinNode(cnd, explicit); err != nil {
			return 0, err
		}

		ps[idx%len(ps)].QuotaCandidates = cnds[:lastPinIdx]
	}

	log.Infof("quota collector unpinned %d bytes", savedStorage)
	return savedStorage, nil
}

// repin is the synchronous implementation behind Repin(): it partitions the
// history of every file below `root` and (un)pins versions according to the
// repin.{min_depth,max_depth,quota} config values.
func (fs *FS) repin(root string) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	// repinning doesn't modify any metadata,
	// but still affects the filesystem.
	if fs.readOnly {
		return nil
	}

	if !fs.cfg.Bool("repin.enabled") {
		return nil
	}

	minDepth := util.Max64(0, fs.cfg.Int("repin.min_depth"))
	maxDepth := util.Max64(1, fs.cfg.Int("repin.max_depth"))
	quotaSrc := fs.cfg.String("repin.quota")
	quota, err := humanize.ParseBytes(quotaSrc)
	if err != nil {
		return err
	}

	rootNd, err := fs.lkr.LookupDirectory(root)
	if err != nil {
		return err
	}

	totalStorage := uint64(0)
	addedToStorage := uint64(0)
	savedStorage := uint64(0)
	parts := []*partition{}

	log.Infof("repin started (min=%d max=%d quota=%s)", minDepth, maxDepth, quotaSrc)

	err = n.Walk(fs.lkr, rootNd, true, func(child n.Node) error {
		if child.Type() == n.NodeTypeDirectory {
			return nil
		}

		modChild, ok := child.(n.ModNode)
		if !ok {
			return e.Wrapf(ie.ErrBadNode, "repin")
		}

		part, err := fs.partitionNodeHashes(modChild, minDepth, maxDepth)
		if err != nil {
			return err
		}

		pinBytes, err := fs.ensurePin(part.ShouldPin)
		if err != nil {
			return err
		}

		unpinBytes, err := fs.ensureUnpin(part.DepthCandidates)
		if err != nil {
			return err
		}

		totalStorage += part.PinSize
		addedToStorage += pinBytes
		savedStorage += unpinBytes
		parts = append(parts, part)
		return nil
	})

	if err != nil {
		return e.Wrapf(err, "repin: walk")
	}

	quotaUnpins, err := fs.balanceQuota(parts, totalStorage, quota)
	if err != nil {
		return e.Wrapf(err, "repin: quota balance")
	}

	savedStorage += quotaUnpins
	totalStorage -= quotaUnpins

	if savedStorage >= addedToStorage {
		log.Infof(
			"repin finished; freed %s, total storage is %s",
			humanize.Bytes(savedStorage-addedToStorage),
			humanize.Bytes(totalStorage),
		)
	} else {
		log.Infof(
			"repin finished; used extra %s, total storage is %s",
			humanize.Bytes(addedToStorage-savedStorage),
			humanize.Bytes(totalStorage),
		)
	}

	return nil
}

// Repin goes over all files in the filesystem and identifies files that need to be unpinned.
// Only files that are not explicitly pinned, are touched. If a file is explicitly pinned, it will
// survive the repinning process in any case. The repinning is steered by two config variables:
//
// - fs.repin.quota: Maximum amount of pinned storage (excluding explicit pins)
// - fs.repin.depth: How many versions of a file to keep at least. This trumps quota.
// func (fs *FS) Repin(root string) error { fs.repinControl <- prefixSlash(root) return nil } ================================================ FILE: catfs/repin_test.go ================================================ package catfs import ( "bytes" "fmt" "strings" "testing" "github.com/stretchr/testify/require" ) func TestRepinDepthOnly(t *testing.T) { withDummyFS(t, func(fs *FS) { fs.cfg.SetBool("repin.enabled", true) fs.cfg.SetString("repin.quota", "10G") fs.cfg.SetInt("repin.min_depth", 1) fs.cfg.SetInt("repin.max_depth", 10) testRun(t, fs, 10, 20) }) } func TestRepinNoMaxDepth(t *testing.T) { withDummyFS(t, func(fs *FS) { fs.cfg.SetBool("repin.enabled", true) fs.cfg.SetString("repin.quota", "10G") fs.cfg.SetInt("repin.min_depth", 1) fs.cfg.SetInt("repin.max_depth", 100) testRun(t, fs, 20, 20) }) } func TestRepinDisabled(t *testing.T) { withDummyFS(t, func(fs *FS) { fs.cfg.SetBool("repin.enabled", false) testRun(t, fs, 20, 20) }) } func TestRepinQuota(t *testing.T) { withDummyFS(t, func(fs *FS) { fs.cfg.SetBool("repin.enabled", true) fs.cfg.SetString("repin.quota", "11B") fs.cfg.SetInt("repin.min_depth", 1) fs.cfg.SetInt("repin.max_depth", 100) testRun(t, fs, 10, 20) }) } func TestRepinKillAll(t *testing.T) { withDummyFS(t, func(fs *FS) { fs.cfg.SetBool("repin.enabled", true) fs.cfg.SetString("repin.quota", "0B") fs.cfg.SetInt("repin.min_depth", 0) fs.cfg.SetInt("repin.max_depth", 0) testRun(t, fs, -1, 20) }) } func TestRepinOldBehaviour(t *testing.T) { withDummyFS(t, func(fs *FS) { fs.cfg.SetBool("repin.enabled", true) fs.cfg.SetString("repin.quota", "100G") fs.cfg.SetInt("repin.min_depth", 1) fs.cfg.SetInt("repin.max_depth", 1) testRun(t, fs, 1, 20) }) } func testRun(t *testing.T, fs *FS, split, n int) { for idx := 0; idx < n; idx++ { require.Nil(t, fs.Stage("/dir/a", bytes.NewReader([]byte{byte(idx)}))) require.Nil(t, fs.MakeCommit(fmt.Sprintf("state: %d", idx))) } for idx := 0; idx < n; idx++ { require.Nil(t, fs.Pin("/dir/a", "HEAD"+strings.Repeat("^", idx), 
			false))
	}

	require.Nil(t, fs.repin("/"))

	histA, err := fs.History("/dir/a")
	require.Nil(t, err)

	// All versions up to (and including) `split` stay pinned, ...
	for idx := 0; idx <= split; idx++ {
		require.True(t, histA[idx].IsPinned, fmt.Sprintf("%d", idx))
		require.False(t, histA[idx].IsExplicit, fmt.Sprintf("%d", idx))
	}

	// ...everything older was unpinned by repin.
	for idx := split + 1; idx < n; idx++ {
		require.False(t, histA[idx].IsPinned, fmt.Sprintf("%d", idx))
		require.False(t, histA[idx].IsExplicit, fmt.Sprintf("%d", idx))
	}
}



================================================
FILE: catfs/rev.go
================================================
package catfs

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"unicode"

	e "github.com/pkg/errors"
	c "github.com/sahib/brig/catfs/core"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
)

var (
	// Matches index specs like "commit[5]", "commit[-1]" or "commit[+3]".
	indexCommitPattern = regexp.MustCompile(`^commit\[([-\+]{0,1}[0-9]+)\]$`)
)

// validateRev checks if a rev spec looks valid
// from a syntactic point of view.
//
// A valid ref may contain only letters or numbers, but might end with an
// arbitrary number of '^' at the end. Unicode is allowed.
// As special case it might also match indexCommitPattern.
//
// If any violation is detected, an error is returned.
func validateRev(rev string) error {
	if indexCommitPattern.Match([]byte(rev)) {
		return nil
	}

	foundUp := false
	for _, c := range rev {
		if unicode.IsLetter(c) || unicode.IsNumber(c) {
			// Once a '^' was seen, only more '^' may follow.
			if foundUp {
				return fmt.Errorf("normal character after ^")
			}

			continue
		}

		switch c {
		case '^':
			foundUp = true
		default:
			return fmt.Errorf("invalid character in ref: `%v`", c)
		}
	}

	return nil
}

// parseRev resolves a base58 to a commit or if it looks like a refname
// it tries to resolve that (HEAD, CURR, INIT e.g.).
// parseRev resolves `rev` to a commit. Resolution order:
// 1. "commit[N]" index specs (resolved via CommitByIndex),
// 2. abbreviated base58 hashes (via ExpandAbbrev), with trailing '^'
//    parent markers re-applied,
// 3. refnames like HEAD/CURR/INIT (case-insensitive, via ResolveRef).
func parseRev(lkr *c.Linker, rev string) (*n.Commit, error) {
	if err := validateRev(rev); err != nil {
		return nil, e.Wrapf(err, "validate")
	}

	lowerRev := strings.ToLower(rev)

	// NOTE(review): the index pattern is matched against the lowered rev
	// here, but validateRev matched the original — an upper-case
	// "COMMIT[0]" is rejected by validateRev before reaching this point.
	matches := indexCommitPattern.FindSubmatch([]byte(lowerRev))
	if len(matches) >= 2 {
		index, err := strconv.ParseInt(string(matches[1]), 10, 64)
		if err != nil {
			return nil, e.Wrapf(err, "failed to parse commit index spec")
		}

		return lkr.CommitByIndex(index)
	}

	pureRev := strings.TrimRight(rev, "^")
	hash, err := lkr.ExpandAbbrev(pureRev)
	if err != nil {
		// Either it was an hash and it is valid,
		// Or it is a tag name like HEAD (or "head")
		nd, err := lkr.ResolveRef(lowerRev)
		if err != nil {
			return nil, err
		}

		cmt, ok := nd.(*n.Commit)
		if !ok {
			return nil, ie.ErrBadNode
		}

		return cmt, nil
	}

	// Re-attach the '^' parent markers to the expanded hash.
	// (validateRev guarantees all '^' are trailing, so Count is safe here.)
	actualRev := hash.B58String() + strings.Repeat("^", strings.Count(rev, "^"))
	nd, err := lkr.ResolveRef(actualRev)
	if err != nil {
		return nil, err
	}

	cmt, ok := nd.(*n.Commit)
	if !ok {
		return nil, ie.ErrBadNode
	}

	return cmt, nil
}



================================================
FILE: catfs/rev_test.go
================================================
package catfs

import (
	"testing"

	c "github.com/sahib/brig/catfs/core"
	"github.com/stretchr/testify/require"
)

func TestRevParse(t *testing.T) {
	c.WithDummyLinker(t, func(lkr *c.Linker) {
		init, err := parseRev(lkr, "commit[0]")
		require.Nil(t, err)
		require.Equal(t, "init", init.Message())
	})
}



================================================
FILE: catfs/vcs/capnp/patch.capnp
================================================
using Go = import "/go.capnp";
using Nodes = import "../../nodes/capnp/nodes.capnp";

@0xb943b54bf1683782;

$Go.package("capnp");
$Go.import("github.com/sahib/brig/catfs/vcs/capnp");

struct Change $Go.doc("Change describes a single change") {
    mask @0 :UInt64;
    head @1 :Nodes.Node;
    next @2 :Nodes.Node;
    curr @3 :Nodes.Node;
    movedTo @4 :Text;
    wasPreviouslyAt @5 :Text;
}

struct Patch $Go.doc("Patch contains a single change") {
    fromIndex @0 :Int64;
    currIndex @1
:Int64; changes @2 :List(Change); } struct Patches $Go.doc("Patches contains several patches") { patches @0 :List(Patch); } ================================================ FILE: catfs/vcs/capnp/patch.capnp.go ================================================ // Code generated by capnpc-go. DO NOT EDIT. package capnp import ( capnp2 "github.com/sahib/brig/catfs/nodes/capnp" capnp "zombiezen.com/go/capnproto2" text "zombiezen.com/go/capnproto2/encoding/text" schemas "zombiezen.com/go/capnproto2/schemas" ) // Change describes a single change type Change struct{ capnp.Struct } // Change_TypeID is the unique identifier for the type Change. const Change_TypeID = 0x9592300df48789af func NewChange(s *capnp.Segment) (Change, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 5}) return Change{st}, err } func NewRootChange(s *capnp.Segment) (Change, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 5}) return Change{st}, err } func ReadRootChange(msg *capnp.Message) (Change, error) { root, err := msg.RootPtr() return Change{root.Struct()}, err } func (s Change) String() string { str, _ := text.Marshal(0x9592300df48789af, s.Struct) return str } func (s Change) Mask() uint64 { return s.Struct.Uint64(0) } func (s Change) SetMask(v uint64) { s.Struct.SetUint64(0, v) } func (s Change) Head() (capnp2.Node, error) { p, err := s.Struct.Ptr(0) return capnp2.Node{Struct: p.Struct()}, err } func (s Change) HasHead() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Change) SetHead(v capnp2.Node) error { return s.Struct.SetPtr(0, v.Struct.ToPtr()) } // NewHead sets the head field to a newly // allocated capnp2.Node struct, preferring placement in s's segment. 
func (s Change) NewHead() (capnp2.Node, error) { ss, err := capnp2.NewNode(s.Struct.Segment()) if err != nil { return capnp2.Node{}, err } err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) return ss, err } func (s Change) Next() (capnp2.Node, error) { p, err := s.Struct.Ptr(1) return capnp2.Node{Struct: p.Struct()}, err } func (s Change) HasNext() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s Change) SetNext(v capnp2.Node) error { return s.Struct.SetPtr(1, v.Struct.ToPtr()) } // NewNext sets the next field to a newly // allocated capnp2.Node struct, preferring placement in s's segment. func (s Change) NewNext() (capnp2.Node, error) { ss, err := capnp2.NewNode(s.Struct.Segment()) if err != nil { return capnp2.Node{}, err } err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) return ss, err } func (s Change) Curr() (capnp2.Node, error) { p, err := s.Struct.Ptr(2) return capnp2.Node{Struct: p.Struct()}, err } func (s Change) HasCurr() bool { p, err := s.Struct.Ptr(2) return p.IsValid() || err != nil } func (s Change) SetCurr(v capnp2.Node) error { return s.Struct.SetPtr(2, v.Struct.ToPtr()) } // NewCurr sets the curr field to a newly // allocated capnp2.Node struct, preferring placement in s's segment. 
func (s Change) NewCurr() (capnp2.Node, error) { ss, err := capnp2.NewNode(s.Struct.Segment()) if err != nil { return capnp2.Node{}, err } err = s.Struct.SetPtr(2, ss.Struct.ToPtr()) return ss, err } func (s Change) MovedTo() (string, error) { p, err := s.Struct.Ptr(3) return p.Text(), err } func (s Change) HasMovedTo() bool { p, err := s.Struct.Ptr(3) return p.IsValid() || err != nil } func (s Change) MovedToBytes() ([]byte, error) { p, err := s.Struct.Ptr(3) return p.TextBytes(), err } func (s Change) SetMovedTo(v string) error { return s.Struct.SetText(3, v) } func (s Change) WasPreviouslyAt() (string, error) { p, err := s.Struct.Ptr(4) return p.Text(), err } func (s Change) HasWasPreviouslyAt() bool { p, err := s.Struct.Ptr(4) return p.IsValid() || err != nil } func (s Change) WasPreviouslyAtBytes() ([]byte, error) { p, err := s.Struct.Ptr(4) return p.TextBytes(), err } func (s Change) SetWasPreviouslyAt(v string) error { return s.Struct.SetText(4, v) } // Change_List is a list of Change. type Change_List struct{ capnp.List } // NewChange creates a new list of Change. func NewChange_List(s *capnp.Segment, sz int32) (Change_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 5}, sz) return Change_List{l}, err } func (s Change_List) At(i int) Change { return Change{s.List.Struct(i)} } func (s Change_List) Set(i int, v Change) error { return s.List.SetStruct(i, v.Struct) } func (s Change_List) String() string { str, _ := text.MarshalList(0x9592300df48789af, s.List) return str } // Change_Promise is a wrapper for a Change promised by a client call. 
type Change_Promise struct{ *capnp.Pipeline } func (p Change_Promise) Struct() (Change, error) { s, err := p.Pipeline.Struct() return Change{s}, err } func (p Change_Promise) Head() capnp2.Node_Promise { return capnp2.Node_Promise{Pipeline: p.Pipeline.GetPipeline(0)} } func (p Change_Promise) Next() capnp2.Node_Promise { return capnp2.Node_Promise{Pipeline: p.Pipeline.GetPipeline(1)} } func (p Change_Promise) Curr() capnp2.Node_Promise { return capnp2.Node_Promise{Pipeline: p.Pipeline.GetPipeline(2)} } // Patch contains a single change type Patch struct{ capnp.Struct } // Patch_TypeID is the unique identifier for the type Patch. const Patch_TypeID = 0x927c7336e3054805 func NewPatch(s *capnp.Segment) (Patch, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) return Patch{st}, err } func NewRootPatch(s *capnp.Segment) (Patch, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) return Patch{st}, err } func ReadRootPatch(msg *capnp.Message) (Patch, error) { root, err := msg.RootPtr() return Patch{root.Struct()}, err } func (s Patch) String() string { str, _ := text.Marshal(0x927c7336e3054805, s.Struct) return str } func (s Patch) FromIndex() int64 { return int64(s.Struct.Uint64(0)) } func (s Patch) SetFromIndex(v int64) { s.Struct.SetUint64(0, uint64(v)) } func (s Patch) CurrIndex() int64 { return int64(s.Struct.Uint64(8)) } func (s Patch) SetCurrIndex(v int64) { s.Struct.SetUint64(8, uint64(v)) } func (s Patch) Changes() (Change_List, error) { p, err := s.Struct.Ptr(0) return Change_List{List: p.List()}, err } func (s Patch) HasChanges() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Patch) SetChanges(v Change_List) error { return s.Struct.SetPtr(0, v.List.ToPtr()) } // NewChanges sets the changes field to a newly // allocated Change_List, preferring placement in s's segment. 
func (s Patch) NewChanges(n int32) (Change_List, error) { l, err := NewChange_List(s.Struct.Segment(), n) if err != nil { return Change_List{}, err } err = s.Struct.SetPtr(0, l.List.ToPtr()) return l, err } // Patch_List is a list of Patch. type Patch_List struct{ capnp.List } // NewPatch creates a new list of Patch. func NewPatch_List(s *capnp.Segment, sz int32) (Patch_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) return Patch_List{l}, err } func (s Patch_List) At(i int) Patch { return Patch{s.List.Struct(i)} } func (s Patch_List) Set(i int, v Patch) error { return s.List.SetStruct(i, v.Struct) } func (s Patch_List) String() string { str, _ := text.MarshalList(0x927c7336e3054805, s.List) return str } // Patch_Promise is a wrapper for a Patch promised by a client call. type Patch_Promise struct{ *capnp.Pipeline } func (p Patch_Promise) Struct() (Patch, error) { s, err := p.Pipeline.Struct() return Patch{s}, err } // Patches contains several patches type Patches struct{ capnp.Struct } // Patches_TypeID is the unique identifier for the type Patches. 
const Patches_TypeID = 0xc2984f083ea5351d func NewPatches(s *capnp.Segment) (Patches, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) return Patches{st}, err } func NewRootPatches(s *capnp.Segment) (Patches, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) return Patches{st}, err } func ReadRootPatches(msg *capnp.Message) (Patches, error) { root, err := msg.RootPtr() return Patches{root.Struct()}, err } func (s Patches) String() string { str, _ := text.Marshal(0xc2984f083ea5351d, s.Struct) return str } func (s Patches) Patches() (Patch_List, error) { p, err := s.Struct.Ptr(0) return Patch_List{List: p.List()}, err } func (s Patches) HasPatches() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Patches) SetPatches(v Patch_List) error { return s.Struct.SetPtr(0, v.List.ToPtr()) } // NewPatches sets the patches field to a newly // allocated Patch_List, preferring placement in s's segment. func (s Patches) NewPatches(n int32) (Patch_List, error) { l, err := NewPatch_List(s.Struct.Segment(), n) if err != nil { return Patch_List{}, err } err = s.Struct.SetPtr(0, l.List.ToPtr()) return l, err } // Patches_List is a list of Patches. type Patches_List struct{ capnp.List } // NewPatches creates a new list of Patches. func NewPatches_List(s *capnp.Segment, sz int32) (Patches_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) return Patches_List{l}, err } func (s Patches_List) At(i int) Patches { return Patches{s.List.Struct(i)} } func (s Patches_List) Set(i int, v Patches) error { return s.List.SetStruct(i, v.Struct) } func (s Patches_List) String() string { str, _ := text.MarshalList(0xc2984f083ea5351d, s.List) return str } // Patches_Promise is a wrapper for a Patches promised by a client call. 
type Patches_Promise struct{ *capnp.Pipeline } func (p Patches_Promise) Struct() (Patches, error) { s, err := p.Pipeline.Struct() return Patches{s}, err } const schema_b943b54bf1683782 = "x\xda\x84\x93\xc1k\x13O\x1c\xc5\xbf\xef;\xbb\xbf\xb4" + "?R\xd319\x88T\xba\x88\x97z\xb0-E\x05\x0f" + "\xd6Z\x05E\xc5\x8c(\xeaEX7\xd3n0\xd9\x84" + "\xcc6V\xb0\x04z\x11=V\x85\x16\x14+\xb4b\xa1" + "R/\x82\x1e\xbc\x08\xfe\x0b\xfe\x03=\x89'\xc1K{" + "Y\x99$\xa6\xa5\x16{[>\xf3\xd8\xd9\xcf\xe3\xed\x10" + "p\x86\x87\xdd\x0c\x88T\xda\xfd/q/\xb8\xeb'\xcc" + "\xc39R}\xe0d\xf6d\xf8\xf3\xd2\x87\xf1O\xe4\"" + "E4\\\xde\x0f9\x93\x923\xfdre\x8d\x90\xac=" + "~\xf4\xabgh\xee\xb9\xcdb[\xd6M\x11\x8d\xdc\xc6" + "Ad\x8bHe\x8b\xe8\x1fY\xc0M\x10\x92C\xc7\x97" + "Ow]\x9d\xffB\xb2\x0f;\xde=\xf2\x9d\x0f#\xbb" + "\xc1\xa9\xec\x06\xf7g\x07\xc4(!\x09\xfcx\xc2\x0c\xd6" + "\x03a\x06\x03\xbf\x1aU\x07\xab~\x1c\x84\xc7\x9a\xcf\xa7" + "\xf2~\x1c \xcc\x03\xca\x01'w\x9e\xbeR\x9f\xbf=" + "\xf9J\xcaa\x8c\xf5\x01i\"\x89\xcd\xc4\xa6B/\xa8" + "p\x14\xfb\xc5\xc8x\xbeg\x8a\xd1dI{\xa3A\xe8" + "G\x93\xdaZ\x0b\x87\xc8\x01\x91<\x7f\x8dH\x9d\x13P" + "y\x86\x04r\xb0\xf0\x8a\x85\x97\x05\xd4-\x068\x07&" + "\x927\xce\x12\xa9\xbc\x80*1\x92\x89Z\xa5|1*" + "h\xc24\\b\xb8\xf6\xcb\xa7j\xb5\x1d\xac\xd1\xba\xd0" + "`\x1f!/\x80\xde\xad\xfa\x08\x16\xfe[w<\xf4#" + "1\xa9w\xf7\xf5\x9a\xbe\xc3\xf8\x1f\xc9x\xf3\x16\xaf " + "\xb4\x09j\xc5\xbbz\x9br\xdb\x18\xea@\xc7x\xe1(" + "\x91z&\xa0\x16\x19\x7f\x84_Z6/\xa0\x96\x18\x92" + "\xd12~m\xe1\x0b\x01\xf5\x96!\x05\xe7 \x88\xe4\xb2" + "\x85\x8b\x02j\x95!\x1d\x91\x83C$Wl7K\x02" + "\xea=C\xbaN\x0e.\x91|7K\xa4V\x05\xd4G" + "F\xa6\xec\x9b{\xe8&F7!\x13j\xbf\x80\xded" + "}s\xa2\xda\xf81\xf0\xc6v\xd1K\xc8Dz:\xde" + "\x05\xdb^\xff\xc6\x8dr\xa5\xae\x0b\xd7+H\x13#M" + "H\xee\xfb&_\xd3\xf5\"*S\xa6\xf4`,\xa6\xce" + "\xc9\x9e\x93J\x85\xda\xecYrsU\xdax\"\xa8\xb4" + "wet]\xd7\xfc\x92Wm\x9d\x10\x94\xd3)\xb9\xc7" + "6\xd2%\xa0\x8e0\x1a\xed\xc0\xd6\x0a:?\\k\x05" + "\xbf\x03\x00\x00\xff\xff\xe5\xc7\xe9\x1d" func init() { 
schemas.Register(schema_b943b54bf1683782, 0x927c7336e3054805, 0x9592300df48789af, 0xc2984f083ea5351d) } ================================================ FILE: catfs/vcs/change.go ================================================ package vcs import ( "fmt" "path" "strings" e "github.com/pkg/errors" c "github.com/sahib/brig/catfs/core" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" capnp_model "github.com/sahib/brig/catfs/nodes/capnp" capnp_patch "github.com/sahib/brig/catfs/vcs/capnp" log "github.com/sirupsen/logrus" capnp "zombiezen.com/go/capnproto2" ) const ( // ChangeTypeNone means that a node did not change (compared to HEAD) ChangeTypeNone = ChangeType(0) // ChangeTypeAdd says that the node was initially added after HEAD. ChangeTypeAdd = ChangeType(1 << iota) // ChangeTypeModify says that the the node was modified after HEAD ChangeTypeModify // ChangeTypeMove says that the node was moved after HEAD. // Note that Move and Modify may happen at the same time. ChangeTypeMove // ChangeTypeRemove says that the node was removed after HEAD. ChangeTypeRemove ) // ChangeType is a mask of possible state change events. type ChangeType uint8 // String will convert a ChangeType to a human readable form func (ct ChangeType) String() string { v := []string{} if ct&ChangeTypeAdd != 0 { v = append(v, "added") } if ct&ChangeTypeModify != 0 { v = append(v, "modified") } if ct&ChangeTypeMove != 0 { v = append(v, "moved") } if ct&ChangeTypeRemove != 0 { v = append(v, "removed") } if len(v) == 0 { return "none" } return strings.Join(v, "|") } // IsCompatible checks if two change masks are compatible. // Changes are compatible when they can be both applied // without loosing any content. We may loose metadata though, // e.g. when one side was moved, but the other removed: // Here the remove would win and no move is counted. 
func (ct ChangeType) IsCompatible(ot ChangeType) bool { modifyMask := ChangeTypeAdd | ChangeTypeModify return ct&modifyMask == 0 || ot&modifyMask == 0 } /////////////////////////// // Change represents a single change of a node between two commits. type Change struct { // Mask is a bitmask of changes that were made. // It describes the change that was made between `Next` to `Head` // and which is part of `Head`. Mask ChangeType // Head is the commit that was the current HEAD when this change happened. // Note that this is NOT the commit that contains the change, but the commit before. Head *n.Commit // Next is the commit that comes before `Head`. Next *n.Commit // Curr is the node with the attributes at a specific state Curr n.ModNode // MovedTo is only filled for ghosts that were the source // of a move. It's the path of the node it was moved to. MovedTo string // WasPreviouslyAt points to the place `Curr` was at // before a move. On changes without a move this is empty. WasPreviouslyAt string } func (ch *Change) String() string { movedTo := "" if len(ch.MovedTo) != 0 { movedTo = fmt.Sprintf(" (now %s)", ch.MovedTo) } prevAt := "" if len(ch.WasPreviouslyAt) != 0 { prevAt = fmt.Sprintf(" (was %s)", ch.WasPreviouslyAt) } return fmt.Sprintf("<%s:%s%s%s>", ch.Curr.Path(), ch.Mask, prevAt, movedTo) } func replayAddWithUnpacking(lkr *c.Linker, ch *Change) error { // If it's an ghost, unpack it first: It will be added as if it was // never a ghost, but since the change mask has the // ChangeTypeRemove flag set, it will removed directly after. currNd := ch.Curr if ch.Curr.Type() == n.NodeTypeGhost { currGhost, ok := ch.Curr.(*n.Ghost) if !ok { return ie.ErrBadNode } currNd = currGhost.OldNode() } // Check the type of the old node: oldNd, err := lkr.LookupModNode(currNd.Path()) if err != nil && !ie.IsNoSuchFileError(err) { return err } // If the types are conflicting we have to remove the existing node. 
if oldNd != nil && oldNd.Type() != currNd.Type() { if oldNd.Type() == n.NodeTypeGhost { // the oldNd node is already deleted, no need to do anything special return replayAdd(lkr, currNd) } _, _, err := c.Remove(lkr, oldNd, true, true) if err != nil { return e.Wrapf(err, "replay: type-conflict-remove") } } return replayAdd(lkr, currNd) } func replayAdd(lkr *c.Linker, currNd n.ModNode) error { switch currNd.(type) { case *n.File: if _, err := c.Mkdir(lkr, path.Dir(currNd.Path()), true); err != nil { return e.Wrapf(err, "replay: mkdir") } if _, err := c.StageFromFileNode(lkr, currNd.(*n.File)); err != nil { return e.Wrapf(err, "replay: stage") } case *n.Directory: if _, err := c.Mkdir(lkr, currNd.Path(), true); err != nil { return e.Wrapf(err, "replay: mkdir") } default: return e.Wrapf(ie.ErrBadNode, "replay: modify") } return nil } func replayMove(lkr *c.Linker, ch *Change) error { if ch.MovedTo != "" { oldNd, err := lkr.LookupModNode(ch.Curr.Path()) if err != nil && !ie.IsNoSuchFileError(err) { return err } if _, err := c.Mkdir(lkr, path.Dir(ch.MovedTo), true); err != nil { return e.Wrapf(err, "replay: mkdir") } if oldNd != nil { if err := c.Move(lkr, oldNd, ch.MovedTo); err != nil { return e.Wrapf(err, "replay: move") } } } if ch.Curr.Type() != n.NodeTypeGhost { if _, err := lkr.LookupModNode(ch.Curr.Path()); ie.IsNoSuchFileError(err) { if err := replayAdd(lkr, ch.Curr); err != nil { return err } } } if ch.WasPreviouslyAt != "" { oldNd, err := lkr.LookupModNode(ch.WasPreviouslyAt) if err != nil && !ie.IsNoSuchFileError(err) { return err } if oldNd != nil { if oldNd.Type() != n.NodeTypeGhost { if _, _, err := c.Remove(lkr, oldNd, true, true); err != nil { return e.Wrap(err, "replay: move: remove old") } } } if err := replayAddMoveMapping(lkr, ch.WasPreviouslyAt, ch.Curr.Path()); err != nil { return err } } return nil } func replayAddMoveMapping(lkr *c.Linker, oldPath, newPath string) error { newNd, err := lkr.LookupModNode(newPath) if err != nil { return err } 
oldNd, err := lkr.LookupModNode(oldPath) if err != nil && !ie.IsNoSuchFileError(err) { return nil } if oldNd == nil { return nil } log.Debugf("adding move mapping: %s %s", oldPath, newPath) return lkr.AddMoveMapping(oldNd.Inode(), newNd.Inode()) } func replayRemove(lkr *c.Linker, ch *Change) error { currNd, err := lkr.LookupModNode(ch.Curr.Path()) if err != nil { return e.Wrapf(err, "replay: lookup: %v", ch.Curr.Path()) } if currNd.Type() != n.NodeTypeGhost { if _, _, err := c.Remove(lkr, currNd, true, true); err != nil { return err } } return nil } // Replay applies the change `ch` onto `lkr` by redoing the same operations: // move, remove, modify, add. Commits are not replayed, everything happens in // lkr.Status() without creating a new commit. func (ch *Change) Replay(lkr *c.Linker) error { return lkr.Atomic(func() (bool, error) { if ch.Mask&(ChangeTypeModify|ChangeTypeAdd) != 0 { // Something needs to be done based on the type. // Either create/update a new file or create a directory. if err := replayAddWithUnpacking(lkr, ch); err != nil { return true, err } } if ch.Mask&ChangeTypeMove != 0 { if err := replayMove(lkr, ch); err != nil { return true, err } } // We should only remove a node if we're getting a ghost in ch.Curr. // Otherwise the node might have been removed and added again. 
if ch.Mask&ChangeTypeRemove != 0 && ch.Curr.Type() == n.NodeTypeGhost { if err := replayRemove(lkr, ch); err != nil { return true, err } } return false, nil }) } func (ch *Change) toCapnpChange(seg *capnp.Segment, capCh *capnp_patch.Change) error { capCurrNd, err := capnp_model.NewNode(seg) if err != nil { return err } if err := ch.Curr.ToCapnpNode(seg, capCurrNd); err != nil { return err } capHeadNd, err := capnp_model.NewNode(seg) if err != nil { return err } if err := ch.Head.ToCapnpNode(seg, capHeadNd); err != nil { return err } capNextNd, err := capnp_model.NewNode(seg) if err != nil { return err } if err := ch.Next.ToCapnpNode(seg, capNextNd); err != nil { return err } if err := capCh.SetCurr(capCurrNd); err != nil { return err } if err := capCh.SetHead(capHeadNd); err != nil { return err } if err := capCh.SetNext(capNextNd); err != nil { return err } if err := capCh.SetMovedTo(ch.MovedTo); err != nil { return err } if err := capCh.SetWasPreviouslyAt(ch.WasPreviouslyAt); err != nil { return err } capCh.SetMask(uint64(ch.Mask)) return nil } // ToCapnp converts a change to a capnproto message. 
func (ch *Change) ToCapnp() (*capnp.Message, error) {
	// A single segment is enough; nested nodes are allocated into it too.
	msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}

	capCh, err := capnp_patch.NewRootChange(seg)
	if err != nil {
		return nil, err
	}

	if err := ch.toCapnpChange(seg, &capCh); err != nil {
		return nil, err
	}

	return msg, nil
}

// fromCapnpChange fills `ch` from the already decoded capnp struct `capCh`.
// It is the inverse of toCapnpChange.
func (ch *Change) fromCapnpChange(capCh capnp_patch.Change) error {
	capHeadNd, err := capCh.Head()
	if err != nil {
		return err
	}

	ch.Head = &n.Commit{}
	if err := ch.Head.FromCapnpNode(capHeadNd); err != nil {
		return err
	}

	capNextNd, err := capCh.Next()
	if err != nil {
		return err
	}

	ch.Next = &n.Commit{}
	if err := ch.Next.FromCapnpNode(capNextNd); err != nil {
		return err
	}

	capCurrNd, err := capCh.Curr()
	if err != nil {
		return err
	}

	// Curr may be any node type (file, directory, ghost), so it is decoded
	// generically and then checked to be a ModNode.
	currNd, err := n.CapNodeToNode(capCurrNd)
	if err != nil {
		return err
	}

	currModNd, ok := currNd.(n.ModNode)
	if !ok {
		return e.Wrapf(ie.ErrBadNode, "unmarshalled node is no mod node")
	}

	ch.Curr = currModNd

	movedTo, err := capCh.MovedTo()
	if err != nil {
		return err
	}

	wasPreviouslyAt, err := capCh.WasPreviouslyAt()
	if err != nil {
		return err
	}

	ch.MovedTo = movedTo
	ch.WasPreviouslyAt = wasPreviouslyAt
	ch.Mask = ChangeType(capCh.Mask())
	return nil
}

// FromCapnp deserializes `msg` and writes it to `ch`.
func (ch *Change) FromCapnp(msg *capnp.Message) error {
	capCh, err := capnp_patch.ReadRootChange(msg)
	if err != nil {
		return err
	}

	return ch.fromCapnpChange(capCh)
}

// CombineChanges compresses a list of changes (in a lossy way) to one Change.
// The one change should be enough to re-create the changes that were made.
func CombineChanges(changes []*Change) *Change {
	if len(changes) == 0 {
		return nil
	}

	// Only take the latest changes:
	// changes[0] is the most recent change, changes[len-1] the oldest.
	ch := &Change{
		Mask: ChangeType(0),
		Head: changes[0].Head,
		Next: changes[0].Next,
		Curr: changes[0].Curr,
	}

	// If the node moved, save the original path in MovedTo:
	pathChanged := changes[0].Curr.Path() != changes[len(changes)-1].Curr.Path()
	isGhost := changes[0].Curr.Type() == n.NodeTypeGhost

	// Combine the mask:
	for _, change := range changes {
		ch.Mask |= change.Mask
	}

	if ch.Mask&ChangeTypeMove != 0 {
		// Take the oldest non-empty MovedTo / WasPreviouslyAt entries
		// (iterating from the oldest change towards the newest).
		for idx := len(changes) - 1; idx >= 0; idx-- {
			if refPath := changes[idx].MovedTo; refPath != "" {
				ch.MovedTo = refPath
				break
			}
		}

		for idx := len(changes) - 1; idx >= 0; idx-- {
			if refPath := changes[idx].WasPreviouslyAt; refPath != "" {
				ch.WasPreviouslyAt = refPath
				// Re-evaluate pathChanged against the recorded old path.
				pathChanged = refPath != changes[0].Curr.Path()
				break
			}
		}
	}

	// If the path did not really change, we do not want to have ChangeTypeMove
	// in the mask. This is to protect against circular moves. If it's a ghost
	// we should still include it though (for WasPreviouslyAt)
	if !pathChanged && !isGhost {
		ch.Mask &= ^ChangeTypeMove
	}

	// If the last change was not a remove, we do not need to
	// keep the remove flag (the node was apparently re-added).
	if changes[0].Mask&ChangeTypeRemove == 0 && !isGhost {
		ch.Mask &= ^ChangeTypeRemove
	}

	return ch
}



================================================
FILE: catfs/vcs/change_test.go
================================================
package vcs

import (
	"testing"

	c "github.com/sahib/brig/catfs/core"
	n "github.com/sahib/brig/catfs/nodes"
	"github.com/stretchr/testify/require"
)

func TestChangeMarshalling(t *testing.T) {
	c.WithDummyLinker(t, func(lkr *c.Linker) {
		head, err := lkr.Head()
		require.Nil(t, err)

		curr := c.MustTouch(t, lkr, "/x", 1)
		next := c.MustCommit(t, lkr, "hello")

		change := &Change{
			Mask:    ChangeTypeMove | ChangeTypeRemove,
			Head:    head,
			Next:    next,
			Curr:    curr,
			MovedTo: "/something",
		}

		msg, err := change.ToCapnp()
		require.Nil(t, err)

		newChange := &Change{}
		require.Nil(t, newChange.FromCapnp(msg))
require.Equal(t, newChange.MovedTo, "/something") require.Equal(t, newChange.Mask, ChangeTypeMove|ChangeTypeRemove) require.Equal(t, newChange.Curr, curr) require.Equal(t, newChange.Head, head) require.Equal(t, newChange.Next, next) // This check helps failing when adding new fields: require.Equal(t, change, newChange) }) } func TestChangeCombine(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { x := c.MustTouch(t, lkr, "/x", 1) c.MustCommit(t, lkr, "1") c.MustModify(t, lkr, x, 2) c.MustCommit(t, lkr, "2") y := c.MustMove(t, lkr, x, "/y") c.MustCommit(t, lkr, "move") c.MustRemove(t, lkr, y) ghost, err := lkr.LookupGhost("/y") require.Nil(t, err) status, err := lkr.Status() require.Nil(t, err) changes, err := History(lkr, ghost, status, nil) require.Nil(t, err) require.Len(t, changes, 4) require.Equal(t, changes[0].Mask, ChangeTypeRemove) require.Equal(t, changes[1].Mask, ChangeTypeMove) require.Equal(t, changes[2].Mask, ChangeTypeModify) require.Equal(t, changes[3].Mask, ChangeTypeAdd) change := CombineChanges(changes) require.Equal(t, change.MovedTo, "") require.Equal(t, change.WasPreviouslyAt, "/x") require.Equal( t, change.Mask, ChangeTypeRemove|ChangeTypeMove|ChangeTypeModify|ChangeTypeAdd, ) }) } func TestChangeCombineMoveBackAndForth(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { x := c.MustTouch(t, lkr, "/x", 1) c.MustCommit(t, lkr, "1") y := c.MustMove(t, lkr, x, "/y") c.MustCommit(t, lkr, "2") xx := c.MustMove(t, lkr, y, "/x") c.MustCommit(t, lkr, "3") status, err := lkr.Status() require.Nil(t, err) changes, err := History(lkr, xx, status, nil) require.Nil(t, err) require.Len(t, changes, 4) require.Equal(t, changes[0].Mask, ChangeTypeNone) require.Equal(t, changes[1].Mask, ChangeTypeMove) require.Equal(t, changes[2].Mask, ChangeTypeMove) require.Equal(t, changes[3].Mask, ChangeTypeAdd) change := CombineChanges(changes) require.Equal(t, "/x", change.WasPreviouslyAt) require.Equal(t, "", change.MovedTo) require.Equal(t, ChangeTypeAdd, 
change.Mask) }) } func TestChangeRemoveAndReadd(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { x := c.MustTouch(t, lkr, "/x", 1) c.MustCommit(t, lkr, "1") c.MustRemove(t, lkr, x) c.MustCommit(t, lkr, "2") xx := c.MustTouch(t, lkr, "/x", 2) c.MustCommit(t, lkr, "3") status, err := lkr.Status() require.Nil(t, err) changes, err := History(lkr, xx, status, nil) require.Nil(t, err) require.Len(t, changes, 4) require.Equal(t, changes[0].Mask, ChangeTypeNone) require.Equal(t, changes[1].Mask, ChangeTypeAdd|ChangeTypeModify) require.Equal(t, changes[2].Mask, ChangeTypeRemove) require.Equal(t, changes[3].Mask, ChangeTypeAdd) change := CombineChanges(changes) require.Equal(t, "", change.MovedTo) require.Equal(t, ChangeTypeAdd|ChangeTypeModify, change.Mask) }) } func TestChangeReplay(t *testing.T) { tcs := []struct { name string setup func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode check func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) }{ { name: "basic-add", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { return c.MustTouch(t, lkrSrc, "/x", 1) }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { dstX, err := lkrDst.LookupFile("/x") require.Nil(t, err) require.Equal(t, dstX.Size(), srcNd.Size()) require.Equal(t, dstX.TreeHash(), srcNd.TreeHash()) require.Equal(t, dstX.BackendHash(), srcNd.BackendHash()) require.Equal(t, dstX.ContentHash(), srcNd.ContentHash()) }, }, { name: "basic-modify", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { c.MustTouch(t, lkrDst, "/x", 0) return c.MustTouch(t, lkrSrc, "/x", 1) }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { dstX, err := lkrDst.LookupFile("/x") require.Nil(t, err) require.Equal(t, dstX.Size(), srcNd.Size()) require.Equal(t, dstX.TreeHash(), srcNd.TreeHash()) require.Equal(t, dstX.BackendHash(), srcNd.BackendHash()) require.Equal(t, dstX.ContentHash(), srcNd.ContentHash()) }, }, { name: "basic-remove", setup: func(t 
*testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { c.MustTouch(t, lkrDst, "/x", 1) srcX := c.MustTouch(t, lkrSrc, "/x", 1) return c.MustRemove(t, lkrSrc, srcX) }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { // it's enough to assert that it's a ghost now: _, err := lkrDst.LookupGhost("/x") require.Nil(t, err) }, }, { name: "basic-move", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { c.MustTouch(t, lkrDst, "/x", 1) srcX := c.MustTouch(t, lkrSrc, "/x", 1) c.MustCommit(t, lkrSrc, "move") return c.MustMove(t, lkrSrc, srcX, "/y") }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { // it's enough to assert that it's a ghost now: _, err := lkrDst.LookupGhost("/x") require.Nil(t, err) _, err = lkrDst.LookupFile("/y") require.Nil(t, err) }, }, { name: "basic-all", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { c.MustTouch(t, lkrDst, "/x", 1) srcX := c.MustTouch(t, lkrSrc, "/x", 1) c.MustCommit(t, lkrSrc, "touch") srcY := c.MustMove(t, lkrSrc, srcX, "/y").(*n.File) c.MustCommit(t, lkrSrc, "move") return c.MustRemove(t, lkrSrc, srcY) }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { // it's enough to assert that it's a ghost now: _, err := lkrDst.LookupGhost("/x") require.Nil(t, err) _, err = lkrDst.LookupGhost("/y") require.Nil(t, err) }, }, { name: "basic-mkdir", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { return c.MustMkdir(t, lkrSrc, "/sub") }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { dir, err := lkrDst.LookupDirectory("/sub") require.Nil(t, err) require.Equal(t, dir.Path(), "/sub") }, }, { name: "edge-conflicting-types", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { // Directory and file: c.MustMkdir(t, lkrDst, "/sub") return c.MustTouch(t, lkrSrc, "/sub", 1) }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { // The directory was purged and the file should appear: // The policy 
here is "trust the remote, it's his metadata" _, err := lkrDst.LookupFile("/sub") require.Nil(t, err) }, }, { name: "edge-modified-ghost", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { srcX := c.MustTouch(t, lkrSrc, "/x", 1) c.MustCommit(t, lkrSrc, "1") c.MustModify(t, lkrSrc, srcX, 2) c.MustCommit(t, lkrSrc, "2") return c.MustRemove(t, lkrSrc, srcX) }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { _, err := lkrDst.LookupGhost("/x") require.Nil(t, err) }, }, { name: "edge-mkdir-existing", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { c.MustMkdir(t, lkrDst, "/sub") return c.MustMkdir(t, lkrSrc, "/sub") }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { dir, err := lkrDst.LookupDirectory("/sub") require.Nil(t, err) require.Equal(t, dir.Path(), "/sub") }, }, { name: "edge-mkdir-existing-non-empty", setup: func(t *testing.T, lkrSrc, lkrDst *c.Linker) n.ModNode { c.MustMkdir(t, lkrDst, "/sub") c.MustTouch(t, lkrDst, "/sub/x", 1) return c.MustMkdir(t, lkrSrc, "/sub") }, check: func(t *testing.T, lkrSrc, lkrDst *c.Linker, srcNd n.ModNode) { dir, err := lkrDst.LookupDirectory("/sub") require.Nil(t, err) require.Equal(t, dir.Path(), "/sub") dstX, err := lkrDst.LookupFile("/sub/x") require.Nil(t, err) require.Equal(t, dstX.Path(), "/sub/x") }, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) { srcNd := tc.setup(t, lkrSrc, lkrDst) srcHead := c.MustCommit(t, lkrSrc, "post setup") srcChanges, err := History(lkrSrc, srcNd, srcHead, nil) require.Nil(t, err) ch := CombineChanges(srcChanges) require.Nil(t, ch.Replay(lkrDst)) tc.check(t, lkrSrc, lkrDst, srcNd) }) }) } } ================================================ FILE: catfs/vcs/debug.go ================================================ package vcs import ( "fmt" ) const ( printDebug = false ) func debug(args ...interface{}) { if printDebug { fmt.Println(args...) 
} } func debugf(spec string, args ...interface{}) { if printDebug { fmt.Printf(spec, args...) } } ================================================ FILE: catfs/vcs/diff.go ================================================ package vcs import ( c "github.com/sahib/brig/catfs/core" n "github.com/sahib/brig/catfs/nodes" ) // DiffPair is a pair of nodes that have a relation in regard of a change. The // change is described by the masks. type DiffPair struct { Src n.ModNode Dst n.ModNode SrcMask ChangeType DstMask ChangeType } // Diff describes a difference between two commits. type Diff struct { cfg *SyncOptions // Nodes that were added from remote. Added []n.ModNode // Nodes that were removed on remote side. Removed []n.ModNode // Nodes (of us) that are missing on the remote side. Missing []n.ModNode // Nodes from remote that were ignored. Ignored []n.ModNode // Nodes that were only moved (but nothing else) Moved []DiffPair // Merged contains nodes where sync is able to combine changes // on both sides (i.e. one side moved, another modified) Merged []DiffPair // Conflict contains nodes where sync was not able to combine // the changes made on both sides. Conflict []DiffPair } func (df *Diff) handleAdd(src n.ModNode) error { df.Added = append(df.Added, src) return nil } func (df *Diff) handleRemove(dst n.ModNode) error { if df.cfg.IgnoreDeletes { df.Ignored = append(df.Ignored, dst) return nil } df.Removed = append(df.Removed, dst) return nil } func (df *Diff) handleMissing(dst n.ModNode) error { // Handle missing files like "removed" for diff. 
df.Missing = append(df.Missing, dst) return nil } func (df *Diff) handleTypeConflict(src, dst n.ModNode) error { df.Ignored = append(df.Ignored, dst) return nil } func (df *Diff) handleConflictNode(nd n.ModNode) error { df.Ignored = append(df.Ignored, nd) return nil } func (df *Diff) handleMove(src, dst n.ModNode) error { df.Moved = append(df.Moved, DiffPair{ Src: src, Dst: dst, SrcMask: ChangeType(0), DstMask: ChangeType(0), }) return nil } func (df *Diff) handleConflict(src, dst n.ModNode, srcMask, dstMask ChangeType) error { df.Conflict = append(df.Conflict, DiffPair{ Src: src, Dst: dst, SrcMask: srcMask, DstMask: dstMask, }) return nil } func (df *Diff) handleMerge(src, dst n.ModNode, srcMask, dstMask ChangeType) error { df.Merged = append(df.Merged, DiffPair{ Src: src, Dst: dst, SrcMask: srcMask, DstMask: dstMask, }) return nil } // MakeDiff show the differences between two linkers. // // Internally it works like Sync() but does not modify anything and just // merely records what the algorithm decided to do. 
func MakeDiff(lkrSrc, lkrDst *c.Linker, headSrc, headDst *n.Commit, cfg *SyncOptions) (*Diff, error) {
	// nil config means "use the defaults":
	if cfg == nil {
		cfg = defaultSyncConfig
	}

	diff := &Diff{cfg: cfg}
	// The diff itself acts as the executor of the resolver: instead of
	// applying operations it only records them.
	rsv, err := newResolver(lkrSrc, lkrDst, headSrc, headDst, diff)
	if err != nil {
		return nil, err
	}

	if err := rsv.resolve(); err != nil {
		return nil, err
	}

	return diff, nil
}



================================================
FILE: catfs/vcs/diff_test.go
================================================
package vcs

import (
	"testing"

	c "github.com/sahib/brig/catfs/core"
	"github.com/stretchr/testify/require"
)

func setupDiffBasicSrcFile(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	c.MustTouch(t, lkrSrc, "/x.png", 1)
	c.MustTouch(t, lkrDst, "/y.png", 2)
}

func checkDiffBasicSrcFileForward(t *testing.T, lkrSrc, lkrDst *c.Linker, diff *Diff) {
	require.Empty(t, diff.Removed)
	require.Empty(t, diff.Conflict)
	require.Empty(t, diff.Ignored)
	require.Empty(t, diff.Merged)

	require.Len(t, diff.Added, 1)
	require.Equal(t, "/x.png", diff.Added[0].Path())

	require.Len(t, diff.Missing, 1)
	require.Equal(t, "/y.png", diff.Missing[0].Path())
}

func checkDiffBasicSrcFileBackward(t *testing.T, lkrSrc, lkrDst *c.Linker, diff *Diff) {
	require.Empty(t, diff.Conflict)
	require.Empty(t, diff.Ignored)
	require.Empty(t, diff.Merged)
	require.Empty(t, diff.Removed)

	require.Len(t, diff.Added, 1)
	require.Equal(t, "/y.png", diff.Added[0].Path())

	require.Len(t, diff.Missing, 1)
	require.Equal(t, "/x.png", diff.Missing[0].Path())
}

///////////////

func assertDiffIsEmpty(t *testing.T, diff *Diff) {
	require.Empty(t, diff.Added)
	require.Empty(t, diff.Removed)
	require.Empty(t, diff.Conflict)
	require.Empty(t, diff.Ignored)
	require.Empty(t, diff.Merged)
	require.Empty(t, diff.Missing)
}

func TestDiff(t *testing.T) {
	tcs := []struct {
		name          string
		setup         func(t *testing.T, lkrSrc, lkrDst *c.Linker)
		checkForward  func(t *testing.T, lkrSrc, lkrDst *c.Linker, diff *Diff)
		checkBackward func(t *testing.T, lkrSrc, lkrDst *c.Linker, diff *Diff)
	}{
		{
			name:
"basic-file-on-both-sides", setup: setupDiffBasicSrcFile, checkForward: checkDiffBasicSrcFileForward, checkBackward: checkDiffBasicSrcFileBackward, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) { c.MustTouch(t, lkrSrc, "/README.md", 42) c.MustTouch(t, lkrDst, "/README.md", 42) c.MustCommitIfPossible(t, lkrDst, "setup dst") c.MustCommitIfPossible(t, lkrSrc, "setup src") tc.setup(t, lkrSrc, lkrDst) srcStatus, err := lkrSrc.Status() require.Nil(t, err) dstStatus, err := lkrDst.Status() require.Nil(t, err) diff, err := MakeDiff(lkrSrc, lkrDst, srcStatus, dstStatus, nil) if err != nil { t.Fatalf("diff forward failed: %v", err) } tc.checkForward(t, lkrSrc, lkrDst, diff) diff, err = MakeDiff(lkrDst, lkrSrc, dstStatus, srcStatus, nil) if err != nil { t.Fatalf("diff backward failed: %v", err) } tc.checkBackward(t, lkrSrc, lkrDst, diff) // Checking the same commit should always result into an empty diff: // We could of course cheat and check the hash to be equal, // but this is helpful to validate the implementation. 
diff, err = MakeDiff(lkrSrc, lkrSrc, srcStatus, srcStatus, nil) if err != nil { t.Fatalf("diff equal src failed: %v", err) } assertDiffIsEmpty(t, diff) diff, err = MakeDiff(lkrDst, lkrDst, dstStatus, dstStatus, nil) if err != nil { t.Fatalf("diff equal dst failed: %v", err) } assertDiffIsEmpty(t, diff) }) }) } } func TestDiffWithSameLinker(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { c.MustMkdir(t, lkr, "/old/sub/") c.MustTouchAndCommit(t, lkr, "/old/sub/x", 1) c.MustMove(t, lkr, c.MustLookupDirectory(t, lkr, "/old"), "/new") // Fetch current head and status: head, err := lkr.Head() require.Nil(t, err) status, err := lkr.Status() require.Nil(t, err) // log.SetOutput(os.Stderr) // log.SetLevel(log.DebugLevel) diff, err := MakeDiff(lkr, lkr, head, status, nil) if err != nil { t.Fatalf("diff forward failed: %v", err) } require.Empty(t, diff.Added) require.Empty(t, diff.Removed) require.Empty(t, diff.Ignored) require.Empty(t, diff.Conflict) require.Empty(t, diff.Merged) require.Empty(t, diff.Missing) require.Len(t, diff.Moved, 1) require.Equal(t, diff.Moved[0].Src.Path(), "/old") require.Equal(t, diff.Moved[0].Dst.Path(), "/new") diff, err = MakeDiff(lkr, lkr, status, head, nil) if err != nil { t.Fatalf("diff backward failed: %v", err) } require.Empty(t, diff.Added) require.Empty(t, diff.Removed) require.Empty(t, diff.Ignored) require.Empty(t, diff.Conflict) require.Empty(t, diff.Merged) require.Empty(t, diff.Missing) require.Len(t, diff.Moved, 1) require.Equal(t, diff.Moved[0].Dst.Path(), "/old") require.Equal(t, diff.Moved[0].Src.Path(), "/new") diff, err = MakeDiff(lkr, lkr, status, status, nil) if err != nil { t.Fatalf("diff equal head: %v", err) } assertDiffIsEmpty(t, diff) diff, err = MakeDiff(lkr, lkr, status, status, nil) if err != nil { t.Fatalf("diff equal status: %v", err) } assertDiffIsEmpty(t, diff) }) } ================================================ FILE: catfs/vcs/history.go ================================================ package vcs 
import ( "fmt" "path" "strings" e "github.com/pkg/errors" c "github.com/sahib/brig/catfs/core" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" log "github.com/sirupsen/logrus" ) // HistoryWalker provides a way to iterate over all changes a single Node had. // It is capable of tracking a file even over multiple moves. // // The API is loosely modeled after bufio.Scanner and can be used like this: // // head, _ := lkr.Head() // nd, _ := lkr.LookupFile("/x") // walker := NewHistoryWalker(lkr, head, nd) // for walker.Next() { // walker.Change() // } // // if err := walker.Error(); err != nil { // // Handle errors. // } type HistoryWalker struct { lkr *c.Linker head *n.Commit curr n.ModNode next n.ModNode err error state *Change } // NewHistoryWalker will return a new HistoryWalker that will yield changes of // `node` starting from the state in `cmt` until the root commit if desired. // Note that it is not checked that `node` is actually part of `cmt`. func NewHistoryWalker(lkr *c.Linker, cmt *n.Commit, node n.ModNode) *HistoryWalker { return &HistoryWalker{ lkr: lkr, head: cmt, curr: node, } } // maskFromState figures out the change mask based on the current state func (hw *HistoryWalker) maskFromState(curr, next n.ModNode) ChangeType { mask := ChangeType(0) // Initial state; no succesor known yet to compare too. if next == nil { return mask } isGhostCurr := curr.Type() == n.NodeTypeGhost isGhostNext := next.Type() == n.NodeTypeGhost currHash, err := n.ContentHash(curr) if err != nil { log.Warningf("history: misisng content hash for %s (curr)", curr.Path()) return ChangeTypeNone } nextHash, err := n.ContentHash(next) if err != nil { log.Warningf("history: misisng content hash for %s (next)", next.Path()) return ChangeTypeNone } // If the hash differs, there's likely a modification going on. 
if !currHash.Equal(nextHash) { mask |= ChangeTypeModify } if next.Path() != curr.Path() { mask |= ChangeTypeMove } else { // If paths did not move, but the current node is a ghost, // then it means that the node was removed in this commit. if isGhostCurr && !isGhostNext { mask |= ChangeTypeRemove } // ...otherwise the node was re-added in this commit, // but removed previously. if !isGhostCurr && isGhostNext { mask |= ChangeTypeAdd } } return mask } func parentDirectoryForCommit(lkr *c.Linker, cmt *n.Commit, curr n.Node) (*n.Directory, error) { nextDirPath := path.Dir(curr.Path()) if nextDirPath == "/" { return nil, nil } root, err := lkr.DirectoryByHash(cmt.Root()) if err != nil { return nil, err } nd, err := root.Lookup(lkr, nextDirPath) if err != nil { if ie.IsNoSuchFileError(err) { return nil, nil } return nil, err } dir, ok := nd.(*n.Directory) if !ok { return nil, ie.ErrBadNode } return dir, nil } // Check if a node was moved and if so, return the coressponding other half. // If it was not moved, this method will return nil, MoveDirNone, nil. // // This is also supposed to work with moved directories (keep in mind that // moving directories will only create a ghost for the moved directory itself, // not the children of it): // // $ tree . // a/ // b/ // c # a file. // $ mv a f // // For this case we need to go over the parent directories of c (b and f) to // find the ghost dir "a". From there we can resolve back to "c". 
func findMovePartner(lkr *c.Linker, head *n.Commit, curr n.Node) (n.Node, c.MoveDir, error) { prev, direction, err := lkr.MoveMapping(head, curr) if err != nil { return nil, c.MoveDirNone, err } if prev != nil { return prev, direction, nil } childPath := []string{curr.Name()} for { parentDir, err := parentDirectoryForCommit(lkr, head, curr) if err != nil { return nil, c.MoveDirNone, e.Wrap(err, "bad parent dir") } if parentDir == nil { return nil, c.MoveDirNone, nil } prevDirNd, direction, err := lkr.MoveMapping(head, parentDir) if err != nil { return nil, c.MoveDirNone, err } // Advance for next round: curr = parentDir if prevDirNd == nil { // This was not moved; remember step for final lookup: childPath = append([]string{parentDir.Name()}, childPath...) continue } // At this point we know that the dir `parentDir` was moved. // Now we have to find the old version of the node and exit this for loop. var prevDir *n.Directory switch prevDirNd.Type() { case n.NodeTypeDirectory: // This case will probably not happen not very often. // Most of the time the old node in a mapping is a ghost. var ok bool prevDir, ok = prevDirNd.(*n.Directory) if !ok { return nil, c.MoveDirNone, ie.ErrBadNode } case n.NodeTypeGhost: // If it's a ghost we need to unpack it. prevDirGhost, ok := prevDirNd.(*n.Ghost) if !ok { return nil, c.MoveDirNone, e.Wrap( ie.ErrBadNode, "bad previous dir", ) } prevDir, err = prevDirGhost.OldDirectory() if err != nil { return nil, c.MoveDirNone, e.Wrap(err, "bad old directory") } default: return nil, c.MoveDirNone, fmt.Errorf("unexpected file node") } // By the current logic, the path is still reachable in // the directory the same way before. 
child, err := prevDir.Lookup(lkr, strings.Join(childPath, "/")) if err != nil { return nil, c.MoveDirNone, err } return child, direction, nil } } func getRealType(nd n.Node) n.NodeType { if nd.Type() == n.NodeTypeGhost { return nd.(*n.Ghost).OldNode().Type() } return nd.Type() } func (hw *HistoryWalker) findReferToPath(prevHeadCommit *n.Commit, prev n.Node) (string, n.Node, error) { if prev == nil { return "", nil, nil } referToPath := prev.Path() // Unpack the old ghost before doing anything with it: if prev.Type() != n.NodeTypeGhost { return referToPath, prev, nil } prevGhost, ok := prev.(*n.Ghost) if !ok { return "", nil, ie.ErrBadNode } prev = prevGhost.OldNode() // Special case: // A file has a move partner in this commit, // but there is no previous node in the commit before. // We should count this node as added therefore, not moved. prevRoot, err := hw.lkr.DirectoryByHash(prevHeadCommit.Root()) if err != nil { return "", nil, e.Wrap(err, "cannot find previous root directory") } _, err = prevRoot.Lookup(hw.lkr, prev.Path()) if ie.IsNoSuchFileError(err) { prev = nil referToPath = "" } return referToPath, prev, nil } func (hw *HistoryWalker) findDirectPrev(prevHeadCommit *n.Commit) (n.Node, bool) { prevRoot, err := hw.lkr.DirectoryByHash(prevHeadCommit.Root()) if err != nil { hw.err = e.Wrap(err, "cannot find previous root directory") return nil, false } prev, err := prevRoot.Lookup(hw.lkr, hw.curr.Path()) if ie.IsNoSuchFileError(err) { // The file did not exist in the previous commit (no ghost!) // It must have been added in this commit. hw.state = &Change{ Head: hw.head, Mask: ChangeTypeAdd, Curr: hw.curr, Next: prevHeadCommit, } // If curr is a ghost we have a rare case: // The node was added and removed in the same commit. 
if hw.curr.Type() == n.NodeTypeGhost { hw.state.Mask |= ChangeTypeRemove } hw.head = nil return nil, true } if err != nil { hw.err = e.Wrap(err, "history: prev root lookup failed") return nil, false } return prev, false } // Next advances the walker to the next commit. // Call State() to get the current state after. // If there are no commits left or an error happened, // false is returned. True otherwise. You should check // after a failing Next() if an error happened via Err() func (hw *HistoryWalker) Next() bool { if hw.err != nil { return false } if hw.head == nil { return false } // Check if this node participated in a move: prev, direction, err := findMovePartner(hw.lkr, hw.head, hw.curr) if err != nil { hw.err = err return false } // Advance to the previous commit: prevHead, err := hw.head.Parent(hw.lkr) if err != nil { hw.err = err return false } // We ran out of commits to check. if prevHead == nil { hw.state = &Change{ Head: hw.head, Mask: ChangeTypeAdd, Curr: hw.curr, Next: nil, } if hw.curr.Type() == n.NodeTypeGhost { hw.state.Mask |= ChangeTypeRemove } hw.head = nil return true } prevHeadCommit, ok := prevHead.(*n.Commit) if !ok { hw.err = e.Wrap(ie.ErrBadNode, "history: bad commit") return false } // Try to find the node "prev" is actually referring to in the old commit: referToPath, prev, err := hw.findReferToPath(prevHeadCommit, prev) if err != nil { hw.err = e.Wrap(err, "history: findReferToPath") return false } // Assumption here: The move mapping should only store one move per commit. // i.e: for move(a, b); a and b should always be in different commits. // This is enforced by the logic in MakeCommit() if prev == nil || direction != c.MoveDirSrcToDst { // No valid move mapping found, node was probably not moved. // Assume that we can reach it directly via it's path. 
var hasNext bool prev, hasNext = hw.findDirectPrev(prevHeadCommit) if prev == nil { return hasNext } } prevModNode, ok := prev.(n.ModNode) if !ok { hw.err = e.Wrap(ie.ErrBadNode, "history: bad mod node") return false } // Pack up the current state: hw.state = &Change{ Head: hw.head, Mask: hw.maskFromState(hw.curr, prevModNode), Curr: hw.curr, Next: prevHeadCommit, } if getRealType(prev) != getRealType(hw.curr) { // Edge case: The node changed its types. This can happen when we // remove a file and create a directory in its place. hw.state.Mask = ChangeTypeAdd hw.state.Next = nil return false } // Special case #1: A ghost that still has a move partner. // This means the node here was moved to `prev` in this commit. if hw.curr.Type() == n.NodeTypeGhost && direction == c.MoveDirDstToSrc { // Indicate that this node was indeed removed, // but still lives somewhere else. hw.state.Mask |= ChangeTypeMove hw.state.MovedTo = referToPath } // Special case #2: A non-ghost that was moved from somewhere. if hw.curr.Type() != n.NodeTypeGhost && direction == c.MoveDirSrcToDst { hw.state.Mask |= ChangeTypeMove hw.state.WasPreviouslyAt = referToPath } // Swap for the next call to Next(): hw.curr, hw.next = prevModNode, hw.curr hw.head = prevHeadCommit return true } // State returns the current change state. // Note that the change may have ChangeTypeNone as Mask if nothing changed. // If you only want states where it actually changed, just filter those. func (hw *HistoryWalker) State() *Change { return hw.state } // Err returns the last happened error or nil if none. func (hw *HistoryWalker) Err() error { return hw.err } // History returns a list of `nd`'s states starting with the commit in `start` // and stopping at `stop`. `stop` can be nil; in this case all commits will be // iterated. The returned list has the most recent change upfront, and the // latest change as last element. 
func History(lkr *c.Linker, nd n.ModNode, start, stop *n.Commit) ([]*Change, error) {
	states := make([]*Change, 0)
	walker := NewHistoryWalker(lkr, start, nd)
	for walker.Next() {
		state := walker.State()
		states = append(states, state)

		// Stop searching when we iterated deep enough:
		if stop != nil && state.Head.TreeHash().Equal(stop.TreeHash()) {
			break
		}
	}

	if err := walker.Err(); err != nil {
		return nil, err
	}

	return states, nil
}

================================================
FILE: catfs/vcs/history_test.go
================================================
package vcs

import (
	"testing"
	"time"

	c "github.com/sahib/brig/catfs/core"
	"github.com/sahib/brig/catfs/db"
	n "github.com/sahib/brig/catfs/nodes"
	h "github.com/sahib/brig/util/hashlib"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
)

func init() {
	log.SetLevel(log.WarnLevel)
}

// historySetup describes one expected history: the commits, paths and change
// masks the walker should yield (most recent first), plus the head commit
// and node the walk starts from.
type historySetup struct {
	commits []*n.Commit
	paths   []string
	changes []ChangeType
	head    *n.Commit
	node    n.ModNode
}

/////////////// ACTUAL TESTCASES ///////////////

// Plain modify history: three commits touching the same file.
func setupHistoryBasic(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	_, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	file, c3 := c.MustTouchAndCommit(t, lkr, "/x.png", 3)

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to retrieve status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			ChangeTypeModify,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: file,
	}
}

// One commit in the middle does not change the file at all ("hole").
func setupHistoryBasicHole(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	_, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)

	// Needed to have a commit that has changes:
	c.MustTouch(t, lkr, "/other", 23)
	file, c3 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to retrieve status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			ChangeTypeNone,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: file,
	}
}

// File is added and removed within the same (staging) commit.
func setupHistoryRemoveImmediately(t *testing.T, lkr *c.Linker) *historySetup {
	x := c.MustTouch(t, lkr, "/x", 1)
	c.MustRemove(t, lkr, x)

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to retrieve status: %v", err)
	}

	ghostX, err := lkr.LookupGhost("/x")
	require.Nil(t, err)

	return &historySetup{
		commits: []*n.Commit{status},
		paths: []string{
			"/x",
		},
		changes: []ChangeType{
			ChangeTypeRemove | ChangeTypeAdd,
		},
		head: status,
		node: ghostX,
	}
}

// File is modified, then removed in a later commit.
func setupHistoryRemoved(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustRemove(t, lkr, file)
	c3 := c.MustCommit(t, lkr, "after remove")

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to acquire status: %v", err)
	}

	// removing will copy file and make that a ghost.
	// i.e. we need to re-lookup it:
	ghost, err := lkr.LookupGhost(file.Path())
	if err != nil {
		t.Fatalf("Failed to lookup ghost at %s: %v", file.Path(), err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			ChangeTypeRemove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: ghost,
	}
}

// Single committed move.
func setupHistoryMoved(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustMove(t, lkr, file, "/y.png")
	c3 := c.MustCommit(t, lkr, "post-move")

	return &historySetup{
		commits: []*n.Commit{c3, c2, c1},
		paths: []string{
			"/y.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c3,
		node: file,
	}
}

// Move that is still only in the staging commit.
func setupHistoryMoveStaging(t *testing.T, lkr *c.Linker) *historySetup {
	c.MustTouch(t, lkr, "/x.png", 1)
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustMove(t, lkr, file, "/y.png")

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to retrieve status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c2, c1},
		paths: []string{
			"/y.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: file,
	}
}

// Node is created and moved before the first commit.
func setupMoveInitial(t *testing.T, lkr *c.Linker) *historySetup {
	file := c.MustTouch(t, lkr, "/x.png", 1)
	c.MustMove(t, lkr, file, "/y.png")

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to retrieve status: %v", err)
	}

	// Should act like the node was added as "y.png",
	// even though it was moved.
	return &historySetup{
		commits: []*n.Commit{status},
		paths: []string{
			"/y.png",
		},
		changes: []ChangeType{
			ChangeTypeAdd,
		},
		head: status,
		node: file,
	}
}

// Move and modify inside the same commit.
func setupHistoryMoveAndModify(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	newFile := c.MustMove(t, lkr, file, "/y.png")
	c.MustModify(t, lkr, newFile.(*n.File), 42)
	c3 := c.MustCommit(t, lkr, "post-move-modify")

	return &historySetup{
		commits: []*n.Commit{c3, c2, c1},
		paths: []string{
			"/y.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeModify | ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c3,
		node: file,
	}
}

// Move and modify, still uncommitted in staging.
func setupHistoryMoveAndModifyStage(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	newFile := c.MustMove(t, lkr, file, "/y.png")
	c.MustModify(t, lkr, newFile.(*n.File), 42)

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to retrieve status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c2, c1},
		paths: []string{
			"/y.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeModify | ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: file,
	}
}

// Remove a file and re-add it later with the same content.
func setupHistoryRemoveReadd(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustRemove(t, lkr, file)
	c3 := c.MustCommit(t, lkr, "after remove")
	file, c4 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)

	return &historySetup{
		commits: []*n.Commit{c4, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeAdd,
			ChangeTypeRemove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c4,
		node: file,
	}
}

// Remove a file and re-add it later with different content.
func setupHistoryRemoveReaddModify(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustRemove(t, lkr, file)
	c3 := c.MustCommit(t, lkr, "after remove")
	file, c4 := c.MustTouchAndCommit(t, lkr, "/x.png", 255)

	return &historySetup{
		commits: []*n.Commit{c4, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeAdd | ChangeTypeModify,
			ChangeTypeRemove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c4,
		node: file,
	}
}

// Remove a file and re-add the identical content (no modify bit expected).
func setupHistoryRemoveReaddNoModify(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustRemove(t, lkr, file)
	c3 := c.MustCommit(t, lkr, "after remove")
	file, c4 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)

	return &historySetup{
		commits: []*n.Commit{c4, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeAdd,
			ChangeTypeRemove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c4,
		node: file,
	}
}

// Move a file away and back to its original path over two commits.
func setupHistoryMoveCircle(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	newFile := c.MustMove(t, lkr, file, "/y.png")
	c3 := c.MustCommit(t, lkr, "move to y.png")
	newOldFile := c.MustMove(t, lkr, newFile, "/x.png")
	c4 := c.MustCommit(t, lkr, "move back to x.png")

	return &historySetup{
		commits: []*n.Commit{c4, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/y.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeMove,
			ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c4,
		node: newOldFile,
	}
}

// Two files moved onto the same destination; history of the overwritten one.
func setupHistoryMoveSamePlaceLeft(t *testing.T, lkr *c.Linker) *historySetup {
	x := c.MustTouch(t, lkr, "/x", 1)
	y := c.MustTouch(t, lkr, "/y", 1)
	c1 := c.MustCommit(t, lkr, "pre-move")
	c.MustMove(t, lkr, x, "/z")
	c.MustMove(t, lkr, y, "/z")
	c2 := c.MustCommit(t, lkr, "post-move")

	xGhost, err := lkr.LookupGhost("/x")
	require.Nil(t, err)

	return &historySetup{
		commits: []*n.Commit{c2, c1},
		paths: []string{
			"/x",
			"/x",
		},
		changes: []ChangeType{
			// This file was removed, since the destination "z"
			// was overwritten by "y" and thus we may not count it
			// as moved.
			ChangeTypeRemove,
			ChangeTypeAdd,
		},
		head: c2,
		node: xGhost,
	}
}

// Remove a file and create a directory with the same name afterwards.
func setupHistoryTypeChange(t *testing.T, lkr *c.Linker) *historySetup {
	x := c.MustTouch(t, lkr, "/x", 1)
	c.MustCommit(t, lkr, "added")
	c.MustRemove(t, lkr, x)
	c.MustCommit(t, lkr, "removed")
	dir := c.MustMkdir(t, lkr, "/x")
	c3 := c.MustCommit(t, lkr, "mkdir")

	return &historySetup{
		commits: []*n.Commit{c3},
		paths: []string{
			"/x",
		},
		changes: []ChangeType{
			// The node at "/x" changed its type (file -> directory),
			// so the directory counts as a fresh add and history
			// does not continue past the type change.
			ChangeTypeAdd,
		},
		head: c3,
		node: dir,
	}
}

// Two files moved onto the same destination; history of the surviving one.
func setupHistoryMoveSamePlaceRight(t *testing.T, lkr *c.Linker) *historySetup {
	x := c.MustTouch(t, lkr, "/x", 1)
	y := c.MustTouch(t, lkr, "/y", 1)
	c1 := c.MustCommit(t, lkr, "pre-move")
	c.MustMove(t, lkr, x, "/z")
	c.MustMove(t, lkr, y, "/z")
	c2 := c.MustCommit(t, lkr, "post-move")

	yGhost, err := lkr.LookupGhost("/y")
	require.Nil(t, err)

	return &historySetup{
		commits: []*n.Commit{c2, c1},
		paths: []string{
			"/y",
			"/y",
		},
		changes: []ChangeType{
			ChangeTypeMove | ChangeTypeRemove,
			ChangeTypeAdd,
		},
		head: c2,
		node: yGhost,
	}
}

// Move a file, then re-add the old path; history of the moved node.
func setupHistoryMoveAndReaddFromMoved(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	newFile := c.MustMove(t, lkr, file, "/y.png")
	_, c3 := c.MustTouchAndCommit(t, lkr, "/x.png", 23)

	return &historySetup{
		commits: []*n.Commit{c3, c2, c1},
		paths: []string{
			"/y.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c3,
		node: newFile,
	}
}

// Several moves plus a modification within a single commit.
func setupHistoryMultipleMovesPerCommit(t *testing.T, lkr *c.Linker) *historySetup {
	// Check if we can track multiple moves per commit:
	fileX, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	fileY := c.MustMove(t, lkr, fileX, "/y.png")
	c.MustMove(t, lkr, fileY, "/z.png")

	fileZNew, err := c.Stage(
		lkr,
		"/z.png",
		h.TestDummy(t, 2),
		h.TestDummy(t, 2),
		uint64(2),
		-1,
		nil,
		time.Now(),
		false,
	)
	require.Nil(t, err)

	c2 := c.MustCommit(t, lkr, "Moved around")

	return &historySetup{
		commits: []*n.Commit{c2, c1},
		paths: []string{
			"/z.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeMove | ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c2,
		node: fileZNew,
	}
}

// Several moves within the uncommitted staging area.
func setupHistoryMultipleMovesInStage(t *testing.T, lkr *c.Linker) *historySetup {
	// Check if we can track multiple moves per commit:
	fileX, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	fileY := c.MustMove(t, lkr, fileX, "/y.png")
	fileZ := c.MustMove(t, lkr, fileY, "/z.png")

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to acquire status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c1},
		paths: []string{
			"/z.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeMove,
			ChangeTypeAdd,
		},
		head: status,
		node: fileZ,
	}
}

// Move a file, then re-add the old path; history of the re-added node.
func setupHistoryMoveAndReaddFromAdded(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustMove(t, lkr, file, "/y.png")
	c3 := c.MustCommit(t, lkr, "move to y.png")
	readdedFile, c4 := c.MustTouchAndCommit(t, lkr, "/x.png", 23)

	return &historySetup{
		commits: []*n.Commit{c4, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeAdd | ChangeTypeModify,
			ChangeTypeMove | ChangeTypeRemove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: c4,
		node: readdedFile,
	}
}

// Moving a directory should be reflected in its children's history.
func setupMoveDirectoryWithChild(t *testing.T, lkr *c.Linker) *historySetup {
	dir := c.MustMkdir(t, lkr, "/sub")
	_, c1 := c.MustTouchAndCommit(t, lkr, "/sub/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/sub/x.png", 2)
	c.MustMove(t, lkr, dir, "/moved-sub")
	c3 := c.MustCommit(t, lkr, "moved")

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to get status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1},
		paths: []string{
			"/moved-sub/x.png",
			"/moved-sub/x.png",
			"/sub/x.png",
			"/sub/x.png",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: file,
	}
}

// History of the directory node itself over a move.
func setupDirectoryHistory(t *testing.T, lkr *c.Linker) *historySetup {
	dir := c.MustMkdir(t, lkr, "/src")
	_, c1 := c.MustTouchAndCommit(t, lkr, "/src/x.png", 1)
	_, c2 := c.MustTouchAndCommit(t, lkr, "/src/x.png", 2)
	newDir := c.MustMove(t, lkr, dir, "/dst")
	c3 := c.MustCommit(t, lkr, "move")

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to get status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1},
		paths: []string{
			"/dst",
			"/dst",
			"/src",
			"/src",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			ChangeTypeMove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: newDir,
	}
}

// History as seen from the ghost left behind by a move.
func setupGhostHistory(t *testing.T, lkr *c.Linker) *historySetup {
	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	file, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	c.MustMove(t, lkr, file, "/y.png")
	c3 := c.MustCommit(t, lkr, "move")

	ghost, err := lkr.LookupGhost("/x.png")
	require.Nil(t, err)

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("Failed to get status: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1},
		paths: []string{
			"/x.png",
			"/x.png",
			"/x.png",
			"/x.png",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			// The "ChangeTypeMove" here is a hint that
			// this ghost was part of a move.
			ChangeTypeMove | ChangeTypeRemove,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: ghost,
	}
}

// Edge case: the history of the root directory itself.
func setupEdgeRoot(t *testing.T, lkr *c.Linker) *historySetup {
	init, err := lkr.Head()
	if err != nil {
		t.Fatalf("could not get head: %v", err)
	}

	_, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
	_, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
	_, c3 := c.MustTouchAndCommit(t, lkr, "/x.png", 3)

	status, err := lkr.Status()
	if err != nil {
		t.Fatalf("failed to get status: %v", err)
	}

	root, err := lkr.Root()
	if err != nil {
		t.Fatalf("failed to retrieve root: %v", err)
	}

	return &historySetup{
		commits: []*n.Commit{status, c3, c2, c1, init},
		paths: []string{
			"/",
			"/",
			"/",
			"/",
			"/",
		},
		changes: []ChangeType{
			ChangeTypeNone,
			ChangeTypeModify,
			ChangeTypeModify,
			ChangeTypeModify,
			ChangeTypeAdd,
		},
		head: status,
		node: root,
	}
}

// setupFunc builds one scenario and returns the expected history.
type setupFunc func(t *testing.T, lkr *c.Linker) *historySetup

// Registry bank for all testcases:
func TestHistoryWalker(t *testing.T) {
	tcs := []struct {
		name  string
		setup setupFunc
	}{
		{
			name:  "no-frills",
			setup: setupHistoryBasic,
		}, {
			name:  "holes",
			setup: setupHistoryBasicHole,
		}, {
			name:  "remove-it",
			setup: setupHistoryRemoved,
		}, {
			name:  "remove-readd-simple",
			setup: setupHistoryRemoveReadd,
		}, {
			name:  "remove-immedidately",
			setup: setupHistoryRemoveImmediately,
		}, {
			name:  "remove-readd-modify",
			setup: setupHistoryRemoveReaddModify,
		}, {
			name:  "remove-readd-no-modify",
			setup: setupHistoryRemoveReaddNoModify,
		}, {
			name:  "move-once",
			setup: setupHistoryMoved,
		}, {
			name:  "move-multiple-per-commit",
			setup: setupHistoryMultipleMovesPerCommit,
		}, {
			name:  "move-multiple-per-stage",
			setup: setupHistoryMultipleMovesInStage,
		}, {
			name:  "move-once-stage",
			setup: setupHistoryMoveStaging,
		}, {
			name:  "move-initial",
			setup: setupMoveInitial,
		}, {
			name:  "move-modify",
			setup: setupHistoryMoveAndModify,
		}, {
			name:  "move-to-same-place-left",
			setup: setupHistoryMoveSamePlaceLeft,
		}, {
			name:  "move-to-same-place-right",
			setup: setupHistoryMoveSamePlaceRight,
		}, {
			name:  "move-modify-stage",
			setup: setupHistoryMoveAndModifyStage,
		}, {
			name:  "move-circle",
			setup: setupHistoryMoveCircle,
		}, {
			name:  "move-readd-from-moved-perspective",
			setup: setupHistoryMoveAndReaddFromMoved,
		}, {
			name:  "move-readd-from-readded-perspective",
			setup: setupHistoryMoveAndReaddFromAdded,
		}, {
			name:  "move-directory-with-child",
			setup: setupMoveDirectoryWithChild,
		}, {
			name:  "directory-simple",
			setup: setupDirectoryHistory,
		}, {
			name:  "ghost-simple",
			setup: setupGhostHistory,
		}, {
			name:  "edge-root",
			setup: setupEdgeRoot,
		}, {
			name:  "edge-type-change",
			setup: setupHistoryTypeChange,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			c.WithDummyLinker(t, func(lkr *c.Linker) {
				setup := tc.setup(t, lkr)
				testHistoryRunner(t, lkr, setup)
			})
		})
	}
}

// Actual test runner: walks the node's history and compares each yielded
// state against the expectations recorded in `setup`.
func testHistoryRunner(t *testing.T, lkr *c.Linker, setup *historySetup) {
	idx := 0
	walker := NewHistoryWalker(lkr, setup.head, setup.node)
	for walker.Next() {
		state := walker.State()
		// fmt.Println("TYPE", state.Mask)
		// fmt.Println("HEAD", state.Head)
		// fmt.Println("NEXT", state.Next)
		// fmt.Println("===")

		if idx >= len(setup.paths) {
			t.Fatalf("more history entries than expected")
		}

		if setup.paths[idx] != state.Curr.Path() {
			t.Fatalf(
				"Wrong path at index `%d`: %s (want: %s)",
				idx+1,
				state.Curr.Path(),
				setup.paths[idx],
			)
		}

		if state.Mask != setup.changes[idx] {
			t.Errorf(
				"%d: Wrong type of state: %v (want: %s)",
				idx,
				state.Mask,
				setup.changes[idx],
			)
		}

		if !setup.commits[idx].TreeHash().Equal(state.Head.TreeHash()) {
			t.Fatalf("Hash in commit differs")
		}

		idx++
	}

	if err := walker.Err(); err != nil {
		t.Fatalf(
			"walker failed at index (%d/%d): %v",
			idx+1,
			len(setup.commits),
			err,
		)
	}
}

// Test the History() utility based on HistoryWalker.
func TestHistoryUtil(t *testing.T) {
	c.WithDummyLinker(t, func(lkr *c.Linker) {
		// Each version of the file is snapshotted via Copy(Inode()) so that
		// later modifications do not mutate the expected value.
		c1File, c1 := c.MustTouchAndCommit(t, lkr, "/x.png", 1)
		c1File = c1File.Copy(c1File.Inode()).(*n.File)

		c2File, c2 := c.MustTouchAndCommit(t, lkr, "/x.png", 2)
		c2File = c2File.Copy(c2File.Inode()).(*n.File)

		c3File := c.MustMove(t, lkr, c2File.Copy(c2File.Inode()), "/y.png")
		c3File = c3File.Copy(c3File.Inode()).(*n.File)
		c3 := c.MustCommit(t, lkr, "move to y.png")

		c4File, c4 := c.MustTouchAndCommit(t, lkr, "/y.png", 23)
		c4File = c4File.Copy(c4File.Inode()).(*n.File)

		states, err := History(lkr, c4File, c4, nil)
		if err != nil {
			t.Fatalf("History without stop commit failed: %v", err)
		}

		// Expected history, most recent change first (see History()).
		expected := []*Change{
			{
				Head: c4,
				Curr: c4File,
				Mask: ChangeTypeModify,
			}, {
				Head: c3,
				Curr: c3File,
				Mask: ChangeTypeMove,
			}, {
				Head: c2,
				Curr: c2File,
				Mask: ChangeTypeModify,
			}, {
				Head: c1,
				Curr: c1File,
				Mask: ChangeTypeAdd,
			},
		}

		for idx, state := range states {
			expect := expected[idx]
			require.Equal(t, state.Mask, expect.Mask, "Mask differs")
			require.Equal(t, state.Head, expect.Head, "Head differs")
			require.Equal(t, state.Curr, expect.Curr, "Curr differs")
		}
	})
}

// TestHistoryWithNoParent checks that a node committed on top of a
// parent-less (initial) commit shows up as a single Add.
func TestHistoryWithNoParent(t *testing.T) {
	c.WithDummyKv(t, func(kv db.Database) {
		lkr := c.NewLinker(kv)
		lkr.SetOwner("alice")

		file, head := c.MustTouchAndCommit(t, lkr, "/x", 1)
		hist, err := History(lkr, file, head, nil)
		require.Nil(t, err)
		require.Len(t, hist, 1)
		require.Equal(t, hist[0].Mask, ChangeTypeAdd)
	})
}

// Regression test:
// Directories lose their move history
// when restarting the daemon in between.
func TestHistoryMovedDirsWithReloadedLinker(t *testing.T) {
	// Both runs (before and after the simulated restart) must see the
	// same two-entry history: Move on top of Add.
	validateHist := func(hist []*Change) {
		require.Len(t, hist, 2)
		require.Equal(t, hist[0].Mask, ChangeTypeMove)
		require.Equal(t, hist[1].Mask, ChangeTypeAdd)
	}

	c.WithReloadingLinker(t, func(lkr *c.Linker) {
		childDir := c.MustMkdir(t, lkr, "/child")
		c.MustCommit(t, lkr, "created")
		movedDir := c.MustMove(t, lkr, childDir, "/moved_child")
		status, err := lkr.Status()
		require.Nil(t, err)

		hist, err := History(lkr, movedDir, status, nil)
		require.Nil(t, err)
		validateHist(hist)
	}, func(lkr *c.Linker) {
		// Second callback runs with a freshly loaded linker instance:
		status, err := lkr.Status()
		require.Nil(t, err)

		childDir, err := lkr.LookupDirectory("/moved_child")
		require.Nil(t, err)

		hist, err := History(lkr, childDir, status, nil)
		require.Nil(t, err)
		validateHist(hist)
	})
}

// Regression test:
// History of a nested directory (and a file inside it) must survive a
// move of an ancestor directory.
func TestHistoryOfMovedNestedDir(t *testing.T) {
	c.WithDummyLinker(t, func(lkr *c.Linker) {
		c.MustMkdir(t, lkr, "/src/core")
		c.MustTouch(t, lkr, "/src/core/linker.go", 3)
		c.MustCommit(t, lkr, "added")
		c.MustMove(t, lkr, c.MustLookupDirectory(t, lkr, "/src"), "/dst")
		c.MustCommit(t, lkr, "move")

		status, err := lkr.Status()
		require.Nil(t, err)

		// This raised an error before, since "/dst" was missing
		// in the "added" commit.
		hist, err := History(lkr, c.MustLookupDirectory(t, lkr, "/dst/core"), status, nil)
		require.Nil(t, err)

		require.Equal(t, "/dst/core", hist[0].Curr.Path())
		require.Equal(t, ChangeTypeNone, hist[0].Mask)
		require.Equal(t, "/dst/core", hist[1].Curr.Path())
		require.Equal(t, ChangeTypeMove, hist[1].Mask)
		require.Equal(t, "/src/core", hist[1].WasPreviouslyAt)
		require.Equal(t, "/src/core", hist[2].Curr.Path())
		require.Equal(t, ChangeTypeAdd, hist[2].Mask)

		// The same must hold for a file nested below the moved dir:
		file, err := lkr.LookupModNode("/dst/core/linker.go")
		require.Nil(t, err)

		hist, err = History(lkr, file, status, nil)
		require.Nil(t, err)

		require.Equal(t, "/dst/core/linker.go", hist[0].Curr.Path())
		require.Equal(t, ChangeTypeNone, hist[0].Mask)
		require.Equal(t, "/dst/core/linker.go", hist[1].Curr.Path())
		require.Equal(t, ChangeTypeMove, hist[1].Mask)
		require.Equal(t, "/src/core/linker.go", hist[1].WasPreviouslyAt)
		require.Equal(t, "/src/core/linker.go", hist[2].Curr.Path())
		require.Equal(t, ChangeTypeAdd, hist[2].Mask)
	})
}

================================================
FILE: catfs/vcs/mapper.go
================================================
package vcs

// NOTE ON CODING STYLE:
// If you modify something in here, make sure to always
// include "src" or "dst" in the symbol name to indicate
// to which side of the sync/diff this symbol belongs!
// Too many hours have been spent on confused debugging.

import (
	"fmt"
	"path"

	e "github.com/pkg/errors"
	c "github.com/sahib/brig/catfs/core"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	"github.com/sahib/brig/util/trie"
	log "github.com/sirupsen/logrus"
)

// MapPair is a pair of nodes (a file or a directory)
// One of Src and Dst might be nil:
// - If Src is nil, the node was removed on the remote side.
// - If Dst is nil, the node was added on the remote side.
//
// Both shall never be nil at the same time.
//
// If TypeMismatch is true, nodes have a different type
// and need conflict resolution.
//
// If SrcWasRemoved is true, the node was deleted on the
// remote's side and we might need to propagate this remove.
// Otherwise, if src is nil, dst can be considered as missing
// file on src's side.
//
// If SrcWasMoved is true, the two nodes were purely moved,
// but not modified otherwise.
type MapPair struct {
	Src n.ModNode
	Dst n.ModNode

	SrcWasRemoved bool
	SrcWasMoved   bool
	TypeMismatch  bool
}

// flags that are set during the mapper run.
// The zero value of this struct should mean "disabled".
type flags struct {
	// The node was visited on the source side.
	// This should prohibit duplicate visits.
	srcVisited bool

	// The file was already reported/tested equal on src side.
	srcHandled bool

	// The file was already reported/tested equal on dst side.
	dstHandled bool

	// The directory consists completely of other src reports.
	srcComplete bool

	// The directory consists completely of other dst reports.
	dstComplete bool
}

// Mapper holds the state for the mapping algorithm.
type Mapper struct {
	lkrSrc, lkrDst *c.Linker
	srcRoot        n.Node
	srcHead        *n.Commit
	dstHead        *n.Commit
	flagsRoot      *trie.Node
	fn             func(pair MapPair) error
}

// getFlags returns the flags stored for `path` in the trie,
// lazily inserting a fresh zero-value flags struct if none exists yet.
func (ma *Mapper) getFlags(path string) *flags {
	child := ma.flagsRoot.Lookup(path)
	if child == nil {
		child = ma.flagsRoot.InsertWithData(path, &flags{})
	}

	if child.Data == nil {
		child.Data = &flags{}
	}

	return child.Data.(*flags)
}

// The set*/is* helpers below are thin, path-keyed accessors
// over the per-node flags stored in the trie.

func (ma *Mapper) setSrcVisited(nd n.Node) {
	ma.getFlags(nd.Path()).srcVisited = true
}

func (ma *Mapper) setSrcHandled(nd n.Node) {
	ma.getFlags(nd.Path()).srcHandled = true
}

func (ma *Mapper) setDstHandled(nd n.Node) {
	ma.getFlags(nd.Path()).dstHandled = true
}

func (ma *Mapper) setSrcComplete(nd n.Node) {
	ma.getFlags(nd.Path()).srcComplete = true
}

func (ma *Mapper) setDstComplete(nd n.Node) {
	ma.getFlags(nd.Path()).dstComplete = true
}

func (ma *Mapper) isSrcVisited(nd n.Node) bool {
	return ma.getFlags(nd.Path()).srcVisited
}

func (ma *Mapper) isSrcHandled(nd n.Node) bool {
	return ma.getFlags(nd.Path()).srcHandled
}

func (ma *Mapper) isDstHandled(nd n.Node) bool {
	return ma.getFlags(nd.Path()).dstHandled
}

func (ma *Mapper) isSrcComplete(nd n.Node) bool {
	return ma.getFlags(nd.Path()).srcComplete
}

func (ma *Mapper) isDstComplete(nd n.Node) bool {
	return ma.getFlags(nd.Path()).dstComplete
}

////////////////////

// report marks both sides as handled and forwards the pair to the
// user-supplied callback `ma.fn`.
func (ma *Mapper) report(src, dst n.ModNode, typeMismatch, isRemove, isMove bool) error {
	if src != nil {
		ma.setSrcHandled(src)
	}

	if dst != nil {
		ma.setDstHandled(dst)
	}

	debug("=> report", src, dst)

	return ma.fn(MapPair{
		Src:           src,
		Dst:           dst,
		TypeMismatch:  typeMismatch,
		SrcWasRemoved: isRemove,
		SrcWasMoved:   isMove,
	})
}

// reportByType decides what kind of pair (mismatch, move, modify or
// nothing at all) `src` and `dst` form and reports it accordingly.
func (ma *Mapper) reportByType(src, dst n.ModNode) error {
	if src == nil || dst == nil {
		return ma.report(src, dst, false, false, false)
	}

	isTypeMismatch := src.Type() != dst.Type()
	if isTypeMismatch {
		return ma.report(src, dst, isTypeMismatch, false, false)
	}

	if src.ContentHash().Equal(dst.ContentHash()) {
		// If the files are equal, but the location changed,
		// the file were moved.
		if src.Path() != dst.Path() {
			return ma.report(src, dst, isTypeMismatch, false, true)
		}

		// The files appear to be equal.
		// We need to remember to not output them again.
		ma.setSrcHandled(src)
		ma.setDstHandled(dst)
		return nil
	}

	return ma.report(src, dst, isTypeMismatch, false, false)
}

// mapFile maps the src file `srcCurr` to whatever node lives at
// `dstFilePath` in dst's head commit.
func (ma *Mapper) mapFile(srcCurr *n.File, dstFilePath string) error {
	// Check if we already visited this file.
	if ma.isSrcVisited(srcCurr) {
		return nil
	}

	debug("map file", srcCurr.Path(), dstFilePath)

	// Remember that we visited this node.
	ma.setSrcVisited(srcCurr)

	dstCurr, err := ma.lkrDst.LookupNodeAt(ma.dstHead, dstFilePath)
	if err != nil && !ie.IsNoSuchFileError(err) {
		return err
	}

	if dstCurr == nil {
		// We do not have this node yet, mark it for copying.
		return ma.report(srcCurr, nil, false, false, false)
	}

	switch typ := dstCurr.Type(); typ {
	case n.NodeTypeDirectory:
		// Our node seems to be a directory and theirs a file.
		// That's not something we can fix.
		dstDir, ok := dstCurr.(*n.Directory)
		if !ok {
			return ie.ErrBadNode
		}

		// File and Directory don't go well together.
		return ma.report(srcCurr, dstDir, true, false, false)
	case n.NodeTypeFile:
		// We have two competing files.
		dstFile, ok := dstCurr.(*n.File)
		if !ok {
			return ie.ErrBadNode
		}

		return ma.reportByType(srcCurr, dstFile)
	case n.NodeTypeGhost:
		// It's still possible that the file was moved or removed on our side.
		aliveDstCurr, err := ma.ghostToAlive(ma.lkrDst, ma.dstHead, dstCurr)
		if err != nil {
			return err
		}

		if aliveDstCurr == nil {
			dstGhost, ok := dstCurr.(*n.Ghost)
			if !ok {
				return ie.ErrBadNode
			}

			// File was removed by us.
			return ma.reportByType(srcCurr, dstGhost)
		}

		return ma.reportByType(srcCurr, aliveDstCurr)
	default:
		return e.Wrapf(ie.ErrBadNode, "Unexpected node type in syncFile: %v", typ)
	}
}

// mapDirectoryContents recurses over the children of `srcCurr` and maps
// each of them against the corresponding path below `dstPath`.
func (ma *Mapper) mapDirectoryContents(srcCurr *n.Directory, dstPath string) error {
	srcChildren, err := srcCurr.ChildrenSorted(ma.lkrSrc)
	if err != nil {
		return err
	}

	for _, srcChild := range srcChildren {
		childDstPath := path.Join(dstPath, srcChild.Name())
		switch srcChild.Type() {
		case n.NodeTypeDirectory:
			srcChildDir, ok := srcChild.(*n.Directory)
			if !ok {
				return ie.ErrBadNode
			}

			if err := ma.mapDirectory(srcChildDir, childDstPath, false); err != nil {
				return err
			}

			ma.setSrcHandled(srcChildDir)
			dstCurrNd, err := ma.lkrDst.LookupModNodeAt(ma.dstHead, childDstPath)
			if err == nil {
				ma.setDstHandled(dstCurrNd)
			}
		case n.NodeTypeFile:
			srcChildFile, ok := srcChild.(*n.File)
			if !ok {
				return ie.ErrBadNode
			}

			if err := ma.mapFile(srcChildFile, childDstPath); err != nil {
				return err
			}

			ma.setSrcHandled(srcChildFile)
		case n.NodeTypeGhost:
			// remote ghosts are ignored, since they were handled beforehand.
		default:
			return ie.ErrBadNode
		}
	}

	return nil
}

// mapDirectory maps the src directory `srcCurr` to whatever node lives
// at `dstPath` in dst's head. `force` skips the already-visited check
// (used for the recursive move-resolution call below).
func (ma *Mapper) mapDirectory(srcCurr *n.Directory, dstPath string, force bool) error {
	if !force {
		if ma.isSrcVisited(srcCurr) {
			return nil
		}
	}

	log.Debugf("mapping dir %s <-> %s", srcCurr.Path(), dstPath)
	ma.setSrcVisited(srcCurr)

	dstCurrNd, err := ma.lkrDst.LookupModNodeAt(ma.dstHead, dstPath)
	if err != nil && !ie.IsNoSuchFileError(err) {
		return err
	}

	if dstCurrNd == nil {
		// We never heard of this directory apparently. Go sync it.
		return ma.report(srcCurr, nil, false, false, false)
	}

	// Special case: The node might have been moved on dst's side.
	// We might notice this, if dst type is a ghost.
	if dstCurrNd.Type() == n.NodeTypeGhost {
		aliveDstCurr, err := ma.ghostToAlive(ma.lkrDst, ma.dstHead, dstCurrNd)
		if err != nil {
			return err
		}

		// No sibling found for this ghost.
		if aliveDstCurr == nil {
			return ma.report(srcCurr, nil, false, false, false)
		}

		localBackCheck, err := ma.lkrSrc.LookupNodeAt(ma.srcHead, aliveDstCurr.Path())
		if err != nil && !ie.IsNoSuchFileError(err) {
			return err
		}

		if localBackCheck == nil || localBackCheck.Type() == n.NodeTypeGhost {
			// Delete the guard again, due to the recursive call.
			return ma.mapDirectory(srcCurr, aliveDstCurr.Path(), true)
		}

		return ma.report(srcCurr, nil, false, false, false)
	}

	if dstCurrNd.Type() != n.NodeTypeDirectory {
		return ma.report(srcCurr, dstCurrNd, true, false, false)
	}

	dstCurr, ok := dstCurrNd.(*n.Directory)
	if !ok {
		return ie.ErrBadNode
	}

	// Check if we're lucky and the directory hash is equal:
	if srcCurr.ContentHash().Equal(dstCurr.ContentHash()) {
		// Remember that we visited this subtree.
		ma.setSrcHandled(srcCurr)
		ma.setDstHandled(dstCurr)

		log.Debugf(
			"%s and %s have the same content; skipping",
			srcCurr.Path(),
			dstCurr.Path(),
		)

		if srcCurr.Path() != dstCurr.Path() {
			return ma.report(srcCurr, dstCurr, false, false, true)
		}

		// If they even have the same tree hash, we can be sure that both
		// use the same path layout even. No work to do in this case.
		if srcCurr.TreeHash().Equal(dstCurr.TreeHash()) {
			return nil
		}
	}

	// Both sides have this directory, but the content differs.
	// We need to figure out recursively what exactly is different.
	return ma.mapDirectoryContents(srcCurr, dstPath)
}

// ghostToAlive resolves a ghost node to its most recent, still reachable
// living counterpart in `head`, or nil if no such node exists.
func (ma *Mapper) ghostToAlive(lkr *c.Linker, head *n.Commit, nd n.Node) (n.ModNode, error) {
	partnerNd, _, err := lkr.MoveEntryPoint(nd)
	if err != nil {
		return nil, e.Wrap(err, "move entry point")
	}

	// No move partner found.
	if partnerNd == nil {
		return nil, nil
	}

	// We want to go forward in history.
	// In theory, the other direction should not happen,
	// since we're always operating on ghosts here.
	// if moveDir != c.MoveDirDstToSrc {
	// 	log.Debugf("bad move direction")
	// 	return nil, nil
	// }

	// Go forward to the most recent version of this node.
	// This is no guarantee yet that this node is reachable
	// from the head commit (it might have been removed...)
	mostRecent, err := lkr.NodeByInode(partnerNd.Inode())
	if err != nil {
		return nil, err
	}

	if mostRecent == nil {
		err = fmt.Errorf("mapper: No such node with inode %d", partnerNd.Inode())
		return nil, err
	}

	// This should usually not happen, but just to be sure.
	if mostRecent.Type() == n.NodeTypeGhost {
		return nil, nil
	}

	reacheable, err := lkr.LookupNodeAt(head, mostRecent.Path())
	if err != nil && !ie.IsNoSuchFileError(err) {
		return nil, e.Wrapf(err, "ghost2alive: lookupAt")
	}

	if reacheable == nil {
		return nil, nil
	}

	if reacheable.Inode() != mostRecent.Inode() {
		// The node is still reachable, but it was changed
		// (i.e. by removing and re-adding it -> different inode)
		return nil, nil
	}

	reacheableModNd, ok := reacheable.(n.ModNode)
	if !ok {
		return nil, ie.ErrBadNode
	}

	return reacheableModNd, nil
}

type ghostDir struct {
	// source directory.
srcDir *n.Directory // mapped path in lkrDst dstPath string } func (ma *Mapper) handleGhostsWithoutAliveNd(srcNd n.Node) error { dstNd, err := ma.lkrDst.LookupNodeAt(ma.dstHead, srcNd.Path()) if err != nil && !ie.IsNoSuchFileError(err) { return err } // Check if we maybe already removed or moved the node: if dstNd != nil && dstNd.Type() != n.NodeTypeGhost { dstModNd, ok := dstNd.(n.ModNode) if !ok { return ie.ErrBadNode } // Report that the file is missing on src's side. return ma.report(nil, dstModNd, false, true, false) } // does not exist on both sides, nothing to report. return nil } func (ma *Mapper) extractGhostDirs() ([]ghostDir, error) { movedSrcDirs := []ghostDir{} return movedSrcDirs, n.Walk(ma.lkrSrc, ma.srcRoot, true, func(srcNd n.Node) error { // Ignore everything that is not a ghost. if srcNd.Type() != n.NodeTypeGhost { return nil } aliveSrcNd, err := ma.ghostToAlive(ma.lkrSrc, ma.srcHead, srcNd) if err != nil { return err } if aliveSrcNd == nil { // It's a ghost, but it has no living counterpart. // This node *might* have been removed on the remote side. // Try to see if we have a node at this path, the next step // of sync then needs to decide if the node needs to be removed. return ma.handleGhostsWithoutAliveNd(srcNd) } // At this point we know that the ghost related to a moved file. // Check if we have a file at the same place. dstNd, err := ma.lkrDst.LookupNodeAt(ma.dstHead, aliveSrcNd.Path()) if err != nil && !ie.IsNoSuchFileError(err) { return err } if dstNd != nil && dstNd.Type() != n.NodeTypeGhost { // The node already exists in our place. No way we can really merge // it cleanly, so just handle the ghost as normal file and potentially // apply the normal conflict resolution later on. return nil } dstRefNd, err := ma.lkrDst.LookupNodeAt(ma.dstHead, srcNd.Path()) if err != nil && !ie.IsNoSuchFileError(err) { return err } if dstRefNd != nil { // Node maybe also moved. 
If so, try to resolve it to the full node: if dstRefNd.Type() == n.NodeTypeGhost { aliveOrig, err := ma.ghostToAlive(ma.lkrDst, ma.dstHead, dstRefNd) if err != nil { return err } dstRefNd = aliveOrig } } // The node was removed on dst: // We will detect the removal later. if dstRefNd == nil { return nil } dstRefModNd, ok := dstRefNd.(n.ModNode) if !ok { return e.Wrapf(ie.ErrBadNode, "dstRefModNd is not a file or directory: %v", dstRefNd) } switch aliveSrcNd.Type() { case n.NodeTypeFile: // Mark those both ghosts and original node as visited. err = ma.mapFile(aliveSrcNd.(*n.File), dstRefModNd.Path()) ma.setSrcVisited(aliveSrcNd) ma.setSrcVisited(srcNd) return err case n.NodeTypeDirectory: // ma.setSrcVisited(srcNd) if dstRefNd.Type() != n.NodeTypeDirectory { return ma.report(aliveSrcNd, dstRefModNd, true, false, false) } aliveSrcDir, ok := aliveSrcNd.(*n.Directory) if !ok { return ie.ErrBadNode } movedSrcDirs = append(movedSrcDirs, ghostDir{ srcDir: aliveSrcDir, dstPath: dstRefNd.Path(), }) return nil default: return e.Wrapf(ie.ErrBadNode, "Unexpected type in handle ghosts: %v", err) } }) } func (ma *Mapper) handleGhosts() error { movedSrcDirs, err := ma.extractGhostDirs() if err != nil { return err } // Handle moved paths after handling single files. // (mapDirectory assumes that moved files in it were already handled). for _, movedSrcDir := range movedSrcDirs { log.Debugf("map: %v %v", movedSrcDir.srcDir.Path(), movedSrcDir.dstPath) if err := ma.mapDirectory(movedSrcDir.srcDir, movedSrcDir.dstPath, false); err != nil { return err } } return nil } // NewMapper creates a new mapper object that is capable of finding pairs of // nodes between lkrDst and lkrSrc. 
func NewMapper(lkrSrc, lkrDst *c.Linker, srcHead, dstHead *n.Commit, srcRoot n.Node) (*Mapper, error) {
	var err error

	// Fall back to the current HEAD commits when none were supplied.
	if srcHead == nil {
		srcHead, err = lkrSrc.Head()
		if err != nil {
			return nil, err
		}
	}

	if dstHead == nil {
		dstHead, err = lkrDst.Head()
		if err != nil {
			return nil, err
		}
	}

	return &Mapper{
		lkrSrc:    lkrSrc,
		lkrDst:    lkrDst,
		srcHead:   srcHead,
		dstHead:   dstHead,
		srcRoot:   srcRoot,
		flagsRoot: trie.NewNodeWithData(&flags{}),
	}, nil
}

// nodeIsHandled checks the handled flag for the side
// selected by `srcToDst`.
func (ma *Mapper) nodeIsHandled(nd n.Node, srcToDst bool) bool {
	if srcToDst {
		return ma.isSrcHandled(nd)
	}

	return ma.isDstHandled(nd)
}

// isComplete checks recursively if the subtree below `root` consists
// only of unhandled nodes; such a subtree can be reported as a whole.
func (ma *Mapper) isComplete(lkr *c.Linker, root n.Node, srcToDst bool) (bool, error) {
	// If the file was already handled: ignore it completely.
	if ma.nodeIsHandled(root, srcToDst) {
		return false, nil
	}

	if root.Type() != n.NodeTypeDirectory {
		return true, nil
	}

	dir, ok := root.(*n.Directory)
	if !ok {
		return false, ie.ErrBadNode
	}

	children, err := dir.ChildrenSorted(lkr)
	if err != nil {
		return false, err
	}

	nComplete := 0
	for _, child := range children {
		if ma.nodeIsHandled(child, srcToDst) {
			continue
		}

		isComplete, err := ma.isComplete(lkr, child, srcToDst)
		if err != nil {
			return false, err
		}

		if isComplete {
			nComplete++
		}
	}

	// If all children were not handled & are complete we copy the flag.
	if nComplete == len(children) {
		if srcToDst {
			ma.setSrcComplete(root)
		} else {
			ma.setDstComplete(root)
		}

		return true, nil
	}

	return false, nil
}

// extractLeftovers goes over all nodes in src that were not covered
// yet by previous measures. It will report any src node without a match then.
func (ma *Mapper) extractLeftovers(lkr *c.Linker, root *n.Directory, srcToDst bool) error {
	if ma.nodeIsHandled(root, srcToDst) {
		return nil
	}

	// Pre-compute the "complete" flags for the whole subtree.
	if _, err := ma.isComplete(lkr, root, srcToDst); err != nil {
		return err
	}

	// Implement a basic walk/DFS with filtering:
	children, err := root.ChildrenSorted(lkr)
	if err != nil {
		return err
	}

	for _, child := range children {
		debug(fmt.Sprintf("extract: %v", child.Path()))
		if ma.nodeIsHandled(child, srcToDst) {
			debug(fmt.Sprintf("node is handled: %v", child.Path()))
			continue
		}

		switch child.Type() {
		case n.NodeTypeDirectory:
			dir, ok := child.(*n.Directory)
			if !ok {
				return ie.ErrBadNode
			}

			var complete bool
			if srcToDst {
				complete = ma.isSrcComplete(dir)
			} else {
				complete = ma.isDstComplete(dir)
			}

			debug(fmt.Sprintf("is complete: %v %v", child.Path(), complete))

			// A complete directory is reported as one unit;
			// otherwise we recurse into it.
			if complete {
				if srcToDst {
					err = ma.report(dir, nil, false, false, false)
				} else {
					err = ma.report(nil, dir, false, false, false)
				}

				if err != nil {
					return err
				}
			} else {
				if err := ma.extractLeftovers(lkr, dir, srcToDst); err != nil {
					return err
				}
			}
		case n.NodeTypeFile:
			file, ok := child.(*n.File)
			if !ok {
				return ie.ErrBadNode
			}

			// Report the leftover:
			if srcToDst {
				err = ma.report(file, nil, false, false, false)
			} else {
				err = ma.report(nil, file, false, false, false)
			}

			if err != nil {
				return err
			}
		case n.NodeTypeGhost:
			// Those were already handled (or are not important)
		}
	}

	return nil
}

// Map calls `fn` for each pairing that was found. Equal files and
// directories are not reported. Most directories are also not reported, but
// if they are empty and not present on our side they will. No ghosts will be
// reported.
//
// Some implementation background for the curious reader:
//
// In the simplest case a filesystem is a tree and the assumption can be made
// that one node that lives at the same path on both sides is the same "file"
// (i.e. in terms of "this is the file that the user wants to synchronize with").
//
// With ghosts though, we have nodes that can indicate a removed or a moved file.
// Due to moved files the filesystem tree becomes a graph and the mapping
// algorithm (that is the base of Mapper) needs to do a depth first search
// and thus needs to remember already visited nodes.
//
// Since moved nodes also takes priority we need to iterate over all ghosts first,
// and mark their respective counterparts or report that they were removed on
// the remote side (i.e. no counterpart exists.). Only after that we cycle
// through all other nodes and assume that files living at the same path
// reference the same "file". At this point we can treat the file graph
// as tree again by ignoring all ghosts.
//
// A special case is when a file was moved on one side but, a file exists
// already on the other side. In this case the already existing files wins.
//
// Some examples of the described behaviours can be found in the tests of Mapper.
func (ma *Mapper) Map(fn func(pair MapPair) error) error {
	ma.fn = fn
	log.Debugf("mapping ghosts")
	if err := ma.handleGhosts(); err != nil {
		return err
	}

	log.Debugf("mapping non-ghosts")
	switch ma.srcRoot.Type() {
	case n.NodeTypeDirectory:
		dir, ok := ma.srcRoot.(*n.Directory)
		if !ok {
			return ie.ErrBadNode
		}

		if err := ma.mapDirectory(dir, dir.Path(), false); err != nil {
			return err
		}

		// Get root directories:
		// (only get them now since, in theory, mapFn could have changed things)
		srcRoot, err := ma.lkrSrc.DirectoryByHash(ma.srcHead.Root())
		if err != nil {
			return err
		}

		dstRoot, err := ma.lkrDst.DirectoryByHash(ma.dstHead.Root())
		if err != nil {
			return err
		}

		debug("-- Extract leftover src")

		// Extract things in "src" that were not mapped yet.
		// These are files that can be added to our inventory,
		// since we have nothing that mapped to them.
		if err := ma.extractLeftovers(ma.lkrSrc, srcRoot, true); err != nil {
			return err
		}

		debug("-- Extract leftover dst")

		// Check for files that we have, but dst does not.
		// We call those files "missing".
		return ma.extractLeftovers(ma.lkrDst, dstRoot, false)
	case n.NodeTypeFile:
		file, ok := ma.srcRoot.(*n.File)
		if !ok {
			return ie.ErrBadNode
		}

		return ma.mapFile(file, file.Path())
	case n.NodeTypeGhost:
		// Not sure how this would happen.
		return nil
	default:
		return e.Wrapf(ie.ErrBadNode, "Unexpected type in route(): %v", ma.srcRoot)
	}
}


================================================
FILE: catfs/vcs/mapper_test.go
================================================
package vcs

import (
	"fmt"
	"testing"

	c "github.com/sahib/brig/catfs/core"
	n "github.com/sahib/brig/catfs/nodes"
	"github.com/stretchr/testify/require"
)

// NOTE: each mapperSetup* helper prepares the two linkers and returns the
// pairs that Mapper.Map is expected to report, in the exact order that the
// mapper traversal produces them.

func mapperSetupBasicSame(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	c.MustTouchAndCommit(t, lkrSrc, "/x.png", 23)
	c.MustTouchAndCommit(t, lkrDst, "/x.png", 23)

	// Equal files are not reported at all.
	return []MapPair{}
}

func mapperSetupBasicDiff(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile, _ := c.MustTouchAndCommit(t, lkrSrc, "/x.png", 23)
	dstFile, _ := c.MustTouchAndCommit(t, lkrDst, "/x.png", 42)

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupBasicSrcTypeMismatch(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDir := c.MustMkdir(t, lkrSrc, "/x")
	c.MustCommit(t, lkrSrc, "add dir")
	dstFile, _ := c.MustTouchAndCommit(t, lkrDst, "/x", 42)

	return []MapPair{
		{
			Src:          srcDir,
			Dst:          dstFile,
			TypeMismatch: true,
		},
	}
}

func mapperSetupBasicDstTypeMismatch(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile, _ := c.MustTouchAndCommit(t, lkrSrc, "/x", 42)
	dstDir := c.MustMkdir(t, lkrDst, "/x")
	c.MustCommit(t, lkrDst, "add dir")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstDir,
			TypeMismatch: true,
		},
	}
}

func mapperSetupBasicSrcAddFile(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile, _ := c.MustTouchAndCommit(t, lkrSrc, "/x.png", 42)

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          nil,
			TypeMismatch: false,
		},
	}
}

func mapperSetupBasicDstAddFile(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	dstFile, _ := c.MustTouchAndCommit(t, lkrDst, "/x.png", 42)

	return []MapPair{
		{
			Src:          nil,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupBasicSrcAddDir(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDir := c.MustMkdir(t, lkrSrc, "/x")
	c.MustCommit(t, lkrSrc, "add dir")

	return []MapPair{
		{
			Src:          srcDir,
			Dst:          nil,
			TypeMismatch: false,
		},
	}
}

func mapperSetupBasicDstAddDir(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	// A dir that only dst has is not reported.
	c.MustMkdir(t, lkrDst, "/x")
	return []MapPair{}
}

func mapperSetupSrcMoveFile(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	dstFile, _ := c.MustTouchAndCommit(t, lkrDst, "/x.png", 42)
	srcFileOld, _ := c.MustTouchAndCommit(t, lkrSrc, "/x.png", 23)
	srcFile := c.MustMove(t, lkrSrc, srcFileOld, "/y.png")
	c.MustCommit(t, lkrSrc, "I like to move it")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupDstMoveFile(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile, _ := c.MustTouchAndCommit(t, lkrSrc, "/x.png", 42)
	dstFileOld, _ := c.MustTouchAndCommit(t, lkrDst, "/x.png", 23)
	dstFile := c.MustMove(t, lkrDst, dstFileOld, "/y.png")
	c.MustCommit(t, lkrDst, "I like to move it, move it")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperMoveNestedDir(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	c.MustMkdir(t, lkrSrc, "/old/sub/")
	c.MustMkdir(t, lkrDst, "/old/sub/")
	c.MustTouchAndCommit(t, lkrSrc, "/old/sub/x", 1)
	c.MustTouchAndCommit(t, lkrDst, "/old/sub/x", 1)

	srcDir := c.MustLookupDirectory(t, lkrSrc, "/old")
	dstDir := c.MustLookupDirectory(t, lkrDst, "/old")

	newDstDir := c.MustMove(t, lkrDst, dstDir, "/new")
	c.MustCommit(t, lkrDst, "moved")

	// Test for a special case here:
	// Directories that were moved, but still have identical files.
	return []MapPair{
		{
			Src:           srcDir,
			Dst:           newDstDir,
			SrcWasMoved:   true,
			TypeMismatch:  false,
			SrcWasRemoved: false,
		},
	}
}

func mapperSetupDstMoveDirEmpty(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDir := c.MustMkdir(t, lkrSrc, "/x")
	c.MustCommit(t, lkrSrc, "Create src dir")

	dstDirOld := c.MustMkdir(t, lkrDst, "/x")
	dstDir := c.MustMove(t, lkrDst, dstDirOld, "/y")
	c.MustCommit(t, lkrDst, "I like to move it, move it")

	return []MapPair{
		{
			Src:         srcDir,
			Dst:         dstDir,
			SrcWasMoved: true,
		},
	}
}

func mapperSetupDstMoveDir(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	c.MustMkdir(t, lkrSrc, "/x")
	srcFile := c.MustTouch(t, lkrSrc, "/x/a.png", 42)
	c.MustCommit(t, lkrSrc, "Create src dir")

	dstDirOld := c.MustMkdir(t, lkrDst, "/x")
	c.MustMove(t, lkrDst, dstDirOld, "/y")
	dstFile := c.MustTouch(t, lkrDst, "/y/a.png", 23)
	c.MustCommit(t, lkrDst, "I like to move it, move it")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupSrcMoveDir(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDirOld := c.MustMkdir(t, lkrSrc, "/x")
	c.MustMove(t, lkrSrc, srcDirOld, "/y")
	srcFile := c.MustTouch(t, lkrSrc, "/y/a.png", 23)
	c.MustCommit(t, lkrSrc, "I like to move it, move it")

	c.MustMkdir(t, lkrDst, "/x")
	dstFile := c.MustTouch(t, lkrDst, "/x/a.png", 42)
	c.MustCommit(t, lkrDst, "Create dst dir")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupMoveDirWithChild(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDirOld := c.MustMkdir(t, lkrSrc, "/x")
	srcFile := c.MustTouch(t, lkrSrc, "/x/a.png", 23)
	c.MustMove(t, lkrSrc, srcDirOld, "/y")
	c.MustCommit(t, lkrSrc, "I like to move it, move it")

	c.MustMkdir(t, lkrDst, "/x")
	dstFile := c.MustTouch(t, lkrDst, "/x/a.png", 42)
	c.MustCommit(t, lkrDst, "Create dst dir")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupSrcMoveWithExisting(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDirOld := c.MustMkdir(t, lkrSrc, "/x")
	c.MustMove(t, lkrSrc, srcDirOld, "/y")
	srcFile := c.MustTouch(t, lkrSrc, "/y/a.png", 23)
	c.MustCommit(t, lkrSrc, "I like to move it, move it")

	// Additionally create an existing file that lives in the place
	// of the moved file. Mapper should favour existing files:
	dstDir := c.MustMkdir(t, lkrDst, "/x")
	c.MustMkdir(t, lkrDst, "/y")
	c.MustTouch(t, lkrDst, "/x/a.png", 42)
	dstFile := c.MustTouch(t, lkrDst, "/y/a.png", 42)
	c.MustCommit(t, lkrDst, "Create src dir")

	return []MapPair{
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		}, {
			Src:          nil,
			Dst:          dstDir,
			TypeMismatch: false,
		},
	}
}

func mapperSetupSrcFileMoveToExistingEmptyDir(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	c.MustMkdir(t, lkrSrc, "/d1")
	c.MustMkdir(t, lkrSrc, "/d2")
	srcFileOld, _ := c.MustTouchAndCommit(t, lkrSrc, "/d1/t1", 23)
	srcFile := c.MustMove(t, lkrSrc, srcFileOld, "/d2/t1")
	c.MustCommit(t, lkrSrc, "move is done")

	c.MustMkdir(t, lkrDst, "/d1")
	c.MustMkdir(t, lkrDst, "/d2")
	dstFile, _ := c.MustTouchAndCommit(t, lkrDst, "/d1/t1", 23)

	return []MapPair{
		{
			Src:         srcFile,
			Dst:         dstFile,
			SrcWasMoved: true,
		},
	}
}

func mapperSetupDstMoveWithExisting(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcDir := c.MustMkdir(t, lkrSrc, "/x")
	c.MustMkdir(t, lkrSrc, "/y")
	c.MustTouch(t, lkrSrc, "/x/a.png", 42)
	srcFile := c.MustTouch(t, lkrSrc, "/y/a.png", 42)
	c.MustCommit(t, lkrSrc, "Create src dir")

	dstDirOld := c.MustMkdir(t, lkrDst, "/x")
	c.MustMove(t, lkrDst, dstDirOld, "/y")
	dstFile := c.MustTouch(t, lkrDst, "/y/a.png", 23)
	c.MustCommit(t, lkrDst, "I like to move it, move it")

	return []MapPair{
		{
			Src:          srcDir,
			Dst:          nil,
			TypeMismatch: false,
		},
		{
			Src:          srcFile,
			Dst:          dstFile,
			TypeMismatch: false,
		},
	}
}

func mapperSetupNested(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcX, _ := c.MustTouchAndCommit(t, lkrSrc, "/common/a/b/c/x", 42)
	srcY, _ := c.MustTouchAndCommit(t, lkrSrc, "/common/a/b/c/y", 23)
	srcZ, _ := c.MustTouchAndCommit(t, lkrSrc, "/src-only/z", 23)

	dstX, _ := c.MustTouchAndCommit(t, lkrDst, "/common/a/b/c/x", 43)
	dstY, _ := c.MustTouchAndCommit(t, lkrDst, "/common/a/b/c/y", 24)
	dstZ, _ := c.MustTouchAndCommit(t, lkrDst, "/dst-only/z", 23)

	srcZParent, err := n.ParentDirectory(lkrSrc, srcZ)
	if err != nil {
		t.Fatalf("setup failed to get parent dir: %v", err)
	}

	dstZParent, err := n.ParentDirectory(lkrDst, dstZ)
	if err != nil {
		t.Fatalf("setup failed to get parent dir: %v", err)
	}

	return []MapPair{
		{
			Src:          srcX,
			Dst:          dstX,
			TypeMismatch: false,
		}, {
			Src:          srcY,
			Dst:          dstY,
			TypeMismatch: false,
		}, {
			Src:          srcZParent,
			Dst:          nil,
			TypeMismatch: false,
		}, {
			Src:          nil,
			Dst:          dstZParent,
			TypeMismatch: false,
		},
	}
}

func mapperSetupSrcRemove(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile := c.MustTouch(t, lkrSrc, "/x.png", 23)
	c.MustCommit(t, lkrSrc, "src: Touched /x.png")
	c.MustRemove(t, lkrSrc, srcFile)
	c.MustCommit(t, lkrSrc, "src: Removed /x.png")
	dstFile := c.MustTouch(t, lkrDst, "/x.png", 23)
	c.MustCommit(t, lkrDst, "dst: Touched /x.png")

	return []MapPair{
		{
			Src:           nil,
			Dst:           dstFile,
			TypeMismatch:  false,
			SrcWasRemoved: true,
		},
	}
}

func mapperSetupDstRemove(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile := c.MustTouch(t, lkrSrc, "/x.png", 23)
	c.MustCommit(t, lkrSrc, "dst: Touched /x.png")
	dstFile := c.MustTouch(t, lkrDst, "/x.png", 23)
	c.MustCommit(t, lkrDst, "src: Touched /x.png")
	c.MustRemove(t, lkrDst, dstFile)
	c.MustCommit(t, lkrDst, "src: Removed /x.png")

	dstGhost, err := lkrDst.LookupGhost("/x.png")
	require.NoError(t, err)

	// We should be notified remote removed the file
	// (and that we possibly should remove it as well)
	return []MapPair{
		{
			Src: srcFile,
			Dst: dstGhost,
			// File vs ghost counts as a type mismatch.
			TypeMismatch: true,
		},
	}
}

func mapperSetupMoveOnBothSides(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair {
	srcFile := c.MustTouch(t, lkrSrc, "/x", 23)
	c.MustCommit(t, lkrSrc, "src: touched /x")
	srcFileMoved := c.MustMove(t, lkrSrc, srcFile, "/y")
	c.MustCommit(t, lkrSrc, "src: /x moved to /y")

	dstFile := c.MustTouch(t, lkrDst, "/x", 42)
	c.MustCommit(t, lkrDst, "dst: touched /x")
	dstFileMoved := c.MustMove(t, lkrDst, dstFile, "/z")
	c.MustCommit(t, lkrDst, "dst: /x moved to /z")

	return []MapPair{
		{
			Src:          srcFileMoved,
			Dst:          dstFileMoved,
			TypeMismatch: false,
		},
	}
}

func TestMapper(t *testing.T) {
	tcs := []struct {
		name  string
		setup func(t *testing.T, lkrSrc, lkrDst *c.Linker) []MapPair
	}{
		{
			name:  "basic-same",
			setup: mapperSetupBasicSame,
		}, {
			name:  "basic-diff",
			setup: mapperSetupBasicDiff,
		}, {
			name:  "basic-src-add-file",
			setup: mapperSetupBasicSrcAddFile,
		}, {
			name:  "basic-dst-add-file",
			setup: mapperSetupBasicDstAddFile,
		}, {
			name:  "basic-src-add-dir",
			setup: mapperSetupBasicSrcAddDir,
		}, {
			name:  "basic-dst-add-dir",
			setup: mapperSetupBasicDstAddDir,
		}, {
			name:  "basic-src-type-mismatch",
			setup: mapperSetupBasicSrcTypeMismatch,
		}, {
			name:  "basic-dst-type-mismatch",
			setup: mapperSetupBasicDstTypeMismatch,
		}, {
			name:  "basic-nested",
			setup: mapperSetupNested,
		}, {
			name:  "remove-src",
			setup: mapperSetupSrcRemove,
		}, {
			name:  "remove-dst",
			setup: mapperSetupDstRemove,
		}, {
			name:  "move-simple-src-file",
			setup: mapperSetupSrcMoveFile,
		}, {
			name:  "move-simple-dst-file",
			setup: mapperSetupDstMoveFile,
		}, {
			name:  "move-simple-dst-empty-dir",
			setup: mapperSetupDstMoveDirEmpty,
		}, {
			name:  "move-simple-src-dir",
			setup: mapperSetupSrcMoveDir,
		}, {
			name:  "move-simple-dst-dir",
			setup: mapperSetupDstMoveDir,
		}, {
			name:  "move-simple-src-dir-with-existing",
			setup: mapperSetupSrcMoveWithExisting,
		}, {
			name:  "move-simple-dst-dir-with-existing",
			setup: mapperSetupDstMoveWithExisting,
		}, {
			name:  "move-on-both-sides",
			setup: mapperSetupMoveOnBothSides,
		}, {
			name:  "move-dir-with-child",
			setup: mapperSetupMoveDirWithChild,
		}, {
			name:  "move-nested-dir",
			setup: mapperMoveNestedDir,
		}, {
			name:  "move-src-file-to-existing-empty-dir",
			setup: mapperSetupSrcFileMoveToExistingEmptyDir,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) {
				expect := tc.setup(t, lkrSrc, lkrDst)
				srcRoot, err := lkrSrc.Root()
				if err != nil {
					t.Fatalf("Failed to retrieve root: %v", err)
				}

				got := []MapPair{}
				diffFn := func(pair MapPair) error {
					got = append(got, pair)
					// if pair.Src != nil {
					// 	fmt.Println("..  ", pair.Src.Path())
					// }
					// if pair.Dst != nil {
					// 	fmt.Println("-> ", pair.Dst.Path())
					// }
					return nil
				}

				mapper, err := NewMapper(lkrSrc, lkrDst, nil, nil, srcRoot)
				require.Nil(t, err)

				if err := mapper.Map(diffFn); err != nil {
					t.Fatalf("mapping failed: %v", err)
				}

				// DEBUG.
				// for _, pair := range got {
				// 	fmt.Println("-", pair.Src, pair.Dst)
				// }

				if len(got) != len(expect) {
					t.Fatalf(
						"Got and expect length differ: %d vs %d",
						len(got), len(expect),
					)
				}

				for idx, gotPair := range got {
					expectPair := expect[idx]
					failMsg := fmt.Sprintf("Failed pair %d", idx+1)
					require.Equal(t, expectPair, gotPair, failMsg)
				}
			})
		})
	}
}


================================================
FILE: catfs/vcs/patch.go
================================================
package vcs

import (
	"errors"
	"path"
	"sort"

	e "github.com/pkg/errors"
	c "github.com/sahib/brig/catfs/core"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
	capnp_patch "github.com/sahib/brig/catfs/vcs/capnp"
	"github.com/sahib/brig/util/trie"
	log "github.com/sirupsen/logrus"
	capnp "zombiezen.com/go/capnproto2"
)

// Patch is a set of changes that changed since a certain
// version of a graph.
type Patch struct {
	FromIndex int64
	CurrIndex int64
	Changes   []*Change
}

// Patches is just a list of patches
type Patches []*Patch

// Len returns the number of changes in the patch.
func (p *Patch) Len() int { return len(p.Changes) } func (p *Patch) Swap(i, j int) { p.Changes[i], p.Changes[j] = p.Changes[j], p.Changes[i] } func (p *Patch) Less(i, j int) bool { na, nb := p.Changes[i].Curr, p.Changes[j].Curr naIsGhost := na.Type() == n.NodeTypeGhost nbIsGhost := nb.Type() == n.NodeTypeGhost if naIsGhost != nbIsGhost { // Make sure ghosts are first added return naIsGhost } naIsDir := na.Type() == n.NodeTypeDirectory nbIsDir := nb.Type() == n.NodeTypeDirectory if naIsDir != nbIsDir { // Make sure that we first apply directory creation // and possible directory moves. return naIsDir } naIsRemove := p.Changes[i].Mask&ChangeTypeRemove != 0 nbIsRemove := p.Changes[j].Mask&ChangeTypeRemove != 0 if naIsRemove != nbIsRemove { // Make sure that everything is removed before // doing any other changes. return naIsRemove } naIsMove := p.Changes[i].Mask&ChangeTypeMove != 0 nbIsMove := p.Changes[j].Mask&ChangeTypeMove != 0 if naIsMove != nbIsMove { // Make sure that everything is moved before // doing any adds / modifcations. return naIsMove } return na.ModTime().Before(nb.ModTime()) } func (p *Patch) toCapnpPatch(seg *capnp.Segment, capPatch capnp_patch.Patch) error { capPatch.SetFromIndex(p.FromIndex) capPatch.SetCurrIndex(p.CurrIndex) capChangeLst, err := capnp_patch.NewChange_List(seg, int32(len(p.Changes))) if err != nil { return err } if err := capPatch.SetChanges(capChangeLst); err != nil { return err } for idx, change := range p.Changes { capCh, err := capnp_patch.NewChange(seg) if err != nil { return err } if err := change.toCapnpChange(seg, &capCh); err != nil { return err } if err := capChangeLst.Set(idx, capCh); err != nil { return err } } return nil } // ToCapnp serializes a patch to capnproto message. 
func (p *Patch) ToCapnp() (*capnp.Message, error) { msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) if err != nil { return nil, err } capPatch, err := capnp_patch.NewRootPatch(seg) if err != nil { return nil, err } return msg, p.toCapnpPatch(seg, capPatch) } // ToCapnp seriales patches to a capnproto message. func (ps Patches) ToCapnp() (*capnp.Message, error) { msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) if err != nil { return nil, err } capPatches, err := capnp_patch.NewRootPatches(seg) if err != nil { return nil, err } capPatchLst, err := capnp_patch.NewPatch_List(seg, int32(len(ps))) if err != nil { return nil, err } if err := capPatches.SetPatches(capPatchLst); err != nil { return nil, err } for idx, p := range ps { capPatch, err := capnp_patch.NewPatch(seg) if err != nil { return nil, err } if err := p.toCapnpPatch(seg, capPatch); err != nil { return nil, err } if err := capPatchLst.Set(idx, capPatch); err != nil { return nil, err } } return msg, nil } func (p *Patch) fromCapnpPatch(capPatch capnp_patch.Patch) error { p.FromIndex = capPatch.FromIndex() p.CurrIndex = capPatch.CurrIndex() capChs, err := capPatch.Changes() if err != nil { return err } for idx := 0; idx < capChs.Len(); idx++ { ch := &Change{} if err := ch.fromCapnpChange(capChs.At(idx)); err != nil { return e.Wrapf(err, "patch: from-capnp: change") } p.Changes = append(p.Changes, ch) } return nil } // FromCapnp deserializes `msg` into `p`. 
func (p *Patch) FromCapnp(msg *capnp.Message) error { capPatch, err := capnp_patch.ReadRootPatch(msg) if err != nil { return err } return p.fromCapnpPatch(capPatch) } // FromCapnp deserializes `msg` into `ps` func (ps *Patches) FromCapnp(msg *capnp.Message) error { capPatches, err := capnp_patch.ReadRootPatches(msg) if err != nil { return err } capPatchesLst, err := capPatches.Patches() if err != nil { return err } newPatches := Patches{} for idx := 0; idx < capPatchesLst.Len(); idx++ { p := &Patch{} if err := p.fromCapnpPatch(capPatchesLst.At(idx)); err != nil { return e.Wrapf(err, "patches: from-capnp: patch") } newPatches = append(newPatches, p) } *ps = newPatches return nil } // buildPrefixTrie builds a trie of prefixes that can be passed func buildPrefixTrie(prefixes []string) *trie.Node { root := trie.NewNode() for _, prefix := range prefixes { if prefix == "/" { root.Data = true } else { root.Insert(prefix).Data = true } } return root } func hasValidPrefix(root *trie.Node, path string) bool { if root.Data != nil && root.Data.(bool) == true { return true } curr := root for _, elem := range trie.SplitPath(path) { curr = curr.Lookup(elem) // No such children, not an allowed prefix. if curr == nil { return false } // If it's a prefix node it's over. if curr.Data != nil && curr.Data.(bool) == true { return true } } return false } func filterInvalidMoveGhost(lkr *c.Linker, child n.Node, combCh *Change, prefixTrie *trie.Node) (bool, error) { if child.Type() != n.NodeTypeGhost || combCh.Mask&ChangeTypeMove == 0 { return true, nil } moveNd, _, err := lkr.MoveEntryPoint(child) if err != nil { return false, err } if moveNd == nil { return false, nil } if !hasValidPrefix(prefixTrie, moveNd.Path()) { // The node was moved to the outside. Count it as removed. combCh.Mask &= ^ChangeTypeMove combCh.Mask |= ChangeTypeRemove return true, nil } return true, nil } // MakePatch creates a patch with all changes starting from `from`. 
// Patch will be created betweed `from` and `status` (current state) // It will only include nodes that are located under one of the prefixes in `prefixes`. func MakePatch(lkr *c.Linker, from *n.Commit, prefixes []string) (*Patch, error) { to, err := lkr.Status() if err != nil { return nil, err } return MakePatchFromTo(lkr, from, to, prefixes) } // MakePatches is like MakePatch but produces several patches, not a compressed one. func MakePatches(lkr *c.Linker, from *n.Commit, prefixes []string) (Patches, error) { to, err := lkr.Status() if err != nil { return nil, err } patches := []*Patch{} var errSkip = errors.New("stop log") var prevCmt = to // TODO: Log API should offer something like errSkip itself. err = c.Log(lkr, to, func(cmt *n.Commit) error { if prevCmt.Index() == to.Index() { // First iteration. prevCmt = cmt return nil } patch, err := MakePatchFromTo(lkr, cmt, prevCmt, prefixes) if err != nil { return err } patches = append(patches, patch) prevCmt = cmt if cmt.Index() == from.Index() { // We've gone deep enough. return errSkip } return nil }) if err != nil && err != errSkip { return nil, err } return patches, nil } // MakePatchFromTo makes a patch between two commits `from` (older one) and `to` (newer one) func MakePatchFromTo(lkr *c.Linker, from, to *n.Commit, prefixes []string) (*Patch, error) { root, err := to.Child(lkr, "does not matter") // child actually means Root for commits if err != nil { return nil, err } if from == nil { return nil, e.New("The `from` commit is nil") } if to == nil { return nil, e.New("The `to` commit is nil") } patch := &Patch{ FromIndex: from.Index(), CurrIndex: to.Index(), } // Shortcut: The patch CURR..CURR would be empty. // No need for further computations. if from.TreeHash().Equal(to.TreeHash()) { return patch, nil } // Build a prefix trie to quickly check invalid paths. // This is not necessarily much faster, but runs in constant time. 
if prefixes == nil { prefixes = []string{"/"} } prefixTrie := buildPrefixTrie(prefixes) err = n.Walk(lkr, root, false, func(child n.Node) error { childParentPath := path.Dir(child.Path()) if len(prefixes) != 0 && !hasValidPrefix(prefixTrie, childParentPath) { log.Debugf("Ignoring invalid prefix: %s", childParentPath) return nil } // Get all changes between `to` and `from`. childModNode, ok := child.(n.ModNode) if !ok { return e.Wrapf(ie.ErrBadNode, "make-patch: walk") } changes, err := History(lkr, childModNode, to, from) if err != nil { return err } // No need to export empty history, abort early. if len(changes) == 0 { return nil } combCh := CombineChanges(changes) // Directories are a bit of a special case. We're only interested in them // when creating new, empty directories (n_children == 0) or if whole trees // were moved. In the latter case we need to also send a notice about that, // but we can leave out any other change. if child.Type() == n.NodeTypeDirectory { dir, ok := child.(*n.Directory) if !ok { return e.Wrapf(ie.ErrBadNode, "make-patch: dir") } if combCh.Mask&ChangeTypeMove == 0 { if dir.NChildren() > 0 { return nil } } else { combCh.Mask = ChangeTypeMove } } // Some special filtering needs to be done here. If it'a "move" ghost // we don't want to export it if the move goes outside our prefixes // (which would count as "remove"). or if we already reported a top // level directory that contains this move. isValid, err := filterInvalidMoveGhost(lkr, child, combCh, prefixTrie) if err != nil { return err } log.Debugf("combine: %v <= %v (valid %v)", combCh, changes, isValid) if isValid && combCh.Mask != 0 { patch.Changes = append(patch.Changes, combCh) } return nil }) if err != nil { return nil, err } // Make sure the patch is applied in the right order. // The receiving site will sort it again, but it's better // to have it in the right order already. 
sort.Sort(patch) for _, ch := range patch.Changes { log.Debugf(" change: %s", ch) } return patch, nil } // ApplyPatch applies the patch `p` to the linker `lkr`. func ApplyPatch(lkr *c.Linker, p *Patch) error { sort.Sort(p) for _, change := range p.Changes { log.Debugf("apply %s %v", change, change.Curr.Type()) if err := change.Replay(lkr); err != nil { return err } } return nil } ================================================ FILE: catfs/vcs/patch_test.go ================================================ package vcs import ( "testing" c "github.com/sahib/brig/catfs/core" n "github.com/sahib/brig/catfs/nodes" h "github.com/sahib/brig/util/hashlib" "github.com/stretchr/testify/require" ) func TestPatchMarshalling(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { head, err := lkr.Head() require.Nil(t, err) curr := c.MustTouch(t, lkr, "/x", 1) next := c.MustCommit(t, lkr, "hello") change1 := &Change{ Mask: ChangeTypeMove | ChangeTypeRemove, Head: head, Next: next, Curr: curr, MovedTo: "/something1", } c.MustModify(t, lkr, curr, 2) nextNext := c.MustCommit(t, lkr, "hello") change2 := &Change{ Mask: ChangeTypeAdd | ChangeTypeModify, Head: next, Next: nextNext, Curr: curr, MovedTo: "/something2", } patch := &Patch{ FromIndex: head.Index(), Changes: []*Change{change2, change1}, } msg, err := patch.ToCapnp() require.Nil(t, err) newPatch := &Patch{} require.Nil(t, newPatch.FromCapnp(msg)) require.Equal(t, patch, newPatch) }) } func TestPrefixTrie(t *testing.T) { prefixes := []string{ "/a", "/b", "/c/d", } root := buildPrefixTrie(prefixes) require.True(t, hasValidPrefix(root, "/a")) require.True(t, hasValidPrefix(root, "/a/x/y/z")) require.True(t, hasValidPrefix(root, "/b/c")) require.True(t, hasValidPrefix(root, "/c/d/e")) require.False(t, hasValidPrefix(root, "/c/e/d")) require.False(t, hasValidPrefix(root, "/c/a/b")) require.False(t, hasValidPrefix(root, "/")) require.False(t, hasValidPrefix(root, "/d")) } func TestMakePatch(t *testing.T) { c.WithLinkerPair(t, 
func(lkrSrc, lkrDst *c.Linker) { init, err := lkrSrc.Head() require.Nil(t, err) srcX := c.MustTouch(t, lkrSrc, "/x", 1) srcY := c.MustTouch(t, lkrSrc, "/y", 2) c.MustMkdir(t, lkrSrc, "/sub") c.MustMkdir(t, lkrSrc, "/empty") srcZ := c.MustTouch(t, lkrSrc, "/sub/z", 3) c.MustCommit(t, lkrSrc, "3 files") patch, err := MakePatch(lkrSrc, init, []string{"/"}) require.Nil(t, err) require.Nil(t, ApplyPatch(lkrDst, patch)) dstX, err := lkrDst.LookupFile("/x") require.Nil(t, err) require.Equal(t, dstX.ContentHash(), h.TestDummy(t, 1)) dstY, err := lkrDst.LookupFile("/y") require.Nil(t, err) require.Equal(t, dstY.ContentHash(), h.TestDummy(t, 2)) dstZ, err := lkrDst.LookupFile("/sub/z") require.Nil(t, err) require.Equal(t, dstZ.ContentHash(), h.TestDummy(t, 3)) _, err = lkrDst.LookupDirectory("/empty") require.Nil(t, err) /////////////////// c.MustModify(t, lkrSrc, srcX, 4) c.MustMove(t, lkrSrc, srcY, "/y_moved") c.MustRemove(t, lkrSrc, srcZ) c.MustTouch(t, lkrSrc, "/empty/not_empty_anymore", 42) patch, err = MakePatch(lkrSrc, init, []string{"/"}) require.Nil(t, err) require.Nil(t, ApplyPatch(lkrDst, patch)) dstYMoved, err := lkrDst.LookupFile("/y_moved") require.Nil(t, err) require.Equal(t, dstYMoved.Path(), "/y_moved") dstYGhost, err := lkrDst.LookupGhost("/y") require.Nil(t, err) require.Equal(t, dstYGhost.Path(), "/y") dstZGhost, err := lkrDst.LookupGhost("/sub/z") require.Nil(t, err) require.Equal(t, dstZGhost.Path(), "/sub/z") dstNotEmptyFile, err := lkrDst.LookupFile("/empty/not_empty_anymore") require.Nil(t, err) require.Equal(t, dstNotEmptyFile.Path(), "/empty/not_empty_anymore") }) } func TestMakePatchWithOrderConflict(t *testing.T) { c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) { init, err := lkrSrc.Head() require.Nil(t, err) srcX := c.MustTouch(t, lkrSrc, "/x", 1) srcY := c.MustTouch(t, lkrSrc, "/y", 2) c.MustCommit(t, lkrSrc, "pre-move") c.MustMove(t, lkrSrc, srcX, "/z") c.MustMove(t, lkrSrc, srcY, "/z") c.MustCommit(t, lkrSrc, "post-move") patch, err := 
MakePatch(lkrSrc, init, []string{"/"}) require.Nil(t, err) // All files should be mentioned in the patch. // x and y are ghosts, z is the only real file. // Since y was moved last it has a move marker, x not. require.Equal(t, "/x", patch.Changes[1].Curr.Path()) require.Equal(t, "", patch.Changes[1].MovedTo) require.Equal(t, "", patch.Changes[1].WasPreviouslyAt) require.Equal(t, n.NodeTypeGhost, patch.Changes[1].Curr.Type()) require.Equal(t, "/y", patch.Changes[0].Curr.Path()) require.Equal(t, "/z", patch.Changes[0].MovedTo) require.Equal(t, n.NodeTypeGhost, patch.Changes[0].Curr.Type()) require.Equal(t, "/z", patch.Changes[2].Curr.Path()) require.Equal(t, "", patch.Changes[2].MovedTo) require.Equal(t, "/y", patch.Changes[2].WasPreviouslyAt) require.Equal(t, n.NodeTypeFile, patch.Changes[2].Curr.Type()) require.Nil(t, ApplyPatch(lkrDst, patch)) dstZ, err := lkrDst.LookupFile("/z") require.Nil(t, err) require.Equal(t, dstZ.ContentHash(), h.TestDummy(t, 2)) dstX, err := lkrDst.LookupGhost("/x") require.Nil(t, err) require.Equal(t, n.NodeTypeGhost, dstX.Type()) dstY, err := lkrDst.LookupGhost("/y") require.Nil(t, err) require.Equal(t, n.NodeTypeGhost, dstY.Type()) }) } // Move all children of a directory to another location. 
func TestMakePatchDirMoveAllChildren(t *testing.T) { c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) { init, err := lkrSrc.Head() require.Nil(t, err) c.MustMkdir(t, lkrSrc, "/src") subX := c.MustTouch(t, lkrSrc, "/src/x", 1) subY := c.MustTouch(t, lkrSrc, "/src/y", 2) preMove := c.MustCommit(t, lkrSrc, "base") patch1, err := MakePatch(lkrSrc, init, []string{"/"}) require.Nil(t, err) require.Nil(t, ApplyPatch(lkrDst, patch1)) srcDir, err := lkrDst.LookupDirectory("/src") require.Nil(t, err) require.Equal(t, 2, srcDir.NChildren()) /////////// c.MustMkdir(t, lkrSrc, "/dst") c.MustMove(t, lkrSrc, subX, "/dst/x") c.MustMove(t, lkrSrc, subY, "/dst/y") c.MustCommit(t, lkrSrc, "post-move") patch2, err := MakePatch(lkrSrc, preMove, []string{"/"}) require.Nil(t, err) require.Nil(t, ApplyPatch(lkrDst, patch2)) srcDir, err = lkrDst.LookupDirectory("/src") require.Nil(t, err) require.Equal(t, 2, srcDir.NChildren()) _, err = lkrDst.LookupGhost("/src/x") require.Nil(t, err) _, err = lkrDst.LookupGhost("/src/x") require.Nil(t, err) _, err = lkrDst.LookupFile("/dst/x") require.Nil(t, err) _, err = lkrDst.LookupFile("/dst/x") require.Nil(t, err) }) } // Move a directory completely, not just its contents. 
func TestMakePatchDirMoveCompletely(t *testing.T) { c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) { init, err := lkrSrc.Head() require.Nil(t, err) realSrcDir := c.MustMkdir(t, lkrSrc, "/src") c.MustTouch(t, lkrSrc, "/src/x", 1) c.MustTouch(t, lkrSrc, "/src/y", 2) preMove := c.MustCommit(t, lkrSrc, "base") patch1, err := MakePatch(lkrSrc, init, []string{"/"}) require.Nil(t, err) require.Nil(t, ApplyPatch(lkrDst, patch1)) srcDir, err := lkrDst.LookupDirectory("/src") require.Nil(t, err) require.Equal(t, 2, srcDir.NChildren()) /////////// c.MustMove(t, lkrSrc, realSrcDir, "/dst") c.MustCommit(t, lkrSrc, "post-move") patch2, err := MakePatch(lkrSrc, preMove, []string{"/"}) require.Nil(t, err) require.Nil(t, ApplyPatch(lkrDst, patch2)) srcDirGhost, err := lkrDst.LookupGhost("/src") require.Nil(t, err) require.Equal(t, 2, srcDirGhost.NChildren()) _, err = lkrDst.LookupNode("/src/x") require.NotNil(t, err) _, err = lkrDst.LookupNode("/src/x") require.NotNil(t, err) _, err = lkrDst.LookupFile("/dst/x") require.Nil(t, err) _, err = lkrDst.LookupFile("/dst/x") require.Nil(t, err) }) } func TestSyncPartialTwiceWithMovedFile(t *testing.T) { c.WithLinkerPair(t, func(lkrAli, lkrBob *c.Linker) { aliNd, _ := c.MustTouchAndCommit(t, lkrAli, "/ali-file", 1) c.MustTouchAndCommit(t, lkrBob, "/bob-file", 2) require.Nil(t, Sync(lkrAli, lkrBob, nil)) require.Nil(t, Sync(lkrBob, lkrAli, nil)) beforeMove, err := lkrAli.Head() require.Nil(t, err) c.MustMove(t, lkrAli, aliNd, "/bali-bile") c.MustCommit(t, lkrAli, "after move") // do commit before. patch, err := MakePatch(lkrAli, beforeMove, nil) require.Nil(t, err) // Apply the patch on bob's side. 
require.Nil(t, ApplyPatch(lkrBob, patch)) c.MustCommit(t, lkrBob, "after patch") diff, err := MakeDiff(lkrBob, lkrAli, nil, nil, nil) require.Nil(t, err) require.Len(t, diff.Added, 0) require.Len(t, diff.Removed, 0) require.Len(t, diff.Moved, 0) require.Len(t, diff.Conflict, 0) require.Len(t, diff.Merged, 0) require.Len(t, diff.Ignored, 0) }) } ================================================ FILE: catfs/vcs/reset.go ================================================ package vcs import ( "errors" "fmt" "path" e "github.com/pkg/errors" c "github.com/sahib/brig/catfs/core" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" ) func findPathAt(lkr *c.Linker, cmt *n.Commit, path string) (string, error) { nd, err := lkr.LookupModNode(path) if err != nil && !ie.IsNoSuchFileError(err) { return "", err } if ie.IsNoSuchFileError(err) { // The file does not exist in the current commit, // so user probably knows that it had this path before. return path, nil } status, err := lkr.Status() if err != nil { return "", err } walker := NewHistoryWalker(lkr, status, nd) for walker.Next() { state := walker.State() if state.Head.TreeHash().Equal(cmt.TreeHash()) { return state.Curr.Path(), nil } } if err := walker.Err(); err != nil { return "", err } // Take the current path as best guess. 
return path, nil } func clearPath(lkr *c.Linker, ndPath string) (*n.Directory, error) { nd, err := lkr.LookupModNode(ndPath) isNoSuchFile := ie.IsNoSuchFileError(err) if err != nil && !isNoSuchFile { return nil, err } var par *n.Directory if ndPath != "/" { par, err = lkr.LookupDirectory(path.Dir(ndPath)) if err != nil { return nil, err } } if par == nil { return nil, fmt.Errorf( "checkout by commit if you want to checkout previous roots", ) } // The node does currently not exist (and the user wants to bring it back) if isNoSuchFile { return par, nil } err = n.Walk(lkr, nd, true, func(child n.Node) error { lkr.MemIndexPurge(child) return nil }) if err != nil { return nil, err } if err := par.RemoveChild(lkr, nd); err != nil { return nil, err } lkr.MemIndexPurge(nd) return par, lkr.StageNode(par) } // ResetNode resets a certain file to the state it had in cmt. If the file // did not exist back then, it will be deleted. `nd` is usually retrieved by // calling ResolveNode() and sorts. // // A special case occurs when the file was moved we reset to. // In this case the state of the old node (at the old path) // is being written to the node at the new path. // This is the more obvious choice to the user when he types: // // $ brig reset HEAD^ i-was-somewhere-else-before # name does not change. // // This method returns the old node (or nil if none) and any possible error. func ResetNode(lkr *c.Linker, cmt *n.Commit, currPath string) (n.Node, error) { root, err := lkr.DirectoryByHash(cmt.Root()) if err != nil { return nil, err } if root == nil { return nil, errors.New("no root to reset to") } // Find out the old path of `currPath` at `cmt`. // It might have changed due to moves. 
oldPath, err := findPathAt(lkr, cmt, currPath) if err != nil { return nil, err } oldNode, err := root.Lookup(lkr, oldPath) if err != nil && !ie.IsNoSuchFileError(err) { return nil, err } // Make sure that all write related action happen in one go: return oldNode, lkr.Atomic(func() (bool, error) { // Remove the node that is present at the current path: par, err := clearPath(lkr, currPath) if err != nil { return true, err } // old Node might not have yet existed back then. // If so, simply do not re-add it. if oldNode != nil { oldModNode, ok := oldNode.(n.ModNode) if !ok { return true, e.Wrapf(ie.ErrBadNode, "reset file") } // If the old node was at a different location, // we need to modify its path. oldModNode.SetName(path.Base(currPath)) if err := oldModNode.SetParent(lkr, par); err != nil { return true, err } if err := oldModNode.NotifyMove(lkr, par, oldModNode.Path()); err != nil { return true, err } if err := lkr.StageNode(oldNode); err != nil { return true, err } } return false, nil }) } ================================================ FILE: catfs/vcs/reset_test.go ================================================ package vcs import ( "testing" c "github.com/sahib/brig/catfs/core" "github.com/sahib/brig/catfs/db" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" h "github.com/sahib/brig/util/hashlib" "github.com/stretchr/testify/require" ) func TestResetFile(t *testing.T) { c.WithDummyKv(t, func(kv db.Database) { lkr := c.NewLinker(kv) if err := lkr.MakeCommit(n.AuthorOfStage, "initial commit"); err != nil { t.Fatalf("Initial commit failed: %v", err) } initCmt, err := lkr.Head() if err != nil { t.Fatalf("Failed to get initial head") } root, err := lkr.Root() if err != nil { t.Fatalf("Getting root failed: %v", err) } file := n.NewEmptyFile(root, "cat.png", "u", 3) c.MustModify(t, lkr, file, 1) oldFileHash := file.TreeHash().Clone() if err := lkr.MakeCommit(n.AuthorOfStage, "second commit"); err != nil { t.Fatalf("Failed to make second 
commit: %v", err) } c.MustModify(t, lkr, file, 2) headFileHash := file.TreeHash().Clone() if err := lkr.MakeCommit(n.AuthorOfStage, "third commit"); err != nil { t.Fatalf("Failed to make third commit: %v", err) } head, err := lkr.Head() if err != nil { t.Fatalf("Failed to get HEAD: %v", err) } lastCommitNd, err := head.Parent(lkr) if err != nil { t.Fatalf("Failed to get second commit: %v", err) } lastCommit := lastCommitNd.(*n.Commit) if _, err := ResetNode(lkr, lastCommit, "/cat.png"); err != nil { t.Fatalf("Failed to checkout file before commit: %v", err) } lastVersion, err := lkr.LookupFile("/cat.png") if err != nil { t.Fatalf("Failed to lookup /cat.png post checkout") } if !lastVersion.TreeHash().Equal(oldFileHash) { t.Fatalf("Hash of checkout'd file is not from second commit") } if lastVersion.Size() != 1 { t.Fatalf("Size of checkout'd file is not from second commit") } if _, err := ResetNode(lkr, initCmt, "/cat.png"); err != nil { t.Fatalf("Failed to checkout file at init: %v", err) } _, err = lkr.LookupFile("/cat.png") if !ie.IsNoSuchFileError(err) { t.Fatalf("Different error: %v", err) } if _, err := ResetNode(lkr, head, "/cat.png"); err != nil { t.Fatalf("Failed to checkout file at head: %v", err) } headVersion, err := lkr.LookupFile("/cat.png") if err != nil { t.Fatalf("Failed to lookup /cat.png post checkout") } if !headVersion.TreeHash().Equal(headFileHash) { t.Fatalf( "Hash differs between new and head reset: %v != %v", headVersion.TreeHash(), headFileHash, ) } }) } func TestFindPathAt(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { nd := c.MustTouch(t, lkr, "/x", 1) c1 := c.MustCommit(t, lkr, "1") c.MustMove(t, lkr, nd, "/y") c.MustCommit(t, lkr, "2") oldPath, err := findPathAt(lkr, c1, "/y") require.Nil(t, err) require.Equal(t, "/x", oldPath) }) } // Reset a file that was moved in earlier incarnations. 
func TestResetMovedFile(t *testing.T) { c.WithDummyLinker(t, func(lkr *c.Linker) { sub := c.MustMkdir(t, lkr, "/sub") nd := c.MustTouch(t, lkr, "/sub/x", 1) c1 := c.MustCommit(t, lkr, "1") c.MustMove(t, lkr, nd, "/y") c.MustModify(t, lkr, nd, 2) c.MustCommit(t, lkr, "2") // This should reset /y to content=1. _, err := ResetNode(lkr, c1, "/y") require.Nil(t, err) root, err := lkr.Root() require.Nil(t, err) children, err := root.ChildrenSorted(lkr) require.Nil(t, err) require.Len(t, children, 2) require.Equal(t, children[0].Type(), n.NodeType(n.NodeTypeDirectory)) require.Equal(t, children[0].Path(), "/sub") require.Equal(t, children[1].Type(), n.NodeType(n.NodeTypeFile)) require.Equal(t, children[1].Path(), "/y") require.Equal(t, children[1].BackendHash(), h.TestDummy(t, 1)) subChildren, err := sub.ChildrenSorted(lkr) require.Nil(t, err) require.Len(t, subChildren, 1) require.Equal(t, subChildren[0].Type(), n.NodeType(n.NodeTypeGhost)) require.Equal(t, subChildren[0].Path(), "/sub/x") }) } ================================================ FILE: catfs/vcs/resolve.go ================================================ package vcs // This package implements brig's sync algorithm which I called, in a burst of // modesty, "bright". (Not because it's or I'm very bright, but because it // starts with brig...) // // The sync algorithm tries to handle the following special cases: // - Propagate moves (most of them, at least) // - Propagate deletes (configurable?) // - Also sync empty directories. // // Terminology: // - Destination (short "dst") is used to reference our own storage. // - Source (short: "src") is used to reference the remote storage. // // The sync algorithm can be roughly divided in 4 stages: // - Stage 1: "Move Marking": // Iterate over all ghosts in the tree and check if they were either moved // (has sibling) or removed (has no sibling). In case of directories, the // second mapping stage is already executed. 
// // - Stage 2: "Mapping": // Finding pairs of files that possibly adding, merging or conflict handling. // Equal files will already be sorted out at this point. Every already // visited node in the remote linker will be marked. The mapping algorithm // starts at the root node and uses the attributes of the merkle trees // (same hash = same content) to skip over same parts. // // - Stage 3: "Resolving": // For each file a decision needs to be made. This decision defines the next step // and can be one of the following. // // - The file was added on the remote, we should add it to -> Add them. // - The file was removed on the remote, we might want to also delete it. // - The file was only moved on the remote node, we might want to moev it also. // - The file has compatible changes on the both sides. -> Merge them. // - The file was incompatible changes on both sides -> Do conflict resolution. // - The nodes have differing types (directory vs files). Report them. // // - Stage 4: "Handling" // Only at this stage "sync" and "diff" differ. // Sync will take the the files from Stage 3 and add/remove/merge files. // Diff will create a report out of those files and also includes files that // are simply missing on the source side (but do not need to be removed). // // Everything except Stage 4 is read-only. If a user wants to only show the diff // between two linkers, he just prints what would be done instead of actually doing it. // This makes the diff and sync implementation share most of it's code. 
import ( "fmt" "path" "regexp" "strings" e "github.com/pkg/errors" c "github.com/sahib/brig/catfs/core" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" log "github.com/sirupsen/logrus" ) var ( conflictNodePattern = regexp.MustCompile(`/.*\.conflict\.\d+`) ) // executor is the interface that executes the actual action // needed to perform the sync (see "phase 4" in the package doc) type executor interface { handleAdd(src n.ModNode) error handleRemove(dst n.ModNode) error handleMissing(dst n.ModNode) error handleMove(src, dst n.ModNode) error handleConflictNode(src n.ModNode) error handleTypeConflict(src, dst n.ModNode) error handleMerge(src, dst n.ModNode, srcMask, dstMask ChangeType) error handleConflict(src, dst n.ModNode, srcMask, dstMask ChangeType) error } ////////////////////////////////////////////// // IMPLEMENTATION OF ACTUAL DECISION MAKING // ////////////////////////////////////////////// type resolver struct { lkrSrc *c.Linker lkrDst *c.Linker // What points should be resolved dstHead *n.Commit srcHead *n.Commit // cached attributes: dstMergeCmt *n.Commit srcMergeCmt *n.Commit // actual executor based on the decision exec executor } func newResolver(lkrSrc, lkrDst *c.Linker, srcHead, dstHead *n.Commit, exec executor) (*resolver, error) { var err error if srcHead == nil { srcHead, err = lkrSrc.Status() if err != nil { return nil, err } } if dstHead == nil { dstHead, err = lkrDst.Status() if err != nil { return nil, err } } return &resolver{ lkrSrc: lkrSrc, lkrDst: lkrDst, srcHead: srcHead, dstHead: dstHead, exec: exec, }, nil } func (rv *resolver) resolve() error { srcRoot, err := rv.lkrSrc.DirectoryByHash(rv.srcHead.Root()) if err != nil { return err } if err := rv.cacheLastCommonMerge(); err != nil { return e.Wrapf(err, "failed to find last common merge") } mapper, err := NewMapper(rv.lkrSrc, rv.lkrDst, rv.srcHead, rv.dstHead, srcRoot) if err != nil { return err } mappings := []MapPair{} err = mapper.Map(func(pair MapPair) 
error { mappings = append(mappings, pair) return nil }) if err != nil { return err } for _, pair := range mappings { if err := rv.decide(pair); err != nil { return err } } return nil } func (rv *resolver) cacheLastCommonMerge() error { srcOwner, err := rv.lkrSrc.Owner() if err != nil { return err } currHead := rv.dstHead for currHead != nil { with, srcRef := currHead.MergeMarker() if with == srcOwner { srcHead, err := rv.lkrSrc.CommitByHash(srcRef) if err != nil { return err } debugf("last merge found: %v = %s", with, srcRef) rv.dstMergeCmt = currHead rv.srcMergeCmt = srcHead } prevHeadNode, err := currHead.Parent(rv.lkrDst) if err != nil { return err } if prevHeadNode == nil { break } newDstHead, ok := prevHeadNode.(*n.Commit) if !ok { return ie.ErrBadNode } currHead = newDstHead } return nil } // isConflictPath will return true if the file or directory was created // as conflict file in case of a merge conflicts. func isConflictPath(path string) bool { return conflictNodePattern.MatchString(path) } // hasConflictFile reports if we already created a conflict file for `dstNd`. func (rv *resolver) hasConflictFile(dstNd n.ModNode) (bool, error) { parent, err := rv.lkrDst.LookupDirectory(path.Dir(dstNd.Path())) if err != nil { return false, err } // Assumption: The original node and its conflict fil // will be always on the same level. If this change, // the logic here has to change also. children, err := parent.ChildrenSorted(rv.lkrDst) if err != nil { return false, err } for _, child := range children { if child.Type() == n.NodeTypeGhost { continue } if isConflictPath(child.Path()) { // Also check if the conflict file belongs to our node: return strings.HasPrefix(child.Path(), dstNd.Path()), nil } } // None found, assume we do not have a conflict file (yet) return false, nil } // hasConflicts is always called when two nodes are on both sides and they do // not have the same hash. 
In the best case, both have compatible changes and // can be merged, otherwise a user defined conflict strategy has to be applied. func (rv *resolver) hasConflicts(src, dst n.ModNode) (bool, ChangeType, ChangeType, error) { // Nodes with same hashes are no conflicts... // (tree hash is also influenced by content) if src.TreeHash().Equal(dst.TreeHash()) { return false, 0, 0, nil } srcHist, err := History(rv.lkrSrc, src, rv.srcHead, rv.srcMergeCmt) if err != nil { return false, 0, 0, e.Wrapf(err, "history src") } dstHist, err := History(rv.lkrDst, dst, rv.dstHead, rv.dstMergeCmt) if err != nil { return false, 0, 0, e.Wrapf(err, "history dst") } // This loop can be optimized if the need arises: commonRootFound := false srcRoot, dstRoot := len(srcHist), len(dstHist) for srcIdx := 0; srcIdx < len(srcHist) && !commonRootFound; srcIdx++ { for dstIdx := 0; dstIdx < len(dstHist) && !commonRootFound; dstIdx++ { srcChange, dstChange := srcHist[srcIdx], dstHist[dstIdx] if srcChange.Curr.ContentHash().Equal(dstChange.Curr.ContentHash()) { srcRoot, dstRoot = srcIdx, dstIdx commonRootFound = true } } } srcHist = srcHist[:srcRoot] dstHist = dstHist[:dstRoot] // Compute the combination of all changes: var srcMask, dstMask ChangeType for _, change := range srcHist { srcMask |= change.Mask } for _, change := range dstHist { dstMask |= change.Mask } if len(srcHist) == 0 && len(dstHist) == 0 { return false, 0, 0, nil } // Handle a few lucky cases: if len(srcHist) > 0 && len(dstHist) == 0 { // We can "fast forward" our node. // There are only remote changes for this file. return false, srcMask, dstMask, nil } if len(srcHist) == 0 && len(dstHist) > 0 { // Only our side has changes. We can consider this node as merged. return false, 0, 0, nil } // Both sides have changes. Now we need to figure out if they are compatible. // We do this simply by OR-ing all changes on both side to an individual mask // and check if those can be applied on top of dst's current state. 
if !dstMask.IsCompatible(srcMask) { // The changes are not compatible. // We need to apply a conflict resolution strategy. return true, srcMask, dstMask, nil } // No conflict. We can merge src and dst. return false, srcMask, dstMask, nil } func pathOrNil(nd n.Node) string { if nd == nil { return "nil" } return nd.Path() } func (rv *resolver) decide(pair MapPair) error { log.Debugf( "Deciding pair: src=%v dst=%v", pathOrNil(pair.Src), pathOrNil(pair.Dst), ) if pair.Src == nil && pair.Dst == nil { return fmt.Errorf("Received completely empty mapping; ignoring") } if pair.Src != nil && isConflictPath(pair.Src.Path()) { return rv.exec.handleConflictNode(pair.Src) } if pair.Dst != nil && isConflictPath(pair.Dst.Path()) { return rv.exec.handleConflictNode(pair.Dst) } if pair.SrcWasMoved { return rv.exec.handleMove(pair.Src, pair.Dst) } if pair.Src == nil { if pair.SrcWasRemoved { return rv.exec.handleRemove(pair.Dst) } return rv.exec.handleMissing(pair.Dst) } if pair.Dst == nil { return rv.exec.handleAdd(pair.Src) } if pair.TypeMismatch { debugf( "%s is a %s and %s a %s; ignoring", pair.Src.Path(), pair.Src.Type(), pair.Dst.Path(), pair.Dst.Type(), ) return rv.exec.handleTypeConflict(pair.Src, pair.Dst) } hasConflicts, srcMask, dstMask, err := rv.hasConflicts(pair.Src, pair.Dst) if err != nil { return err } if hasConflicts { return rv.exec.handleConflict(pair.Src, pair.Dst, srcMask, dstMask) } hasConflictFile, err := rv.hasConflictFile(pair.Dst) if err != nil { return err } if hasConflictFile { return nil } // handleMerge needs the masks to decide what path / content to choose. 
return rv.exec.handleMerge(pair.Src, pair.Dst, srcMask, dstMask) } ================================================ FILE: catfs/vcs/resolve_test.go ================================================ package vcs import ( "testing" c "github.com/sahib/brig/catfs/core" n "github.com/sahib/brig/catfs/nodes" "github.com/stretchr/testify/require" ) type expect struct { dstMergeCmt *n.Commit srcMergeCmt *n.Commit srcFile *n.File dstFile *n.File err error result bool } func setupResolveBasicNoConflict(t *testing.T, lkrSrc, lkrDst *c.Linker) *expect { src, _ := c.MustTouchAndCommit(t, lkrSrc, "/x.png", 1) dst, _ := c.MustTouchAndCommit(t, lkrDst, "/x.png", 2) return &expect{ dstMergeCmt: nil, srcMergeCmt: nil, srcFile: src, dstFile: dst, err: nil, result: false, } } func TestHasConflicts(t *testing.T) { t.Parallel() tcs := []struct { name string setup func(t *testing.T, lkrSrc, lkrDst *c.Linker) *expect }{ { name: "basic-no-conflict-file", setup: setupResolveBasicNoConflict, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { t.Parallel() c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) { expect := tc.setup(t, lkrSrc, lkrDst) syncer, err := newResolver(lkrSrc, lkrDst, nil, nil, nil) require.Nil(t, err) if err := syncer.cacheLastCommonMerge(); err != nil { t.Fatalf("Failed to find last common merge.") } require.Equal( t, expect.dstMergeCmt, syncer.dstMergeCmt, "dst merge marker", ) require.Equal( t, expect.srcMergeCmt, syncer.srcMergeCmt, "src merge marker", ) result, _, _, err := syncer.hasConflicts( expect.srcFile, expect.dstFile, ) if expect.err != err { t.Fatalf( "Resolve failed with wrong error: %v (want %v)", err, expect.err) } if expect.result == result { t.Fatalf( "resolve did not deliver the expected. 
Want %v, but got %v", expect.result, result, ) } }) }) } } ================================================ FILE: catfs/vcs/sync.go ================================================ package vcs import ( "fmt" "path" e "github.com/pkg/errors" c "github.com/sahib/brig/catfs/core" ie "github.com/sahib/brig/catfs/errors" n "github.com/sahib/brig/catfs/nodes" log "github.com/sirupsen/logrus" ) const ( // ConflictStragetyMarker creates marker files for each conflict. ConflictStragetyMarker = iota // ConflictStragetyIgnore ignores conflicts totally. ConflictStragetyIgnore // ConflictStragetyEmbrace takes the version of the remote. ConflictStragetyEmbrace // ConflictStragetyUnknown should be used when the strategy is not clear. ConflictStragetyUnknown ) // ConflictStrategy defines what conflict strategy to apply in case of // nodes with different content hashes. type ConflictStrategy int func (cs ConflictStrategy) String() string { switch cs { case ConflictStragetyMarker: return "marker" case ConflictStragetyIgnore: return "ignore" case ConflictStragetyEmbrace: return "embrace" default: return "unknown" } } // ConflictStrategyFromString converts a string to a ConflictStrategy. // It it is not valid, ConflictStragetyUnknown is returned. func ConflictStrategyFromString(spec string) ConflictStrategy { switch spec { case "marker": return ConflictStragetyMarker case "ignore": return ConflictStragetyIgnore case "embrace": return ConflictStragetyEmbrace default: return ConflictStragetyUnknown } } // PinStats Handy structure to use during handleMerge to store the node info in which we merge type PinStats struct { Pinned, Explicit bool } // SyncOptions gives you the possibility to configure the sync algorithm. 
type SyncOptions struct { ConflictStrategy ConflictStrategy IgnoreDeletes bool IgnoreMoves bool Message string ReadOnlyFolders map[string]bool ConflictStrategyPerFolder map[string]ConflictStrategy OnAdd func(newNd n.ModNode) bool OnRemove func(oldNd n.ModNode) bool OnMerge func(nd n.ModNode, isGet bool, ndPinStats *PinStats) bool OnConflict func(src, dst n.ModNode) bool } var ( defaultSyncConfig = &SyncOptions{} ) type syncer struct { cfg *SyncOptions lkrSrc *c.Linker lkrDst *c.Linker } func (sy *syncer) add(src n.ModNode, srcParent, srcName string) error { var newDstNode n.ModNode var err error parentDir, err := sy.lkrDst.LookupDirectory(srcParent) if err != nil { return err } switch src.Type() { case n.NodeTypeDirectory: newDstNode, err = n.NewEmptyDirectory( sy.lkrDst, parentDir, srcName, src.User(), sy.lkrDst.NextInode(), ) if err != nil { return err } if err := sy.lkrDst.StageNode(newDstNode); err != nil { return err } srcDir, ok := src.(*n.Directory) if !ok { return ie.ErrBadNode } children, err := srcDir.ChildrenSorted(sy.lkrSrc) if err != nil { return err } for _, child := range children { childModNode, ok := child.(n.ModNode) if !ok { continue } if err := sy.add(childModNode, srcDir.Path(), child.Name()); err != nil { return err } } case n.NodeTypeFile: newDstFile := n.NewEmptyFile( parentDir, srcName, src.User(), sy.lkrDst.NextInode(), ) newDstNode = newDstFile srcFile, ok := src.(*n.File) if ok { newDstFile.SetContent(sy.lkrDst, srcFile.ContentHash()) newDstFile.SetBackend(sy.lkrDst, srcFile.BackendHash()) newDstFile.SetSize(srcFile.Size()) newDstFile.SetCachedSize(srcFile.CachedSize()) newDstFile.SetKey(srcFile.Key()) } if sy.cfg.OnAdd != nil { if !sy.cfg.OnAdd(newDstFile) { return nil } } if err := parentDir.Add(sy.lkrDst, newDstFile); err != nil { return err } return sy.lkrDst.StageNode(newDstNode) case n.NodeTypeGhost: // skipping addition of a ghost return nil default: return fmt.Errorf("Unexpected node type in handleAdd") } return nil } func 
isReadOnly(folders map[string]bool, nodePaths ...string) bool { for _, nodePath := range nodePaths { for { if folders[nodePath] { return true } newNodePath := path.Dir(nodePath) if newNodePath == nodePath { break } nodePath = newNodePath } } return false } func (sy *syncer) handleAdd(src n.ModNode) error { if isReadOnly(sy.cfg.ReadOnlyFolders, src.Path()) { return nil } log.Debugf("handling add: %s", src.Path()) return sy.add(src, path.Dir(src.Path()), src.Name()) } func (sy *syncer) handleMove(src, dst n.ModNode) error { if sy.cfg.IgnoreMoves { return nil } if isReadOnly(sy.cfg.ReadOnlyFolders, src.Path(), dst.Path()) { return nil } log.Debugf("handling move: %s -> %s", dst.Path(), src.Path()) if _, err := c.Mkdir(sy.lkrDst, path.Dir(src.Path()), true); err != nil { return err } // Move our node (dst) to the path determined by src. return e.Wrapf(c.Move(sy.lkrDst, dst, src.Path()), "move") } func (sy *syncer) handleMissing(dst n.ModNode) error { // This is only called when a file in dst is missing on src. // No sync action is required. log.Debugf("handling missing: %s", dst.Path()) return nil } func (sy *syncer) handleRemove(dst n.ModNode) error { if sy.cfg.IgnoreDeletes { return nil } if isReadOnly(sy.cfg.ReadOnlyFolders, dst.Path()) { return nil } log.Debugf("handling remove: %s", dst.Path()) if sy.cfg.OnRemove != nil { if !sy.cfg.OnRemove(dst) { return nil } } _, _, err := c.Remove(sy.lkrDst, dst, true, true) return err } func (sy *syncer) getConflictStrategy(nd n.ModNode) ConflictStrategy { curr := nd.Path() // Shortcurt: If the per-folder feature is not used, // we can skip this whole loop below. 
if len(sy.cfg.ConflictStrategyPerFolder) == 0 { return sy.cfg.ConflictStrategy } log.Debugf("*** MAP %v", sy.cfg.ConflictStrategyPerFolder) for { cs, ok := sy.cfg.ConflictStrategyPerFolder[curr] if ok { return cs } parent := path.Dir(curr) if parent == curr { break } curr = parent } // No special strategy found for this folder return sy.cfg.ConflictStrategy } func (sy *syncer) handleConflict(src, dst n.ModNode, srcMask, dstMask ChangeType) error { cs := sy.getConflictStrategy(dst) if cs == ConflictStragetyIgnore { return nil } if cs == ConflictStragetyEmbrace { return sy.handleMerge(src, dst, srcMask, dstMask) } if isReadOnly(sy.cfg.ReadOnlyFolders, src.Path(), dst.Path()) { return nil } log.Debugf("handling conflict: %s <-> %s", src.Path(), dst.Path()) // Find a path that we do not have yet. // stamp := time.Now().Format(time.RFC3339) conflictName := "" conflictNameTmpl := fmt.Sprintf("%s.conflict.%%d", dst.Name()) // Fix the unlikely case that there is already a node at the conflict path: for tries := 0; tries < 100; tries++ { conflictName = fmt.Sprintf(conflictNameTmpl, tries) dstNd, err := sy.lkrDst.LookupNode(conflictName) if err != nil && !ie.IsNoSuchFileError(err) { return err } if dstNd == nil { break } } dstDirname := path.Dir(dst.Path()) if sy.cfg.OnConflict != nil { if !sy.cfg.OnConflict(src, dst) { return nil } } return sy.add(src, dstDirname, conflictName) } func (sy *syncer) handleMerge(src, dst n.ModNode, srcMask, dstMask ChangeType) error { if isReadOnly(sy.cfg.ReadOnlyFolders, src.Path(), dst.Path()) { return nil } if src.Path() != dst.Path() { // Only move the file if it was only moved on the remote side. if srcMask&ChangeTypeMove != 0 && dstMask&ChangeTypeMove == 0 { if err := c.Move(sy.lkrDst, dst, src.Path()); err != nil { return err } } } if dst.Type() == n.NodeTypeGhost { // Nothing to do. We removed the file on our side, // but it's still on the remote side. Good for them, // but keep it deleted here. 
return nil } // If src did not change, there's no need to sync the content. // If src has no changes, we know that dst must have changes, // otherwise it would have been reported as conflict. if srcMask&ChangeTypeModify == 0 && srcMask&ChangeTypeAdd == 0 { return nil } dstParent, err := n.ParentDirectory(sy.lkrDst, dst) if err != nil { return err } if err := dstParent.RemoveChild(sy.lkrDst, dst); err != nil { return err } dstFile, ok := dst.(*n.File) if !ok { return ie.ErrBadNode } srcFile, ok := src.(*n.File) if !ok { return ie.ErrBadNode } oldDstPinStats := PinStats{false, false} isGet := true if sy.cfg.OnMerge != nil { sy.cfg.OnMerge(dst, isGet, &oldDstPinStats) } dstFile.SetContent(sy.lkrDst, srcFile.ContentHash()) dstFile.SetBackend(sy.lkrDst, srcFile.BackendHash()) dstFile.SetSize(srcFile.Size()) dstFile.SetCachedSize(srcFile.CachedSize()) dstFile.SetKey(srcFile.Key()) if err := dstParent.Add(sy.lkrDst, dstFile); err != nil { return err } if sy.cfg.OnMerge != nil { if !sy.cfg.OnMerge(dst, !isGet, &oldDstPinStats) { return nil } } return sy.lkrDst.StageNode(dstFile) } func (sy *syncer) handleTypeConflict(src, dst n.ModNode) error { log.Debugf("handling type conflict: %s <-> %s", src.Path(), dst.Path()) // Simply do nothing. return nil } func (sy *syncer) handleConflictNode(src n.ModNode) error { log.Debugf("handling node conflict: %s", src.Path()) // We don't care for files on the other side named "README.conflict.0" e.g. return nil } // Sync will synchronize the changes from `lkrSrc` to `lkrDst`, // according to the options set in `cfg`. This is atomic. // A new commit might be created with `message`, defaulting to a default message // when an empty string was given. 
func Sync(lkrSrc, lkrDst *c.Linker, cfg *SyncOptions) error { if cfg == nil { cfg = defaultSyncConfig } syncer := &syncer{ cfg: cfg, lkrSrc: lkrSrc, lkrDst: lkrDst, } resolver, err := newResolver(lkrSrc, lkrDst, nil, nil, syncer) if err != nil { return err } // Make sure the complete sync goes through in one disk transaction. return lkrDst.Atomic(func() (bool, error) { // This calls all the handleXXX() callbacks above. if err := resolver.resolve(); err != nil { return true, err } wasModified, err := lkrDst.HaveStagedChanges() if err != nil { return true, err } // If something was changed, we should set the merge marker // and also create a new commit. if wasModified { srcOwner, err := lkrSrc.Owner() if err != nil { return true, err } srcHead, err := lkrSrc.Head() if err != nil { return true, err } // If something was changed, remember that we merged with src. // This avoids merging conflicting files a second time in the next resolve(). if err := lkrDst.SetMergeMarker(srcOwner, srcHead.TreeHash()); err != nil { return true, err } message := cfg.Message if message == "" { message = fmt.Sprintf("merge with »%s«", srcOwner) } if err := lkrDst.MakeCommit(srcOwner, message); err != nil { return true, err } } return false, nil }) } ================================================ FILE: catfs/vcs/sync_test.go ================================================ package vcs import ( "testing" c "github.com/sahib/brig/catfs/core" h "github.com/sahib/brig/util/hashlib" "github.com/stretchr/testify/require" ) // Create a file in src and check // that it's being synced to the dst side. func setupBasicSrcFile(t *testing.T, lkrSrc, lkrDst *c.Linker) { c.MustTouch(t, lkrSrc, "/x.png", 1) } func checkBasicSrcFile(t *testing.T, lkrSrc, lkrDst *c.Linker) { xFile, err := lkrDst.LookupFile("/x.png") require.Nil(t, err) require.Equal(t, xFile.Path(), "/x.png") require.Equal(t, xFile.BackendHash(), h.TestDummy(t, 1)) } //////// // Only have the file on dst. 
// Nothing should happen, since no pair can be found.
func setupBasicDstFile(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	c.MustTouch(t, lkrDst, "/x.png", 1)
}

func checkBasicDstFile(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// The file only exists on dst and must be untouched after the sync:
	xFile, err := lkrDst.LookupFile("/x.png")
	require.Nil(t, err)
	require.Equal(t, xFile.Path(), "/x.png")
	require.Equal(t, xFile.BackendHash(), h.TestDummy(t, 1))
}

////////

// Create the same file on both sides with the same content.
func setupBasicBothNoConflict(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	c.MustTouch(t, lkrSrc, "/x.png", 1)
	c.MustTouch(t, lkrDst, "/x.png", 1)
}

func checkBasicBothNoConflict(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// Identical content on both sides: no conflict file may appear.
	xSrcFile, err := lkrSrc.LookupFile("/x.png")
	require.Nil(t, err)
	require.Equal(t, xSrcFile.Path(), "/x.png")
	require.Equal(t, xSrcFile.BackendHash(), h.TestDummy(t, 1))

	xDstFile, err := lkrDst.LookupFile("/x.png")
	require.Nil(t, err)
	require.Equal(t, xDstFile.Path(), "/x.png")
	require.Equal(t, xDstFile.BackendHash(), h.TestDummy(t, 1))
}

////////

// Create the same file on both sides with different content.
// This should result in a conflict, resulting in conflict file.
func setupBasicBothConflict(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	c.MustTouch(t, lkrSrc, "/x.png", 42)
	c.MustTouch(t, lkrDst, "/x.png", 23)
}

func checkBasicBothConflict(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// Both originals stay untouched...
	xSrcFile, err := lkrSrc.LookupFile("/x.png")
	require.Nil(t, err)
	require.Equal(t, xSrcFile.Path(), "/x.png")
	require.Equal(t, xSrcFile.BackendHash(), h.TestDummy(t, 42))

	xDstFile, err := lkrDst.LookupFile("/x.png")
	require.Nil(t, err)
	require.Equal(t, xDstFile.Path(), "/x.png")
	require.Equal(t, xDstFile.BackendHash(), h.TestDummy(t, 23))

	// ...and the remote version lands in a marker file on dst:
	xConflictFile, err := lkrDst.LookupFile("/x.png.conflict.0")
	require.Nil(t, err)
	require.Equal(t, xConflictFile.Path(), "/x.png.conflict.0")
	require.Equal(t, xConflictFile.BackendHash(), h.TestDummy(t, 42))
}

////////

func setupBasicRemove(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// Create x.png on src and remove it after one commit:
	xFile := c.MustTouch(t, lkrSrc, "/x.png", 42)
	c.MustCommit(t, lkrSrc, "who let the x out")
	c.MustRemove(t, lkrSrc, xFile)

	// Create the same file on dst:
	c.MustTouch(t, lkrDst, "/x.png", 42)
}

func checkBasicRemove(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// The remote removal should have turned dst's copy into a ghost:
	xDstFile, err := lkrDst.LookupGhost("/x.png")
	require.Nil(t, err)
	require.Equal(t, xDstFile.Path(), "/x.png")
}

////////

func setupBasicSrcMove(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// Create x.png on src and remove it after one commit:
	xFile := c.MustTouch(t, lkrSrc, "/x.png", 42)
	c.MustCommit(t, lkrSrc, "who let the x out")
	c.MustMove(t, lkrSrc, xFile, "/y.png")

	// Create the same file on dst:
	c.MustTouch(t, lkrDst, "/x.png", 42)
}

func checkBasicSrcMove(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// The old path becomes a ghost, the content moves to the new path:
	xDstGhost, err := lkrDst.LookupGhost("/x.png")
	require.Nil(t, err)
	require.Equal(t, xDstGhost.Path(), "/x.png")
	require.Equal(t, xDstGhost.BackendHash(), h.TestDummy(t, 42))

	yDstFile, err := lkrDst.LookupFile("/y.png")
	require.Nil(t, err)
	require.Equal(t, yDstFile.Path(), "/y.png")
	require.Equal(t, yDstFile.BackendHash(), h.TestDummy(t, 42))
}

////////

// NOTE(review): this setup function is not referenced by the `tcs` table in
// TestSync below and has no matching check function — confirm whether it is
// an unfinished test case or leftover.
func setupEdgeMoveDirAndModifyChild(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// Syncing recursive empty dirs require detecting and adding them recursive.
	// This was buggy before, so prevent it from happening again.
	c.MustMkdir(t, lkrSrc, "/a")
	c.MustMkdir(t, lkrDst, "/a")
	c.MustCommit(t, lkrSrc, "added dirs src")
	c.MustCommit(t, lkrDst, "added dirs dst")
}

////////

func setupEdgeEmptyDir(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	// Syncing recursive empty dirs require detecting and adding them recursive.
	// This was buggy before, so prevent it from happening again.
	c.MustMkdir(t, lkrSrc, "/empty/sub/blub")
}

func checkEdgeEmptyDir(t *testing.T, lkrSrc, lkrDst *c.Linker) {
	dir, err := lkrDst.LookupDirectory("/empty/sub/blub")
	require.Nil(t, err)
	require.Equal(t, dir.Path(), "/empty/sub/blub")
}

// TestSync runs each setup/check pair against a fresh linker pair.
func TestSync(t *testing.T) {
	t.Parallel()

	tcs := []struct {
		name  string
		setup func(t *testing.T, lkrSrc, lkrDst *c.Linker)
		check func(t *testing.T, lkrSrc, lkrDst *c.Linker)
	}{
		{
			name:  "basic-src-file",
			setup: setupBasicSrcFile,
			check: checkBasicSrcFile,
		}, {
			name:  "basic-dst-file",
			setup: setupBasicDstFile,
			check: checkBasicDstFile,
		}, {
			name:  "basic-both-file-no-conflict",
			setup: setupBasicBothNoConflict,
			check: checkBasicBothNoConflict,
		}, {
			name:  "basic-both-file-conflict",
			setup: setupBasicBothConflict,
			check: checkBasicBothConflict,
		}, {
			name:  "basic-src-remove",
			setup: setupBasicRemove,
			check: checkBasicRemove,
		}, {
			name:  "basic-src-move",
			setup: setupBasicSrcMove,
			check: checkBasicSrcMove,
		}, {
			name:  "edge-empty-dir",
			setup: setupEdgeEmptyDir,
			check: checkEdgeEmptyDir,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) {
				tc.setup(t, lkrSrc, lkrDst)

				// sync requires that all changes are committed.
				c.MustCommitIfPossible(t, lkrDst, "setup dst")
				c.MustCommitIfPossible(t, lkrSrc, "setup src")

				if err := Sync(lkrSrc, lkrDst, nil); err != nil {
					t.Fatalf("sync failed: %v", err)
				}

				tc.check(t, lkrSrc, lkrDst)
			})
		})
	}
}

// TestSyncMergeMarker verifies that a successful sync records a merge marker
// and that a follow-up diff only reports changes made after the merge.
func TestSyncMergeMarker(t *testing.T) {
	c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) {
		c.MustTouchAndCommit(t, lkrSrc, "/x.png", 1)
		c.MustTouchAndCommit(t, lkrDst, "/y.png", 2)

		if err := Sync(lkrSrc, lkrDst, nil); err != nil {
			t.Fatalf("sync failed: %v", err)
		}

		dstHead, err := lkrDst.Head()
		require.Nil(t, err)

		srcHead, err := lkrSrc.Head()
		require.Nil(t, err)

		mergeUser, mergeHash := dstHead.MergeMarker()
		require.Equal(t, mergeUser, "src")
		require.Equal(t, mergeHash, srcHead.TreeHash())

		c.MustTouch(t, lkrSrc, "/a.png", 3)
		c.MustTouch(t, lkrDst, "/b.png", 4)

		diff, err := MakeDiff(lkrSrc, lkrDst, nil, nil, nil)
		require.Nil(t, err)

		require.Empty(t, diff.Conflict)
		require.Empty(t, diff.Ignored)
		require.Empty(t, diff.Merged)
		require.Empty(t, diff.Removed)

		require.Len(t, diff.Added, 1)
		require.Len(t, diff.Missing, 2)
		require.Equal(t, diff.Added[0].Path(), "/a.png")
		require.Equal(t, diff.Missing[0].Path(), "/b.png")
		require.Equal(t, diff.Missing[1].Path(), "/y.png")
	})
}

// TestSyncConflictMergeMarker checks diff behaviour after a sync that
// produced a conflict file.
func TestSyncConflictMergeMarker(t *testing.T) {
	c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) {
		c.MustTouchAndCommit(t, lkrSrc, "/x.png", 1)
		c.MustTouchAndCommit(t, lkrDst, "/x.png", 2)

		if err := Sync(lkrSrc, lkrDst, nil); err != nil {
			t.Fatalf("sync failed: %v", err)
		}

		dstHead, err := lkrDst.Head()
		require.Nil(t, err)

		srcHead, err := lkrSrc.Head()
		require.Nil(t, err)

		mergeUser, mergeHash := dstHead.MergeMarker()
		require.Equal(t, mergeUser, "src")
		require.Equal(t, mergeHash, srcHead.TreeHash())

		c.MustTouch(t, lkrSrc, "/a.png", 3)
		c.MustTouch(t, lkrDst, "/a.png", 4)

		diff, err := MakeDiff(lkrSrc, lkrDst, nil, nil, nil)
		require.Nil(t, err)
		require.Len(t, diff.Merged, 0)
		require.Len(t, diff.Ignored, 1)
		require.Len(t, diff.Conflict, 1)
		require.Empty(t, diff.Moved)
		require.Empty(t, diff.Missing)
		require.Empty(t, diff.Added)
		require.Empty(t, diff.Removed)

		// a.png is new and will conflict therefore.
		require.Equal(t, diff.Conflict[0].Dst.Path(), "/a.png")
		require.Equal(t, diff.Conflict[0].Src.Path(), "/a.png")

		// The previously created conflict file should count as missing.
		require.Equal(t, diff.Ignored[0].Path(), "/x.png.conflict.0")
	})
}

// TestSyncTwiceWithMovedFile checks that moves on both sides after a
// two-way sync are reported as moves, not adds/removes.
func TestSyncTwiceWithMovedFile(t *testing.T) {
	c.WithLinkerPair(t, func(lkrAli, lkrBob *c.Linker) {
		aliNd, _ := c.MustTouchAndCommit(t, lkrAli, "/ali-file", 1)
		bobNd, _ := c.MustTouchAndCommit(t, lkrBob, "/bob-file", 2)

		require.Nil(t, Sync(lkrAli, lkrBob, nil))
		require.Nil(t, Sync(lkrBob, lkrAli, nil))

		c.MustMove(t, lkrAli, aliNd, "/bali-bile")
		c.MustMove(t, lkrBob, bobNd, "/blob-lile")
		c.MustCommit(t, lkrAli, "moved file")

		diff, err := MakeDiff(lkrBob, lkrAli, nil, nil, nil)
		require.Nil(t, err)

		require.Len(t, diff.Added, 0)
		require.Len(t, diff.Removed, 0)
		require.Len(t, diff.Moved, 2)
	})
}

// TestSyncConflictStrategyEmbrace checks that the "embrace" strategy takes
// over the remote content instead of creating a conflict file.
func TestSyncConflictStrategyEmbrace(t *testing.T) {
	c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) {
		c.MustTouchAndCommit(t, lkrSrc, "/x.png", 1)
		c.MustTouchAndCommit(t, lkrDst, "/x.png", 2)

		cfg := &SyncOptions{
			ConflictStrategy: ConflictStragetyEmbrace,
		}

		diff, err := MakeDiff(lkrSrc, lkrDst, nil, nil, cfg)
		require.Nil(t, err)
		require.Len(t, diff.Conflict, 1)
		require.Empty(t, diff.Merged)
		require.Empty(t, diff.Ignored)
		require.Empty(t, diff.Moved)
		require.Empty(t, diff.Missing)
		require.Empty(t, diff.Added)
		require.Empty(t, diff.Removed)

		require.Nil(t, Sync(lkrSrc, lkrDst, cfg))

		srcX, err := lkrSrc.LookupFile("/x.png")
		require.Nil(t, err)

		dstX, err := lkrDst.LookupFile("/x.png")
		require.Nil(t, err)

		require.Equal(t, srcX.ContentHash(), dstX.ContentHash())
	})
}

// TestSyncReadOnlyFolders checks that changes below a read-only folder are
// not synced back into the configured side.
func TestSyncReadOnlyFolders(t *testing.T) {
	c.WithLinkerPair(t, func(lkrSrc, lkrDst *c.Linker) {
		// Create a file on alice' side:
		c.MustTouchAndCommit(t, lkrSrc, "/public/x.png", 1)

		cfg := &SyncOptions{
			ReadOnlyFolders: map[string]bool{
				"/public": true,
			},
		}

		// Sync without a config - this is "bob's" side.
		// (he does not have any read-only folders)
		require.Nil(t, Sync(lkrSrc, lkrDst, nil))

		// Both alice and bob should have the same file/content:
		srcX, err := lkrSrc.LookupFile("/public/x.png")
		require.Nil(t, err)

		dstX, err := lkrDst.LookupFile("/public/x.png")
		require.Nil(t, err)

		require.Equal(t, srcX.ContentHash(), dstX.ContentHash())

		// bob modifies /public/x.png
		c.MustModify(t, lkrDst, dstX, 2)

		dstX, err = lkrDst.LookupFile("/public/x.png")
		require.Nil(t, err)

		// let alice sync back the change of bob:
		require.Nil(t, Sync(lkrDst, lkrSrc, cfg))

		srcX, err = lkrSrc.LookupFile("/public/x.png")
		require.Nil(t, err)

		require.NotEqual(t, srcX.ContentHash(), dstX.ContentHash())
		require.Equal(t, srcX.ContentHash(), h.TestDummy(t, byte(1)))
	})
}

================================================
FILE: catfs/vcs/undelete.go
================================================
package vcs

import (
	"fmt"

	c "github.com/sahib/brig/catfs/core"
	ie "github.com/sahib/brig/catfs/errors"
	n "github.com/sahib/brig/catfs/nodes"
)

// Undelete tries to recover the node pointed to by `root`.
// The node must be a ghost, otherwise we will error out.
func Undelete(lkr *c.Linker, root string) error {
	nd, err := lkr.LookupModNode(root)
	if err != nil {
		return err
	}

	if nd.Type() != n.NodeTypeGhost {
		// NOTE(review): `err` is always nil at this point (checked above),
		// so the %v verb always prints "<nil>" — confirm intent.
		return fmt.Errorf("%s is not a deleted file: %v", root, err)
	}

	cmt, err := lkr.Status()
	if err != nil {
		return err
	}

	var origNd n.ModNode

	// Walk to the last point in history where the ghost
	// was either removed or moved. In theory it could have been
	// modified or added in between, but that would mean that
	// someone played around with the graph.
	walker := NewHistoryWalker(lkr, cmt, nd)
	for walker.Next() {
		state := walker.State()
		typ := state.Curr.Type()
		// Only ghost states that were created by a plain remove
		// (not by a move) are interesting here:
		if typ != n.NodeTypeGhost {
			continue
		}

		if state.Mask&ChangeTypeRemove == 0 {
			continue
		}

		if state.Mask&ChangeTypeMove > 0 {
			continue
		}

		// We know now that we're on the ghost was added after deleting
		// or removing the file. Now go one back to reach the actual node.
		if !walker.Next() {
			break
		}

		origNd = walker.State().Curr
		break
	}

	if origNd == nil {
		return fmt.Errorf("could not find a state where this file was not deleted")
	}

	// Do the actual recovery. Handle the case where we are undeleting a
	// whole directory tree with possibly empty directories inside.
	return lkr.Atomic(func() (bool, error) {
		return true, n.Walk(lkr, origNd, true, func(child n.Node) error {
			switch child.Type() {
			case n.NodeTypeDirectory:
				dir, ok := child.(*n.Directory)
				if !ok {
					return ie.ErrBadNode
				}

				// Create empty directories manually,
				// all other directories will be created implicitly:
				if dir.NChildren() == 0 {
					_, err := c.Mkdir(lkr, dir.Path(), true)
					return err
				}
			case n.NodeTypeFile:
				file, ok := child.(*n.File)
				if !ok {
					return ie.ErrBadNode
				}

				// Stage that old state:
				_, err := c.StageFromFileNode(lkr, file)
				return err
			}

			return nil
		})
	})
}

================================================
FILE: client/.gitignore
================================================
discovery/

================================================
FILE: client/client.go
================================================
package client

import (
	"context"
	"net"

	"github.com/sahib/brig/server/capnp"
	"github.com/sahib/brig/util"
	"zombiezen.com/go/capnproto2/rpc"
)

// Client is a helper API that implements the rpc interface to brig and makes
// all data easily accessible from Go. Note that this layer is needed, so we
// could later support other languages.
type Client struct {
	ctx     context.Context
	conn    *rpc.Conn
	rawConn net.Conn

	api capnp.API
}

// connFromURL dials the scheme/address pair encoded in the daemon URL `s`.
func connFromURL(s string) (net.Conn, error) {
	scheme, addr, err := util.URLToSchemeAndAddr(s)
	if err != nil {
		return nil, err
	}

	return net.Dial(scheme, addr)
}

// Dial will attempt to connect to brigd under the specified port
func Dial(ctx context.Context, daemonURL string) (*Client, error) {
	rawConn, err := connFromURL(daemonURL)
	if err != nil {
		return nil, err
	}

	transport := rpc.StreamTransport(rawConn)
	conn := rpc.NewConn(
		transport,
		rpc.ConnLog(nil),
		rpc.SendBufferSize(128),
	)

	api := capnp.API{Client: conn.Bootstrap(ctx)}
	return &Client{
		ctx:     ctx,
		rawConn: rawConn,
		conn:    conn,
		api:     api,
	}, nil
}

// LocalAddr return info about the local addr
func (cl *Client) LocalAddr() net.Addr {
	return cl.rawConn.LocalAddr()
}

// RemoteAddr return info about the remote addr
func (cl *Client) RemoteAddr() net.Addr {
	return cl.rawConn.RemoteAddr()
}

// Close will close the connection from the client side
func (cl *Client) Close() error {
	return cl.conn.Close()
}

================================================
FILE: client/clienttest/daemon.go
================================================
package clienttest

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"

	"github.com/sahib/brig/client"
	"github.com/sahib/brig/repo"
	"github.com/sahib/brig/server"
	log "github.com/sirupsen/logrus"
)

// StartDaemon starts a new daemon with user `name`, using backend defined by
// `backendName` and, if the backend is IPFS, uses the IPFS repository at
// `ipfsPath`. The resulting server should be closed after use and the
// temporary directory where all data resides should be removed.
func StartDaemon(name, backendName, ipfsPath string) (*server.Server, error) {
	repoPath, err := ioutil.TempDir("", "brig-client-repo")
	if err != nil {
		return nil, err
	}

	daemonURL := "unix:" + filepath.Join(repoPath, "brig.socket")
	if err := repo.Init(repo.InitOptions{
		BaseFolder:  repoPath,
		Owner:       name,
		BackendName: backendName,
		DaemonURL:   daemonURL,
	}); err != nil {
		return nil, err
	}

	if backendName == "httpipfs" {
		if err := repo.OverwriteConfigKey(
			repoPath,
			"daemon.ipfs_path_or_url",
			ipfsPath,
		); err != nil {
			return nil, err
		}
	}

	srv, err := server.BootServer(repoPath, daemonURL)
	if err != nil {
		return nil, err
	}

	go func() {
		if err := srv.Serve(); err != nil {
			log.WithError(err).Warnf("failed to serve")
		}
	}()

	// give some time for startup:
	time.Sleep(500 * time.Millisecond)
	return srv, nil
}

// WithDaemon calls `fn` with a readily setup daemon client. `name` is the user.
func WithDaemon(name string, fn func(ctl *client.Client) error) error {
	srv, err := StartDaemon(name, "mock", "")
	if err != nil {
		return err
	}

	defer func() {
		// Somehow there is race condition between
		// srv.Close() from the defer at the very end
		// os.RemoveAll(repoPath).
		// Theoretically, `go` should have closed server
		// but in practice I see that repoPath is removed
		// before server had a chance to close the DB
		// and I see complains in log about DB.Close
		// I introduce this time delay as a crude hack
		time.Sleep(100 * time.Millisecond)
		os.RemoveAll(srv.RepoPath())
	}()
	defer srv.Close()

	ctl, err := client.Dial(context.Background(), srv.DaemonURL())
	if err != nil {
		return err
	}

	defer ctl.Close()

	return fn(ctl)
}

// WithDaemonPair calls `fn` with two readily setup daemon clients.
// `nameA` and `nameB` are the respective names.
func WithDaemonPair(nameA, nameB string, fn func(ctlA, ctlB *client.Client) error) error {
	return WithDaemon(nameA, func(ctlA *client.Client) error {
		return WithDaemon(nameB, func(ctlB *client.Client) error {
			aliWhoami, err := ctlA.Whoami()
			if err != nil {
				return err
			}

			bobWhoami, err := ctlB.Whoami()
			if err != nil {
				return err
			}

			// add bob to ali as remote
			if err := ctlA.RemoteAddOrUpdate(client.Remote{
				Name:        nameB,
				Fingerprint: bobWhoami.Fingerprint,
			}); err != nil {
				return err
			}

			// add ali to bob as remote
			if err := ctlB.RemoteAddOrUpdate(client.Remote{
				Name:        nameA,
				Fingerprint: aliWhoami.Fingerprint,
			}); err != nil {
				return err
			}

			return fn(ctlA, ctlB)
		})
	})
}

================================================
FILE: client/fs_cmds.go
================================================
package client

import (
	"fmt"
	"io"
	"net"
	"os"
	"time"

	"github.com/sahib/brig/backend/httpipfs"
	"github.com/sahib/brig/catfs/mio"
	"github.com/sahib/brig/server/capnp"
	h "github.com/sahib/brig/util/hashlib"
)

// StatInfo gives information about a file or directory
// similar to the normal stat(2) call on POSIX.
type StatInfo struct { Path string User string Size uint64 CachedSize int64 Inode uint64 IsDir bool IsRaw bool Depth int ModTime time.Time IsPinned bool IsExplicit bool TreeHash h.Hash ContentHash h.Hash BackendHash h.Hash Key []byte Hint Hint } func convertHash(hashBytes []byte, err error) (h.Hash, error) { if err != nil { return nil, err } return h.Cast(hashBytes) } func convertCapStatInfo(capInfo *capnp.StatInfo) (*StatInfo, error) { info := &StatInfo{} path, err := capInfo.Path() if err != nil { return nil, err } user, err := capInfo.User() if err != nil { return nil, err } treeHash, err := convertHash(capInfo.TreeHash()) if err != nil { return nil, err } contentHash, err := convertHash(capInfo.ContentHash()) if err != nil { return nil, err } backendHash, err := convertHash(capInfo.BackendHash()) if err != nil { return nil, err } key, err := capInfo.Key() if err != nil { return nil, err } modTimeData, err := capInfo.ModTime() if err != nil { return nil, err } if err := info.ModTime.UnmarshalText([]byte(modTimeData)); err != nil { return nil, err } capHint, err := capInfo.Hint() if err != nil { return nil, err } hint, err := convertCapHint(capHint) if err != nil { return nil, err } info.Path = path info.User = user info.Size = capInfo.Size() info.CachedSize = capInfo.CachedSize() info.Inode = capInfo.Inode() info.IsDir = capInfo.IsDir() info.IsRaw = capInfo.IsRaw() info.IsPinned = capInfo.IsPinned() info.IsExplicit = capInfo.IsExplicit() info.Depth = int(capInfo.Depth()) info.TreeHash = treeHash info.ContentHash = contentHash info.BackendHash = backendHash info.Key = key info.Hint = *hint return info, nil } // List will list all nodes beneath and including `root` up to `maxDepth`. 
func (cl *Client) List(root string, maxDepth int) ([]StatInfo, error) { call := cl.api.List(cl.ctx, func(p capnp.FS_list_Params) error { p.SetMaxDepth(int32(maxDepth)) return p.SetRoot(root) }) result, err := call.Struct() if err != nil { return nil, err } results := []StatInfo{} statList, err := result.Entries() if err != nil { return nil, err } for idx := 0; idx < statList.Len(); idx++ { capInfo := statList.At(idx) info, err := convertCapStatInfo(&capInfo) if err != nil { return nil, err } results = append(results, *info) } return results, err } // Stage will add a new node at `repoPath` with the contents of `localPath`. func (cl *Client) Stage(localPath, repoPath string) error { call := cl.api.Stage(cl.ctx, func(p capnp.FS_stage_Params) error { if err := p.SetRepoPath(repoPath); err != nil { return err } return p.SetLocalPath(localPath) }) _, err := call.Struct() return err } // StageFromReader will create a new node at `repoPath` from the contents of `r`. func (cl *Client) StageFromReader(repoPath string, r io.Reader) error { call := cl.api.StageFromStream(cl.ctx, func(p capnp.FS_stageFromStream_Params) error { return p.SetRepoPath(repoPath) }) // NOTE: Promise pipelining happens here, // cb might not have been returned yet by the server. // We can still use it, since Cap'n Proto returns a promise here. // First network call happens only at the first Struct() call. stream := call.Stream() // relative large buffer to minimize Cap'n Proto overhead even further. buf := make([]byte, 128*1024) chunkIdx, blockCheck := 0, 1 for { isEOF := false n, err := io.ReadFull(r, buf) if err != nil { if err == io.EOF || err == io.ErrUnexpectedEOF { isEOF = true } else { return err } } if n > 0 { promise := stream.SendChunk(cl.ctx, func(params capnp.FS_StageStream_sendChunk_Params) error { return params.SetChunk(buf[:n]) }) // Assumption here: If transfer fails it will fail in the first few blocks. // For the rest of the block we can skip error checks on most blocks. 
if chunkIdx%blockCheck == 0 { if _, err := promise.Struct(); err != nil { return err } if blockCheck < 128 { blockCheck *= 2 } } chunkIdx++ } if isEOF { break } } // Tell the server side that we're done sending chunks and that the data // should be already staged. _, err := stream.Done(cl.ctx, nil).Struct() return err } // Cat outputs the contents of the node at `path`. // The node must be a file. func (cl *Client) Cat(path string, offline bool) (io.ReadCloser, error) { call := cl.api.Cat(cl.ctx, func(p capnp.FS_cat_Params) error { p.SetOffline(offline) return p.SetPath(path) }) result, err := call.Struct() if err != nil { return nil, err } port := result.Port() conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port)) if err != nil { return nil, err } return conn, nil } // CatOnClient is like Cat, but will fetch the stream directly from IPFS // and decode it on the client side. This is usually faster than other way round. func (cl *Client) CatOnClient(path string, offline bool, w io.Writer) error { info, err := cl.Stat(path) if err != nil { return err } ipfsPathOrMultiaddr, err := cl.ConfigGet("daemon.ipfs_path_or_url") if err != nil { return err } if ipfsPathOrMultiaddr == "" { return fmt.Errorf("no IPFS path or URL found - is this repo using IPFS?") } if offline { isCached, err := cl.IsCached(path) if err != nil { return err } if !isCached { return fmt.Errorf("not cached") } } nd, err := httpipfs.NewNode( ipfsPathOrMultiaddr, "", httpipfs.WithNoLogging(), ) if err != nil { return err } defer nd.Close() ipfsStream, err := nd.Cat(info.BackendHash) if err != nil { return err } defer ipfsStream.Close() stream, err := mio.NewOutStream(ipfsStream, info.IsRaw, info.Key) if err != nil { return err } _, err = io.Copy(os.Stdout, stream) return err } // Tar outputs a tar archive with the contents of `path`. // `path` can be either a file or directory. 
// Tar outputs a tar archive with the contents of `path`.
// `path` can be either a file or directory. Like Cat, the data is streamed
// over a raw TCP side-channel whose port the daemon returns.
func (cl *Client) Tar(path string, offline bool) (io.ReadCloser, error) {
	call := cl.api.Tar(cl.ctx, func(p capnp.FS_tar_Params) error {
		p.SetOffline(offline)
		return p.SetPath(path)
	})

	result, err := call.Struct()
	if err != nil {
		return nil, err
	}

	port := result.Port()
	conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		return nil, err
	}

	return conn, nil
}

// Mkdir creates a new empty directory at `path`, possibly creating
// intermediate directories if `createParents` is set.
func (cl *Client) Mkdir(path string, createParents bool) error {
	call := cl.api.Mkdir(cl.ctx, func(p capnp.FS_mkdir_Params) error {
		p.SetCreateParents(createParents)
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

// Remove removes the node at `path`.
// Directories are removed recursively.
func (cl *Client) Remove(path string) error {
	call := cl.api.Remove(cl.ctx, func(p capnp.FS_remove_Params) error {
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

// Move moves the node at `srcPath` to `dstPath`.
func (cl *Client) Move(srcPath, dstPath string) error {
	call := cl.api.Move(cl.ctx, func(p capnp.FS_move_Params) error {
		if err := p.SetSrcPath(srcPath); err != nil {
			return err
		}

		return p.SetDstPath(dstPath)
	})

	_, err := call.Struct()
	return err
}

// Copy copies the node at `srcPath` to `dstPath`.
func (cl *Client) Copy(srcPath, dstPath string) error {
	call := cl.api.Copy(cl.ctx, func(p capnp.FS_copy_Params) error {
		if err := p.SetSrcPath(srcPath); err != nil {
			return err
		}

		return p.SetDstPath(dstPath)
	})

	_, err := call.Struct()
	return err
}

// Pin sets an explicit pin on the node at `path`.
func (cl *Client) Pin(path string) error {
	call := cl.api.Pin(cl.ctx, func(p capnp.FS_pin_Params) error {
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

// Unpin removes an explicit pin at the node at `path`.
func (cl *Client) Unpin(path string) error {
	call := cl.api.Unpin(cl.ctx, func(p capnp.FS_unpin_Params) error {
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

// Repin schedules a repinning operation for everything below `root`.
func (cl *Client) Repin(root string) error {
	call := cl.api.Repin(cl.ctx, func(p capnp.FS_repin_Params) error {
		return p.SetPath(root)
	})

	_, err := call.Struct()
	return err
}

// Stat gives detailed information about the node at `path`.
func (cl *Client) Stat(path string) (*StatInfo, error) {
	call := cl.api.Stat(cl.ctx, func(p capnp.FS_stat_Params) error {
		return p.SetPath(path)
	})

	result, err := call.Struct()
	if err != nil {
		return nil, err
	}

	capInfo, err := result.Info()
	if err != nil {
		return nil, err
	}

	return convertCapStatInfo(&capInfo)
}

// Touch creates a new empty file at `path`.
func (cl *Client) Touch(path string) error {
	call := cl.api.Touch(cl.ctx, func(p capnp.FS_touch_Params) error {
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

// Exists tells us if a file at `path` exists.
func (cl *Client) Exists(path string) (bool, error) {
	call := cl.api.Exists(cl.ctx, func(p capnp.FS_exists_Params) error {
		return p.SetPath(path)
	})

	result, err := call.Struct()
	if err != nil {
		return false, err
	}

	return result.Exists(), nil
}

// Undelete restores the deleted file at `path`.
func (cl *Client) Undelete(path string) error {
	call := cl.api.Undelete(cl.ctx, func(p capnp.FS_undelete_Params) error {
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

// DeletedNodes returns a list of deleted nodes under `root`.
func (cl *Client) DeletedNodes(root string) ([]StatInfo, error) {
	call := cl.api.DeletedNodes(cl.ctx, func(p capnp.FS_deletedNodes_Params) error {
		return p.SetRoot(root)
	})

	result, err := call.Struct()
	if err != nil {
		return nil, err
	}

	capNodes, err := result.Nodes()
	if err != nil {
		return nil, err
	}

	results := []StatInfo{}
	for idx := 0; idx < capNodes.Len(); idx++ {
		capInfo := capNodes.At(idx)
		info, err := convertCapStatInfo(&capInfo)
		if err != nil {
			return nil, err
		}

		results = append(results, *info)
	}

	// err is always nil here.
	return results, err
}

// IsCached checks if file or directory at `path` is cached.
func (cl *Client) IsCached(path string) (bool, error) {
	call := cl.api.IsCached(cl.ctx, func(p capnp.FS_isCached_Params) error {
		return p.SetPath(path)
	})

	result, err := call.Struct()
	if err != nil {
		return false, err
	}

	return result.IsCached(), nil
}

// RecodeStream takes the stream at `path` and, if it is a file, re-encodes
// the stream with the current settings retrieved from the hint system.
func (cl *Client) RecodeStream(path string) error {
	call := cl.api.RecodeStream(cl.ctx, func(p capnp.FS_recodeStream_Params) error {
		return p.SetPath(path)
	})

	_, err := call.Struct()
	return err
}

================================================
FILE: client/fs_test.go
================================================
package client_test

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"sort"
	"testing"

	"github.com/sahib/brig/client"
	"github.com/sahib/brig/client/clienttest"
	"github.com/sahib/brig/repo/hints"
	colorLog "github.com/sahib/brig/util/log"
	"github.com/sahib/brig/util/testutil"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
)

func init() {
	// Keep test output quiet and colorful by default.
	log.SetLevel(log.WarnLevel)
	log.SetFormatter(&colorLog.FancyLogFormatter{
		UseColors: true,
	})
}

// stringify renders an error for use as a require message; nil becomes "".
func stringify(err error) string {
	if err == nil {
		return ""
	}

	return err.Error()
}

// withDaemon is a test adapter around clienttest.WithDaemon that converts
// the error return into a require assertion.
func withDaemon(t *testing.T, name string, fn func(ctl *client.Client)) {
	require.NoError(
		t,
		clienttest.WithDaemon(name, func(ctl *client.Client) error {
			fn(ctl)
			return nil
		}),
	)
}

// withDaemonPair is the two-daemon variant of withDaemon.
func withDaemonPair(t *testing.T, nameA, nameB string, fn func(ctlA, ctlB *client.Client)) {
	require.NoError(
		t,
		clienttest.WithDaemonPair(nameA, nameB, func(ctlA, ctlB *client.Client) error {
			fn(ctlA, ctlB)
			return nil
		}),
	)
}

func TestStageAndCat(t *testing.T) {
	withDaemon(t, "ali", func(ctl *client.Client) {
		fd, err := ioutil.TempFile("", "brig-dummy-data")
		path := fd.Name()
		defer os.RemoveAll(path)

		expected := testutil.CreateDummyBuf(2 * 1024 * 1024)

		require.Nil(t, err, stringify(err))
		_, err = fd.Write(expected)
		require.Nil(t, err, stringify(err))
		require.Nil(t, fd.Close())
		require.Nil(t, ctl.Stage(path, "/hello"))

		// Path is normalized by the daemon; "hello" resolves to "/hello".
		rw, err := ctl.Cat("hello", false)
		require.Nil(t, err, stringify(err))

		data, err := ioutil.ReadAll(rw)
		require.Nil(t, err, stringify(err))
		require.Equal(t, expected, data)
		require.Nil(t, rw.Close())
	})
}

func TestStageAndCatStream(t *testing.T) {
	withDaemon(t, "ali", func(ctl *client.Client) {
		const fileSize = 4 * 1024 * 1024
		r := io.LimitReader(&testutil.TenReader{}, fileSize)
		err := ctl.StageFromReader("/hello", r)
		require.NoError(t, err)

		// time.Sleep(time.Second)
		rw, err := ctl.Cat("/hello", false)
		require.NoError(t, err)

		n, err := io.Copy(&testutil.TenWriter{}, rw)
		require.NoError(t, err)
		require.Equal(t, int64(fileSize), n)
		require.NoError(t, rw.Close())
	})
}

func TestMkdir(t *testing.T) {
	withDaemon(t, "ali", func(ctl *client.Client) {
		// Create something nested with -p...
		require.Nil(t, ctl.Mkdir("/a/b/c", true))

		// Create it twice...
		require.Nil(t, ctl.Mkdir("/a/b/c", true))

		// Create something nested without -p
		err := ctl.Mkdir("/x/y/z", false)
		require.Contains(t, err.Error(), "No such file")

		require.Nil(t, ctl.Mkdir("/x", false))
		require.Nil(t, ctl.Mkdir("/x/y", false))
		require.Nil(t, ctl.Mkdir("/x/y/z", false))

		lst, err := ctl.List("/", -1)
		require.Nil(t, err, stringify(err))

		paths := []string{}
		for _, info := range lst {
			paths = append(paths, info.Path)
		}

		sort.Strings(paths)
		require.Equal(t, paths, []string{
			"/", "/a", "/a/b", "/a/b/c", "/x", "/x/y", "/x/y/z",
		})
	})
}

func TestSyncBasic(t *testing.T) {
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		err := aliCtl.StageFromReader("/ali_file", bytes.NewReader([]byte{42}))
		require.NoError(t, err)

		err = bobCtl.StageFromReader("/bob_file", bytes.NewReader([]byte{23}))
		require.NoError(t, err)

		_, err = aliCtl.Sync("bob", true)
		require.NoError(t, err)

		_, err = bobCtl.Sync("ali", true)
		require.NoError(t, err)

		// We cannot query the file contents, since the mock backend
		// does not yet store the file content anywhere.
		bobFileStat, err := aliCtl.Stat("/bob_file")
		require.NoError(t, err)
		require.Equal(t, "/bob_file", bobFileStat.Path)

		aliFileStat, err := bobCtl.Stat("/ali_file")
		require.NoError(t, err)
		require.Equal(t, "/ali_file", aliFileStat.Path)
	})
}

// pathsFromListing projects a listing to just the path strings.
func pathsFromListing(l []client.StatInfo) []string {
	result := []string{}
	for _, entry := range l {
		result = append(result, entry.Path)
	}
	return result
}

func TestSyncConflict(t *testing.T) {
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		// Create two files with the same content on both sides:
		err := aliCtl.StageFromReader("/README", bytes.NewReader([]byte{42}))
		require.Nil(t, err, stringify(err))

		err = bobCtl.StageFromReader("/README", bytes.NewReader([]byte{42}))
		require.Nil(t, err, stringify(err))

		// Sync and check if the files are still equal:
		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		aliFileStat, err := aliCtl.Stat("/README")
		require.Nil(t, err, stringify(err))

		bobFileStat, err := bobCtl.Stat("/README")
		require.Nil(t, err, stringify(err))

		require.Equal(t, aliFileStat.ContentHash, bobFileStat.ContentHash)

		// Modify bob's side only. A sync should have no effect.
		err = bobCtl.StageFromReader("/README", bytes.NewReader([]byte{43}))
		require.Nil(t, err, stringify(err))

		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		bobFileStat, err = bobCtl.Stat("/README")
		require.Nil(t, err, stringify(err))

		require.NotEqual(t, aliFileStat.ContentHash, bobFileStat.ContentHash)

		// Modify ali's side additionally. Now we should get a conflicting file.
		err = aliCtl.StageFromReader("/README", bytes.NewReader([]byte{41}))
		require.Nil(t, err, stringify(err))

		dirs, err := bobCtl.List("/", -1)
		require.Nil(t, err, stringify(err))
		require.Equal(t, []string{"/", "/README"}, pathsFromListing(dirs))

		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		dirs, err = bobCtl.List("/", -1)
		require.Nil(t, err, stringify(err))
		require.Equal(
			t,
			[]string{"/", "/README", "/README.conflict.0"},
			pathsFromListing(dirs),
		)
	})
}

func TestSyncSeveralTimes(t *testing.T) {
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		err := aliCtl.StageFromReader("/ali_file_1", bytes.NewReader([]byte{1}))
		require.Nil(t, err, stringify(err))

		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		dirs, err := bobCtl.List("/", -1)
		require.Nil(t, err, stringify(err))
		require.Equal(
			t,
			[]string{"/", "/ali_file_1"},
			pathsFromListing(dirs),
		)

		err = aliCtl.StageFromReader("/ali_file_2", bytes.NewReader([]byte{2}))
		require.Nil(t, err, stringify(err))

		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		dirs, err = bobCtl.List("/", -1)
		require.Nil(t, err, stringify(err))
		require.Equal(
			t,
			[]string{"/", "/ali_file_1", "/ali_file_2"},
			pathsFromListing(dirs),
		)

		err = aliCtl.StageFromReader("/ali_file_3", bytes.NewReader([]byte{3}))
		require.Nil(t, err, stringify(err))

		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		dirs, err = bobCtl.List("/", -1)
		require.Nil(t, err, stringify(err))
		require.Equal(
			t,
			[]string{"/", "/ali_file_1", "/ali_file_2", "/ali_file_3"},
			pathsFromListing(dirs),
		)
	})
}

func TestSyncPartial(t *testing.T) {
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		aliWhoami, err := aliCtl.Whoami()
		require.Nil(t, err, stringify(err))

		bobWhoami, err := bobCtl.Whoami()
		require.Nil(t, err, stringify(err))

		// Limit syncing to the /photos folder on both sides.
		require.Nil(t, aliCtl.RemoteSave([]client.Remote{
			{
				Name:        "bob",
				Fingerprint: bobWhoami.Fingerprint,
				Folders: []client.RemoteFolder{
					{
						Folder: "/photos",
					},
				},
			},
		}))

		require.Nil(t, bobCtl.RemoteSave([]client.Remote{
			{
				Name:        "ali",
				Fingerprint: aliWhoami.Fingerprint,
				Folders: []client.RemoteFolder{
					{
						Folder: "/photos",
					},
				},
			},
		}))

		err = aliCtl.StageFromReader("/docs/ali_secret.txt", bytes.NewReader([]byte{0}))
		require.Nil(t, err, stringify(err))
		err = aliCtl.StageFromReader("/photos/ali.png", bytes.NewReader([]byte{42}))
		require.Nil(t, err, stringify(err))

		err = bobCtl.StageFromReader("/docs/bob_secret.txt", bytes.NewReader([]byte{0}))
		require.Nil(t, err, stringify(err))
		err = bobCtl.StageFromReader("/photos/bob.png", bytes.NewReader([]byte{23}))
		require.Nil(t, err, stringify(err))

		_, err = aliCtl.Sync("bob", true)
		require.Nil(t, err, stringify(err))

		_, err = bobCtl.Sync("ali", true)
		require.Nil(t, err, stringify(err))

		// We cannot query the file contents, since the mock backend
		// does not yet store the file content anywhere.
		aliLs, err := aliCtl.List("/", -1)
		require.Nil(t, err, stringify(err))

		aliPaths := []string{}
		for _, entry := range aliLs {
			aliPaths = append(aliPaths, entry.Path)
		}

		bobLs, err := bobCtl.List("/", -1)
		require.Nil(t, err, stringify(err))

		bobPaths := []string{}
		for _, entry := range bobLs {
			bobPaths = append(bobPaths, entry.Path)
		}

		// The secret docs must not cross over; the photos must.
		require.Equal(
			t,
			[]string{
				"/",
				"/docs",
				"/photos",
				"/docs/ali_secret.txt",
				"/photos/ali.png",
				"/photos/bob.png",
			},
			aliPaths,
		)

		require.Equal(
			t,
			[]string{
				"/",
				"/docs",
				"/photos",
				"/docs/bob_secret.txt",
				"/photos/ali.png",
				"/photos/bob.png",
			},
			bobPaths,
		)
	})
}

func TestSyncMovedFile(t *testing.T) {
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		require.NoError(t, aliCtl.StageFromReader("/ali-file", bytes.NewReader([]byte{1, 2, 3})))
		require.NoError(t, bobCtl.StageFromReader("/bob-file", bytes.NewReader([]byte{4, 5, 6})))

		aliDiff, err := aliCtl.Sync("bob", true)
		require.NoError(t, err)

		bobDiff, err := bobCtl.Sync("ali", true)
		require.NoError(t, err)

		require.Equal(t, aliDiff.Added[0].Path, "/bob-file")
		require.Equal(t, bobDiff.Added[0].Path, "/ali-file")

		require.NoError(t, aliCtl.Move("/ali-file", "/bali-file"))

		// The move must show up as exactly one Moved entry, not add/remove.
		bobDiffAfter, err := bobCtl.Sync("ali", true)
		require.NoError(t, err)
		require.Len(t, bobDiffAfter.Added, 0)
		require.Len(t, bobDiffAfter.Removed, 0)
		require.Len(t, bobDiffAfter.Moved, 1)
	})
}

// Regression test for:
// https://github.com/sahib/brig/issues/56
func TestSyncRemovedFile(t *testing.T) {
	log.SetLevel(log.DebugLevel)
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		require.NoError(t, aliCtl.StageFromReader("/testfile", bytes.NewReader([]byte{1, 2, 3})))

		// Bob should get the /testfile now.
		bobDiff, err := bobCtl.Sync("ali", true)
		require.NoError(t, err)
		require.Equal(t, 1, len(bobDiff.Added))
		require.Equal(t, bobDiff.Added[0].Path, "/testfile")

		require.NoError(t, bobCtl.StageFromReader("/testfile", bytes.NewReader([]byte{3, 2, 1})))
		require.NoError(t, bobCtl.MakeCommit("bob changed testfile"))

		// Remove the file at ali:
		require.NoError(t, aliCtl.Remove("/testfile"))
		require.NoError(t, aliCtl.MakeCommit("removed testfile"))

		// Sync and hope that we don't get the file back from bob:
		aliDiff, err := aliCtl.Sync("bob", true)
		require.NoError(t, err)

		// Check if something was added.
		require.Equal(t, 0, len(aliDiff.Added))

		// ...but also checked it's not marked as removed:
		require.Equal(t, 0, len(aliDiff.Removed))

		_, err = aliCtl.Stat("/testfile")
		require.Error(t, err)
	})
}

func TestHints(t *testing.T) {
	withDaemon(t, "ali", func(ctl *client.Client) {
		// Add hint for directory.
		path := "/public/cat-meme.png"
		expected := testutil.CreateDummyBuf(1024 * 1024)
		require.NoError(t, ctl.Mkdir("/public", true))
		require.NoError(t, ctl.StageFromReader(path, bytes.NewReader(expected)))

		info, err := ctl.Stat(path)
		require.NoError(t, err)

		defHints := hints.Default()
		require.Equal(t, string(defHints.CompressionAlgo), info.Hint.CompressionAlgo)
		require.Equal(t, string(defHints.EncryptionAlgo), info.Hint.EncryptionAlgo)
		require.Equal(t, false, info.IsRaw)

		// Setting a hint does not re-encode the stream by itself...
		none := "none"
		require.NoError(t, ctl.HintSet("/public", &none, &none))

		info, err = ctl.Stat(path)
		require.NoError(t, err)
		require.Equal(t, "none", info.Hint.CompressionAlgo)
		require.Equal(t, "none", info.Hint.EncryptionAlgo)
		require.Equal(t, false, info.IsRaw)

		// ...only an explicit RecodeStream does.
		require.NoError(t, ctl.RecodeStream("/public"))

		info, err = ctl.Stat(path)
		require.NoError(t, err)
		require.Equal(t, "none", info.Hint.CompressionAlgo)
		require.Equal(t, "none", info.Hint.EncryptionAlgo)
		require.Equal(t, true, info.IsRaw)

		// Make sure it did not scramble the data:
		stream, err := ctl.Cat(path, true)
		require.NoError(t, err)

		got, err := ioutil.ReadAll(stream)
		require.NoError(t, err)
		require.Equal(t, expected, got)
	})
}

================================================
FILE: client/net_cmds.go
================================================
package client

import (
	"errors"
	"strings"
	"time"

	"github.com/sahib/brig/server/capnp"
	capnplib "zombiezen.com/go/capnproto2"
)

////////////////////////
// REMOTE LIST ACCESS //
////////////////////////

// RemoteFolder is a single folder shared with a remote.
type RemoteFolder struct {
	Folder           string `yaml:"Folder"`
	ReadOnly         bool   `yaml:"ReadOnly"`
	ConflictStrategy string `yaml:"ConflictStrategy"`
}

// Remote describes a single remote in the remote list.
type Remote struct {
	Name             string         `yaml:"Name"`             // Human readable name of the remote.
	Fingerprint      string         `yaml:"Fingerprint"`      // Authenticated fingerprint.
	Folders          []RemoteFolder `yaml:"Folders,flow"`     // Folders shared with this remote (empty = all).
	AutoUpdate       bool           `yaml:"AutoUpdate"`       // Accept automatic updates from this remote.
	ConflictStrategy string         `yaml:"ConflictStrategy"` // How to handle sync conflicts.
	AcceptPush       bool           `yaml:"AcceptPush"`       // Allow this remote to push to us.
}

// capRemoteToRemote converts a wire-level capnp.Remote to the client type.
func capRemoteToRemote(capRemote capnp.Remote) (*Remote, error) {
	remoteName, err := capRemote.Name()
	if err != nil {
		return nil, err
	}

	remoteFp, err := capRemote.Fingerprint()
	if err != nil {
		return nil, err
	}

	remoteFolders, err := capRemote.Folders()
	if err != nil {
		return nil, err
	}

	conflictStrategy, err := capRemote.ConflictStrategy()
	if err != nil {
		return nil, err
	}

	folders := []RemoteFolder{}
	for idx := 0; idx < remoteFolders.Len(); idx++ {
		folder := remoteFolders.At(idx)
		folderName, err := folder.Folder()
		if err != nil {
			return nil, err
		}

		cs, err := folder.ConflictStrategy()
		if err != nil {
			return nil, err
		}

		folders = append(folders, RemoteFolder{
			Folder:           folderName,
			ReadOnly:         folder.ReadOnly(),
			ConflictStrategy: cs,
		})
	}

	return &Remote{
		Name:             remoteName,
		Fingerprint:      remoteFp,
		Folders:          folders,
		AutoUpdate:       capRemote.AcceptAutoUpdates(),
		AcceptPush:       capRemote.AcceptPush(),
		ConflictStrategy: conflictStrategy,
	}, nil
}

// remoteToCapRemote is the inverse of capRemoteToRemote: it serializes
// `remote` into a new capnp.Remote allocated inside `seg`.
func remoteToCapRemote(remote Remote, seg *capnplib.Segment) (*capnp.Remote, error) {
	capRemote, err := capnp.NewRemote(seg)
	if err != nil {
		return nil, err
	}

	if err := capRemote.SetName(remote.Name); err != nil {
		return nil, err
	}

	if err := capRemote.SetFingerprint(string(remote.Fingerprint)); err != nil {
		return nil, err
	}

	if err := capRemote.SetConflictStrategy(remote.ConflictStrategy); err != nil {
		return nil, err
	}

	capFolders, err := capnp.NewRemoteFolder_List(seg, int32(len(remote.Folders)))
	if err != nil {
		return nil, err
	}

	for idx, folder := range remote.Folders {
		capFolder, err := capnp.NewRemoteFolder(seg)
		if err != nil {
			return nil, err
		}

		capFolder.SetReadOnly(folder.ReadOnly)
		if err := capFolder.SetFolder(folder.Folder); err != nil {
			return nil, err
		}

		if err := capFolder.SetConflictStrategy(folder.ConflictStrategy); err != nil {
			return nil, err
		}

		if err := capFolders.Set(idx, capFolder); err != nil {
			return nil, err
		}
	}

	if err := capRemote.SetFolders(capFolders); err != nil {
		return nil, err
	}

	capRemote.SetAcceptAutoUpdates(remote.AutoUpdate)
	capRemote.SetAcceptPush(remote.AcceptPush)
	return &capRemote, nil
}

// RemoteAddOrUpdate adds a new remote described in `remote`.
// We thus authenticate this remote.
func (cl *Client) RemoteAddOrUpdate(remote Remote) error {
	call := cl.api.RemoteAddOrUpdate(cl.ctx, func(p capnp.Net_remoteAddOrUpdate_Params) error {
		capRemote, err := remoteToCapRemote(remote, p.Segment())
		if err != nil {
			return err
		}

		return p.SetRemote(*capRemote)
	})

	_, err := call.Struct()
	return err
}

// RemoteByName returns the remote called `name` from the remote list.
func (cl *Client) RemoteByName(name string) (Remote, error) {
	call := cl.api.RemoteByName(cl.ctx, func(p capnp.Net_remoteByName_Params) error {
		return p.SetName(name)
	})

	res, err := call.Struct()
	if err != nil {
		return Remote{}, err
	}

	capRmt, err := res.Remote()
	if err != nil {
		return Remote{}, err
	}

	rmt, err := capRemoteToRemote(capRmt)
	if err != nil {
		return Remote{}, err
	}

	return *rmt, nil
}

// RemoteUpdate Updates the contents of `remote`.
func (cl *Client) RemoteUpdate(remote Remote) error {
	call := cl.api.RemoteUpdate(cl.ctx, func(p capnp.Net_remoteUpdate_Params) error {
		capRemote, err := remoteToCapRemote(remote, p.Segment())
		if err != nil {
			return err
		}

		return p.SetRemote(*capRemote)
	})

	_, err := call.Struct()
	return err
}

// RemoteRm removes a remote by `name` from the remote list.
func (cl *Client) RemoteRm(name string) error {
	call := cl.api.RemoteRm(cl.ctx, func(p capnp.Net_remoteRm_Params) error {
		return p.SetName(name)
	})

	_, err := call.Struct()
	return err
}

// RemoteClear clears all of the remote list.
func (cl *Client) RemoteClear() error { call := cl.api.RemoteClear(cl.ctx, func(p capnp.Net_remoteClear_Params) error { return nil }) _, err := call.Struct() return err } // RemoteLs lists all remotes in the remote list. func (cl *Client) RemoteLs() ([]Remote, error) { call := cl.api.RemoteLs(cl.ctx, func(p capnp.Net_remoteLs_Params) error { return nil }) result, err := call.Struct() if err != nil { return nil, err } capRemotes, err := result.Remotes() if err != nil { return nil, err } remotes := []Remote{} for idx := 0; idx < capRemotes.Len(); idx++ { capRemote := capRemotes.At(idx) remote, err := capRemoteToRemote(capRemote) if err != nil { return nil, err } remotes = append(remotes, *remote) } return remotes, nil } // RemoteSave swaps the contents of the remote lists with the contents of `remotes`. func (cl *Client) RemoteSave(remotes []Remote) error { call := cl.api.RemoteSave(cl.ctx, func(p capnp.Net_remoteSave_Params) error { seg := p.Segment() capRemotes, err := capnp.NewRemote_List(seg, int32(len(remotes))) if err != nil { return err } for idx, remote := range remotes { capRemote, err := remoteToCapRemote(remote, seg) if err != nil { return err } if err := capRemotes.Set(idx, *capRemote); err != nil { return err } } return p.SetRemotes(capRemotes) }) _, err := call.Struct() return err } // LocateResult is a result returned by Locate() type LocateResult struct { Name string Addr string Mask []string Fingerprint string } func capLrToLr(capLr capnp.LocateResult) (*LocateResult, error) { name, err := capLr.Name() if err != nil { return nil, err } addr, err := capLr.Addr() if err != nil { return nil, err } mask, err := capLr.Mask() if err != nil { return nil, err } fingerprint, err := capLr.Fingerprint() if err != nil { return nil, err } return &LocateResult{ Addr: addr, Name: name, Mask: strings.Split(mask, ","), Fingerprint: fingerprint, }, nil } // NetLocate tries to find other remotes by searching of `who` described by `mask`. // It will at max. 
take `timeoutSec` to search. This operation might take some time. // The return channel will yield a LocateResult once a new result is available. func (cl *Client) NetLocate(who, mask string, timeoutSec float64) (chan *LocateResult, error) { call := cl.api.NetLocate(cl.ctx, func(p capnp.Net_netLocate_Params) error { p.SetTimeoutSec(float64(timeoutSec)) if err := p.SetLocateMask(mask); err != nil { return err } return p.SetWho(who) }) result, err := call.Struct() if err != nil { return nil, err } ticket := result.Ticket() resultCh := make(chan *LocateResult) go func() { defer close(resultCh) for { nextCall := cl.api.NetLocateNext(cl.ctx, func(p capnp.Net_netLocateNext_Params) error { p.SetTicket(ticket) return nil }) result, err := nextCall.Struct() if err != nil { continue } if !result.HasResult() { break } capLr, err := result.Result() if err != nil { continue } lr, err := capLrToLr(capLr) if err != nil { continue } resultCh <- lr } }() return resultCh, nil } // RemotePing pings a remote by the name `who`. func (cl *Client) RemotePing(who string) (float64, error) { call := cl.api.RemotePing(cl.ctx, func(p capnp.Net_remotePing_Params) error { return p.SetWho(who) }) result, err := call.Struct() if err != nil { return 0, err } return result.Roundtrip(), nil } // Whoami describes the current user state type Whoami struct { CurrentUser string Owner string Fingerprint string IsOnline bool } // Whoami describes our own identity. 
func (cl *Client) Whoami() (*Whoami, error) {
	call := cl.api.Whoami(cl.ctx, func(p capnp.Net_whoami_Params) error {
		return nil
	})

	result, err := call.Struct()
	if err != nil {
		return nil, err
	}

	capWhoami, err := result.Whoami()
	if err != nil {
		return nil, err
	}

	// Decode field by field; every capnp accessor can fail.
	whoami := &Whoami{}
	whoami.CurrentUser, err = capWhoami.CurrentUser()
	if err != nil {
		return nil, err
	}

	whoami.Fingerprint, err = capWhoami.Fingerprint()
	if err != nil {
		return nil, err
	}

	whoami.Owner, err = capWhoami.Owner()
	if err != nil {
		return nil, err
	}

	whoami.IsOnline = capWhoami.IsOnline()
	return whoami, nil
}

// NetConnect connects to the ipfs network.
func (cl *Client) NetConnect() error {
	_, err := cl.api.Connect(cl.ctx, func(p capnp.Net_connect_Params) error {
		return nil
	}).Struct()
	return err
}

// NetDisconnect disconnects from the ipfs network.
func (cl *Client) NetDisconnect() error {
	_, err := cl.api.Disconnect(cl.ctx, func(p capnp.Net_disconnect_Params) error {
		return nil
	}).Struct()
	return err
}

// RemoteStatus is an entry in the remote online list.
// Fingerprint is not necessarily filled.
type RemoteStatus struct {
	Remote        Remote        // The remote this status refers to.
	LastSeen      time.Time     // When the remote was last seen (now if never reported).
	Roundtrip     time.Duration // Last measured ping round trip.
	Err           error         // Non-nil if the last ping failed.
	Authenticated bool          // True if the fingerprint was verified.
}

// capRemoteStatusToRemoteStatus converts the wire representation to
// RemoteStatus, parsing the RFC3339 LastSeen stamp and turning an empty
// error message into a nil error.
func capRemoteStatusToRemoteStatus(capStatus capnp.RemoteStatus) (*RemoteStatus, error) {
	capRemote, err := capStatus.Remote()
	if err != nil {
		return nil, err
	}

	remote, err := capRemoteToRemote(capRemote)
	if err != nil {
		return nil, err
	}

	msg, err := capStatus.Error()
	if err != nil {
		return nil, err
	}

	lastSeenStamp, err := capStatus.LastSeen()
	if err != nil {
		return nil, err
	}

	// An empty stamp means "never seen"; fall back to the current time.
	lastSeen := time.Now()
	if lastSeenStamp != "" {
		lastSeen, err = time.Parse(time.RFC3339, lastSeenStamp)
		if err != nil {
			return nil, err
		}
	}

	pingErr := errors.New(msg)
	if len(msg) == 0 {
		pingErr = nil
	}

	roundtripMs := time.Duration(capStatus.RoundtripMs()) * time.Millisecond
	return &RemoteStatus{
		Remote:        *remote,
		LastSeen:      lastSeen,
		Roundtrip:     roundtripMs,
		Err:           pingErr,
		Authenticated: capStatus.Authenticated(),
	}, nil
}

// RemoteOnlineList is like RemoteList but also includes IsOnline and Authenticated
// status.
func (cl *Client) RemoteOnlineList() ([]RemoteStatus, error) {
	call := cl.api.RemoteOnlineList(cl.ctx, func(p capnp.Net_remoteOnlineList_Params) error {
		return nil
	})

	result, err := call.Struct()
	if err != nil {
		return nil, err
	}

	capStatuses, err := result.Infos()
	if err != nil {
		return nil, err
	}

	statuses := []RemoteStatus{}
	for idx := 0; idx < capStatuses.Len(); idx++ {
		capStatus := capStatuses.At(idx)
		status, err := capRemoteStatusToRemoteStatus(capStatus)
		if err != nil {
			return nil, err
		}

		statuses = append(statuses, *status)
	}

	return statuses, nil
}

// Push sets a push request to `remoteName`. If `dryRun` is true,
// the push won't be send but we will still check if the push is allowed.
func (cl *Client) Push(remoteName string, dryRun bool) error {
	call := cl.api.Push(cl.ctx, func(p capnp.Net_push_Params) error {
		p.SetDryRun(dryRun)
		return p.SetRemoteName(remoteName)
	})

	_, err := call.Struct()
	return err
}

================================================
FILE: client/net_test.go
================================================
package client_test

import (
	"bytes"
	"strings"
	"testing"
	"time"

	"github.com/sahib/brig/client"
	"github.com/stretchr/testify/require"
)

func TestPush(t *testing.T) {
	withDaemonPair(t, "ali", "bob", func(aliCtl, bobCtl *client.Client) {
		require.Nil(t, aliCtl.StageFromReader("/ali-file", bytes.NewReader([]byte{1, 2, 3})))

		// Pushing is denied until bob sets AcceptPush for ali.
		err := aliCtl.Push("bob", true)
		require.True(t, strings.HasSuffix(err.Error(), "remote does not allow it"))

		aliRmt, err := bobCtl.RemoteByName("ali")
		require.Nil(t, err)

		aliRmt.AcceptPush = true
		require.Nil(t, bobCtl.RemoteAddOrUpdate(aliRmt))

		err = aliCtl.Push("bob", true)
		require.Nil(t, err)

		err = aliCtl.Push("bob", false)
		require.Nil(t, err)

		// There is a possible race condition here:
		// ``brig push`` only triggers the sync, but
		// waits only until the network message was sent.
		// It might take a small amount of time till the other
		// side managed to do the sync.
		time.Sleep(250 * time.Millisecond)

		// bob should have ali file without him syncing explicitly.
		_, err = bobCtl.Stat("/ali-file")
		require.Nil(t, err)
	})
}

================================================
FILE: client/repo_cmds.go
================================================
package client

import (
	"sort"

	gwdb "github.com/sahib/brig/gateway/db"
	"github.com/sahib/brig/server/capnp"
	h "github.com/sahib/brig/util/hashlib"
	capnplib "zombiezen.com/go/capnproto2"
)

// Quit sends a quit signal to brigd.
func (ctl *Client) Quit() error {
	call := ctl.api.Quit(ctl.ctx, func(p capnp.Repo_quit_Params) error {
		return nil
	})

	_, err := call.Struct()
	return err
}

// Ping pings the daemon to see if it is responding.
func (ctl *Client) Ping() error { call := ctl.api.Ping(ctl.ctx, func(p capnp.Repo_ping_Params) error { return nil }) result, err := call.Struct() if err != nil { return err } _, err = result.Reply() return err } // MountOptions holds the possible option for a single mount. type MountOptions struct { ReadOnly bool RootPath string Offline bool } func mountOptionsToCapnp(opts MountOptions, seg *capnplib.Segment) (*capnp.MountOptions, error) { capOpts, err := capnp.NewMountOptions(seg) if err != nil { return nil, err } capOpts.SetReadOnly(opts.ReadOnly) capOpts.SetOffline(opts.Offline) if err := capOpts.SetRootPath(opts.RootPath); err != nil { return nil, err } return &capOpts, nil } // Mount creates a new mount at `mountPath` with `opts`. func (ctl *Client) Mount(mountPath string, opts MountOptions) error { call := ctl.api.Mount(ctl.ctx, func(p capnp.Repo_mount_Params) error { capOpts, err := mountOptionsToCapnp(opts, p.Segment()) if err != nil { return err } if err := p.SetOptions(*capOpts); err != nil { return err } return p.SetMountPath(mountPath) }) _, err := call.Struct() return err } // Unmount kills a previously created mount at `mountPath`. func (ctl *Client) Unmount(mountPath string) error { call := ctl.api.Unmount(ctl.ctx, func(p capnp.Repo_unmount_Params) error { return p.SetMountPath(mountPath) }) _, err := call.Struct() return err } // ConfigGet returns the value at `key`. func (ctl *Client) ConfigGet(key string) (string, error) { call := ctl.api.ConfigGet(ctl.ctx, func(p capnp.Repo_configGet_Params) error { return p.SetKey(key) }) result, err := call.Struct() if err != nil { return "", err } return result.Value() } // ConfigSet sets the key at `key` to `value` func (ctl *Client) ConfigSet(key, value string) error { call := ctl.api.ConfigSet(ctl.ctx, func(p capnp.Repo_configSet_Params) error { if err := p.SetValue(value); err != nil { return err } return p.SetKey(key) }) _, err := call.Struct() return err } // ConfigEntry is a single entry of the config. 
type ConfigEntry struct { Key string Val string Doc string Default string NeedsRestart bool } func configEntryFromCapnp(capEntry capnp.ConfigEntry) (*ConfigEntry, error) { key, err := capEntry.Key() if err != nil { return nil, err } val, err := capEntry.Val() if err != nil { return nil, err } doc, err := capEntry.Doc() if err != nil { return nil, err } def, err := capEntry.Default() if err != nil { return nil, err } return &ConfigEntry{ Default: def, Key: key, Val: val, Doc: doc, NeedsRestart: capEntry.NeedsRestart(), }, nil } // ConfigAll returns all config entries with details. func (ctl *Client) ConfigAll() ([]ConfigEntry, error) { call := ctl.api.ConfigAll(ctl.ctx, func(p capnp.Repo_configAll_Params) error { return nil }) result, err := call.Struct() if err != nil { return nil, err } capPairs, err := result.All() if err != nil { return nil, err } entries := []ConfigEntry{} for idx := 0; idx < capPairs.Len(); idx++ { capEntry := capPairs.At(idx) entry, err := configEntryFromCapnp(capEntry) if err != nil { return nil, err } entries = append(entries, *entry) } return entries, nil } // ConfigDoc gets the documentation for a single config entry at `key`. func (ctl *Client) ConfigDoc(key string) (ConfigEntry, error) { call := ctl.api.ConfigDoc(ctl.ctx, func(p capnp.Repo_configDoc_Params) error { return p.SetKey(key) }) result, err := call.Struct() if err != nil { return ConfigEntry{}, err } capEntry, err := result.Desc() if err != nil { return ConfigEntry{}, err } entry, err := configEntryFromCapnp(capEntry) if err != nil { return ConfigEntry{}, err } return *entry, nil } // VersionInfo describes the version of the server. type VersionInfo struct { ServerSemVer string ServerRev string BackendSemVer string BackendRev string } // Version returns version information about the server. 
func (ctl *Client) Version() (*VersionInfo, error) { call := ctl.api.Version(ctl.ctx, func(p capnp.Repo_version_Params) error { return nil }) result, err := call.Struct() if err != nil { return nil, err } capVersion, err := result.Version() if err != nil { return nil, err } version := &VersionInfo{} version.ServerSemVer, err = capVersion.ServerVersion() if err != nil { return nil, err } version.ServerRev, err = capVersion.ServerRev() if err != nil { return nil, err } version.BackendSemVer, err = capVersion.BackendVersion() if err != nil { return nil, err } version.BackendRev, err = capVersion.BackendRev() if err != nil { return nil, err } return version, nil } // FstabAdd adds a new mount named `mountName` at `mountPath` with `opts`. // The mount will only be created after calling FstabApply. func (ctl *Client) FstabAdd(mountName, mountPath string, opts MountOptions) error { call := ctl.api.FstabAdd(ctl.ctx, func(p capnp.Repo_fstabAdd_Params) error { if err := p.SetMountName(mountName); err != nil { return err } if err := p.SetMountPath(mountPath); err != nil { return err } capOpts, err := mountOptionsToCapnp(opts, p.Segment()) if err != nil { return err } return p.SetOptions(*capOpts) }) _, err := call.Struct() return err } // FstabRemove removes a named mount called `mountName`. func (ctl *Client) FstabRemove(mountName string) error { call := ctl.api.FstabRemove(ctl.ctx, func(p capnp.Repo_fstabRemove_Params) error { return p.SetMountName(mountName) }) _, err := call.Struct() return err } // FstabApply will apply any changes made the filesystem tab. // This won't do anything if nothing was changed in the mean time. func (ctl *Client) FstabApply() error { call := ctl.api.FstabApply(ctl.ctx, func(p capnp.Repo_fstabApply_Params) error { return nil }) _, err := call.Struct() return err } // FstabUnmountAll will unmount all currently mounted fstab entries. 
func (ctl *Client) FstabUnmountAll() error { call := ctl.api.FstabUnmountAll(ctl.ctx, func(p capnp.Repo_fstabUnmountAll_Params) error { return nil }) _, err := call.Struct() return err } // FsTabEntry describes a single entry in the filesystem tab type FsTabEntry struct { Name string Path string Root string Active bool ReadOnly bool Offline bool } func capMountToMount(capEntry capnp.FsTabEntry) (*FsTabEntry, error) { name, err := capEntry.Name() if err != nil { return nil, err } root, err := capEntry.Root() if err != nil { return nil, err } path, err := capEntry.Path() if err != nil { return nil, err } return &FsTabEntry{ Path: path, Name: name, Root: root, Active: capEntry.Active(), ReadOnly: capEntry.ReadOnly(), Offline: capEntry.Offline(), }, nil } // FsTabList lists all fs tab entries. func (ctl *Client) FsTabList() ([]FsTabEntry, error) { call := ctl.api.FstabList(ctl.ctx, func(p capnp.Repo_fstabList_Params) error { return nil }) result, err := call.Struct() if err != nil { return nil, err } capMounts, err := result.Mounts() if err != nil { return nil, err } mounts := []FsTabEntry{} for idx := 0; idx < capMounts.Len(); idx++ { capMount := capMounts.At(idx) mount, err := capMountToMount(capMount) if err != nil { return nil, err } mounts = append(mounts, *mount) } return mounts, nil } // GarbageItem is a single path that was reaped by the garbage collector. type GarbageItem struct { Path string Owner string Content h.Hash } // GarbageCollect calls the backend (IPSF) garbage collector and returns the collected items. 
func (ctl *Client) GarbageCollect(aggressive bool) ([]*GarbageItem, error) { call := ctl.api.GarbageCollect(ctl.ctx, func(p capnp.FS_garbageCollect_Params) error { p.SetAggressive(aggressive) return nil }) result, err := call.Struct() if err != nil { return nil, err } freed := []*GarbageItem{} capFreed, err := result.Freed() if err != nil { return nil, err } for idx := 0; idx < capFreed.Len(); idx++ { capGcItem := capFreed.At(idx) gcItem := &GarbageItem{} gcItem.Owner, err = capGcItem.Owner() if err != nil { return nil, err } gcItem.Path, err = capGcItem.Path() if err != nil { return nil, err } content, err := capGcItem.Content() if err != nil { return nil, err } gcItem.Content, err = h.Cast(content) if err != nil { return nil, err } freed = append(freed, gcItem) } return freed, nil } // Become changes the current users to one of the users in the remote list. func (ctl *Client) Become(who string) error { call := ctl.api.Become(ctl.ctx, func(p capnp.Repo_become_Params) error { return p.SetWho(who) }) _, err := call.Struct() return err } // GatewayUser is a user that has access to the gateway. type GatewayUser struct { Name string PasswordHash string Salt string Folders []string Rights []string } // GatewayUserAdd adds a new user to the user database. // `folders` is a list of directories he may access. 
It might be empty, // in which case he can access everything (same as []string{"/"}) func (ctl *Client) GatewayUserAdd(name, password string, folders, rights []string) error { call := ctl.api.GatewayUserAdd(ctl.ctx, func(p capnp.Repo_gatewayUserAdd_Params) error { if err := p.SetName(name); err != nil { return err } if err := p.SetPassword(password); err != nil { return err } if err := p.SetPassword(password); err != nil { return err } seg := p.Segment() capFolders, err := capnplib.NewTextList(seg, int32(len(folders))) if err != nil { return err } for idx, folder := range folders { if err := capFolders.Set(idx, folder); err != nil { return err } } if err := p.SetFolders(capFolders); err != nil { return err } capRights, err := capnplib.NewTextList(seg, int32(len(rights))) if err != nil { return err } for idx, right := range rights { if err := capRights.Set(idx, right); err != nil { return err } } return p.SetRights(capRights) }) _, err := call.Struct() return err } // GatewayUserRemove removes an existing user and will error out // if the said user does not exist. func (ctl *Client) GatewayUserRemove(name string) error { call := ctl.api.GatewayUserRm(ctl.ctx, func(p capnp.Repo_gatewayUserRm_Params) error { return p.SetName(name) }) _, err := call.Struct() return err } // GatewayUserList lists all currently existing users. 
func (ctl *Client) GatewayUserList() ([]GatewayUser, error) { call := ctl.api.GatewayUserList(ctl.ctx, func(p capnp.Repo_gatewayUserList_Params) error { return nil }) result, err := call.Struct() if err != nil { return nil, err } capUsers, err := result.Users() if err != nil { return nil, err } users := []GatewayUser{} for idx := 0; idx < capUsers.Len(); idx++ { capUser := capUsers.At(idx) gwuser, err := gwdb.UserFromCapnp(capUser) if err != nil { return nil, err } users = append(users, GatewayUser{ Name: gwuser.Name, Salt: gwuser.Salt, PasswordHash: gwuser.PasswordHash, Folders: gwuser.Folders, Rights: gwuser.Rights, }) } return users, err } // DebugProfilePort will get the port of pprof server in the backend. // The port changes during daemon restarts. func (ctl *Client) DebugProfilePort() (int, error) { call := ctl.api.DebugProfilePort(ctl.ctx, func(p capnp.Repo_debugProfilePort_Params) error { return nil }) result, err := call.Struct() if err != nil { return -1, err } return int(result.Port()), nil } // Hint is a container for configuring streams. type Hint struct { // Path is the path the hint applies to (recursively) Path string // CompressionAlgo can be an algorithm or "guess" // to let brig choose a suitable one. CompressionAlgo string // EncryptionAlgo must be a valid encryption algorithm. 
EncryptionAlgo string } // HintSet remembers the given settings at `path` (and below) func (ctl *Client) HintSet(path string, compressionAlgo, encryptionAlgo *string) error { call := ctl.api.HintSet(ctl.ctx, func(p capnp.Repo_hintSet_Params) error { capHint, err := capnp.NewHint(p.Segment()) if err != nil { return err } if err := capHint.SetPath(path); err != nil { return err } if compressionAlgo != nil { if err := capHint.SetCompressionAlgo(*compressionAlgo); err != nil { return err } } if encryptionAlgo != nil { if err := capHint.SetEncryptionAlgo(*encryptionAlgo); err != nil { return err } } return p.SetHint(capHint) }) _, err := call.Struct() return err } // HintRemove removes the hint at `path`. func (ctl *Client) HintRemove(path string) error { call := ctl.api.HintRemove(ctl.ctx, func(p capnp.Repo_hintRemove_Params) error { return p.SetPath(path) }) _, err := call.Struct() return err } func convertCapHint(capHint capnp.Hint) (*Hint, error) { path, err := capHint.Path() if err != nil { return nil, err } compressionAlgo, err := capHint.CompressionAlgo() if err != nil { return nil, err } encryptionAlgo, err := capHint.EncryptionAlgo() if err != nil { return nil, err } return &Hint{ Path: path, EncryptionAlgo: encryptionAlgo, CompressionAlgo: compressionAlgo, }, nil } // HintList lists all hints that are currently set. 
func (ctl *Client) HintList() ([]Hint, error) { call := ctl.api.HintList(ctl.ctx, func(p capnp.Repo_hintList_Params) error { return nil }) result, err := call.Struct() if err != nil { return nil, err } capHints, err := result.Hints() if err != nil { return nil, err } hints := []Hint{} for idx := 0; idx < capHints.Len(); idx++ { hint, err := convertCapHint(capHints.At(idx)) if err != nil { return nil, err } hints = append(hints, *hint) } // Sort for display convenience: sort.Slice(hints, func(i, j int) bool { return hints[i].Path < hints[j].Path }) return hints, nil } ================================================ FILE: client/vcs_cmds.go ================================================ package client import ( "strings" "time" "github.com/sahib/brig/server/capnp" h "github.com/sahib/brig/util/hashlib" ) // MakeCommit creates a new commit from the current staging area. // The commit will have the message `msg`. func (ctl *Client) MakeCommit(msg string) error { call := ctl.api.Commit(ctl.ctx, func(p capnp.VCS_commit_Params) error { return p.SetMsg(msg) }) _, err := call.Struct() return err } // Commit describes a single commit in more detail. type Commit struct { Hash h.Hash Msg string Tags []string Date time.Time } func convertCapCommit(capEntry *capnp.Commit) (*Commit, error) { result := Commit{} modTimeStr, err := capEntry.Date() if err != nil { return nil, err } if err := result.Date.UnmarshalText([]byte(modTimeStr)); err != nil { return nil, err } result.Hash, err = capEntry.Hash() if err != nil { return nil, err } result.Msg, err = capEntry.Msg() if err != nil { return nil, err } tagList, err := capEntry.Tags() if err != nil { return nil, err } tags := []string{} for idx := 0; idx < tagList.Len(); idx++ { tag, err := tagList.At(idx) if err != nil { return nil, err } tags = append(tags, tag) } result.Tags = tags return &result, nil } // Log lists all commits, starting with the newest one. 
func (ctl *Client) Log() ([]Commit, error) { call := ctl.api.Log(ctl.ctx, func(p capnp.VCS_log_Params) error { return nil }) results := []Commit{} result, err := call.Struct() if err != nil { return nil, err } entries, err := result.Entries() if err != nil { return nil, err } for idx := 0; idx < entries.Len(); idx++ { capEntry := entries.At(idx) result, err := convertCapCommit(&capEntry) if err != nil { return nil, err } results = append(results, *result) } return results, nil } // Tag tags a commit (`rev`) with a certain `name`. func (ctl *Client) Tag(rev, name string) error { call := ctl.api.Tag(ctl.ctx, func(p capnp.VCS_tag_Params) error { if err := p.SetTagName(name); err != nil { return err } return p.SetRev(rev) }) _, err := call.Struct() return err } // Untag removes the `name` tag. func (ctl *Client) Untag(name string) error { call := ctl.api.Untag(ctl.ctx, func(p capnp.VCS_untag_Params) error { return p.SetTagName(name) }) _, err := call.Struct() return err } // Reset restores the content of `path` to the state at `rev`. // If `force` is true, it will overwrite the staging area if it needs to. func (ctl *Client) Reset(path, rev string, force bool) error { call := ctl.api.Reset(ctl.ctx, func(p capnp.VCS_reset_Params) error { if err := p.SetPath(path); err != nil { return err } p.SetForce(force) return p.SetRev(rev) }) _, err := call.Struct() return err } // Change describes a change of a node between two commits. type Change struct { Path string Mask []string Head *Commit Next *Commit MovedTo string WasPreviouslyAt string IsPinned bool IsExplicit bool } // History returns a detailed set of changes that happened to the node at `path`. 
func (ctl *Client) History(path string) ([]*Change, error) { call := ctl.api.History(ctl.ctx, func(p capnp.VCS_history_Params) error { return p.SetPath(path) }) result, err := call.Struct() if err != nil { return nil, err } histList, err := result.History() if err != nil { return nil, err } results := []*Change{} for idx := 0; idx < histList.Len(); idx++ { entry := histList.At(idx) path, err := entry.Path() if err != nil { return nil, err } change, err := entry.Change() if err != nil { return nil, err } capHeadCmt, err := entry.Head() if err != nil { return nil, err } head, err := convertCapCommit(&capHeadCmt) if err != nil { return nil, err } movedTo, err := entry.MovedTo() if err != nil { return nil, err } wasPreviouslyAt, err := entry.WasPreviouslyAt() if err != nil { return nil, err } var next *Commit if entry.HasNext() { capNextCmt, err := entry.Next() if err != nil { return nil, err } next, err = convertCapCommit(&capNextCmt) if err != nil { return nil, err } } results = append(results, &Change{ Path: path, Mask: strings.Split(change, "|"), Head: head, Next: next, MovedTo: movedTo, WasPreviouslyAt: wasPreviouslyAt, IsPinned: entry.IsPinned(), IsExplicit: entry.IsExplicit(), }) } return results, nil } // DiffPair is a pair of nodes that were changed in some way. type DiffPair struct { Src StatInfo Dst StatInfo } // Diff gives a detailed overview over the changes between two commits. type Diff struct { Added []StatInfo Removed []StatInfo Ignored []StatInfo Missing []StatInfo Moved []DiffPair Merged []DiffPair Conflict []DiffPair } // IsEmpty reports if a diff is completely empty (i.e. 
nothing changed) func (df *Diff) IsEmpty() bool { return 0 == 0+ len(df.Added)+ len(df.Removed)+ len(df.Ignored)+ len(df.Missing)+ len(df.Moved)+ len(df.Merged)+ len(df.Conflict) } func convertDiffList(lst capnp.StatInfo_List) ([]StatInfo, error) { infos := []StatInfo{} for idx := 0; idx < lst.Len(); idx++ { capInfo := lst.At(idx) info, err := convertCapStatInfo(&capInfo) if err != nil { return nil, err } infos = append(infos, *info) } return infos, nil } func convertDiffPairList(lst capnp.DiffPair_List) ([]DiffPair, error) { pairs := []DiffPair{} for idx := 0; idx < lst.Len(); idx++ { capPair := lst.At(idx) capSrc, err := capPair.Src() if err != nil { return nil, err } capDst, err := capPair.Dst() if err != nil { return nil, err } srcInfo, err := convertCapStatInfo(&capSrc) if err != nil { return nil, err } dstInfo, err := convertCapStatInfo(&capDst) if err != nil { return nil, err } pairs = append(pairs, DiffPair{ Src: *srcInfo, Dst: *dstInfo, }) } return pairs, nil } func convertCapDiffToDiff(capDiff capnp.Diff) (*Diff, error) { diff := &Diff{} lst, err := capDiff.Added() if err != nil { return nil, err } diff.Added, err = convertDiffList(lst) if err != nil { return nil, err } lst, err = capDiff.Missing() if err != nil { return nil, err } diff.Missing, err = convertDiffList(lst) if err != nil { return nil, err } lst, err = capDiff.Removed() if err != nil { return nil, err } diff.Removed, err = convertDiffList(lst) if err != nil { return nil, err } lst, err = capDiff.Ignored() if err != nil { return nil, err } diff.Ignored, err = convertDiffList(lst) if err != nil { return nil, err } pairs, err := capDiff.Moved() if err != nil { return nil, err } diff.Moved, err = convertDiffPairList(pairs) if err != nil { return nil, err } pairs, err = capDiff.Merged() if err != nil { return nil, err } diff.Merged, err = convertDiffPairList(pairs) if err != nil { return nil, err } pairs, err = capDiff.Conflict() if err != nil { return nil, err } diff.Conflict, err = 
convertDiffPairList(pairs) if err != nil { return nil, err } return diff, nil } // MakeDiff creates a diff between the commits at `remoteRev` and `localRev`. // If `needFetch` is true, the data is first updated from the remote. func (ctl *Client) MakeDiff(local, remote, localRev, remoteRev string, needFetch bool) (*Diff, error) { call := ctl.api.MakeDiff(ctl.ctx, func(p capnp.VCS_makeDiff_Params) error { if err := p.SetLocalOwner(local); err != nil { return err } if err := p.SetRemoteOwner(remote); err != nil { return err } if err := p.SetLocalRev(localRev); err != nil { return err } p.SetNeedFetch(needFetch) return p.SetRemoteRev(remoteRev) }) result, err := call.Struct() if err != nil { return nil, err } capDiff, err := result.Diff() if err != nil { return nil, err } return convertCapDiffToDiff(capDiff) } // Fetch updates our internal copy of the data of `remote`. func (ctl *Client) Fetch(remote string) error { call := ctl.api.Fetch(ctl.ctx, func(p capnp.VCS_fetch_Params) error { return p.SetWho(remote) }) _, err := call.Struct() return err } // Sync triggers a sync with the data from `remote`. // If `needFetch` is true, the data is first updated from the remote. func (ctl *Client) Sync(remote string, needFetch bool) (*Diff, error) { call := ctl.api.Sync(ctl.ctx, func(p capnp.VCS_sync_Params) error { p.SetNeedFetch(needFetch) return p.SetWithWhom(remote) }) result, err := call.Struct() if err != nil { return nil, err } capDiff, err := result.Diff() if err != nil { return nil, err } return convertCapDiffToDiff(capDiff) } // CommitInfo is like a stat(2) for commits. 
func (ctl *Client) CommitInfo(rev string) (bool, *Commit, error) { call := ctl.api.CommitInfo(ctl.ctx, func(p capnp.VCS_commitInfo_Params) error { return p.SetRev(rev) }) result, err := call.Struct() if err != nil { return false, nil, err } if !result.IsValidRef() { return false, nil, nil } capCmt, err := result.Commit() if err != nil { return false, nil, err } cmt, err := convertCapCommit(&capCmt) if err != nil { return false, nil, err } return true, cmt, nil } ================================================ FILE: cmd/bug.go ================================================ package cmd import ( "bytes" "context" "fmt" "net/url" "os" "os/exec" "strings" "github.com/fatih/color" "github.com/sahib/brig/client" "github.com/sahib/brig/version" "github.com/toqueteos/webbrowser" "github.com/urfave/cli" ) const ( reportURL = "https://github.com/sahib/brig/issues/new?" ) // printError simply prints a nicely formatted error to stderr. func printError(msg string) { fmt.Fprintln(os.Stderr, color.RedString("*** ")+msg) } // cmdOutput runs a command at `path` with `args` and returns it's output. // No real error checking is done, on errors an empty string is returned. func cmdOutput(path string, args ...string) string { out, err := exec.Command(path, args...).Output() // #nosec if err != nil { // No other error checking here, `brig bug` is best effort. printError(fmt.Sprintf("failed to run %s %s", path, strings.Join(args, " "))) return "" } return strings.TrimSpace(string(out)) } // handleBugReport compiles a report of useful info when providing a bug report. func handleBugReport(ctx *cli.Context) error { buf := &bytes.Buffer{} fmt.Fprintln(buf, `Please answer these questions before submitting your issue. Please include anything else you think is helpful. Thanks! ### What did you do? ### What did you expect to see? ### What did you see instead? ### Do you still see this issue with a development binary? ### Did you check if a similar bug report was already opened? 
### System details:`) fmt.Fprintf(buf, "go version: ``%s``\n", cmdOutput("go", "version")) fmt.Fprintf(buf, "uname -s -v -m: ``%s``\n", cmdOutput("uname", "-s", "-v", "-m")) fmt.Fprintf(buf, "IPFS config: ``%s``\n", cmdOutput("ipfs", "config", "show")) fmt.Fprintf(buf, "\n") fmt.Fprintf( buf, "brig client version: ``%s [build: %s]``\n", version.String(), version.BuildTime, ) daemonURL, err := guessDaemonURL(ctx) if err != nil { return err } ctl, err := client.Dial(context.Background(), daemonURL) if err == nil { // Try to get the server side / ipfs version. version, err := ctl.Version() if err == nil { fmt.Fprintf( buf, "brig server version: ``%s+%s``\n", version.ServerSemVer, version.ServerRev, ) fmt.Fprintf( buf, "IPFS Version: ``%s+%s``\n", version.BackendSemVer, version.BackendRev, ) } } else { printError("Cannot get server and IPFS version.") printError("If it is possible to start the daemon, do it now.") printError("This will make the bug report more helpful. Thanks.") } printToStdout := ctx.Bool("stdout") if !printToStdout { // Try to open the issue tracker for convinience: urlVal := url.Values{} urlVal.Set("body", buf.String()) if err := webbrowser.Open(reportURL + urlVal.Encode()); err != nil { printToStdout = true } } if printToStdout { // If not, ask the user to print it directly: if !ctx.Bool("stdout") { printError("I failed to open the issue tracker in your browser.") printError("Please paste the underlying text manually at this URL:") printError("https://github.com/sahib/brig/issues") } fmt.Println(buf.String()) } return nil } ================================================ FILE: cmd/debug.go ================================================ package cmd import ( "fmt" "io" "io/ioutil" "os" "github.com/dustin/go-humanize" "github.com/mr-tron/base58" "github.com/sahib/brig/catfs/mio" "github.com/sahib/brig/client" "github.com/sahib/brig/fuse/fusetest" "github.com/sahib/brig/repo/hints" "github.com/sahib/brig/util/testutil" "github.com/urfave/cli" ) func 
handleDebugPprofPort(ctx *cli.Context, ctl *client.Client) error { port, err := ctl.DebugProfilePort() if err != nil { return err } if port > 0 { fmt.Println(port) } else { fmt.Println("Profiling is not enabled.") fmt.Println("Enable daemon.enable_pprof and restart.") } return nil } func readDebugKey(ctx *cli.Context) ([]byte, error) { keyB58 := ctx.String("key") key, err := base58.Decode(keyB58) if err != nil { return nil, err } return key, nil } func handleDebugDecodeStream(ctx *cli.Context) error { key, err := readDebugKey(ctx) if err != nil { return err } fd, err := ioutil.TempFile("", "") if err != nil { return err } defer fd.Close() defer os.Remove(fd.Name()) _, err = io.Copy(fd, os.Stdin) if err != nil { return err } _, err = fd.Seek(0, io.SeekStart) if err != nil { return err } stream, err := mio.NewOutStream(fd, ctx.Bool("raw"), key) if err != nil { return err } _, err = io.Copy(os.Stdout, stream) return err } func handleDebugEncodeStream(ctx *cli.Context) error { key, err := readDebugKey(ctx) if err != nil { return err } hint := hints.Hint{ EncryptionAlgo: hints.EncryptionHint(ctx.String("encryption")), CompressionAlgo: hints.CompressionHint(ctx.String("compression")), } if !hint.IsValid() { return fmt.Errorf("invalid encryption or compression") } r, _, err := mio.NewInStream(os.Stdin, "", key, hint) if err != nil { return err } _, err = io.Copy(os.Stdout, r) return err } func readStreamSized(ctx *cli.Context) (uint64, error) { return humanize.ParseBytes(ctx.String("size")) } func handleDebugTenSource(ctx *cli.Context) error { s, err := readStreamSized(ctx) if err != nil { return err } tr := &testutil.TenReader{} _, err = io.Copy(os.Stdout, io.LimitReader(tr, int64(s))) return err } func handleDebugTenSink(ctx *cli.Context) error { s, err := readStreamSized(ctx) if err != nil { return err } tw := &testutil.TenWriter{} n, err := io.Copy(tw, os.Stdin) if err != nil { return err } if int64(s) != n { return fmt.Errorf("expected %d, got %d bytes", s, n) } 
return nil } func handleDebugFuseMock(ctx *cli.Context) error { opts := fusetest.Options{ CatfsPath: ctx.String("catfs-path"), MountPath: ctx.String("mount-path"), IpfsPathOrMultiaddr: ctx.String("ipfs-path-or-multiaddr"), URL: ctx.String("url"), MountReadOnly: ctx.Bool("mount-ro"), MountOffline: ctx.Bool("mount-offline"), } return fusetest.Launch(opts) } ================================================ FILE: cmd/exit_codes.go ================================================ package cmd const ( // Success is the same as EXIT_SUCCESS in C Success = iota // BadArgs passed to cli; not our fault. BadArgs // BadPassword passed to prompt or switch; not our fault. BadPassword // DaemonNotResponding means the daemon does not respond in timely fashion. // Probably our fault. DaemonNotResponding // UnknownError is an uncategorized error, probably our fault. UnknownError ) ================================================ FILE: cmd/fs_handlers.go ================================================ package cmd import ( "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "regexp" "strconv" "strings" "time" "github.com/sahib/brig/cmd/tabwriter" "github.com/sahib/brig/util" "github.com/dustin/go-humanize" "github.com/fatih/color" "github.com/sahib/brig/client" "github.com/urfave/cli" "github.com/vbauerster/mpb" "github.com/vbauerster/mpb/decor" terminal "github.com/wayneashleyberry/terminal-dimensions" ) func handleStage(ctx *cli.Context, ctl *client.Client) error { localPath := ctx.Args().Get(0) readFromStdin := ctx.Bool("stdin") repoPath := filepath.Base(localPath) if len(ctx.Args()) > 1 { repoPath = ctx.Args().Get(1) if localPath == "-" { readFromStdin = true } } if readFromStdin { repoPath = ctx.Args().Get(0) return ctl.StageFromReader(repoPath, os.Stdin) } absLocalPath, err := filepath.Abs(localPath) if err != nil { return fmt.Errorf("Failed to retrieve absolute path: %v", err) } _, err = os.Stat(absLocalPath) if err != nil { return err } return handleStageDirectory(ctx, ctl, 
absLocalPath, repoPath) } // holds brig repoPaths twins (by content) to a OS file system localPath type twins struct { localPath string repoPaths []string } type walkOptions struct { dereference bool continueOnError bool } func walk(root, repoRoot string, depth int, opt walkOptions) (map[string]twins, error) { // toBeStaged map: key is local path, value is array of repoPaths using the local path toBeStaged := make(map[string]twins) depth++ if depth > 255 { return toBeStaged, fmt.Errorf("Exceeded allowed dereferencing depth for %v", root) } root = filepath.Clean(root) repoRoot = filepath.Clean(repoRoot) err := filepath.Walk(root, func(childPath string, info os.FileInfo, err error) error { repoPath := filepath.Join("/", repoRoot, childPath[len(root):]) if opt.dereference && info.Mode()&os.ModeSymlink != 0 { // NOTE: `brig` does not have concept of symlink // The lack of native symlinks in `brig` has the following potential issues // * Ignoring cycles limits valid use cases. // * Not ignoring cycles opens room for malicious input. // // Here we implement a dereference of symlinks as a temporary measure // which works in the most likely user scenarios (we assume that user // is not malicious to herself and does not create infinite symlinked loops). // If the level of recursion or cycles // (where link points to itself directly or indirectly) is exceeded, // we just fail on such link. // Currently, we have a depth limit of 255 (see couple line above). 
resolvedPath, err := filepath.EvalSymlinks(childPath) if err != nil { msg := fmt.Sprintf("Failed to resolve: %v: %v", childPath, err) if opt.continueOnError { fmt.Fprintf(os.Stderr, "WARNING: %s\n", msg) return nil } return fmt.Errorf(msg) } info, err = os.Stat(resolvedPath) if err != nil { msg := fmt.Sprintf("Failed to do os.Stat(%v): %v", resolvedPath, err) if opt.continueOnError { fmt.Fprintf(os.Stderr, "WARNING: %s\n", msg) return nil } return fmt.Errorf(msg) } childPath = resolvedPath if info.Mode().IsDir() { extra, err := walk(childPath, repoPath, depth, opt) if err != nil { if opt.continueOnError { fmt.Fprintf(os.Stderr, "WARNING: %s\n", err.Error()) return nil } return err } for k, v := range extra { t, ok := toBeStaged[k] if !ok { t = twins{ v.localPath, []string{}, } } t.repoPaths = append(t.repoPaths, v.repoPaths...) toBeStaged[k] = t } return nil } } if info.Mode().IsRegular() { k, _ := inodeString(childPath) t, ok := toBeStaged[k] if !ok { t = twins{ childPath, []string{}, } } t.repoPaths = append(t.repoPaths, repoPath) toBeStaged[k] = t } return nil }) return toBeStaged, err } func makeParentDirIfNeeded(ctx *cli.Context, ctl *client.Client, path string) error { parent := filepath.Dir(path) info, err := ctl.Stat(parent) if err != nil { if yes, _ := regexp.MatchString("No such file or directory:", err.Error()); yes { createParents := true err = ctl.Mkdir(parent, createParents) } return err } if info.IsDir { return nil } return fmt.Errorf("Cannot make dir from existing non dir node %s", parent) } func handleStageDirectory(ctx *cli.Context, ctl *client.Client, root, repoRoot string) error { // Links will be reflinked in the `man cp` sense, // i.e. 
resolved repoPaths will point to the same content and backend hash root = filepath.Clean(root) repoRoot = filepath.Clean(repoRoot) opt := walkOptions{ dereference: !ctx.Bool("no-dereference"), continueOnError: ctx.Bool("continue-on-error"), } toBeStaged, err := walk(root, repoRoot, 0, opt) if err != nil { return fmt.Errorf("failed to walk dir: %v: %v", root, err) } if len(toBeStaged) == 0 { // This might happen if ask to stage a symlink pointing to a dir // but Walk does not travel symlinks and we end up with empty list. return nil } width, err := terminal.Width() if err != nil { fmt.Fprintf(os.Stderr, "warning: failed to get terminal size: %s\n", err) width = 80 } pbars := mpb.New( // override default (80) width mpb.WithWidth(int(width)), // override default 120ms refresh rate mpb.WithRefreshRate(250*time.Millisecond), ) name := "ETA" bar := pbars.AddBar( int64(len(toBeStaged)), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( // ETA decorator with ewma age of 60, and width reservation of 4 decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WC{W: 4}), "done", ), ), mpb.AppendDecorators(decor.Percentage()), ) type stageList struct { local string repoList []string } nWorkers := 20 start := time.Now() jobs := make(chan twins, nWorkers) // Start a bunch of workers that will do the actual adding: for idx := 0; idx < nWorkers; idx++ { go func() { for { twinsSet, ok := <-jobs if !ok { return } firstToStage := "" for i, repoPath := range twinsSet.repoPaths { if i == 0 { firstToStage = repoPath // First occurrence is staged. // Stage creates all needed parent directories. if err := ctl.Stage(twinsSet.localPath, repoPath); err != nil { fmt.Fprintf(os.Stderr, "failed to stage '%s' as '%s': %v\n", twinsSet.localPath, repoPath, err) break } continue } // Copy does not create parent directories. We take care of it. 
					if err := makeParentDirIfNeeded(ctx, ctl, repoPath); err != nil {
						fmt.Fprintf(os.Stderr, "failed to make the parent dir for '%s': %v\n", repoPath, err)
						break
					}

					// All further twins are cheap copies of the first staged path.
					if err := ctl.Copy(firstToStage, repoPath); err != nil {
						fmt.Fprintf(os.Stderr, "failed copy of '%s' to '%s': %v\n", firstToStage, repoPath, err)
						break
					}
				}

				// Notify the bar. The op time is used for the ETA.
				// The time is measured by "start" is NOT the time used to
				// stage a single file. This would only work in a non-parallel
				// environment, because the ETA would assume that one file took
				// 2s, so 1000 files must take 2000s. Instead it measures the
				// time between two time recordings, which are in the ideal
				// case around 1/n_workers * time_to_stage but it measures the
				// actual amount of parallelism that we achieve.
				// NOTE(review): »start« is read and re-assigned here by every
				// worker without locking — racy; confirm this is tolerated.
				bar.IncrBy(1, time.Since(start))
				start = time.Now()
			}
		}()
	}

	// Send the jobs onward:
	for _, v := range toBeStaged {
		jobs <- v
	}

	// Closing the channel makes all workers return once drained.
	close(jobs)
	pbars.Wait()
	return nil
}

// handleCat writes the content of a file (or a tar archive of a directory)
// to stdout. With --stream the data is fetched directly on the client side.
func handleCat(ctx *cli.Context, ctl *client.Client) error {
	path := "/"
	if len(ctx.Args()) >= 1 {
		path = ctx.Args().First()
	}

	info, err := ctl.Stat(path)
	if err != nil {
		return err
	}

	doOffline := ctx.Bool("offline")
	var stream io.ReadCloser
	if info.IsDir {
		// Directories are delivered as a tar archive.
		stream, err = ctl.Tar(path, doOffline)
	} else if ctx.Bool("stream") {
		// Short-circuit: let the client stream directly to stdout.
		return ctl.CatOnClient(path, doOffline, os.Stdout)
	} else {
		stream, err = ctl.Cat(path, doOffline)
	}

	if err != nil {
		return err
	}

	defer util.Closer(stream)

	if _, err := io.Copy(os.Stdout, stream); err != nil {
		return ExitCode{
			UnknownError,
			fmt.Sprintf("cat: %v", err),
		}
	}

	return nil
}

// handleRm removes the node at the first argument path from the repository.
func handleRm(ctx *cli.Context, ctl *client.Client) error {
	path := ctx.Args().First()
	if err := ctl.Remove(path); err != nil {
		return ExitCode{
			UnknownError,
			fmt.Sprintf("rm: %v", err),
		}
	}

	return nil
}

// handleMv moves a node from the first argument path to the second.
func handleMv(ctx *cli.Context, ctl *client.Client) error {
	srcPath := ctx.Args().Get(0)
	dstPath := ctx.Args().Get(1)
	return ctl.Move(srcPath, dstPath)
}

// handleCp copies a node from the first argument path to the second.
func handleCp(ctx *cli.Context, ctl *client.Client) error {
	srcPath :=
		ctx.Args().Get(0)
	dstPath := ctx.Args().Get(1)
	return ctl.Copy(srcPath, dstPath)
}

// colorForSize returns a color formatting function suited for »size«:
// cyan for KiB range, yellow for MiB, red for GiB, magenta above that,
// and a no-op formatter for anything below 1 KiB.
func colorForSize(size uint64) func(f string, a ...interface{}) string {
	switch {
	case size >= 1024 && size < 1024<<10:
		return color.CyanString
	case size >= 1024<<10 && size < 1024<<20:
		return color.YellowString
	case size >= 1024<<20 && size < 1024<<30:
		return color.RedString
	case size >= 1024<<30:
		return color.MagentaString
	default:
		// Identity formatter: leaves small sizes uncolored.
		return func(f string, a ...interface{}) string {
			return f
		}
	}
}

// userPrefixMap maps each user name to the shortest unambiguous prefix of
// it: the part before »@« is tried first, then the part before »/«. If an
// abbreviation would collide with an existing one, the full name is kept.
func userPrefixMap(users []string) map[string]string {
	m := make(map[string]string)
	for _, user := range users {
		m[user] = user
	}

	// tryAbbrev reports whether »abbrev« is not yet taken by any entry.
	tryAbbrev := func(abbrev string) bool {
		for _, short := range m {
			if short == abbrev {
				return false
			}
		}

		return true
	}

	for name := range m {
		atIdx := strings.Index(name, "@")
		if atIdx != -1 && tryAbbrev(name[:atIdx]) {
			m[name] = name[:atIdx]
			continue
		}

		slashIdx := strings.Index(name, "/")
		if slashIdx != -1 && tryAbbrev(name[:slashIdx]) {
			m[name] = name[:slashIdx]
			continue
		}
	}

	return m
}

// formatHint renders the encryption/compression hint of a node as a short
// »enc:X-zip:Y« tag for table output.
func formatHint(hint client.Hint) string {
	return fmt.Sprintf("enc:%s-zip:%s", hint.EncryptionAlgo, hint.CompressionAlgo)
}

// handleList prints a table of all entries below a root path, optionally
// rendered through a user supplied format template (--format).
func handleList(ctx *cli.Context, ctl *client.Client) error {
	maxDepth := ctx.Int("depth")
	if ctx.Bool("recursive") {
		// -1 means "no depth limit" for ctl.List.
		maxDepth = -1
	}

	root := "/"
	if ctx.Args().Present() {
		root = ctx.Args().First()
	}

	entries, err := ctl.List(root, maxDepth)
	if err != nil {
		return err
	}

	tabW := tabwriter.NewWriter(
		os.Stdout, 0, 0, 2, ' ',
		tabwriter.StripEscape,
	)

	tmpl, err := readFormatTemplate(ctx)
	if err != nil {
		return err
	}

	if tmpl != nil {
		// Template mode: render each entry and skip the table entirely.
		for _, entry := range entries {
			if err := tmpl.Execute(os.Stdout, entry); err != nil {
				return err
			}
		}

		return nil
	}

	users := []string{}
	for _, entry := range entries {
		users = append(users, entry.User)
	}

	// Abbreviate user names; the USER column is only shown when more than
	// one distinct user appears in the listing.
	userMap := userPrefixMap(users)

	if len(entries) != 0 {
		userColumn := ""
		if len(userMap) > 1 {
			userColumn = "USER\t"
		}

		fmt.Fprintf(tabW, "SIZE\tBKEND\tMODTIME\t%sPATH\tPIN\tCACHED\tHINT\n", userColumn)
	}

	for _, entry := range entries {
		pinState := " " + pinStateToSymbol(entry.IsPinned, entry.IsExplicit)
		var coloredPath string
		if entry.IsDir {
			coloredPath = color.GreenString(entry.Path)
		} else {
			coloredPath = color.WhiteString(entry.Path)
		}

		userEntry := ""
		if len(userMap) > 1 {
			userEntry = color.GreenString(userMap[entry.User]) + "\t"
		}

		isCached, err := ctl.IsCached(entry.Path)
		if err != nil {
			return err
		}

		cachedState := " " + pinStateToSymbol(isCached, false)

		// NOTE(review): the BKEND column (CachedSize) is colored by the
		// logical entry.Size, not by CachedSize — confirm intended.
		fmt.Fprintf(
			tabW,
			"%s\t%s\t%s\t%s%s\t%s\t%s\t%s\n",
			colorForSize(entry.Size)(humanize.Bytes(entry.Size)),
			colorForSize(entry.Size)(humanize.Bytes(uint64(entry.CachedSize))),
			entry.ModTime.Format("2006-01-02 15:04:05 MST"),
			userEntry,
			coloredPath,
			pinState,
			cachedState,
			formatHint(entry.Hint),
		)
	}

	return tabW.Flush()
}

// handleTree renders the repository (or a sub path) as a tree view,
// including pin state symbols.
func handleTree(ctx *cli.Context, ctl *client.Client) error {
	root := "/"
	if ctx.NArg() > 0 {
		root = ctx.Args().First()
	}

	entries, err := ctl.List(root, -1)
	if err != nil {
		return ExitCode{
			UnknownError,
			fmt.Sprintf("tree: %v", err),
		}
	}

	showTree(entries, &treeCfg{
		showPin: true,
	})

	return nil
}

// handleMkdir creates a directory, optionally with all parents (--parents).
func handleMkdir(ctx *cli.Context, ctl *client.Client) error {
	path := ctx.Args().First()
	createParents := ctx.Bool("parents")
	if err := ctl.Mkdir(path, createParents); err != nil {
		return ExitCode{UnknownError, fmt.Sprintf("mkdir: %v", err)}
	}

	return nil
}

// handleShow dispatches to the commit view when the argument resolves to a
// valid commit reference, or to the file/directory view otherwise.
func handleShow(ctx *cli.Context, ctl *client.Client) error {
	path := ctx.Args().First()
	isValidRef, cmt, err := ctl.CommitInfo(path)
	if err != nil {
		return err
	}

	if isValidRef {
		return handleShowCommit(ctx, ctl, cmt)
	}

	return handleShowFileOrDir(ctx, ctl, path)
}

// handleShowCommit prints the metadata of a single commit and, if anything
// changed in it, a diff tree against its parent.
func handleShowCommit(ctx *cli.Context, ctl *client.Client, cmt *client.Commit) error {
	tabW := tabwriter.NewWriter(
		os.Stdout, 0, 0, 2, ' ',
		tabwriter.StripEscape,
	)

	// printPair writes one aligned "name: value" row.
	printPair := func(name string, val interface{}) {
		fmt.Fprintf(
			tabW,
			"%s\t%v\t\n",
			color.WhiteString(name),
			val,
		)
	}

	printPair("Path", cmt.Hash)
	printPair("Tags", strings.Join(cmt.Tags, ", "))
	printPair("ModTime", cmt.Date.Format(time.RFC3339))
	printPair("Message", cmt.Msg)
	// NOTE(review): Flush error is ignored here (unlike handleShowFileOrDir,
	// which returns it) — confirm this is intentional.
	tabW.Flush()

	self, err := ctl.Whoami()
	if err != nil {
		return err
	}

	// Diff this commit against its direct parent (»^« suffix) to show what
	// changed inside of it.
	diff, err := ctl.MakeDiff(
		self.CurrentUser,
		self.CurrentUser,
		cmt.Hash.B58String()+"^",
		cmt.Hash.B58String(),
		false,
	)

	if err != nil {
		return err
	}

	if !diff.IsEmpty() {
		fmt.Println()
		fmt.Println("Here's what changed in this commit:")
		fmt.Println()
		printDiffTree(diff, false)
	}

	return nil
}

// handleShowFileOrDir prints the metadata of a file or directory, either as
// an aligned key/value table or through a user supplied format template.
func handleShowFileOrDir(ctx *cli.Context, ctl *client.Client, path string) error {
	info, err := ctl.Stat(path)
	if err != nil {
		return err
	}

	tmpl, err := readFormatTemplate(ctx)
	if err != nil {
		return err
	}

	if tmpl != nil {
		return tmpl.Execute(os.Stdout, info)
	}

	isCached, err := ctl.IsCached(path)
	if err != nil {
		return err
	}

	pinState := yesify(info.IsPinned)
	explicitState := yesify(info.IsExplicit)
	cachedState := yesify(isCached)

	nodeType := "file"
	if info.IsDir {
		nodeType = "directory"
	}

	tabW := tabwriter.NewWriter(
		os.Stdout, 0, 0, 2, ' ',
		tabwriter.StripEscape,
	)

	// printPair writes one aligned "name: value" row.
	printPair := func(name string, val interface{}) {
		fmt.Fprintf(
			tabW,
			"%s\t%v\t\n",
			color.WhiteString(name),
			val,
		)
	}

	printPair("Path", info.Path)
	printPair("User", info.User)
	printPair("Type", nodeType)
	printPair("Size", fmt.Sprintf("%s (%d bytes)", humanize.Bytes(info.Size), info.Size))
	printPair("Backend Size", fmt.Sprintf("%s (%d bytes)", humanize.Bytes(uint64(info.CachedSize)), info.CachedSize))
	printPair("Inode", strconv.FormatUint(info.Inode, 10))
	printPair("Pinned", pinState)
	printPair("Explicit", explicitState)
	printPair("Cached", cachedState)
	printPair("IsRaw", yesify(info.IsRaw))
	printPair("ModTime", info.ModTime.Format(time.RFC3339))
	printPair("Tree Hash", info.TreeHash.B58String())
	printPair("Content Hash", info.ContentHash.B58String())
	printPair("Hint", formatHint(info.Hint))

	// Directories have no single backend hash.
	if !info.IsDir {
		printPair("Backend Hash", info.BackendHash.B58String())
	} else {
		printPair("Backend Hash", "-")
	}

	return tabW.Flush()
}

// handleEdit downloads the file at the argument path (if it exists), opens
// it in $EDITOR via editToPath and stages the edited result back.
func handleEdit(ctx *cli.Context, ctl *client.Client) error {
	repoPath := ctx.Args().First()
	exists, err := ctl.Exists(repoPath)
	if err != nil {
		return err
	}

	// New files start out with empty content.
	data := []byte{}
	if exists {
		r, err := ctl.Cat(repoPath, false)
		if err != nil {
			return err
		}

		defer util.Closer(r)

		data, err = ioutil.ReadAll(r)
		if err != nil {
			return err
		}
	}

	// Keep the original extension so the editor can pick syntax highlighting.
	tempPath, err := editToPath(data, path.Ext(repoPath))
	if err != nil {
		return err
	}

	defer func() {
		// Best effort cleanup of the temporary edit file.
		if err := os.Remove(tempPath); err != nil {
			fmt.Printf("Failed to remove temp file: %v\n", err)
		}
	}()

	return ctl.Stage(tempPath, repoPath)
}

// handleTouch creates an empty file (or updates the mod time) at the
// argument path.
func handleTouch(ctx *cli.Context, ctl *client.Client) error {
	repoPath := ctx.Args().First()
	return ctl.Touch(repoPath)
}

// handleTrashList prints the paths of all deleted nodes below a root
// (defaults to »/«).
func handleTrashList(ctx *cli.Context, ctl *client.Client) error {
	root := "/"
	if firstArg := ctx.Args().First(); firstArg != "" {
		root = firstArg
	}

	nodes, err := ctl.DeletedNodes(root)
	if err != nil {
		return err
	}

	for _, node := range nodes {
		fmt.Println(node.Path)
	}

	return nil
}

// handleTrashRemove restores a previously deleted node.
func handleTrashRemove(ctx *cli.Context, ctl *client.Client) error {
	return ctl.Undelete(ctx.Args().First())
}



================================================
FILE: cmd/help.go
================================================
package cmd

import (
	"fmt"
	"strings"

	"github.com/sahib/brig/repo/hints"
	"github.com/toqueteos/webbrowser"
	"github.com/urfave/cli"
)

// helpEntry bundles all help related metadata of a single CLI command.
type helpEntry struct {
	Usage       string
	ArgsUsage   string
	Description string
	Complete    cli.BashCompleteFunc
	Flags       []cli.Flag
}

// die panics with »msg«. Used when a command is missing its help entry.
func die(msg string) {
	// be really pedantic when help is missing.
	// it is a developer mistake after all and should be caught early.
	panic(msg)
}

// compressionHintsToBullets renders all supported compression algorithms as
// a bullet list, marking the configured default.
func compressionHintsToBullets() string {
	d := hints.Default()
	s := []string{}
	for _, algo := range hints.CompressionHints() {
		suffix := ""
		if d.CompressionAlgo == algo {
			suffix = " (default)"
		}

		s = append(s, fmt.Sprintf(" * %s%s", algo, suffix))
	}

	return strings.Join(s, "\n")
}

// encryptionHintsToBullets renders all supported encryption algorithms as a
// bullet list, marking the configured default.
func encryptionHintsToBullets() string {
	d := hints.Default()
	s := []string{}
	for _, algo := range hints.EncryptionHints() {
		suffix := ""
		if d.EncryptionAlgo == algo {
			suffix = " (default)"
		}

		s = append(s, fmt.Sprintf(" * %s%s", algo, suffix))
	}

	return strings.Join(s, "\n")
}

// helpTexts maps a (dot separated) command path to its help entry.
var helpTexts = map[string]helpEntry{
	"init": {
		Usage:     "Initialize a new repository.",
		ArgsUsage: "",
		Complete:  completeArgsUsage,
		Flags: []cli.Flag{
			// duplicate of global repo, because it is convenient to
			// write »brig init --repo blah«.
			cli.StringFlag{
				Name:   "repo",
				Usage:  "Path to the repository. Only has effect for new daemons.",
				Value:  "",
				EnvVar: "BRIG_PATH",
			},
			cli.StringFlag{
				Name:  "backend,b",
				Value: "httpipfs",
				Usage: "What data backend to use for the new repo. One of `mock`, `httpipfs`. This cannot be changed later!",
			},
			cli.BoolFlag{
				Name:  "empty,e",
				Usage: "Do not create an initial README and no initial commit.",
			},
			cli.BoolFlag{
				Name:  "no-logo,n",
				Usage: "Do not display the super pretty logo on init.",
			},
			cli.StringFlag{
				Name:  "ipfs-path-or-multiaddr",
				Usage: "Specify a path to an ipfs repo (/tmp/ipfs-repo) or a multiaddr to a running ipfs daemon (/ip4/127.0.0.1/tcp/5002)",
				Value: "",
			},
			cli.BoolFlag{
				Name:  "no-ipfs-setup",
				Usage: "Do not try to install and setup IPFS.",
			},
			cli.BoolFlag{
				Name:  "no-ipfs-config",
				Usage: "Do no changes in the IPFS config that are necessary for brig. Use only when you know what you're doing.",
			},
			cli.BoolFlag{
				Name:  "no-ipfs-optimization,o",
				Usage: "Do no changes in the IPFS config that will improve the performance of brig, but are not necessary to work.",
			},
		},
		Description: `Initialize a new repository with a certain backend.
If BRIG_PATH or --repo is set, the new repository will be created at this place. If nothing is specified, the repo is created at "~/.brig". If the directory is not empty, brig will warn you about it and abort. The user name can be specified as pretty much any string, but it is recommended to use the special format »user@domain.something/resource«. This is similar to XMPP IDs. Specifying a resource can help you use the same name for different computers and specifying a domain makes it possible to indicate groups. This is especially important for commands like »brig net locate« but is not used extensively by anything else yet. EXAMPLES: # Easiest way to create a repository at /tmp/brig $ brig init --repo /tmp/brig ali@wonderland.org/rabbithole `, }, "whoami": { Usage: "Print the own remote identity including IPFS id, fingerprint and user name.", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "fingerprint,f", Usage: "Only print the own fingerprint", }, cli.BoolFlag{ Name: "name,n", Usage: "Only print the own name", }, cli.BoolFlag{ Name: "addr,a", Usage: "Only print the IPFS id portion of the fingerprint", }, cli.BoolFlag{ Name: "key,k", Usage: "Only print the key portion of the fingerprint", }, }, Description: `This command prints your name, fingerprint and what store you are looking at. When you initialized your repository, you chose the name and a fingerprint (two longer hash values) was created for you. EXAMPLES: # Show the fingerprint only: $ brig whoami -f QmUYz9dbqnYPyHCLUi7ghtiwFbdU93MQKFH4qg8iXHWcPV:W1q4vzbvLPUVwDUUXxjQfnuYJxq2CYqbeqXPSv7pUr5NcP `, }, "remote": { Usage: "Add, list, remove and edit remotes.", Complete: completeSubcommands, Description: ` A remote is the data needed to contact other instances of brig in the web. In order to add a remote, you need their fingerprint (as shown by »brig whoami«). This fingerprint should be exchanged in prior over a secure side channel (a secure instant messenger for example). 
Once both sides added each other as remotes they are said to be »authenticated«. Each remote can be configured further by specifying folders they may access or special settings like auto-updating. See the individual commands for more information. Also see the »net locate« command for details about finding other remotes. EXAMPLES: # Show a diff for each remote: $ brig remote list --format '{{ .Name }}' | xargs -n 1 brig diff `, }, "remote.add": { Usage: "Add/Update a remote under a handy name with their fingerprint.", ArgsUsage: " ", Complete: completeArgsUsage, Description: "", Flags: []cli.Flag{ cli.BoolFlag{ Name: "auto-update,a", Usage: "Take automatic updates from this node.", }, cli.BoolFlag{ Name: "accept-push,p", Usage: "Allow this remote to push to our state.", }, cli.StringSliceFlag{ Name: "folder,f", Usage: "Configure the folders this remote may see. Can be given more than once. If the first letter of the folder is »-« it is added as read-only.", }, cli.StringFlag{ Name: "conflict-strategy,c", Usage: "Which conflict strategy to apply (either »marker«, »ignore« or »embrace«)", Value: "", }, }, }, "remote.remove": { Usage: "Remove a remote by name.", ArgsUsage: "", Complete: completeArgsUsage, Description: "Remove a remote by name.", }, "remote.list": { Usage: "List all remotes and their online status", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "offline,o", Usage: "Do not query the online status", }, cli.StringFlag{ Name: "format,f", Usage: "Format the output according to a template", }, }, Description: ` This goes over every entry in your remote list and prints by default the remote name, fingerprint, rountrip, last seen timestamp and settings. You can format the output by using »--format« with one the following attributes: * .Name * .Fingerprint * .Folders * .AutoUpdate The syntax of the template is borrowed from Go. 
You can read about the details here: https://golang.org/pkg/text/template Note that this command will try to peek the fingerprint of each node, even if we did not authenticate him yet. If you do not want this, you should use »--offline«. EXAMPLES: $ brig rmt ls -f '{{ .Name }}' # Show each remote name, line by line. `, }, "remote.clear": { Usage: "Clear the complete remote list.", Complete: completeArgsUsage, Description: "Note that you cannot undo this operation!", }, "remote.ping": { Usage: "Ping a remote.", Complete: completeArgsUsage, Description: `Ping a remote and check if we can reach them. There is a small difference to the »remote list« command. »ping« will only work if both sides authenticated each other and can thus be used as a test for this. Additionally, it shows the roundtrip time (i.e. the time the ping request took to travel). EXAMPLES: $ brig rmt ping `, }, "remote.edit": { Usage: "Edit the current list.", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.StringFlag{ Name: "yml,y", Value: "", Usage: "Directly overwrite remote list with yml file", }, }, Description: ` Edit the current list using $EDITOR as YAML file. It will be updated once you exit your editor.`, }, "remote.auto-update": { Usage: "Enable auto-updating for this remote", Complete: completeArgsUsage, Description: `When enabled you will get updates shortly after this remote made it. EXAMPLES: # Enable auto-updating both for bob and charlie. $ brig remote auto-update enable bob charlie # or shorter to prevent you from RSI: brig rmt au e bob charlie `, Flags: []cli.Flag{ cli.BoolFlag{ Name: "no-initial-sync,n", Usage: "Do not sync initially when upon enabling.", }, }, }, "remote.accept-push": { Usage: "Allow receiving push requests from this remote.", Complete: completeArgsUsage, Description: `When enabled, other remotes can do »brig push « to us. When we receive a push request we will sync with this remote. 
EXAMPLES: # Allow bob and charlie to push to us: $ brig remote accept-push enable bob charlie # or shorter to prevent you from RSI: brig rmt ap e bob charlie `, }, "remote.conflict-strategy": { Usage: "Change what conflict resolution strategy is used on conflicts.", Complete: completeArgsUsage, Description: `The conflict strategy defines how to act on sync conflicts. There are three different types: - marker: Create a conflict file with the remote's version. (default) - ignore: Ignore the remote version completely and keep our version. - embrace: Take the remote version and replace ours with it. See also »brig config doc fs.sync.conflict_strategy«. In case of an empty string, the config value above is used. EXAMPLES: # Allow bob and charlie to push to us: $ brig remote conflict-strategy embrace bob charlie # or shorter to prevent you from RSI: brig rmt cs embrace bob charlie `, }, "remote.folder": { Usage: "Configure what folders a remote is allowed to see.", Complete: completeArgsUsage, Description: ` By default every remote is allowed to see all of your folders. You might want to share only specific folders with certain remotes. By adding folders to this list, you're limiting the nodes other remotes can see. If you do not specify any subcommand, this is a shortcut for »brig rmt f ls«`, }, "remote.folder.add": { Usage: "Add a remote folder for a specific remote.", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "read-only,r", Usage: "Add the folder as read-only.", }, cli.StringFlag{ Name: "conflict-strategy,c", Usage: "What conflict strategy to use for this specific folder. Overwrites per-remote conflict strategy.", Value: "", }, }, Description: `If a folder is added as read-only, we do not accept changes when syncing from remotes. 
EXAMPLES: $ brig remote folder add bob /public --read-only `, }, "remote.folder.set": { Usage: "Update the settings of a remote folder.", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "read-only,r", Usage: "Add the folder as read-only.", }, cli.BoolFlag{ Name: "read-write,w", Usage: "Add the folder as read and writeable.", }, cli.StringFlag{ Name: "conflict-strategy,c", Usage: "What conflict strategy to use for this specific folder. Overwrites per-remote conflict strategy.", Value: "", }, }, Description: `This works exactly like »add« but overwrites an existing folder. EXAMPLES: $ brig remote folder set bob /public --read-only `, }, "remote.folder.remove": { Usage: "Remove a folder from a specific remote. ", Complete: completeArgsUsage, Description: ``, }, "remote.folder.clear": { Usage: "Clear all folders from a specific remote.", Complete: completeArgsUsage, Description: ``, }, "remote.folder.list": { Usage: "List all allowed folders for a specific remote.", Complete: completeArgsUsage, Description: ``, }, "pin": { Usage: "Commands to handle the pin state.", ArgsUsage: "", Complete: completeBrigPath(true, true), Description: `Pinning a file to keep it in local storage. When you retrieve a file from a remote machine, the file will be cached (or maybe only blocks of it) for some time on your machine. If the file is not pinned, it might be collected by the garbage collector on the next run. The garbage collector is currently not invoked automatically, but can be activated via »brig gc«. Note that you can also pin files that you do not have cached locally. The pin does not download a file automatically currently. Until we have a proper way to do this, you can use »brig cat > /dev/null«. This command contains the subcommand 'add', but for usability reasons, »brig pin add « is the same as »brig pin «. See also the »gc« command as counterpart of pinning. 
`, }, "pin.add": { Usage: "Pin a file or directory to local storage", ArgsUsage: "", Complete: completeBrigPath(true, true), Description: `A node that is pinned to local storage will not be deleted by the garbage collector.`, }, "pin.remove": { Usage: "Remove a pin", ArgsUsage: "", Complete: completeBrigPath(true, true), Description: `A node that is pinned to local storage will not be deleted by the garbage collector.`, }, "pin.repin": { Usage: "Recaculate pinning based on fs.repin.{quota,min_depth,max_depth}", ArgsUsage: "[]", Complete: completeBrigPath(true, true), Description: `Trigger a repin calculation. This uses the following configuration variables: - fs.repin.quota: Max. amount of data to store in a repository. - fs.repin.min_depth: Keep this many versions definitely pinned. Trumps quota. - fs.repin.max_depth: Unpin versions beyond this depth definitely. Trumps quota. If repin detects files that need to be unpinned, then it will first unpin all files that are beyond the max depth setting. If this is not sufficient to stay under the quota, it will delete old versions, layer by layer starting with the biggest version first. If the optional root path was specified, the repin is only run in this part of the filesystem. This can be used to give the repin algorithm a hint where the space should be reclaimed. `, }, "net": { Usage: "Commands that change or query the network status.", Complete: completeSubcommands, Description: `Most of these subcommands are somewhat low-level and are not often used.`, }, "net.offline": { Usage: "Prevent any online usage.", Complete: completeArgsUsage, Description: ` The daemon will be running after going offline. After going offline, other peers will not be able to contact you any more and vice versa. The daemon keeps running in this time and you can do all offline operations. BUGS: This currently does not prevent other nodes to contact us. 
Shutdown the IPFS daemon to be sure for now.`, }, "net.online": { Usage: "Allow online usage.", Complete: completeArgsUsage, Description: ` Opposite of »brig net offline«. This is the default state whenever the daemon starts.`, }, "net.status": { Usage: "Check if you're connected to the global network.", Complete: completeArgsUsage, Description: `This will either print the string »online« or »offline«.`, }, "net.locate": { Usage: "Try to locate a remote by their name or by a part of it.", ArgsUsage: "", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.StringFlag{ Name: "t,timeout", Value: "10s", Usage: "Wait at most seconds before bailing out", }, cli.StringFlag{ Name: "m,mask", Value: "exact,domain,user,email", Usage: "Indicate what part of the id you want to query for", }, }, Description: `brig is able to find the fingerprint of other users (that are online) by a part of their name. See the help of »brig init« to see out of what components the name is built of. Each found item shows the name, the fingerprint and what part of their name matched with your query. Sometimes other peers are offline and cannot send your their fingerprint. In this case the peer will still be shown, but as »offline«. IMPORTANT: Locating a remote DOES NOT replace proper authentication. It is relatively easy to fake a fingerprint or even to have two peers with the same name. Always authenticate your peer properly via a sidechannel (mail, telephone, in person). »locate« is supposed to be only a help of discovering other nodes. Note that this operation might take quite a few seconds. Specifying »--timeout« can help, but currently it still might take longer than the given timeout.`, }, "status": { Usage: "Show what has changed in the current commit.", Flags: []cli.Flag{ cli.BoolFlag{ Name: "tree,t", Usage: "View the status as a tree listing.", }, }, Description: `This a shortcut for »brig diff HEAD CURR«. 
See the »diff« command for more information.`, }, "diff": { Usage: "Show what changed between two commits.", ArgsUsage: "[] [ [ []]]]", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "list,l", Usage: "Output the diff as simple list (like status does by default)", }, cli.BoolFlag{ Name: "offline,o", Usage: "Do no fetch operation before computing the diff.", }, cli.BoolFlag{ Name: "self,s", Usage: "Assume self as owner of both sides and compare only commits.", }, cli.BoolFlag{ Name: "missing,m", Usage: "Show missing files in diff output.", }, }, Description: `View what sync would do when being called on the specified points in history. Diff does not show what changed inside of the files, but shows how the files themselves changed compared to the remote. To describe this, brig knows seven different change types: - Added (+): The file was added on the remote side. - Removed (-): The file was removed on the remote side. - Missing (_): The file is missing on the remote side (e.g. we added it) - Moved (→): The file was moved to a new location. - Ignored (*): This file was ignored because we chose to due to our settings. - Mergeable (⇄): Both sides have changes, but they can be merged. - Conflict (⚡): Both sides have changes but they conflict. Before computing the diff, it will try to fetch the metadata from the peer, if necessary. If you do not want this behaviour, use the »--offline« flag. See »brig commit« for a general explanation of commits. 
EXAMPLES: $ brig diff # Show diff from our CURR to our HEAD $ brig diff alice # Show diff from our CURR to alice's last state $ brig diff alice some_tag # Show diff from our CURR to 'some_tag' of alice $ brig diff alice bob HEAD HEAD # Show diff between alice and bob's HEAD $ brig diff -s HEAD CURR # Show diff between HEAD and CURR of alice `, }, "tag": { Usage: "Tag a commit with a specific name", Complete: completeArgsUsage, ArgsUsage: " ", Flags: []cli.Flag{ cli.BoolFlag{ Name: "delete,d", Usage: "Delete the tag instead of creating it", }, }, Description: `Give a name to a commit, which is easier to remember than the hash. You can use the name you gave in all places where brig requires you to specify a commit. There are three special tags pre-defined for you: - CURR: A reference to the staging commit. - HEAD: The last fully completed commit. - INIT: The very first commit in the chain. Tags are case insensitive. That means that »HEAD« and »head« mean the same. If you want to specify a commit by its index, you can use the special syntax »commit[$idx]« where »$idx« can be a zero-indexed number. The first commit has the index of zero. If you want to access the previous commit, you can also use the special syntax »$rev^« where »$rev« is any revision (either a commit hash, a tag name or anything else). The circumflex can be used more than once to go back further. EXAMPLES: $ brig tag SEfXUAH6AR my-tag-name # Name the commit SEfXUAH6AR 'my-tag-name'. $ brig tag -d my-tag-name # Delete the tag name again. $ brig tag HEAD^ previous-head # Tag the commit before the current HEAD with "previous-head". $ brig tag 'commit[1]' second # Tag the commit directly after init with "second". `, }, "log": { Usage: "Show all commits in a certain range", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.StringFlag{ Name: "format,f", Usage: "Format the output according to a template", }, }, Description: `Show a list of commits from a start (--from) up to and end (--to). 
If omitted »--from INIT --to CURR« will be assumed. The output will show one commit per line, each including the (short) hash of the commit, the date it was committed and the (optional) commit message. `, }, "fetch": { Usage: "Fetch all metadata from another peer.", ArgsUsage: "", Complete: completeArgsUsage, Description: `This is a plumbing commands and most likely is only needed for debugging. Get all the latest metadata of a certain peer. This does not download any actual data, but only the metadata of it. You have to be authenticated to the user to get his data. Fetch will be done automatically by »sync« and »diff« and is usually only helpful when doing it together with »become«.`, }, "sync": { Usage: "Sync with another peer", ArgsUsage: "", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "no-fetch,n", Usage: "Do not do a fetch before syncing.", }, cli.BoolFlag{ Name: "quiet,q", Usage: "Do not print what changed.", }, }, Description: `Sync and merge all metadata of another peer with our metadata. After this operation you might see new files in your folder. Those files were not downloaded yet and will be only on the first access. It is recommended that your first check what will be synced with »brig diff«. When passing no arguments, 'sync' will synchronize with all online remotes. When passing a single argument, it will be used as the remote name to sync with. The symbols in the output prefixing every path have the following meaning: + The file is only present on the remote side. - The file was removed on the remote side. → The file was moved to a new location. * This file was ignored because we chose to, due to our settings. ⇄ Both sides have changes, but they are compatible and can be merged. ⚡ Both sides have changes, but they are incompatible and result in conflicts. _ The file is missing on the remote side. See also »brig help diff« for some more details. Files from other remotes are not pinned automatically. 
`, }, "push": { Usage: "Ask a remote to sync with us.", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "dry-run,d", Usage: "Do not the actual push, but check if we may push.", }, }, Description: ``, }, "commit": { Usage: "Create a new commit", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.StringFlag{ Name: "message,m", Value: "", Usage: "Provide a meaningful commit message.", }, }, Description: `Create a new commit. The message (»--message«) is optional. If you do not pass it, a message will be generated which contains the current time. The commit history can be viewed by »brig log«. Think of commits as snapshots that can be created explicitly by you or even automated in an interval. It is important to remember that »commit« will only create a snapshot of the metadata. It is not guaranteed that you can still access the actual data of very old versions (See »brig help ) You normally do not need to issue this command manually, since there is a loop inside of brig that will auto-commit every 5 minute (default; see the "fs.autocommit.interval" config key). Sync operations will also create commits implicitly and every change from the gateway side will also result in a commit. `, }, "reset": { Usage: "Reset a file or the whole commit to an old state.", ArgsUsage: " []", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "force,f", Usage: "Reset even when there are changes in the staging area", }, }, Description: `Reset a file to an old state by specifying the commit it should be reverted to. If you do not pass »« the whole commit will be filled with the contents of the old commit. If you reset to an old commit and you have uncommitted changes, brig will warn you about that and refuse the »reset« unless you pass »--force«. Note for git users: It is not possible to go back in history and branch out from there. »reset« simply overwrites the staging commit (CURR) with an old state, thus keeping all the previous history. 
You can always jump back to the previous state. In other words: the reset operation of brig is not destructive. If you notice that you do not like the state you've reseted to, »brig reset head« will bring you back to the last known good state. `, }, "become": { Usage: "View the data of another user", ArgsUsage: "", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "self,s", Usage: "Become self (i.e. the owner of the repository)", }, }, Description: `View the data of another user. This is a plumbing command and meant for debugging. You can temporarily explore the metadata of another user, by »becoming« them. Once you became a certain user (which needs to be in your remote list and on which you called »brig fetch« before), you can look around in the data like in yours. You can also modify files, but keep in mind that they will be reset on he next fetch. `, }, "history": { Usage: "Show the history of a file or directory", ArgsUsage: "", Complete: completeBrigPath(true, true), Flags: []cli.Flag{ cli.BoolFlag{ Name: "empty,e", Usage: "Also show commits where nothing happens", }, }, Description: `Show a list of all changes that were made to this path. Not every change you ever made is recorded, but the change between each commit. In other words: If you modify a file, delete it and re-add all in one commit, then brig will see it only as one modification. Every line shows the type of change and what commits were involved. If it's a move, it will also show from and to where the path was moved. Possible types of changes are: - added: The file was added in this commit. - moved: The file was modified in this commit. - removed: The file was removed in this commit. - modified: The file was modified in this commit. Furthermore, the following combination are possible: - moved & modified: The file was moved and modified. - add & modified: The file was removed before and now re-added with different content. - moved & removed: The file was moved to another location. 
`, }, "stage": { Usage: "Add a local file to the storage.", ArgsUsage: "( []|--stdin )", Complete: completeLocalPath, Flags: []cli.Flag{ cli.BoolFlag{ Name: "stdin,i", Usage: "Read data from stdin.", }, cli.BoolFlag{ Name: "no-dereference,P", Usage: "Never follow symbolic links.", }, cli.BoolFlag{ Name: "continue-on-error,c", Usage: "Continue staging even if some parts fail to stage.", }, }, Description: `Read a local file (given by »local-path«) and try to read it. This is the conceptual equivalent of »git add«. The stream will be encrypted and possibly compressed before saving it to IPFS. If you omit »path«, the file will be added under the root directory, with the basename of »local-path«. You can change this by specifying where to save the local file by additionally passing »path«. Additionally you can read the file from standard input if you pass »--stdin«. In this case you pass only one path: The path where the stream is stored. EXAMPLES: $ brig stage file.png # gets added as /file.png $ brig stage file.png /photos/me.png # gets added as /photos/me.png $ cat file.png | brig --stdin /file.png # gets added as /file.png`, }, "touch": { Usage: "Create an empty file under the specified path", ArgsUsage: "", Complete: completeBrigPath(true, false), Description: `Convenience command for adding empty files. If the file or directory already exists, the modification time is updated to the current timestamp (like the original touch(1) does). `, }, "cat": { Usage: "Output the content of a file to standard output", ArgsUsage: "[]", Complete: completeBrigPath(true, false), Flags: []cli.Flag{ cli.BoolFlag{ Name: "offline,o", Usage: "Only output the file if it is cached locally.", }, cli.BoolFlag{ Name: "stream,s", Usage: "Use experimental streaming implementation.", }, }, Description: `Decrypt and decompress the stream from IPFS and write it to standard output. When specifying a directory instead of a file, the directory content will be output as tar archive. 
This is useful when saving a whole directory tree to disk (see also EXAMPLES). When no path is specified, »/« is assumed and all contents are outputted as tar. EXAMPLES: # Output a single file: $ brig cat photo.png # Create a tar from root and unpack it to the current directory. $ brig cat | tar xfv - # Create .tar.gz out of of the /photos directory. $ brig cat photos | gzip -f > photos.tar.gz `, }, "show": { Usage: "Show metadata of a file or directory or commit", ArgsUsage: "", Complete: completeBrigPath(true, true), Flags: []cli.Flag{ cli.StringFlag{ Name: "format,f", Usage: "Format the output according to a template", }, }, Description: `Show all metadata attributes known for a file or directory. Path: Absolute path of the file inside of the storage. User: User which modified the file last. Type: »file« or »directory«. Size: Exact content size in bytes. Hash: Hash of the node. Inode: Internal inode. Also shown as inode in FUSE. IsPinned: »yes« if the file is pinned, »no« else. IsExplicit: »yes« if the file is pinned explicitly, »no« elsewise. IsRaw: »no« if the file was encoded by brig (using encryption or compression). ModTime: Timestamp of last modification. ContentHash: Content hash of the file before encryption. BackendHash: Hash of the node in ipfs (ipfs cat ) TreeHash: Hash that is unique to this node. `, }, "rm": { Usage: "Remove a file or directory", ArgsUsage: "", Complete: completeBrigPath(true, true), Description: `Remove a file or directory. In contrast to the usual rm(1) there is no --recursive switch. Directories are deleted recursively by default. Even after deleting files, you will be able to access its history by using the »brig history« command and bring them back via »brig reset«. If you want to restore a deleted entry you are able to with the »brig reset« command. 
`, }, "ls": { Usage: "List files and directories.", ArgsUsage: "", Complete: completeBrigPath(false, true), Flags: []cli.Flag{ cli.IntFlag{ Name: "depth,d", Usage: "Max depth to traverse", Value: 1, }, cli.BoolFlag{ Name: "recursive,R", Usage: "Allow recursive traverse", }, cli.StringFlag{ Name: "format,f", Usage: "Format the output according to a template", }, }, Description: `List files an directories starting with »path«. If no »« is given, the root directory is assumed. Every line of »ls« shows a human readable size of each entry, the last modified time stamp, the user that last modified the entry (if there's more than one) and if the entry if pinned. `, }, "tree": { Usage: "List files and directories in a tree", ArgsUsage: "", Complete: completeBrigPath(false, true), Flags: []cli.Flag{ cli.IntFlag{ Name: "depth, d", Usage: "Max depth to traverse", Value: -1, }, }, Description: `Show entries in a tree(1)-like fashion. `, }, "mkdir": { Usage: "Create an empty directory", ArgsUsage: "", Complete: completeBrigPath(false, true), Flags: []cli.Flag{ cli.BoolFlag{ Name: "parents, p", Usage: "Create parent directories as needed", }, }, Description: `Create an empty directory at the specified »path«. By default, parent directories are not created. You can use »--parents« to enable this behaviour. `, }, "mv": { Usage: "Move a file or directory from »src« to »dst«", ArgsUsage: " ", Complete: completeBrigPath(true, true), Description: `Move a file or directory from »src« to »dst.« If »dst« already exists and is a file, it gets overwritten with »src«. If »dst« already exists and is a directory, »basename(src)« is created inside, (if the file inside does not exist yet) It's not allowed to move a directory into itself. This includes moving the root directory. `, }, "cp": { Usage: "Copy a file or directory from »src« to »dst«", ArgsUsage: " ", Complete: completeBrigPath(true, true), Description: `Copy a file or directory from »src« to »dst«. 
The semantics are the same as for »brig mv«, except that »cp« does not remove »src«. `, }, "edit": { Usage: "Edit a file in place with $EDITOR", ArgsUsage: "", Complete: completeBrigPath(true, false), Description: `Convenience command to read the file at »path« and display it in $EDITOR. Once $EDITOR quits, the file is saved back. If $EDITOR is not set, nano is assumed (I cried a little). If nano is not installed this command will fail and you neet to set $EDITOR> `, }, "daemon": { Usage: "Daemon management commands.", Complete: completeSubcommands, Description: `Commands to manually start or stop the daemon. The daemon process is normally started whenever you issue the first command (like »brig init« or later on a »brig ls«). This command will start it for you in the background. Therefore it is seldom useful to use any of those commands - unless you are debugging brig. `, }, "daemon.launch": { Usage: "Start the daemon process in the foreground", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "trace,t", Usage: "Create tracing output suitable for `go tool trace`", }, cli.BoolFlag{ Name: "s,log-to-stdout", Usage: "Log all messages to stdout instead of syslog", }, }, Description: `Start the dameon process in the foreground. EXAMPLES: $ brig daemon quit # Shut down any previous daemon. $ brig daemon launch -s # Start in foreground and log to stdout. `, }, "daemon.quit": { Usage: "Quit a running daemon process", Complete: completeArgsUsage, Description: `Quit a running daemon process. If no daemon process is running, it will tell you. `, }, "daemon.ping": { Usage: "Check if the daemon is running and reachable", Complete: completeArgsUsage, Description: `Send up to 100 ping packages to the daemon and also print the roundtrip time for each. 
`, Flags: []cli.Flag{ cli.IntFlag{ Name: "c,count", Usage: "How many times to ping the daemon", Value: 10, }, }, }, "config": { Usage: "View and modify config options.", Complete: completeSubcommands, Description: `Commands for getting, setting and listing configuration values. Each config key is a dotted path (»a.b.c«), associated with one key. These configuration values can help you to fine tune the behaviour of brig. In contrast to many other programs the config is applied immediately after setting it (where possible). Furthermore, each config key will describe itself and tell you if it needs a restart. For more details on each config value, type 'brig config ls'. Without further arguments »brig cfg« is a shortcut for »brig cfg ls«. `, }, "config.get": { Usage: "Get a specific config key", Complete: completeArgsUsage, ArgsUsage: "", Description: `Show the current value of a key`, }, "config.doc": { Usage: "Show the docs for this config key", Complete: completeArgsUsage, ArgsUsage: "", Description: `For each config key a few metadata entries are assigned. This includes a string describing the usage, the default value and an indicator if the service needs a restart when setting the value. `, }, "config.set": { Usage: "Set a specific config key to a new value.", Complete: completeArgsUsage, ArgsUsage: " ", Description: `Set the value at »key« to »value«. Some config values have associated validators that will tell you if a value is not allowed. Also you will be warned if the config key requires a restart. 
`, }, "config.list": { Usage: "List all existing config keys", Complete: completeArgsUsage, Description: `List all existing config keys.`, }, "fstab": { Usage: "Manage mounts that will be mounted on startup of the daemon.", Description: "This is the conceptual equivalent of the normal fstab(5).", }, "fstab.add": { Usage: "Add a new mount entry to fstab", Flags: []cli.Flag{ cli.BoolFlag{ Name: "r,readonly", Usage: "Create the filesystem as readonly.", }, cli.BoolFlag{ Name: "offline,o", Usage: "Error out on files that are only remotely available.", }, cli.StringFlag{ Name: "x,root", Usage: "Specify a root directory other than »/«.", }, }, }, "fstab.remove": { Usage: "Remove a mount from fstab.", }, "fstab.list": { Usage: "List all items in the filesystem table.", Flags: []cli.Flag{ cli.StringFlag{ Name: "format,f", Usage: "Format the output according to a template.", }, }, }, "fstab.apply": { Usage: "Sync the reality with the mounts in fstab.", Description: "Mounts and unmounts directories as necessary.", Flags: []cli.Flag{ cli.BoolFlag{ Name: "u,unmount", Usage: "Unmount all mounts in the filesystem table.", }, }, }, "mount": { Usage: "Mount the contents of brig as FUSE filesystem to »mount_path«.", ArgsUsage: "", Complete: completeArgsUsage, Description: `Show the all files and directories inside a normal directory. This directory is powered by a userspace filesystem which allows you to read and edit data like you are use to from from normal files. It is compatible to existing tools and allows brig to interact with filebrowsers, video players and other desktop tools. It is possible to have more than one mount. They will show the same content. CAVEATS Editing large files will currently eat big amounts of memory, proportional to the size of the file. We advise you to use normal commands like »brig cat« and »brig stage« until this is fixed. At this time, the filesystem also not very robust to files that timeout or error out otherwise. 
Consider this feature to be experimental while this has not been worked upon. `, Flags: []cli.Flag{ cli.BoolFlag{ Name: "no-mkdir", Usage: "Do not create the mount directory if it does not exist", }, cli.BoolFlag{ Name: "r,readonly", Usage: "Create the filesystem as readonly", }, cli.BoolFlag{ Name: "offline,o", Usage: "Error out on files that are only remotely available.", }, cli.StringFlag{ Name: "x,root", Usage: "Create the filesystem as readonly", }, }, }, "unmount": { Usage: "Unmount a previously mounted directory", ArgsUsage: "", Complete: completeArgsUsage, Description: `Unmount a previously mounted directory. All mounts get automatically unmounted once the daemon shuts down. In case the daemon crashed or failed to unmount, you can manually use this command to reclaim the mount point: $ fusermount -u -z /path/to/mount `, }, "version": { Usage: "Show the version of brig and IPFS", Complete: completeArgsUsage, Description: `Show the version of brig and IPFS. This includes the client and server version of brig. These two values should be ideally exactly the same to avoid problems. Apart from that, the version of IPFS is shown here. If available, also the git rev is included. This is useful to get the exact state of the software in case of problems. Additionally the build time of the binary is shown. Please include this information when reporting a bug. `, }, "gc": { Usage: "Trigger the garbage collector", Complete: completeArgsUsage, Flags: []cli.Flag{ cli.BoolFlag{ Name: "aggressive,a", Usage: "Also run the garbage collector on all file systems immediately", }, }, Description: `Manually trigger the garbage collector. Strictly speaking there are two garbage collectors in the system. The garbage collector of IPFS cleans up all unpinned files from local storage. This still means that the objects referenced there can be retrieved from other network nodes, but not locally anymore. This might save alot of space. 
The other garbage collector is not very important to the user and cleans up unused references inside of the metadata store. It is only run if you pass »--aggressive«. `, }, "docs": { Usage: "Open the online documentation in your default web browser.", }, "trash": { Usage: "Control the trash bin contents.", Description: ` The trash bin is a convenience interface to list and restore deleted files. It will list all files that were deleted and were not overwritten by other files. `, }, "trash.list": { Usage: "List all items in the trash bin.", }, "trash.undelete": { Usage: "Restore a path from the trashbin.", }, "gateway": { Usage: "Control the HTTP/S gateway service.", Description: `The gateway serves a UI and download endpoints over a browser. This enables users that do not use brig directly to still browse, edit and download files For having access to the gateway, users need to be created. By default no users are created. Create an »admin« user (password is also »admin«) with this command: $ brig gw user add admin admin --role-admin Most of the gateway is configured exclusively via config variables. Please refer to the individual config keys for more information (they all start with »gateway.«). The »brig gw status« command will also give you a nice, readable overview of what the current state is and how you can improve it. `, }, "gateway.start": { Usage: "Start the gateway.", Description: ` It is recommended to check the state with a »brig gw status« afterwards. This will give you important hints if something went wrong or needs attention. 
`, }, "gateway.stop": { Usage: "Stop the gateway.", }, "gateway.status": { Usage: "Print a diagnostic report on the status of the gateway.", }, "gateway.url": { Usage: "Helper to print the URL to a named file or directory.", }, "gateway.user": { Usage: "Control the user account that can access the HTTP gateway.", }, "gateway.user.add": { Usage: "Add a new gateway user.", ArgsUsage: " [ ]", Flags: []cli.Flag{ cli.BoolFlag{ Name: "role-admin,a", Usage: "Add this user as admin (short for »-r 'fs.view,fs.edit,fs.download,remotes.view,remotes.edit'«)", }, cli.BoolFlag{ Name: "role-editor,b", Usage: "Add this user as collaborator (short for »-r 'fs.view,fs.edit,fs.download,remotes.view'«)", }, cli.BoolFlag{ Name: "role-collaborator,c", Usage: "Add this user as collaborator (short for »-r 'fs.view,fs.edit,fs.download'«)", }, cli.BoolFlag{ Name: "role-viewer,d", Usage: "Add this user as viewer (short for »-r 'fs.view,fs.download'«)", }, cli.BoolFlag{ Name: "role-link-only,e", Usage: "Add this user as linker (short for »-r 'fs.download'«)", }, cli.StringFlag{ Name: "rights,r", Usage: "Comma separated list of rights of this user.", }, }, Description: ` The rights are as follows: fs.view: View and list all files. fs.edit: Edit and create new files. fs.download: Download file content. remotes.view: View the remotes tab. remotes.edit: Edit the remotes tab. If the folder list is empty, this user can access all files. If it is non-empty, the user can only access the files including and below all folders. `, }, "gateway.user.remove": { Usage: "Remove a gateway user by its name.", }, "gateway.user.list": { Usage: "List all gateway users.", Flags: []cli.Flag{ cli.StringFlag{ Name: "format,f", Usage: "Format the output by a template.", }, }, Description: ` List all gateway users. The keys accepted by »--format« are: - Name: Name of the user. - PasswordHash: Hashed password. - Salt: Salt of the password. - Folders: A list of folders this users may access (might be empty). 
- Rights: A list of rights this users has (might be empty). `, }, "pack-repo": { ArgsUsage: "", Description: ` Pack a repo into an encrypted tar archive. This is mainly useful to lock the repository after using it. The encryption key is derived from the password that you either... * ...enter on stdin. * ...specify with --password-command. * ...specify with --password-file. If you move a brig repository between computers or if you use brig in an untrusted environment, then this command is for you. By default, the archive is written next to the repository as »$BRIG_PATH.repopack«. If --no-remove is specified the repository is not removed upon successful completion. EXAMPLES: # Pack a repository, read password from 'pass' and write to usb stick. # Also removes the original repository! brig --repo /tmp/repo pack-repo \ /mnt/usb/brig.repopack \ --password-command "pass my/password/path" `, Usage: "Create an encrypted archive of the brig repo.", Flags: []cli.Flag{ cli.StringFlag{ Name: "password-command,p", Usage: "Execute this command to get the password from its stdout.", }, cli.StringFlag{ Name: "password-file,P", Usage: "Read this file to get the password.", }, cli.BoolFlag{ Name: "no-remove,n", Usage: "Do not remove the repository after successfully packing", }, }, }, "unpack-repo": { ArgsUsage: "", Description: ` The unpack-repo is the inverse of the pack-repo command. EXAMPLES: # Unpack a repository from an usb stick and write to a location # of your choice. Password is read from 'pass'. The archive is # removed upon successful completion. # # Specifying --repo is not necessary, but can be used to specify # where the repository should be unpacked to. 
brig --repo /tmp/repo unpack-repo \ /mnt/usb/brig.repopack \ --password-command "pass my/password/path" `, Usage: "Unpack an encrypted archive of a brig repo.", Flags: []cli.Flag{ cli.StringFlag{ Name: "password-command,p", Usage: "Execute this command to get the password from its stdout.", }, cli.StringFlag{ Name: "password-file,P", Usage: "Read this file to get the password.", }, cli.BoolFlag{ Name: "no-remove,n", Usage: "Do not remove the archive after successfully unpacking", }, }, }, "debug": { Usage: "Various debbugging utilities. Use with care.", }, "debug.decode-stream": { Usage: "Decode a brig stream", Flags: []cli.Flag{ cli.StringFlag{ Name: "key", Usage: "What key to use for encryption (base58 encoded)", Value: "4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw", }, cli.BoolFlag{ Name: "raw", Usage: "Specify if this is a raw stream", }, }, }, "debug.encode-stream": { Usage: "Encode a brig stream", Flags: []cli.Flag{ cli.StringFlag{ Name: "key", Usage: "What key to use for encryption (base58 encoded)", Value: "4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw", }, cli.StringFlag{ Name: "encryption", Usage: "What encryption type to use", Value: string(hints.EncryptionAES256GCM), }, cli.StringFlag{ Name: "compression,c", Usage: "What compression algorithm to use", Value: string(hints.CompressionGuess), }, }, }, "debug.ten-source": { Usage: "Produce a repeating stream of 0 to 9 numbers of a given size", Flags: []cli.Flag{ cli.StringFlag{ Name: "size,s", Usage: "Size of the stream in bytes (can be followed by a multipler K, M, or G)", Value: "256M", }, }, }, "debug.ten-sink": { Usage: "Check if the stream received over stdin is what ten-source produced", Flags: []cli.Flag{ cli.StringFlag{ Name: "size,s", Usage: "Expected size of the stream in bytes (can be followed by a multipler K, M, or G)", Value: "256M", }, }, }, "debug.fusemock": { Usage: "Start a fuse mock server (use only if you know what you do)", Description: "Hint: You don't.", Flags: []cli.Flag{ // NOTE: 
Changing things need to be also changed in fuse/fusetest. // We rely on the argument names there. cli.StringFlag{ Name: "mount-path,m", Usage: "Path to fuse mount", Value: "", Required: true, }, cli.StringFlag{ Name: "catfs-path,c", Usage: "Where to store metadata", Value: "", Required: true, }, cli.StringFlag{ Name: "url", Usage: "What compression hint to specify ('*' for all)", Value: "unix:/tmp/fuse-mock.socket", }, cli.StringFlag{ Name: "ipfs-path-or-multiaddr,i", Usage: "Path to IPFS, if you want to use it. Empty for memory only.", Value: "", }, cli.BoolFlag{ Name: "mount-offline,o", Usage: "Mount offline", }, cli.BoolFlag{ Name: "mount-ro,r", Usage: "Mount read-only", }, }, }, "debug.iobench": { Usage: "Benchmark I/O on your system", Description: ` This is a benchmark utility for the streaming system at the heart of brig. If invoked with no argument, it will attempt to do all possible benchmarks. All benchmarks are run in /tmp under the assumption that a tmpfs is located there. If so, you might want to double check that you have enough memory. How much is enough depends on many factors, just try it out. :) Individual benchmarks can be selected using the '--bench' parameter. It can be specified several times. Check 'brig debug iobench ls' for a full list of possible benchmarks. Each benchmark is run '--sample' times and uses an artificial input with '--size' bytes. The input type can be selected by appending one of the following names to the benchmark names separated by a colon (':'): * ten: easy to compress data. * mixed: easy to compress data that is mixed with streaks of hard to compress data. The ratio of easy to hard is roughly 50%. * random: Pseudo random data. Terrible to compress. By default each benchmark is run several times for each possible hint combination. If you do not want this, specify the desired algorithm using the '--compression' and '--encryption' parameters. If you want to parse the output of the benchmark, you can use '--json'. 
`, Flags: []cli.Flag{ cli.BoolFlag{ Name: "json,j", Usage: "Output the benchmark results as parsable JSON", }, cli.StringSliceFlag{ Name: "bench,b", Usage: "Which benchmarks to be run (can be given multiple times)", }, cli.StringFlag{ Name: "size,s", Usage: "How big the testdata should be", Value: "256M", }, cli.IntFlag{ Name: "samples,S", Usage: "How many time to run each benchmark", Value: 3, }, cli.StringFlag{ Name: "compression,c", Usage: "What compression hint to specify ('*' for all)", Value: "*", }, cli.StringFlag{ Name: "encryption,e", Usage: "What encryption hint to specify ('*' for all)", Value: "*", }, }, }, "debug.iobench.list": { Usage: "Just list all benchmark names", }, "debug.pprof-port": { Usage: "Print the pprof port of the daemon.", Description: ` This is useful if there is a performance issue (high cpu consumption in idle e.g.). See here for some examples of what you can do: https://golang.org/pkg/net/http/pprof EXAMPLES: # Show a graph with a cpu profile of the last 30s: go tool pprof -web "http://localhost:$(brig d p)/debug/pprof/profile?seconds=30" `, }, "hints": { Usage: "Manage hints for file or directories", Description: fmt.Sprintf(` Hints can be used to change the default behavior for brig. You can for example use it to change the default encryption algorithm for certain files. Hints are always associated to a path. If a hint is set to a path where a directory is located, then all files in it inherit this hint - except there is another hint somewhere lower in the hierarchy. Note that hints are only applied on the next file change. Files that have differing settings will not be affected by changing a hint. If you want an immediate effect you should use »brig hints set --recode «, or, if you want to do it a later point, »brig hints recode «. 
The available compression algorithms are: %s The available encryption algorithms are: %s EXAMPLES: $ brig mkdir /public $ echo "meow" | brig stage --stdin /public/cat-meme.png $ brig hints set /public --compression none --encryption none $ brig hints PATH ENCRYPTION COMPRESSION / aes256gcm guess /public none none # If a file could be streamed by »ipfs cat« alone, # then the »IsRaw« attribute is true. $ brig info --format '{{ .IsRaw }}' /public/cat-meme.png `, compressionHintsToBullets(), encryptionHintsToBullets()), }, "hints.set": { Usage: "Set a hint for a file or directory", Description: "See help of »brig hints«", ArgsUsage: "", Flags: []cli.Flag{ cli.BoolFlag{ Name: "recode,r", Usage: "Recode the stream immediately.", }, cli.StringFlag{ Name: "compression,c", Usage: "What compression algorithm to use for this hint", }, cli.StringFlag{ Name: "encryption,e", Usage: "What encryption algorithm to use for this hint", }, cli.BoolFlag{ Name: "force,f", Usage: "Also create hint if there is no such file or directory", }, }, }, "hints.list": { Usage: "List all existing hints.", Description: "See help of »brig hints«", }, "hints.remove": { ArgsUsage: "", Usage: "Remove an existing hint.", }, "hints.recode": { ArgsUsage: "[]", Usage: "Recode the streams in . 
If no path given all files are recoded.", }, "bug": { Usage: "Print a template for bug reports.", Flags: []cli.Flag{ cli.BoolFlag{ Name: "stdout,s", Usage: "Always print the report to stdout; do not open a browser", }, }, }, } func injectHelp(cmd *cli.Command, path string) { help, ok := helpTexts[path] if !ok { die(fmt.Sprintf("bug: no such help entry: %v", path)) } cmd.Usage = help.Usage cmd.ArgsUsage = help.ArgsUsage cmd.Description = help.Description cmd.BashComplete = help.Complete cmd.Flags = help.Flags } func translateHelp(cmds []cli.Command, prefix []string) { for idx := range cmds { path := append(append([]string{}, prefix...), cmds[idx].Name) injectHelp(&cmds[idx], strings.Join(path, ".")) translateHelp(cmds[idx].Subcommands, path) } } // TranslateHelp fills in the usage and description for each command. // This is separated from the command definition to make things more readable, // and separate logic from the (lengthy) documentation. func TranslateHelp(cmds []cli.Command) []cli.Command { translateHelp(cmds, nil) return cmds } // handleOpenHelp opens the online documentation a webbrowser. func handleOpenHelp(ctx *cli.Context) error { url := "https://brig.readthedocs.org" if err := webbrowser.Open(url); err != nil { fmt.Printf("could not open browser for you: %v\n", err) fmt.Printf("Please open this link yourself:\n\n\t%s\n", url) } else { fmt.Printf("A new tab was opened in your browser.\n") } return nil } ================================================ FILE: cmd/init.go ================================================ package cmd import ( e "github.com/pkg/errors" "github.com/sahib/brig/repo" "github.com/urfave/cli" ) // Init creates a new brig repository at `basePath` with specified options. func Init(ctx *cli.Context, ipfsPathOrMultiaddr string, opts repo.InitOptions) error { if err := repo.Init(opts); err != nil { return e.Wrapf(err, "repo-init") } // Remember the ipsf connection details, // so we can start it later. 
return repo.OverwriteConfigKey( opts.BaseFolder, "daemon.ipfs_path_or_url", ipfsPathOrMultiaddr, ) } ================================================ FILE: cmd/inode_other.go ================================================ // +build windows package cmd // indodeString convert file path a hardware dependent string // unfortunately on non unix platforms DeviceID and Inode are unavailable // so we return back the file path func inodeString(path string) (string, error) { return path, nil } ================================================ FILE: cmd/inode_unix.go ================================================ // +build !windows package cmd import ( "fmt" "golang.org/x/sys/unix" ) // indodeString convert file path a hardware dependent string in the form DeviceID/Inode func inodeString(path string) (string, error) { var stat unix.Stat_t if err := unix.Lstat(path, &stat); err != nil { return path, err } s := fmt.Sprintf("%d/%d", stat.Dev, stat.Ino) return s, nil } ================================================ FILE: cmd/iobench.go ================================================ package cmd import ( "encoding/json" "fmt" "os" "strings" "time" "github.com/dustin/go-humanize" "github.com/sahib/brig/bench" log "github.com/sirupsen/logrus" "github.com/urfave/cli" ) func allBenchmarks() []string { names := []string{} for _, benchName := range bench.BenchmarkNames() { for _, inputName := range bench.InputNames() { names = append(names, fmt.Sprintf("%s:%s", benchName, inputName)) } } return names } func printStats(s bench.Stats) { fmt.Println() fmt.Println("Time: ", s.Time.Format(time.RFC3339)) fmt.Println("CPU Name: ", s.CPUBrandName) fmt.Println("Logical Cores:", s.LogicalCores) fmt.Println("Has AESNI: ", yesify(s.HasAESNI)) fmt.Println() } type benchmarkRun struct { Stats bench.Stats `json:"stats"` Results []bench.Result `json:"results"` } func handleIOBench(ctx *cli.Context) error { run := benchmarkRun{ Stats: bench.FetchStats(), } benchmarks := ctx.StringSlice("bench") 
if len(benchmarks) == 0 { log.Infof("running all benchmarks...") benchmarks = allBenchmarks() } isJSON := ctx.Bool("json") if !isJSON { printStats(run.Stats) } inputSize, err := humanize.ParseBytes(ctx.String("size")) if err != nil { return err } samples := ctx.Int("samples") if samples <= 0 { return fmt.Errorf("samples must be at least 1") } log.SetLevel(log.WarnLevel) cfgs := []bench.Config{} for _, benchmark := range benchmarks { benchSplit := strings.SplitN(benchmark, ":", 2) benchInput := "ten" benchName := benchSplit[0] if len(benchSplit) >= 2 { benchInput = benchSplit[1] } cfgs = append(cfgs, bench.Config{ BenchName: benchName, InputName: benchInput, Size: inputSize, Samples: samples, Encryption: ctx.String("encryption"), Compression: ctx.String("compression"), }) } var baselineTiming time.Duration var lastSection string err = bench.Benchmark(cfgs, func(result bench.Result) { section := fmt.Sprintf( "%s:%s", result.Config.InputName, result.Config.BenchName, ) if section != lastSection { if !isJSON { drawHeading(section) } // First in list is always the none-none benchmark. 
baselineTiming = result.Took lastSection = section } if !isJSON { drawBench(result, baselineTiming, inputSize) } run.Results = append(run.Results, result) }) if err != nil { return err } if isJSON { enc := json.NewEncoder(os.Stdout) enc.SetIndent("", " ") enc.Encode(run) } return nil } func drawHeading(heading string) { fmt.Println() fmt.Println(heading) fmt.Println(strings.Repeat("=", len(heading))) fmt.Println() } func drawBench(result bench.Result, ref time.Duration, inputSize uint64) { fmt.Printf( "%-45s %9.2f MB/s %20s %8.2f%% %6d allocs %8.2f%% zipped\n", result.Name, result.Throughput, fmt.Sprintf( "%.2fMB/%v", float64(inputSize)/1000/1000, result.Took.Round(time.Millisecond), ), 100*float64(ref)/float64(result.Took), result.Allocs, result.CompressionRate*100, ) } func handleIOBenchList(ctx *cli.Context) error { for _, name := range allBenchmarks() { fmt.Println(name) } return nil } ================================================ FILE: cmd/log.go ================================================ package cmd import ( "fmt" "os" "strings" "github.com/urfave/cli" ) func logVerbose(ctx *cli.Context, format string, args ...interface{}) { if !ctx.GlobalBool("verbose") { return } if !strings.HasSuffix(format, "\n") { format = format + "\n" } fmt.Fprintf(os.Stderr, "-- "+format, args...) 
} ================================================ FILE: cmd/net_handlers.go ================================================ package cmd import ( "bytes" "fmt" "os" "strings" "time" "github.com/fatih/color" "github.com/sahib/brig/cmd/tabwriter" "github.com/sahib/brig/client" "github.com/urfave/cli" yml "gopkg.in/yaml.v2" ) func handleOffline(ctx *cli.Context, ctl *client.Client) error { return ctl.NetDisconnect() } func handleOnline(ctx *cli.Context, ctl *client.Client) error { return ctl.NetConnect() } func handleIsOnline(ctx *cli.Context, ctl *client.Client) error { self, err := ctl.Whoami() if err != nil { return err } if self.IsOnline { fmt.Println(color.GreenString("online")) } else { fmt.Println(color.RedString("offline")) } return nil } func handleRemoteList(ctx *cli.Context, ctl *client.Client) error { if ctx.Bool("offline") { return handleRemoteListOffline(ctx, ctl) } return handleRemoteListOnline(ctx, ctl) } func nFoldersToIcon(nFolders int) string { if nFolders == 0 { return color.GreenString("*") } return color.YellowString(fmt.Sprintf("%d", nFolders)) } func handleRemoteListOffline(ctx *cli.Context, ctl *client.Client) error { remotes, err := ctl.RemoteLs() if err != nil { return fmt.Errorf("remote ls: %v", err) } if ctx.IsSet("format") { tmpl, err := readFormatTemplate(ctx) if err != nil { return err } for _, remote := range remotes { if err := tmpl.Execute(os.Stdout, remote); err != nil { return err } } return nil } if len(remotes) == 0 { fmt.Println("No remotes yet. 
Use `brig remote add »user« »fingerprint«` to add some.") return nil } tabW := tabwriter.NewWriter( os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape, ) fmt.Fprintln(tabW, "NAME\tFINGERPRINT\tAUTO-UPDATE\tACCEPT PUSH\tCONFLICT STRATEGY\tFOLDERS\t") for _, remote := range remotes { cs := remote.ConflictStrategy if cs == "" { cs = "marker" } fmt.Fprintf( tabW, "%s\t%s\t%s\t%s\t%s\t%s\n", remote.Name, remote.Fingerprint, yesOrNo(remote.AutoUpdate), yesOrNo(remote.AcceptPush), cs, nFoldersToIcon(len(remote.Folders)), ) } return tabW.Flush() } func handleRemoteListOnline(ctx *cli.Context, ctl *client.Client) error { peers, err := ctl.RemoteOnlineList() if err != nil { return err } tabW := tabwriter.NewWriter( os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape, ) if len(peers) == 0 { fmt.Println("Remote list is empty. Nobody there to ping.") return nil } if !ctx.IsSet("format") { fmt.Fprintln(tabW, "NAME\tFINGERPRINT\tROUNDTRIP\tONLINE\tAUTHENTICATED\tLASTSEEN\tAUTO-UPDATE\tACCEPT PUSH\tCONFLICT STRATEGY\tFOLDERS\t") } tmpl, err := readFormatTemplate(ctx) if err != nil { return err } for _, status := range peers { if tmpl != nil { rmt := client.Remote{ Fingerprint: status.Remote.Fingerprint, Name: status.Remote.Name, } if err := tmpl.Execute(os.Stdout, rmt); err != nil { return err } continue } roundtrip := status.Roundtrip.String() isOnline := color.GreenString("✔ ") if status.Err != nil { isOnline = color.RedString("✘ " + status.Err.Error()) roundtrip = "∞" } authenticated := color.RedString("✘") if status.Authenticated { authenticated = color.GreenString("✔") } shortFp := "" splitFp := strings.SplitN(status.Remote.Fingerprint, ":", 2) if len(splitFp) > 0 { shortAddr := splitFp[0] if len(shortAddr) > 12 { shortAddr = shortAddr[:12] } shortFp += shortAddr } if len(splitFp) > 1 { shortPubKeyID := splitFp[1] if len(shortPubKeyID) > 12 { shortPubKeyID = shortPubKeyID[:12] } shortFp += ":" shortFp += shortPubKeyID } cs := status.Remote.ConflictStrategy if cs == "" { cs = "marker" 
} fmt.Fprintf( tabW, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t\n", status.Remote.Name, shortFp, roundtrip, isOnline, authenticated, status.LastSeen.Format(time.UnixDate), yesOrNo(status.Remote.AutoUpdate), yesOrNo(status.Remote.AcceptPush), cs, nFoldersToIcon(len(status.Remote.Folders)), ) } return tabW.Flush() } const ( remoteHelpText = `# No remotes yet. Uncomment the next lines for an example: # - Name: alice@wonderland.com # Fingerprint: QmVA5j2JHPkDTHgZ[...]:SEfXUDeJA1toVnP[...] ` ) func remoteListToYml(remotes []client.Remote) ([]byte, error) { if len(remotes) == 0 { // Provide a helpful description, instead of an empty list. return []byte(remoteHelpText), nil } return yml.Marshal(remotes) } func ymlToRemoteList(data []byte) ([]client.Remote, error) { remotes := []client.Remote{} if err := yml.Unmarshal(data, &remotes); err != nil { return nil, err } return remotes, nil } func handleRemoteAdd(ctx *cli.Context, ctl *client.Client) error { remote := client.Remote{ Name: ctx.Args().Get(0), Fingerprint: ctx.Args().Get(1), AutoUpdate: ctx.Bool("auto-update"), ConflictStrategy: ctx.String("conflict-strategy"), AcceptPush: ctx.Bool("accept-push"), } for _, folder := range ctx.StringSlice("folder") { isReadOnly := false if strings.HasPrefix(folder, "-") { isReadOnly = true folder = folder[1:] } remote.Folders = append(remote.Folders, client.RemoteFolder{ Folder: folder, ReadOnly: isReadOnly, }) } if err := ctl.RemoteAddOrUpdate(remote); err != nil { return fmt.Errorf("remote add: %v", err) } return nil } func handleRemoteAutoUpdate(ctx *cli.Context, ctl *client.Client) error { enable := true switch ctx.Args().First() { case "enable", "e": enable = true case "disable", "d": enable = false default: return fmt.Errorf("please specify 'enable' or 'disable' as first argument") } for _, remoteName := range ctx.Args()[1:] { rmt, err := ctl.RemoteByName(remoteName) if err != nil { return err } rmt.AutoUpdate = enable if err := ctl.RemoteAddOrUpdate(rmt); err != nil { return 
fmt.Errorf("remote update: %v", err) } if !ctx.Bool("no-initial-sync") { if _, err := ctl.Sync(remoteName, true); err != nil { return err } } } return nil } func handleRemoteAcceptPush(ctx *cli.Context, ctl *client.Client) error { enable := true switch ctx.Args().First() { case "enable", "e": enable = true case "disable", "d": enable = false default: return fmt.Errorf("please specify 'enable' or 'disable' as first argument") } for _, remoteName := range ctx.Args()[1:] { rmt, err := ctl.RemoteByName(remoteName) if err != nil { return err } rmt.AcceptPush = enable if err := ctl.RemoteAddOrUpdate(rmt); err != nil { return fmt.Errorf("remote update: %v", err) } } return nil } func handleRemoteConflictStrategy(ctx *cli.Context, ctl *client.Client) error { for _, remoteName := range ctx.Args()[1:] { rmt, err := ctl.RemoteByName(remoteName) if err != nil { return err } rmt.ConflictStrategy = ctx.Args().First() if err := ctl.RemoteAddOrUpdate(rmt); err != nil { return fmt.Errorf("remote update: %v", err) } } return nil } func handleRemoteRemove(ctx *cli.Context, ctl *client.Client) error { name := ctx.Args().First() if err := ctl.RemoteRm(name); err != nil { return fmt.Errorf("remote rm: %v", err) } return nil } func handleRemoteClear(ctx *cli.Context, ctl *client.Client) error { return ctl.RemoteClear() } func handleRemoteEdit(ctx *cli.Context, ctl *client.Client) error { remotes, err := ctl.RemoteLs() if err != nil { return fmt.Errorf("remote ls: %v", err) } data, err := remoteListToYml(remotes) if err != nil { return fmt.Errorf("Failed to convert to yml: %v", err) } // Launch an editor on the received data: newData, err := edit(data, "yml") if err != nil { return fmt.Errorf("Failed to launch editor: %v", err) } // Save a few network roundtrips if nothing was changed: if bytes.Equal(data, newData) { fmt.Println("Nothing changed.") return nil } newRemotes, err := ymlToRemoteList(newData) if err != nil { return err } if err := ctl.RemoteSave(newRemotes); err != nil { 
return fmt.Errorf("Saving back remotes failed: %v", err) } return nil } func findRemoteForName(ctl *client.Client, name string) (*client.Remote, error) { remotes, err := ctl.RemoteLs() if err != nil { return nil, err } for _, remote := range remotes { if remote.Name == name { return &remote, nil } } return nil, fmt.Errorf("No such remote with this name: %s", name) } func handleRemoteFolderAdd(ctx *cli.Context, ctl *client.Client) error { return handleRemoteFolderAddOrReplace(ctx, ctl, false) } func handleRemoteFolderSet(ctx *cli.Context, ctl *client.Client) error { return handleRemoteFolderAddOrReplace(ctx, ctl, true) } func handleRemoteFolderAddOrReplace(ctx *cli.Context, ctl *client.Client, replace bool) error { remote, err := findRemoteForName(ctl, ctx.Args().First()) if err != nil { return err } isReadOnly := ctx.Bool("read-only") conflictStrategy := ctx.String("conflict-strategy") for _, folder := range ctx.Args().Tail() { if _, err := ctl.Stat(folder); err != nil { fmt.Printf("warning: »%s« does not seem to exist. 
That's fine though, just in case you made a typo.\n", folder) } folderFound := false for idx, remoteFolder := range remote.Folders { if remoteFolder.Folder == folder { if replace { if ctx.IsSet("read-only") { remote.Folders[idx].ReadOnly = true } if ctx.IsSet("read-write") { remote.Folders[idx].ReadOnly = false } if ctx.IsSet("conflict-strategy") { remote.Folders[idx].ConflictStrategy = conflictStrategy } } folderFound = true break } } if !replace && folderFound { return fmt.Errorf("»%s« exists already", folder) } if replace && !folderFound { return fmt.Errorf("»%s« does not exist", folder) } if !replace { remote.Folders = append(remote.Folders, client.RemoteFolder{ Folder: folder, ReadOnly: isReadOnly, ConflictStrategy: conflictStrategy, }) } } return ctl.RemoteUpdate(*remote) } func handleRemoteFolderRemove(ctx *cli.Context, ctl *client.Client) error { remote, err := findRemoteForName(ctl, ctx.Args().First()) if err != nil { return err } folderName := ctx.Args().Get(1) newFolders := []client.RemoteFolder{} for _, folder := range remote.Folders { if string(folder.Folder) == folderName { continue } newFolders = append(newFolders, folder) } remote.Folders = newFolders return ctl.RemoteUpdate(*remote) } func handleRemoteFolderClear(ctx *cli.Context, ctl *client.Client) error { remote, err := findRemoteForName(ctl, ctx.Args().First()) if err != nil { return err } remote.Folders = []client.RemoteFolder{} return ctl.RemoteUpdate(*remote) } func handleRemoteFolderList(ctx *cli.Context, ctl *client.Client) error { remote, err := findRemoteForName(ctl, ctx.Args().First()) if err != nil { return err } if len(remote.Folders) == 0 { fmt.Println("No folders specified. 
All folders are accessible.") return nil } tabW := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape) fmt.Fprintln(tabW, "FOLDER\tREAD ONLY\tCONFLICT STRATEGY\t") for _, folder := range remote.Folders { fmt.Fprintf( tabW, "%s\t%s\t%s\t\n", folder.Folder, yesOrNo(folder.ReadOnly), folder.ConflictStrategy, ) } return tabW.Flush() } func handleRemoteFolderListAll(ctx *cli.Context, ctl *client.Client) error { remotes, err := ctl.RemoteLs() if err != nil { return err } tabW := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape) fmt.Fprintln(tabW, "REMOTE\tFOLDER\tREAD ONLY\tCONFLICT STRATEGY\t") for _, remote := range remotes { for _, folder := range remote.Folders { fmt.Fprintf( tabW, "%s\t%s\t%s\t%s\t\n", remote.Name, folder.Folder, yesOrNo(folder.ReadOnly), folder.ConflictStrategy, ) } } return tabW.Flush() } func handleNetLocate(ctx *cli.Context, ctl *client.Client) error { who := ctx.Args().First() timeoutSec, err := parseDuration(ctx.String("timeout")) if err != nil { return err } // Show a progress ticker, since the query might take quite long: progressTicker := time.NewTicker(500 * time.Millisecond) go func() { nDots := 0 for range progressTicker.C { fmt.Printf("Scanning%-5s\r", strings.Repeat(".", nDots+1)) nDots = (nDots + 1) % 5 } }() candidateCh, err := ctl.NetLocate(who, ctx.String("mask"), timeoutSec) if err != nil { return fmt.Errorf("Failed to locate peers: %v", err) } somethingFound := false for candidate := range candidateCh { if !somethingFound { progressTicker.Stop() somethingFound = true // We can't use tabwriter here, sine it needs to update in realtime. // So we just fake it (badly) with printf-like formatting. 
fmt.Printf("%-30s %-10s %s\n", "NAME", "TYPE", "FINGERPRINT") } fingerprint := candidate.Fingerprint if fingerprint == "" { fingerprint = candidate.Addr + color.RedString(" (offline)") } else { fingerprint = color.GreenString(fingerprint) } fmt.Printf( "%-30s %-10s %s\n", candidate.Name, strings.Join(candidate.Mask, "|"), fingerprint, ) } if !somethingFound { fmt.Println("No results. Maybe nobodoy is online?") } return nil } func handleRemotePing(ctx *cli.Context, ctl *client.Client) error { who := ctx.Args().First() msg := fmt.Sprintf("ping to %s: ", color.MagentaString(who)) roundtrip, err := ctl.RemotePing(who) if err != nil { msg += color.RedString("✘") msg += fmt.Sprintf(" (%v)", err) } else { msg += color.GreenString("✔") msg += fmt.Sprintf(" (%3.5fs)", roundtrip) } fmt.Println(msg) return nil } func handlePin(ctx *cli.Context, ctl *client.Client) error { path := ctx.Args().First() return ctl.Pin(path) } func handleUnpin(ctx *cli.Context, ctl *client.Client) error { path := ctx.Args().First() return ctl.Unpin(path) } func handleRepin(ctx *cli.Context, ctl *client.Client) error { root := "/" if len(ctx.Args()) > 0 { root = ctx.Args().First() } return ctl.Repin(root) } func handleWhoami(ctx *cli.Context, ctl *client.Client) error { self, err := ctl.Whoami() if err != nil { return err } splitFingerprint := strings.SplitN(self.Fingerprint, ":", 2) printFingerprint := ctx.Bool("fingerprint") printName := ctx.Bool("name") printAddr := ctx.Bool("addr") printKey := ctx.Bool("key") userName := color.YellowString(self.CurrentUser) ownerName := color.GreenString(self.Owner) if !printFingerprint && !printName && !printAddr && !printKey { if self.CurrentUser != self.Owner { fmt.Printf( "# Note: viewing %s's data currently\n", color.YellowString(userName), ) } fmt.Printf("- Name: %s\n", color.YellowString(self.Owner)) fmt.Printf(" Fingerprint: %s\n", self.Fingerprint) return nil } if printName { fmt.Printf("%s", ownerName) } if printFingerprint { if printName { 
fmt.Printf(" ") } fmt.Printf("%s", self.Fingerprint) } if printAddr { if printName || printFingerprint { fmt.Printf(" ") } if len(splitFingerprint) > 0 { fmt.Printf("%s", splitFingerprint[0]) } } if printKey { if printName || printFingerprint || printAddr { fmt.Printf(" ") } if len(splitFingerprint) > 1 { fmt.Printf("%s", splitFingerprint[1]) } } fmt.Printf("\n") return nil } func handlePush(ctx *cli.Context, ctl *client.Client) error { remoteName := ctx.Args().First() return ctl.Push(remoteName, ctx.Bool("dry-run")) } ================================================ FILE: cmd/parser.go ================================================ package cmd import ( "fmt" "os" "runtime" "runtime/debug" "runtime/pprof" "strings" "github.com/fatih/color" isatty "github.com/mattn/go-isatty" "github.com/sahib/brig/defaults" formatter "github.com/sahib/brig/util/log" "github.com/sahib/brig/version" log "github.com/sirupsen/logrus" "github.com/urfave/cli" ) func init() { log.SetOutput(os.Stderr) log.SetLevel(log.DebugLevel) var useColor bool switch envVar := os.Getenv("BRIG_COLOR"); envVar { case "", "auto": useColor = isatty.IsTerminal(os.Stdout.Fd()) case "never": useColor = false case "always": useColor = true default: log.Warningf("Bad value for $BRIG_COLOR: %s, disabling color", envVar) useColor = false } // Only use fancy logging if we print to a terminal: log.SetFormatter(&formatter.FancyLogFormatter{ UseColors: useColor, }) } func formatGroup(category string) string { return "\n" + strings.ToUpper(category) + " COMMANDS" } func memProfile() { memPath := os.Getenv("BRIG_MEM_PROFILE") if memPath == "" { return } fd, err := os.Create(memPath) if err != nil { log.Fatal("could not create memory profile: ", err) } defer fd.Close() runtime.GC() if err := pprof.WriteHeapProfile(fd); err != nil { log.Fatal("could not write memory profile: ", err) } } func startCPUProfile() *os.File { cpuPath := os.Getenv("BRIG_CPU_PROFILE") if cpuPath == "" { return nil } fd, err := 
os.Create(cpuPath) if err != nil { log.Fatal("could not create memory profile: ", err) } runtime.GC() if err := pprof.StartCPUProfile(fd); err != nil { log.Fatal("could not write memory profile: ", err) } return fd } func stopCPUProfile(fd *os.File) { if os.Getenv("BRIG_CPU_PROFILE") == "" { return } defer fd.Close() pprof.StopCPUProfile() } //////////////////////////// // Commandline definition // //////////////////////////// // RunCmdline starts a brig commandline tool. func RunCmdline(args []string) int { profFd := startCPUProfile() defer stopCPUProfile(profFd) defer memProfile() debug.SetTraceback("all") app := cli.NewApp() app.Name = "brig" app.Usage = "Secure and decentralized file synchronization" app.EnableBashCompletion = true app.Version = fmt.Sprintf( "%s [buildtime: %s] (client version)", version.String(), version.BuildTime, ) app.CommandNotFound = commandNotFound app.Description = "brig can be used to securely store, version and synchronize files between many peers." // Set global options here: app.Before = func(ctx *cli.Context) error { if ctx.Bool("no-color") { color.NoColor = true } return nil } // Groups: repoGroup := formatGroup("repository") wdirGroup := formatGroup("working tree") vcscGroup := formatGroup("version control") netwGroup := formatGroup("network") // Autocomplete all commands, but not their aliases. app.BashComplete = func(ctx *cli.Context) { for _, cmd := range app.Commands { fmt.Println(cmd.Name) } } app.Flags = []cli.Flag{ cli.StringFlag{ Name: "url,u", Usage: "URL on where to reach the brig daemon.", EnvVar: "BRIG_URL", Value: defaults.DaemonDefaultURL(), }, cli.StringFlag{ Name: "repo", Usage: "Path to the repository. 
Only has effect for new daemons.", Value: ".", EnvVar: "BRIG_PATH", }, cli.BoolFlag{ Name: "verbose,V", Usage: "Show certain messages during client startup (helpful for debugging)", }, cli.BoolFlag{ Name: "nodaemon,n", Usage: "Don't start the daemon automatically.", }, cli.BoolFlag{ Name: "no-color", Usage: "Forbid the usage of colors.", }, } app.Commands = TranslateHelp([]cli.Command{ { Name: "init", Category: repoGroup, Action: handleInit, }, { Name: "whoami", Aliases: []string{"id"}, Category: netwGroup, Action: withDaemon(handleWhoami, true), }, { Name: "remote", Aliases: []string{"rmt", "r"}, Category: netwGroup, Subcommands: []cli.Command{ { Name: "add", Aliases: []string{"a", "set"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteAdd, true)), }, { Name: "remove", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleRemoteRemove, true)), }, { Name: "list", Aliases: []string{"ls"}, Action: withDaemon(handleRemoteList, true), }, { Name: "clear", Action: withDaemon(handleRemoteClear, true), }, { Name: "edit", Action: withDaemon(handleRemoteEdit, true), }, { Name: "ping", Action: withArgCheck(needAtLeast(1), withDaemon(handleRemotePing, true)), }, { Name: "auto-update", Aliases: []string{"au"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteAutoUpdate, true)), }, { Name: "accept-push", Aliases: []string{"ap"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteAcceptPush, true)), }, { Name: "conflict-strategy", Aliases: []string{"cs"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteConflictStrategy, true)), }, { Name: "folder", Aliases: []string{"fld", "f"}, Action: withDaemon(handleRemoteFolderListAll, true), Subcommands: []cli.Command{ { Name: "add", Aliases: []string{"a"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteFolderAdd, true)), }, { Name: "set", Aliases: []string{"s"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteFolderSet, true)), }, { Name: 
"remove", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(2), withDaemon(handleRemoteFolderRemove, true)), }, { Name: "clear", Action: withArgCheck(needAtLeast(1), withDaemon(handleRemoteFolderClear, true)), }, { Name: "list", Aliases: []string{"ls"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleRemoteFolderList, true)), }, }, }, }, }, { Name: "pin", Category: vcscGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handlePin, true)), Subcommands: []cli.Command{ { Name: "add", Action: withArgCheck(needAtLeast(1), withDaemon(handlePin, true)), }, { Name: "repin", Action: withDaemon(handleRepin, true), }, { Name: "remove", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleUnpin, true)), }, }, }, { Name: "net", Category: netwGroup, Subcommands: []cli.Command{ { Name: "offline", Action: withDaemon(handleOffline, true), }, { Name: "online", Action: withDaemon(handleOnline, true), }, { Name: "status", Action: withDaemon(handleIsOnline, true), }, { Name: "locate", Action: withArgCheck(needAtLeast(1), withDaemon(handleNetLocate, true)), }, }, }, { Name: "status", Aliases: []string{"st"}, Category: vcscGroup, Action: withDaemon(handleStatus, true), }, { Name: "diff", Category: vcscGroup, Action: withDaemon(handleDiff, true), }, { Name: "tag", Category: vcscGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleTag, true)), }, { Name: "log", Category: vcscGroup, Action: withDaemon(handleLog, true), }, { Name: "fetch", Category: vcscGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleFetch, true)), }, { Name: "sync", Category: vcscGroup, Action: withDaemon(handleSync, true), }, { Name: "push", Category: vcscGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handlePush, true)), }, { Name: "commit", Aliases: []string{"cmt"}, Category: vcscGroup, Action: withDaemon(handleCommit, true), }, { Name: "reset", Aliases: []string{"re"}, Category: vcscGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleReset, 
true)), }, { Name: "become", Aliases: []string{"be"}, Category: vcscGroup, Action: withDaemon(handleBecome, true), }, { Name: "history", Aliases: []string{"hst", "hist"}, Category: vcscGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleHistory, true)), }, { Name: "stage", Aliases: []string{"stg", "add", "a"}, Category: wdirGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleStage, true)), }, { Name: "touch", Aliases: []string{"t"}, Category: wdirGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleTouch, true)), }, { Name: "cat", Category: wdirGroup, Action: withDaemon(handleCat, true), }, { Name: "show", Aliases: []string{"s", "info"}, Category: wdirGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleShow, true)), }, { Name: "rm", Aliases: []string{"remove"}, Category: wdirGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleRm, true)), }, { Name: "ls", Category: wdirGroup, Action: withDaemon(handleList, true), }, { Name: "tree", Category: wdirGroup, Action: withDaemon(handleTree, true), }, { Name: "mkdir", Category: wdirGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleMkdir, true)), }, { Name: "mv", Category: wdirGroup, Action: withArgCheck(needAtLeast(2), withDaemon(handleMv, true)), }, { Name: "cp", Category: wdirGroup, Action: withArgCheck(needAtLeast(2), withDaemon(handleCp, true)), }, { Name: "edit", Category: wdirGroup, Action: withArgCheck(needAtLeast(1), withDaemon(handleEdit, true)), }, { Name: "daemon", Category: repoGroup, Subcommands: []cli.Command{ { Name: "launch", Action: handleDaemonLaunch, }, { Name: "quit", Action: withDaemon(handleDaemonQuit, false), }, { Name: "ping", Action: withDaemon(handleDaemonPing, false), }, }, }, { Name: "config", Aliases: []string{"cfg"}, Category: repoGroup, Action: withDaemon(handleConfigList, true), Subcommands: []cli.Command{ { Name: "list", Aliases: []string{"ls"}, Action: withDaemon(handleConfigList, true), }, { Name: "get", Action: 
withArgCheck(needAtLeast(1), withDaemon(handleConfigGet, true)), }, { Name: "doc", Action: withArgCheck(needAtLeast(1), withDaemon(handleConfigDoc, true)), }, { Name: "set", Action: withArgCheck(needAtLeast(2), withDaemon(handleConfigSet, true)), }, }, }, { Name: "fstab", Category: repoGroup, Action: withArgCheck(needAtLeast(0), withDaemon(handleFstabList, true)), Subcommands: []cli.Command{ { Name: "add", Action: withArgCheck(needAtLeast(2), withDaemon(handleFstabAdd, true)), }, { Name: "remove", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleFstabRemove, true)), }, { Name: "apply", Action: withDaemon(handleFstabApply, true), }, { Name: "list", Aliases: []string{"ls"}, Action: withDaemon(handleFstabList, true), }, }, }, { Name: "trash", Aliases: []string{"tr"}, Category: repoGroup, Action: withDaemon(handleTrashList, true), Subcommands: []cli.Command{ { Name: "list", Aliases: []string{"ls"}, Action: withDaemon(handleTrashList, true), }, { Name: "undelete", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleTrashRemove, true)), }, }, }, { Name: "hints", Aliases: []string{"hi"}, Category: repoGroup, Action: withDaemon(handleRepoHintsList, true), Subcommands: []cli.Command{ { Name: "list", Aliases: []string{"ls"}, Action: withDaemon(handleRepoHintsList, true), }, { Name: "set", Aliases: []string{"s"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleRepoHintsSet, true)), }, { Name: "remove", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleRepoHintsRemove, true)), }, { Name: "recode", Aliases: []string{"r"}, Action: withDaemon(handleRepoHintsRecode, true), }, }, }, { Name: "gateway", Aliases: []string{"gw"}, Category: repoGroup, Subcommands: []cli.Command{ { Name: "start", Action: withDaemon(handleGatewayStart, true), }, { Name: "stop", Action: withDaemon(handleGatewayStop, true), }, { Name: "status", Action: withDaemon(handleGatewayStatus, true), }, { Name: "url", Action: 
withArgCheck(needAtLeast(1), withDaemon(handleGatewayURL, true)), }, { Name: "user", Aliases: []string{"u"}, Subcommands: []cli.Command{ { Name: "add", Aliases: []string{"a"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleGatewayUserAdd, true)), }, { Name: "remove", Aliases: []string{"rm"}, Action: withArgCheck(needAtLeast(1), withDaemon(handleGatewayUserRemove, true)), }, { Name: "list", Aliases: []string{"ls"}, Action: withDaemon(handleGatewayUserList, true), }, }, }, }, }, { Name: "debug", Aliases: []string{"d"}, Category: repoGroup, Subcommands: []cli.Command{ { Name: "pprof-port", Aliases: []string{"p"}, Action: withDaemon(handleDebugPprofPort, true), }, { Name: "decode-stream", Aliases: []string{"ds"}, Action: handleDebugDecodeStream, }, { Name: "encode-stream", Aliases: []string{"es"}, Action: handleDebugEncodeStream, }, { Name: "ten-source", Aliases: []string{"tso"}, Action: handleDebugTenSource, }, { Name: "ten-sink", Aliases: []string{"tsi"}, Action: handleDebugTenSink, }, { Name: "iobench", Action: handleIOBench, Subcommands: []cli.Command{ { Name: "list", Aliases: []string{"ls"}, Action: handleIOBenchList, }, }, }, { Name: "fusemock", Action: handleDebugFuseMock, }, }, }, { Name: "mount", Category: repoGroup, Action: withDaemon(handleMount, true), }, { Name: "unmount", Category: repoGroup, Action: withDaemon(handleUnmount, true), }, { Name: "version", Category: repoGroup, Action: withDaemon(handleVersion, false), }, { Name: "gc", Category: repoGroup, Action: withDaemon(handleGc, true), }, { Name: "pack-repo", Category: repoGroup, Action: handleRepoPack, Aliases: []string{"pr"}, }, { Name: "unpack-repo", Category: repoGroup, Action: withArgCheck(needAtLeast(1), handleRepoUnpack), Aliases: []string{"ur"}, }, { Name: "docs", Action: handleOpenHelp, Hidden: true, }, { Name: "bug", Action: handleBugReport, }, }) exitCode := Success if err := app.Run(args); err != nil { log.Error(prettyPrintError(err)) cerr, ok := err.(ExitCode) if !ok { exitCode = 
UnknownError } exitCode = cerr.Code } return exitCode } ================================================ FILE: cmd/pwd/pwd-util/pwd-util.go ================================================ package main import ( "crypto/rand" "fmt" "github.com/sahib/brig/cmd/pwd" "github.com/sahib/brig/util" ) func main() { pwd, err := pwd.PromptNewPassword(40.0) if err != nil { fmt.Println("Failed: ", err) return } salt := make([]byte, 32) if _, err := rand.Reader.Read(salt); err != nil { fmt.Println("Reading salt failed, you're likely doomed.") return } key := util.DeriveKey([]byte(pwd), salt, 32) fmt.Printf("Key: %x\nSalt: %x\n", key, salt) } ================================================ FILE: cmd/pwd/pwd.go ================================================ package pwd import ( "bytes" "fmt" "github.com/chzyer/readline" "github.com/fatih/color" zxcvbn "github.com/nbutton23/zxcvbn-go" "github.com/sahib/brig/util" ) const ( msgLowEntropy = "Please enter a password with at least %g bits entropy." msgReEnter = "Well done! Please re-type your password now for safety:" msgBadPassword = "This did not seem to match. Please retype it again." 
	msgMaxTriesHit = "Maximum number of password tries exceeded: %d"
)

// doPromptLine reads one line from the readline instance.
// When `hide` is true the input is read as a password,
// i.e. not echoed back to the terminal.
func doPromptLine(rl *readline.Instance, prompt string, hide bool) ([]byte, error) {
	var line []byte
	var sline string
	var err error

	if hide {
		line, err = rl.ReadPassword(prompt)
	} else {
		sline, err = rl.Readline()
		line = []byte(sline)
	}

	if err != nil {
		return nil, err
	}

	return line, nil
}

// createStrengthPrompt builds a colored prompt string for the given
// (partial) password: the symbol and color reflect the zxcvbn entropy
// estimate (green ✔ for strong, red ⊠ for weak input).
func createStrengthPrompt(password []rune, prefix string) string {
	var symbol string
	var colorFn func(format string, a ...interface{}) string

	strength := zxcvbn.PasswordStrength(string(password), nil)
	switch {
	case strength.Entropy >= 25:
		symbol = "✔"
		colorFn = color.GreenString
	case strength.Entropy >= 20:
		symbol = "⊞"
		colorFn = color.YellowString
	case strength.Entropy >= 15:
		symbol = "⊟"
		colorFn = color.MagentaString
	default:
		symbol = "⊠"
		colorFn = color.RedString
	}

	return colorFn(symbol + " " + prefix + "passphrase: ")
}

// PromptNewPassword asks the user to input a password.
//
// While typing, the user gets feedback by the prompt color,
// which changes with the security of the password to green.
// Additionally the entropy of the password is shown.
// If minEntropy was not reached after hitting enter,
// this function will log a message and ask the user again.
func PromptNewPassword(minEntropy float64) ([]byte, error) { rl, err := readline.New("") if err != nil { return nil, err } defer util.Closer(rl) passwordCfg := rl.GenPasswordConfig() passwordCfg.SetListener(func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { rl.SetPrompt(createStrengthPrompt(line, "New ")) rl.Refresh() return nil, 0, false }) pwd := []byte{} for { pwd, err = rl.ReadPasswordWithConfig(passwordCfg) if err != nil { return nil, err } strength := zxcvbn.PasswordStrength(string(pwd), nil) if strength.Entropy >= minEntropy { break } fmt.Printf(color.YellowString(msgLowEntropy)+"\n", minEntropy) } passwordCfg.SetListener(func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { rl.SetPrompt(createStrengthPrompt(line, "Retype ")) rl.Refresh() return nil, 0, false }) fmt.Println(msgReEnter) for { newPwd, err := rl.ReadPasswordWithConfig(passwordCfg) if err != nil { return nil, err } if bytes.Equal(pwd, newPwd) { break } fmt.Println(color.YellowString(msgBadPassword)) } strength := zxcvbn.PasswordStrength(string(pwd), nil) fmt.Printf( "estimated time needed to crack password (according to zxcvbn): %s\n", color.BlueString(strength.CrackTimeDisplay), ) return pwd, nil } func promptPassword(prompt string) ([]byte, error) { rl, err := readline.New(prompt) if err != nil { return nil, err } defer util.Closer(rl) return doPromptLine(rl, prompt, true) } // PromptPassword just opens an uncolored password prompt. // // The password is not echo'd to stdout for safety reasons. 
func PromptPassword() ([]byte, error) { return promptPassword("Password: ") } ================================================ FILE: cmd/pwd/pwd_test.go ================================================ package pwd import ( "fmt" "testing" "time" zxcvbn "github.com/nbutton23/zxcvbn-go" ) func TestLongPassword(t *testing.T) { start := time.Now() zxcvbn.PasswordStrength("68b329da9893e34099c7d8ad5cb9c940", nil) fmt.Println("Took", time.Since(start)) } func BenchmarkLongPassword(b *testing.B) { fmt.Println("N", b.N) for i := 0; i < b.N; i++ { zxcvbn.PasswordStrength("1234567890123456", nil) } } ================================================ FILE: cmd/repo_handlers.go ================================================ package cmd import ( "bytes" "errors" "fmt" "io" "io/ioutil" "log/syslog" "net/url" "os" "os/exec" "path/filepath" "runtime/trace" "strings" "time" "github.com/fatih/color" e "github.com/pkg/errors" "github.com/sahib/brig/client" "github.com/sahib/brig/cmd/pwd" "github.com/sahib/brig/cmd/tabwriter" "github.com/sahib/brig/repo" "github.com/sahib/brig/repo/repopack" "github.com/sahib/brig/repo/setup" "github.com/sahib/brig/server" "github.com/sahib/brig/util" formatter "github.com/sahib/brig/util/log" "github.com/sahib/brig/version" log "github.com/sirupsen/logrus" "github.com/urfave/cli" ) const brigLogo = ` _____ / /\ ___ / /\ / /::\ / /::\ / /\ / /:/_ / /:/\:\ / /:/\:\ / /:/ / /:/ /\ / /:/~/::\ / /:/~/:/ /__/::\ / /:/_/::\ /__/:/ /:/\:| /__/:/ /:/___ \__\/\:\__ /__/:/__\/\:\ \ \:\/:/~/:/ \ \:\/:::::/ \ \:\/\ \ \:\ /~~/:/ \ \::/ /:/ \ \::/~~~~ \__\::/ \ \:\ /:/ \ \:\/:/ \ \:\ /__/:/ \ \:\/:/ \ \::/ \ \:\ \__\/ \ \::/ \__\/ \__\/ \__\/ ` const initBanner = ` A new file README.md was automatically added. Use 'brig cat README.md' to view it & get started. ` func createInitialReadme(ctl *client.Client, folder string) error { text := `Welcome to brig! 
Here's what you can do next: • Read the official documentation (Just type »brig docs«) • Add a few remotes to sync with (See »brig help remote«) • Mount your data somewhere convinient (See »brig help fstab«) • Sync with the remotes you've added (See »brig help sync«) • Have a relaxing day while exploring brig. Please remember that brig is software in its very early stages, and you should not rely on it yet for production purposes. If you're done with this README, you can easily remove it: $ brig rm README.md Your repository is here: %s Have a nice day. ` fd, err := ioutil.TempFile("", ".brig-init-readme-") if err != nil { return err } text = fmt.Sprintf(text, folder) if _, err := fd.WriteString(text); err != nil { return err } readmePath := fd.Name() if err := fd.Close(); err != nil { return err } if err := ctl.Stage(readmePath, "/README.md"); err != nil { return err } return ctl.MakeCommit("added initial README.md") } func isMultiAddr(ipfsPathOrMultiaddr string) bool { _, err := os.Stat(ipfsPathOrMultiaddr) return err != nil } func handleInit(ctx *cli.Context) error { if len(ctx.Args()) == 0 { return fmt.Errorf("Please specify a name for the owner of this repository") } owner := ctx.Args().First() backend := ctx.String("backend") folder := ctx.String("repo") if folder == "" { folder = ctx.GlobalString("repo") } if ctx.NArg() == 2 { var err error folder, err = filepath.Abs(ctx.Args().Get(1)) if err != nil { return fmt.Errorf("failed to get absolute path for %s: %v", folder, err) } } if ctx.NArg() > 2 { return fmt.Errorf("too many arguments") } if folder == "" { var err error folder, err = guessRepoFolder(ctx) if err != nil { return err } fmt.Printf("-- Guessed folder for init: %s\n", folder) } // doing init twice can easily break things. 
isInitialized, err := isNonEmptyDir(folder) if err != nil { return err } if isInitialized { return fmt.Errorf("`%s` already exists and is not empty; refusing to do init", folder) } ipfsPathOrMultiaddr := ctx.String("ipfs-path-or-multiaddr") doIpfsSetup := !ctx.Bool("no-ipfs-setup") doIpfsConfig := !ctx.Bool("no-ipfs-config") doExtraIpfsConfig := !ctx.Bool("no-ipfs-optimization") ipfsRepoPath := ipfsPathOrMultiaddr isMa := isMultiAddr(ipfsPathOrMultiaddr) if isMa { // NOTE: If we're connecting over a multiaddr, // then we should not setup an ipfs repo. // Assumption is that it exists already. doIpfsSetup = false ipfsRepoPath = "" } if backend == "httpipfs" { if _, err := setup.IPFS(setup.Options{ LogWriter: os.Stdout, Setup: doIpfsSetup, SetDefaultConfig: doIpfsConfig, SetExtraConfig: doExtraIpfsConfig, IpfsPath: ipfsRepoPath, }); err != nil { return err } } daemonURL, err := guessFreeDaemonURL(ctx, owner) if err != nil { log.WithError(err).Warnf("failed to figure out a free daemon url") } if err := Init( ctx, ipfsPathOrMultiaddr, repo.InitOptions{ BaseFolder: folder, Owner: owner, BackendName: backend, DaemonURL: daemonURL, }, ); err != nil { return ExitCode{UnknownError, fmt.Sprintf("init failed: %v", err)} } // Start the daemon on the freshly initialized repo: ctl, err := startDaemon(ctx, folder, daemonURL) if err != nil { return ExitCode{ DaemonNotResponding, fmt.Sprintf("Unable to start daemon: %v", err), } } // Run the actual handler: defer ctl.Close() return handleInitPost(ctx, ctl, folder) } func handleInitPost(ctx *cli.Context, ctl *client.Client, folder string) error { if !ctx.Bool("empty") { if err := createInitialReadme(ctl, folder); err != nil { return err } } if !ctx.Bool("no-logo") { fmt.Println(brigLogo) if !ctx.Bool("empty") { fmt.Println(initBanner) } } return nil } func printConfigDocEntry(entry client.ConfigEntry) { val := entry.Val if val == "" { val = color.YellowString("(empty)") } defaultMarker := "" if entry.Val == entry.Default { 
defaultMarker = color.CyanString("(default)") } fmt.Printf("%s: %v %s\n", color.GreenString(entry.Key), val, defaultMarker) needsRestart := yesify(entry.NeedsRestart) defaultVal := entry.Default if entry.Default == "" { defaultVal = color.YellowString("(empty)") } fmt.Printf(" Default: %v\n", defaultVal) fmt.Printf(" Documentation: %v\n", entry.Doc) fmt.Printf(" Needs restart: %v\n", needsRestart) } func handleConfigList(cli *cli.Context, ctl *client.Client) error { all, err := ctl.ConfigAll() if err != nil { return ExitCode{UnknownError, fmt.Sprintf("config list: %v", err)} } for _, entry := range all { printConfigDocEntry(entry) } return nil } func handleConfigGet(ctx *cli.Context, ctl *client.Client) error { key := ctx.Args().Get(0) val, err := ctl.ConfigGet(key) if err != nil { return ExitCode{UnknownError, fmt.Sprintf("config get: %v", err)} } for _, elem := range strings.Split(val, " ;; ") { fmt.Println(elem) } return nil } func handleConfigSet(ctx *cli.Context, ctl *client.Client) error { key := ctx.Args().Get(0) val := ctx.Args().Get(1) if len(ctx.Args()) > 2 { val = strings.Join(ctx.Args()[1:], " ;; ") } if err := ctl.ConfigSet(key, val); err != nil { return ExitCode{UnknownError, fmt.Sprintf("config set: %v", err)} } entry, err := ctl.ConfigDoc(key) if err != nil { return ExitCode{UnknownError, fmt.Sprintf("config doc: %v", err)} } if entry.NeedsRestart { fmt.Println("NOTE: You need to restart brig for this option to take effect.") } return nil } func handleConfigDoc(ctx *cli.Context, ctl *client.Client) error { key := ctx.Args().Get(0) entry, err := ctl.ConfigDoc(key) if err != nil { return ExitCode{UnknownError, fmt.Sprintf("config get: %v", err)} } printConfigDocEntry(entry) return nil } func handleDaemonPing(ctx *cli.Context, ctl *client.Client) error { count := ctx.Int("count") for i := 0; i < count; i++ { before := time.Now() symbol := color.GreenString("✔") if err := ctl.Ping(); err != nil { symbol = color.RedString("✘") } delay := 
time.Since(before)
		fmt.Printf(
			"#%02d %s ➔ %s: %s (%v)\n",
			i+1,
			ctl.LocalAddr().String(),
			ctl.RemoteAddr().String(),
			symbol,
			delay,
		)

		time.Sleep(1 * time.Second)
	}

	return nil
}

// handleDaemonQuit asks the daemon process to shut down gracefully.
func handleDaemonQuit(ctx *cli.Context, ctl *client.Client) error {
	if err := ctl.Quit(); err != nil {
		return ExitCode{
			DaemonNotResponding,
			fmt.Sprintf("brigd not responding: %v", err),
		}
	}

	return nil
}

// switchToSyslog redirects all further log output to the syslog daemon
// (in addition to stdout). If no syslog connection can be established,
// it falls back to logging into a temporary file.
func switchToSyslog() {
	wSyslog, err := syslog.New(syslog.LOG_NOTICE, "brig")
	if err != nil {
		log.Warningf("failed to open connection to syslog for brig: %v", err)

		// Fallback: try to log to a temporary file so messages are not lost.
		logFd, err := ioutil.TempFile("", "brig-*.log")
		if err != nil {
			// BUGFIX: this previously logged an empty message
			// (log.Warningf("")), silently swallowing the actual error.
			log.Warningf("failed to create fallback log file: %v", err)
		} else {
			log.Warningf("Will log to %s from now on.", logFd.Name())
			log.SetOutput(logFd)
		}

		return
	}

	log.SetLevel(log.DebugLevel)
	log.SetFormatter(&formatter.FancyLogFormatter{
		UseColors: false,
	})

	// TODO: we should also forward panics (os.Stderr) to syslog.
	// They don't come from the log obviously though.
	log.SetOutput(
		io.MultiWriter(
			formatter.NewSyslogWrapper(wSyslog),
			os.Stdout,
		),
	)
}

// handleDaemonLaunch starts the brig daemon process in the foreground.
func handleDaemonLaunch(ctx *cli.Context) error {
	// Enable tracing (for profiling) if required.
	if ctx.Bool("trace") {
		tracePath := fmt.Sprintf("/tmp/brig-%d.trace", os.Getpid())
		log.Debugf("Writing trace output to %s", tracePath)

		fd, err := os.Create(tracePath)
		if err != nil {
			return err
		}

		defer util.Closer(fd)

		if err := trace.Start(fd); err != nil {
			return err
		}

		defer trace.Stop()
	}

	repoPath, err := guessRepoFolder(ctx)
	if err != nil {
		return err
	}

	daemonURL, err := guessDaemonURL(ctx)
	if err != nil {
		return err
	}

	// Make sure IPFS is running. Also set required options,
	// but don't bother to set optimizations.
var ipfsPath string cfg, err := openConfig(repoPath) if err != nil { log.Warningf("failed to read config at %v: %v", repoPath, err) } else { ipfsPath = cfg.String("daemon.ipfs_path_or_url") } if _, err := setup.IPFS(setup.Options{ LogWriter: &logWriter{prefix: "ipfs"}, Setup: true, SetDefaultConfig: true, SetExtraConfig: false, IpfsPath: ipfsPath, }); err != nil { return err } logToStdout := ctx.Bool("log-to-stdout") if !logToStdout { log.Infof("all further logs will be also piped to the syslog daemon.") log.Infof("Use »journalctl -fet brig« to view logs.") switchToSyslog() } else { log.SetOutput(os.Stdout) } server, err := server.BootServer(repoPath, daemonURL) if err != nil { return ExitCode{ UnknownError, fmt.Sprintf("failed to boot brigd: %v", err), } } defer util.Closer(server) if err := server.Serve(); err != nil { return ExitCode{ UnknownError, fmt.Sprintf("failed to serve: %v", err), } } return nil } func handleMount(ctx *cli.Context, ctl *client.Client) error { mountPath := ctx.Args().First() absMountPath, err := filepath.Abs(mountPath) if err != nil { return err } if !ctx.Bool("no-mkdir") { if _, err := os.Stat(absMountPath); os.IsNotExist(err) { fmt.Printf( "Mount directory »%s« does not exist. 
Will create it.\n", absMountPath, ) if err := os.MkdirAll(absMountPath, 0700); err != nil { return e.Wrapf(err, "failed to mkdir mount point") } } } options := client.MountOptions{ ReadOnly: ctx.Bool("readonly"), Offline: ctx.Bool("offline"), RootPath: ctx.String("root"), } if err := ctl.Mount(absMountPath, options); err != nil { return ExitCode{ UnknownError, fmt.Sprintf("Failed to mount: %v", err), } } return nil } func handleUnmount(ctx *cli.Context, ctl *client.Client) error { mountPath := ctx.Args().First() absMountPath, err := filepath.Abs(mountPath) if err != nil { return err } if err := ctl.Unmount(absMountPath); err != nil { return ExitCode{ UnknownError, fmt.Sprintf("Failed to unmount: %v", err), } } return nil } func handleVersion(ctx *cli.Context, ctl *client.Client) error { vInfo, err := ctl.Version() if err != nil { return err } row := func(name, value string) { fmt.Printf("%25s: %s\n", name, value) } row("Client Version", version.String()) row("Client Rev", version.GitRev) row("Server Version", vInfo.ServerSemVer) row("Server Rev", vInfo.ServerRev) row("Backend (ipfs) Version", vInfo.BackendSemVer) row("Backend (ipfs) Rev", vInfo.BackendRev) row("Build time", version.BuildTime) return nil } func handleGc(ctx *cli.Context, ctl *client.Client) error { aggressive := ctx.Bool("aggressive") freed, err := ctl.GarbageCollect(aggressive) if err != nil { return err } if len(freed) == 0 { fmt.Println("Nothing freed.") return nil } tabW := tabwriter.NewWriter( os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape, ) fmt.Fprintln(tabW, "CONTENT\tHASH\tOWNER\t") for _, gcItem := range freed { fmt.Fprintf( tabW, "%s\t%s\t%s\t\n", color.WhiteString(gcItem.Path), color.RedString(gcItem.Content.ShortB58()), color.CyanString(gcItem.Owner), ) } return tabW.Flush() } func handleFstabAdd(ctx *cli.Context, ctl *client.Client) error { mountName := ctx.Args().Get(0) mountPath := ctx.Args().Get(1) options := client.MountOptions{ ReadOnly: ctx.Bool("readonly"), RootPath: 
ctx.String("root"),
		Offline:  ctx.Bool("offline"),
	}

	return ctl.FstabAdd(mountName, mountPath, options)
}

// handleFstabRemove deletes the named mount entry from the fstab.
func handleFstabRemove(ctx *cli.Context, ctl *client.Client) error {
	return ctl.FstabRemove(ctx.Args().Get(0))
}

// handleFstabApply mounts all fstab entries, or unmounts all of them
// when --unmount was given.
func handleFstabApply(ctx *cli.Context, ctl *client.Client) error {
	if ctx.Bool("unmount") {
		return ctl.FstabUnmountAll()
	}

	return ctl.FstabApply()
}

// handleFstabUnmounetAll unmounts every mount listed in the fstab.
// (The misspelling in the name is kept; callers reference it as-is.)
func handleFstabUnmounetAll(ctx *cli.Context, ctl *client.Client) error {
	return ctl.FstabUnmountAll()
}

// handleFstabList prints all fstab entries, either through the user
// supplied format template or as a plain table.
func handleFstabList(ctx *cli.Context, ctl *client.Client) error {
	entries, err := ctl.FsTabList()
	if err != nil {
		return ExitCode{UnknownError, fmt.Sprintf("config list: %v", err)}
	}

	if len(entries) == 0 {
		return nil
	}

	w := tabwriter.NewWriter(
		os.Stdout, 0, 0, 2, ' ',
		tabwriter.StripEscape,
	)

	format, err := readFormatTemplate(ctx)
	if err != nil {
		return err
	}

	if format == nil && len(entries) != 0 {
		fmt.Fprintln(w, "NAME\tPATH\tREAD_ONLY\tOFFLINE\tROOT\tACTIVE\t")
	}

	for _, entry := range entries {
		if format != nil {
			if err := format.Execute(os.Stdout, entry); err != nil {
				return err
			}

			continue
		}

		fmt.Fprintf(
			w,
			"%s\t%s\t%s\t%s\t%s\t%s\n",
			entry.Name,
			entry.Path,
			yesify(entry.ReadOnly),
			yesify(entry.Offline),
			entry.Root,
			checkmarkify(entry.Active),
		)
	}

	return w.Flush()
}

// handleGatewayStart enables the gateway via config (if not already
// enabled) and prints the URL under which it is reachable.
func handleGatewayStart(ctx *cli.Context, ctl *client.Client) error {
	isEnabled, err := ctl.ConfigGet("gateway.enabled")
	if err != nil {
		return err
	}

	if isEnabled == "false" {
		if err := ctl.ConfigSet("gateway.enabled", "true"); err != nil {
			return err
		}
	} else {
		fmt.Println("Seems like we're running already.")
	}

	port, err := ctl.ConfigGet("gateway.port")
	if err != nil {
		return err
	}

	domain := "localhost"
	protocol := "http"
	url := fmt.Sprintf("%s://%s:%s", protocol, domain, port)
	fmt.Printf("The gateway is accessible via %s\n", url)
	return nil
}

// handleGatewayStatus prints whether the gateway is running, its URL,
// the UI state and the configured users.
func handleGatewayStatus(ctx *cli.Context, ctl *client.Client) error {
	isEnabled, err := ctl.ConfigGet("gateway.enabled")
	if err != nil {
		return err
	}

	if isEnabled ==
"false" { fmt.Println("• The gateway is not running. Use »brig gateway start« to start.") return nil } port, err := ctl.ConfigGet("gateway.port") if err != nil { return err } domain := "localhost" protocol := "http" url := fmt.Sprintf("%s://%s:%s", protocol, domain, port) fmt.Printf("• Running on %s\n", color.GreenString(url)) uiIsEnabled, err := ctl.ConfigGet("gateway.ui.enabled") if err != nil { return err } if uiIsEnabled == "true" { fmt.Println("• The Web UI is currently enabled and can be accessed via the URL above.") fmt.Println(" If you want to disable the UI (»/get« will still work), then do:") fmt.Println("") fmt.Println(" $ brig cfg gateway.ui.enabled false") fmt.Println("") } else { fmt.Println("• There is no UI enabled. You can enable it via:") fmt.Println("") fmt.Println(" $ brig cfg gateway.ui.enabled true") fmt.Println("") } users, err := ctl.GatewayUserList() if err != nil { return err } authIsEnabled := len(users) > 0 if authIsEnabled { fmt.Printf( "• There are %s users currently. 
Review them with »brig gw user ls«.\n", color.GreenString(fmt.Sprintf("%d", len(users))), ) } else { fmt.Printf("• There is %s user authentication enabled.\n", color.YellowString("no")) fmt.Printf(" You can enable it by setting the following config keys:\n") fmt.Printf("\n") fmt.Printf(" $ brig gateway user add --role-admin \n") fmt.Printf("\n") } return nil } func handleGatewayStop(ctx *cli.Context, ctl *client.Client) error { isEnabled, err := ctl.ConfigGet("gateway.enabled") if err != nil { return err } if isEnabled == "true" { if err := ctl.ConfigSet("gateway.enabled", "false"); err != nil { return err } fmt.Println("The gateway will stop serving after handling all open requests.") } else { fmt.Println("It seems like the gateway is already stopped.") } return nil } func handleGatewayURL(ctx *cli.Context, ctl *client.Client) error { path := ctx.Args().First() if _, err := ctl.Stat(path); err != nil { return err } domain := "localhost" port, err := ctl.ConfigGet("gateway.port") if err != nil { return err } if port == "80" || port == "443" { port = "" } else { port = ":" + port } protocol := "http" escapedPath := url.PathEscape(strings.TrimLeft(path, "/")) fmt.Printf("%s://%s%s/get/%s\n", protocol, domain, port, escapedPath) return nil } func handleGatewayUserAdd(ctx *cli.Context, ctl *client.Client) error { nArgs := len(ctx.Args()) name := ctx.Args().First() var password string if nArgs > 1 { password = ctx.Args().Get(1) } else { bPassword, err := pwd.PromptNewPassword(14) if err != nil { return err } password = string(bPassword) } folders := []string{"/"} if nArgs > 2 { folders = ctx.Args()[2:] } allRights := []string{ "fs.download", "fs.view", "fs.edit", "remotes.view", "remotes.edit", } rights := []string{} if ctx.Bool("role-admin") { rights = allRights } if ctx.Bool("role-editor") { rights = allRights[:len(allRights)-1] } if ctx.Bool("role-collaborator") { rights = allRights[:len(allRights)-2] } if ctx.Bool("role-viewer") { rights = 
allRights[:len(allRights)-3] } if ctx.Bool("role-link-only") { rights = allRights[:len(allRights)-4] } if r := ctx.String("rights"); r != "" { rights = strings.Split(r, ",") } return ctl.GatewayUserAdd(name, password, folders, rights) } func handleGatewayUserRemove(ctx *cli.Context, ctl *client.Client) error { for _, name := range ctx.Args() { if err := ctl.GatewayUserRemove(name); err != nil { fmt.Printf("Failed to remove »%s«: %v\n", name, err) } } return nil } func handleGatewayUserList(ctx *cli.Context, ctl *client.Client) error { users, err := ctl.GatewayUserList() if err != nil { return err } tabW := tabwriter.NewWriter( os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape, ) tmpl, err := readFormatTemplate(ctx) if err != nil { return err } if tmpl == nil { if len(users) == 0 { fmt.Println("No users. Add some with »brig gw user add «") } else { fmt.Fprintln(tabW, "NAME\tFOLDERS\tRIGHTS\t") } } for _, user := range users { if tmpl != nil { if err := tmpl.Execute(os.Stdout, user); err != nil { return err } continue } fmt.Fprintf( tabW, "%s\t%s\t%s\t\n", user.Name, strings.Join(user.Folders, ","), strings.Join(user.Rights, ","), ) } return tabW.Flush() } func readPassword(ctx *cli.Context, isNew bool) ([]byte, error) { if ctx.IsSet("password-command") { log.Debugf("reading by password command.") cmd := exec.Command("/bin/sh", "-c", ctx.String("password-command")) // Make sure sub command can access our streams. // Some password managers might ask for a master password. cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin out, err := cmd.Output() if err != nil { return nil, err } // Strip any newline produced by the tool. // Just hope that nobody really tries to use newlines // as part of the password. Would still work though // as long only --password-command is used to enter the password. 
return bytes.TrimRight(out, "\n\r"), nil } if ctx.IsSet("password-file") { log.Debugf("reading from password file.") return ioutil.ReadFile(ctx.String("password-file")) } if isNew { return pwd.PromptNewPassword(10) } return pwd.PromptPassword() } func handleRepoPack(ctx *cli.Context) error { folder, err := guessRepoFolder(ctx) if err != nil { return err } isRunning, err := isDaemonRunning(ctx) if err != nil { return e.Wrap(err, "failed to check if daemon is running") } if isRunning { log.Error("daemon is still running for this repo, please quit it first!") log.Errorf("Use »brig --repo %s daemon quit« for this.", folder) return errors.New("refusing to pack data, there might be inconsistencies") } pass, err := readPassword(ctx, true) if err != nil { return err } archivePath := ctx.Args().First() if archivePath == "" { archivePath = folder + ".repopack" } log.Infof("writing archive to »%s«", archivePath) return repopack.PackRepo( folder, archivePath, string(pass), !ctx.Bool("no-remove"), ) } func handleRepoUnpack(ctx *cli.Context) error { archivePath := ctx.Args().First() folder, err := guessRepoFolder(ctx) if err != nil { // Small convenience hack: if the archive ends in .repopack // assume that it was created from a repo with the same path // but without the suffix. 
folder = strings.TrimSuffix(archivePath, ".repopack") } isNonEmpty, err := isNonEmptyDir(folder) if err != nil { return err } if isNonEmpty { return fmt.Errorf("»%s« is non-empty, refusing to overwrite", folder) } if archivePath == "" { return fmt.Errorf("please specify the location of the packed archive") } pass, err := readPassword(ctx, false) if err != nil { return err } log.Infof("unpacking to »%s«", folder) return repopack.UnpackRepo( folder, archivePath, string(pass), !ctx.Bool("no-remove"), ) } func optionalStringParamAsPtr(ctx *cli.Context, name string) *string { if v := ctx.String(name); v != "" { return &v } return nil } func handleRepoHintsSet(ctx *cli.Context, ctl *client.Client) error { path := ctx.Args().First() if !ctx.Bool("force") { if _, err := ctl.Stat(path); err != nil { return fmt.Errorf("no file or directory at »%s« (use --force to create anyways)", path) } } zipHint := optionalStringParamAsPtr(ctx, "compression") encHint := optionalStringParamAsPtr(ctx, "encryption") // TODO: There seems to be a bug in the cli library. // When --recode comes directly after 'set' then // all other arguments are part of 'ctx.Args()' and do not get // parsed. This check at least catches this behavior. 
if zipHint == nil && encHint == nil { return fmt.Errorf("need at least one of --encryption or --compression") } if err := ctl.HintSet(path, zipHint, encHint); err != nil { return err } if ctx.Bool("recode") { return ctl.RecodeStream(path) } return nil } func handleRepoHintsList(ctx *cli.Context, ctl *client.Client) error { hints, err := ctl.HintList() if err != nil { return err } if len(ctx.Args()) != 0 { return fmt.Errorf("extra arguments passed") } tabW := tabwriter.NewWriter( os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape, ) fmt.Fprintln(tabW, "PATH\tENCRYPTION\tCOMPRESSION\t") for _, hint := range hints { fmt.Fprintf( tabW, "%s\t%s\t%s\t\n", hint.Path, hint.EncryptionAlgo, hint.CompressionAlgo, ) } return tabW.Flush() } func handleRepoHintsRemove(ctx *cli.Context, ctl *client.Client) error { return ctl.HintRemove(ctx.Args().First()) } func handleRepoHintsRecode(ctx *cli.Context, ctl *client.Client) error { repoPath := ctx.Args().Get(0) if repoPath == "" { repoPath = "/" } return ctl.RecodeStream(repoPath) } ================================================ FILE: cmd/suggest.go ================================================ package cmd import ( "context" "fmt" "io/ioutil" "os" "sort" "strings" "github.com/fatih/color" "github.com/sahib/brig/client" "github.com/urfave/cli" "github.com/xrash/smetrics" ) type suggestion struct { name string score float64 } func levenshteinRatio(s, t string) float64 { lensum := float64(len(s) + len(t)) if lensum == 0 { return 1.0 } dist := float64(smetrics.WagnerFischer(s, t, 1, 1, 2)) return (lensum - dist) / lensum } func findLastGoodCommands(ctx *cli.Context) ([]string, []cli.Command) { for ctx.Parent() != nil { ctx = ctx.Parent() } args := ctx.Args() if len(args) == 0 || len(args) == 1 { return nil, ctx.App.Commands } cmd := ctx.App.Command(args[0]) if cmd == nil { return nil, ctx.App.Commands } validArgs := []string{args[0]} args = args[1 : len(args)-1] for len(args) != 0 && cmd != nil { for _, subCmd := range cmd.Subcommands 
{ if subCmd.Name == args[0] { cmd = &subCmd } } validArgs = append(validArgs, args[0]) args = args[1:] } return validArgs, cmd.Subcommands } func findSimilarCommands(cmdName string, cmds []cli.Command) []suggestion { similars := []suggestion{} for _, cmd := range cmds { candidates := []string{cmd.Name} candidates = append(candidates, cmd.Aliases...) for _, candidate := range candidates { if score := levenshteinRatio(cmdName, candidate); score >= 0.6 { similars = append(similars, suggestion{ name: cmd.Name, score: score, }) break } } } // Special cases for the git inclined: staticSuggestions := map[string]string{ "insert": "stage", "pull": "sync", "merge": "sync", } for gitName, brigName := range staticSuggestions { if cmdName == gitName { similars = append(similars, suggestion{ name: brigName, score: 0.0, }) } } // Let suggestions be sorted by their similarity: sort.Slice(similars, func(i, j int) bool { return similars[i].score < similars[j].score }) return similars } func findCurrentCommand(ctx *cli.Context) *cli.Command { for { par := ctx.Parent() if par == nil { break } ctx = par } var command *cli.Command for args := ctx.Args(); len(args) > 0; { subCommand := ctx.App.Command(args[0]) args = args[1:] if subCommand != nil { command = subCommand } } return command } func completeLocalPath(ctx *cli.Context) { files, err := ioutil.ReadDir("./") if err != nil { return } for _, file := range files { fmt.Println(file.Name()) } } func completeBrigPath(allowFiles, allowDirs bool) func(ctx *cli.Context) { return func(ctx *cli.Context) { // Check if the daemon is running: daemonURL, err := guessDaemonURL(ctx) if err != nil { return } ctl, err := client.Dial(context.Background(), daemonURL) if err != nil { return } stats, err := ctl.List("/", -1) if err != nil { return } for _, stat := range stats { if stat.Path == "/" { continue } if stat.IsDir && allowDirs { fmt.Println(stat.Path) } if !stat.IsDir && allowFiles { fmt.Println(stat.Path) } } } } func completeArgsUsage(ctx 
*cli.Context) { if command := findCurrentCommand(ctx); command != nil { if len(command.Flags) == 0 { return } for _, flag := range command.Flags { split := strings.SplitN(flag.GetName(), ",", 2) longName := split[0] fmt.Printf("--%s\n", longName) } fmt.Println(command.ArgsUsage) } } func completeLocalFile(ctx *cli.Context) { if len(os.Args) >= 2 { lastArg := os.Args[len(os.Args)-2] cmd := findCurrentCommand(ctx) if lastArg != cmd.FullName() { return } } // CAVEAT: We currently do not get partial words from bash/zsh. // e.g. "brig stage /us" will pass the following os.Args: // ["brig", "stage", "--generate-bash-completion"] // // Because of that we do no prefix completion right now. // We can probably tweak autcomplete/{z,ba}sh_autcomplete to // somehow do this, but after 30mins of googling I give up for now. // // If you read this, I challenge you to do it better. dir, err := os.Getwd() if err != nil { // silent error. return } children, err := ioutil.ReadDir(dir) if err != nil { // silent error. return } for _, child := range children { fmt.Println(child.Name()) } } func completeSubcommands(ctx *cli.Context) { if command := findCurrentCommand(ctx); command != nil { for _, subCmd := range command.Subcommands { fmt.Println(subCmd.Name) } } } func commandNotFound(ctx *cli.Context, cmdName string) { // Try to find the commands we need to look at for a suggestion. // We only want to show the user the relevant subcommands. cmdPath, lastGoodCmds := findLastGoodCommands(ctx) // Figure out if it was a toplevel command or if some subcommand // (like e.g. 'remote') was correct. badCmd := color.RedString(cmdName) if cmdPath == nil { // A toplevel command was wrong: fmt.Printf("»%s« is not a valid command. ", badCmd) } else { // A command of a subcommand was wrong: lastGoodSubCmd := color.YellowString(strings.Join(cmdPath, " ")) fmt.Printf("»%s« is not a valid subcommand of »%s«. 
", badCmd, lastGoodSubCmd) } // Get a list of similar commands: similars := findSimilarCommands(cmdName, lastGoodCmds) switch len(similars) { case 0: fmt.Printf("\n") case 1: suggestion := color.GreenString(similars[0].name) fmt.Printf("Did you maybe mean »%s«?\n", suggestion) default: fmt.Println("\n\nDid you maybe mean one of those?") for _, similar := range similars { fmt.Printf(" * %s\n", color.GreenString(similar.name)) } } } ================================================ FILE: cmd/tabwriter/example_test.go ================================================ // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tabwriter_test import ( "fmt" "os" "text/tabwriter" ) func ExampleWriter_Init() { w := new(tabwriter.Writer) // Format in tab-separated columns with a tab stop of 8. w.Init(os.Stdout, 0, 8, 0, '\t', 0) fmt.Fprintln(w, "a\tb\tc\td\t.") fmt.Fprintln(w, "123\t12345\t1234567\t123456789\t.") fmt.Fprintln(w) w.Flush() // Format right-aligned in space-separated columns of minimal width 5 // and at least one blank of padding (so wider column entries do not // touch each other). w.Init(os.Stdout, 5, 0, 1, ' ', tabwriter.AlignRight) fmt.Fprintln(w, "a\tb\tc\td\t.") fmt.Fprintln(w, "123\t12345\t1234567\t123456789\t.") fmt.Fprintln(w) w.Flush() // output: // a b c d . // 123 12345 1234567 123456789 . // // a b c d. // 123 12345 1234567 123456789. } func Example_elastic() { // Observe how the b's and the d's, despite appearing in the // second cell of each line, belong to different columns. 
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, '.', tabwriter.AlignRight|tabwriter.Debug) fmt.Fprintln(w, "a\tb\tc") fmt.Fprintln(w, "aa\tbb\tcc") fmt.Fprintln(w, "aaa\t") // trailing tab fmt.Fprintln(w, "aaaa\tdddd\teeee") w.Flush() // output: // ....a|..b|c // ...aa|.bb|cc // ..aaa| // .aaaa|.dddd|eeee } func Example_trailingTab() { // Observe that the third line has no trailing tab, // so its final cell is not part of an aligned column. const padding = 3 w := tabwriter.NewWriter(os.Stdout, 0, 0, padding, '-', tabwriter.AlignRight|tabwriter.Debug) fmt.Fprintln(w, "a\tb\taligned\t") fmt.Fprintln(w, "aa\tbb\taligned\t") fmt.Fprintln(w, "aaa\tbbb\tunaligned") // no trailing tab fmt.Fprintln(w, "aaaa\tbbbb\taligned\t") w.Flush() // output: // ------a|------b|---aligned| // -----aa|-----bb|---aligned| // ----aaa|----bbb|unaligned // ---aaaa|---bbbb|---aligned| } ================================================ FILE: cmd/tabwriter/tabwriter.go ================================================ // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package tabwriter implements a write filter (tabwriter.Writer) that // translates tabbed columns in input into properly aligned text. // // The package is using the Elastic Tabstops algorithm described at // http://nickgravgaard.com/elastictabstops/index.html. // // The text/tabwriter package is frozen and is not accepting new features. package tabwriter import ( "bytes" "io" "unicode/utf8" ) // ---------------------------------------------------------------------------- // Filter implementation // A cell represents a segment of text terminated by tabs or line breaks. // The text itself is stored in a separate buffer; cell only describes the // segment's size in bytes, its width in runes, and whether it's an htab // ('\t') terminated cell. 
//
type cell struct {
	size  int  // cell size in bytes
	width int  // cell width in runes
	htab  bool // true if the cell is terminated by an htab ('\t')
}

// A Writer is a filter that inserts padding around tab-delimited
// columns in its input to align them in the output.
//
// The Writer treats incoming bytes as UTF-8-encoded text consisting
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
// and newline ('\n') or formfeed ('\f') characters; both newline and
// formfeed act as line breaks.
//
// Tab-terminated cells in contiguous lines constitute a column. The
// Writer inserts padding as needed to make all cells in a column have
// the same width, effectively aligning the columns. It assumes that
// all characters have the same width, except for tabs for which a
// tabwidth must be specified. Column cells must be tab-terminated, not
// tab-separated: non-tab terminated trailing text at the end of a line
// forms a cell but that cell is not part of an aligned column.
// For instance, in this example (where | stands for a horizontal tab):
//
//	aaaa|bbb|d
//	aa  |b  |dd
//	a   |
//	aa  |cccc|eee
//
// the b and c are in distinct columns (the b column is not contiguous
// all the way). The d and e are not in a column at all (there's no
// terminating tab, nor would the column be contiguous).
//
// The Writer assumes that all Unicode code points have the same width;
// this may not be true in some fonts or if the string contains combining
// characters.
//
// If DiscardEmptyColumns is set, empty columns that are terminated
// entirely by vertical (or "soft") tabs are discarded. Columns
// terminated by horizontal (or "hard") tabs are not affected by
// this flag.
//
// If a Writer is configured to filter HTML, HTML tags and entities
// are passed through. The widths of tags and entities are
// assumed to be zero (tags) and one (entities) for formatting purposes.
//
// A segment of text may be escaped by bracketing it with Escape
// characters.
// The tabwriter passes escaped text segments through
// unchanged. In particular, it does not interpret any tabs or line
// breaks within the segment. If the StripEscape flag is set, the
// Escape characters are stripped from the output; otherwise they
// are passed through as well. For the purpose of formatting, the
// width of the escaped text is always computed excluding the Escape
// characters.
//
// The formfeed character acts like a newline but it also terminates
// all columns in the current line (effectively calling Flush). Tab-
// terminated cells in the next line start new columns. Unless found
// inside an HTML tag or inside an escaped text segment, formfeed
// characters appear as newlines in the output.
//
// The Writer must buffer input internally, because proper spacing
// of one line may depend on the cells in future lines. Clients must
// call Flush when done calling Write.
//
type Writer struct {
	// configuration
	output   io.Writer
	minwidth int
	tabwidth int
	padding  int
	padbytes [8]byte
	flags    uint

	// current state
	buf     bytes.Buffer // collected text excluding tabs or line breaks
	pos     int          // buffer position up to which cell.width of incomplete cell has been computed
	cell    cell         // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
	endChar byte         // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
	lines   [][]cell     // list of lines; each line is a list of cells
	widths  []int        // list of column widths in runes - re-used during formatting
}

// addLine appends a fresh, empty line of cells; subsequent cells are
// collected into it until the next line break.
func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }

// Reset the current state.
func (b *Writer) reset() {
	b.buf.Reset()
	b.pos = 0
	b.cell = cell{}
	b.endChar = 0
	b.lines = b.lines[0:0]
	b.widths = b.widths[0:0]
	b.addLine()
}

// Internal representation (current state):
//
// - all text written is appended to buf; tabs and line breaks are stripped away
// - at any given time there is a (possibly empty) incomplete cell at the end
//   (the cell starts after a tab or line break)
// - cell.size is the number of bytes belonging to the cell so far
// - cell.width is text width in runes of that cell from the start of the cell to
//   position pos; html tags and entities are excluded from this width if html
//   filtering is enabled
// - the sizes and widths of processed text are kept in the lines list
//   which contains a list of cells for each line
// - the widths list is a temporary list with current widths used during
//   formatting; it is kept in Writer because it's re-used
//
//	|<---------- size ---------->|
//	|                            |
//	|<- width ->|<- ignored ->|  |
//	|           |             |  |
//	[---processed---tab------------......]
//	^                         ^  ^
//	|                         |  |
//	buf        start of incomplete cell  pos

// Formatting can be controlled with these flags.
const (
	// Ignore html tags and treat entities (starting with '&'
	// and ending in ';') as single characters (width = 1).
	FilterHTML uint = 1 << iota

	// Strip Escape characters bracketing escaped text segments
	// instead of passing them through unchanged with the text.
	StripEscape

	// Force right-alignment of cell content.
	// Default is left-alignment.
	AlignRight

	// Handle empty columns as if they were not present in
	// the input in the first place.
	DiscardEmptyColumns

	// Always use tabs for indentation columns (i.e., padding of
	// leading empty cells on the left) independent of padchar.
	TabIndent

	// Print a vertical bar ('|') between columns (after formatting).
	// Discarded columns appear as zero-width columns ("||").
	Debug
)

// Init initializes a Writer. The first parameter (output)
// specifies the filter output.
The remaining parameters control the formatting: // // minwidth minimal cell width including any padding // tabwidth width of tab characters (equivalent number of spaces) // padding padding added to a cell before computing its width // padchar ASCII char used for padding // if padchar == '\t', the Writer will assume that the // width of a '\t' in the formatted output is tabwidth, // and cells are left-aligned independent of align_left // (for correct-looking results, tabwidth must correspond // to the tab width in the viewer displaying the result) // flags formatting control // func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { if minwidth < 0 || tabwidth < 0 || padding < 0 { panic("negative minwidth, tabwidth, or padding") } b.output = output b.minwidth = minwidth b.tabwidth = tabwidth b.padding = padding for i := range b.padbytes { b.padbytes[i] = padchar } if padchar == '\t' { // tab padding enforces left-alignment flags &^= AlignRight } b.flags = flags b.reset() return b } // debugging support (keep code around) func (b *Writer) dump() { pos := 0 for i, line := range b.lines { print("(", i, ") ") for _, c := range line { print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]") pos += c.size } print("\n") } print("\n") } // local error wrapper so we can distinguish errors we want to return // as errors from genuine panics (which we don't want to return as errors) type osError struct { err error } func (b *Writer) write0(buf []byte) { n, err := b.output.Write(buf) if n != len(buf) && err == nil { err = io.ErrShortWrite } if err != nil { panic(osError{err}) } } func (b *Writer) writeN(src []byte, n int) { for n > len(src) { b.write0(src) n -= len(src) } b.write0(src[0:n]) } var ( newline = []byte{'\n'} tabs = []byte("\t\t\t\t\t\t\t\t") ) func (b *Writer) writePadding(textw, cellw int, useTabs bool) { if b.padbytes[0] == '\t' || useTabs { // padding is done with tabs if b.tabwidth == 0 { return // tabs have no 
width - can't do any padding } // make cellw the smallest multiple of b.tabwidth cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth n := cellw - textw // amount of padding if n < 0 { panic("internal error") } b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth) return } // padding is done with non-tab characters b.writeN(b.padbytes[0:], cellw-textw) } var vbar = []byte{'|'} func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) { pos = pos0 for i := line0; i < line1; i++ { line := b.lines[i] // if TabIndent is set, use tabs to pad leading empty cells useTabs := b.flags&TabIndent != 0 for j, c := range line { if j > 0 && b.flags&Debug != 0 { // indicate column break b.write0(vbar) } if c.size == 0 { // empty cell if j < len(b.widths) { b.writePadding(c.width, b.widths[j], useTabs) } } else { // non-empty cell useTabs = false if b.flags&AlignRight == 0 { // align left b.write0(b.buf.Bytes()[pos : pos+c.size]) pos += c.size if j < len(b.widths) { b.writePadding(c.width, b.widths[j], false) } } else { // align right if j < len(b.widths) { b.writePadding(c.width, b.widths[j], false) } b.write0(b.buf.Bytes()[pos : pos+c.size]) pos += c.size } } } if i+1 == len(b.lines) { // last buffered line - we don't have a newline, so just write // any outstanding buffered data b.write0(b.buf.Bytes()[pos : pos+b.cell.size]) pos += b.cell.size } else { // not the last line - write newline b.write0(newline) } } return } // Format the text between line0 and line1 (excluding line1); pos // is the buffer position corresponding to the beginning of line0. // Returns the buffer position corresponding to the beginning of // line1 and an error, if any. 
//
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
	pos = pos0
	column := len(b.widths)
	for this := line0; this < line1; this++ {
		line := b.lines[this]
		if column < len(line)-1 { // cell exists in this column => this line
			// has more cells than the previous line
			// (the last cell per line is ignored because cells are
			// tab-terminated; the last cell per line describes the
			// text before the newline/formfeed and does not belong
			// to a column)

			// print unprinted lines until beginning of block
			pos = b.writeLines(pos, line0, this)
			line0 = this

			// column block begin
			width := b.minwidth // minimal column width
			discardable := true // true if all cells in this column are empty and "soft"
			for ; this < line1; this++ {
				line = b.lines[this]
				if column < len(line)-1 { // cell exists in this column
					c := line[column]
					// update width
					if w := c.width + b.padding; w > width {
						width = w
					}
					// update discardable
					if c.width > 0 || c.htab {
						discardable = false
					}
				} else {
					break
				}
			}
			// column block end

			// discard empty columns if necessary
			if discardable && b.flags&DiscardEmptyColumns != 0 {
				width = 0
			}

			// format and print all columns to the right of this column
			// (we know the widths of this column and all columns to the left)
			b.widths = append(b.widths, width) // push width
			pos = b.format(pos, line0, this)
			b.widths = b.widths[0 : len(b.widths)-1] // pop width
			line0 = this
		}
	}

	// print unprinted lines until end
	return b.writeLines(pos, line0, line1)
}

// Append text to current cell.
func (b *Writer) append(text []byte) {
	b.buf.Write(text)
	b.cell.size += len(text)
}

// Update the cell width by counting the runes added to buf since the
// last update (buf[pos:]); pos is then advanced to the buffer end.
func (b *Writer) updateWidth() {
	b.cell.width += utf8.RuneCount(b.buf.Bytes()[b.pos:b.buf.Len()])
	b.pos = b.buf.Len()
}

// Escape escapes a text segment.
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
// does not terminate a cell and constitutes a single character of
// width one for formatting purposes.
//
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
//
const Escape = '\xff'

// ColorStart defines a byte sequence that indicates the start of terminal colors.
const ColorStart = '\x1B'

// Start escaped mode. The argument determines the byte that ends the
// escaped section: Escape for escape brackets, '>'/';' for HTML
// tags/entities, and 'm' for ANSI terminal color sequences.
func (b *Writer) startEscape(ch byte) {
	switch ch {
	case Escape:
		b.endChar = Escape
	case '<':
		b.endChar = '>'
	case '&':
		b.endChar = ';'
	case ColorStart:
		b.endChar = 'm'
	}
}

// Terminate escaped mode. If the escaped text was an HTML tag, its width
// is assumed to be zero for formatting purposes; if it was an HTML entity,
// its width is assumed to be one. In all other cases, the width is the
// unicode width of the text.
//
func (b *Writer) endEscape() {
	switch b.endChar {
	case Escape:
		b.updateWidth()
		if b.flags&StripEscape == 0 {
			b.cell.width -= 2 // don't count the Escape chars
		}
	case 'm': // color escape, does not count towards the width
	case '>': // tag of zero width
	case ';':
		b.cell.width++ // entity, count as one rune
	}
	// Skip the escaped bytes so they never contribute to the width.
	b.pos = b.buf.Len()
	b.endChar = 0
}

// Terminate the current cell by adding it to the list of cells of the
// current line. Returns the number of cells in that line.
//
func (b *Writer) terminateCell(htab bool) int {
	b.cell.htab = htab
	line := &b.lines[len(b.lines)-1]
	*line = append(*line, b.cell)
	b.cell = cell{}
	return len(*line)
}

// handlePanic recovers from osError panics (raised by write0 on genuine
// write failures) and converts them back into a returned error; any
// other panic is re-raised, tagged with the operation name.
func handlePanic(err *error, op string) {
	if e := recover(); e != nil {
		if nerr, ok := e.(osError); ok {
			*err = nerr.err
			return
		}
		panic("tabwriter: panic during " + op)
	}
}

// Flush should be called after the last call to Write to ensure
// that any data buffered in the Writer is written to output. Any
// incomplete escape sequence at the end is considered
// complete for formatting purposes.
func (b *Writer) Flush() error { return b.flush() } func (b *Writer) flush() (err error) { defer b.reset() // even in the presence of errors defer handlePanic(&err, "Flush") // add current cell if not empty if b.cell.size > 0 { if b.endChar != 0 { // inside escape - terminate it even if incomplete b.endEscape() } b.terminateCell(false) } // format contents of buffer b.format(0, 0, len(b.lines)) return nil } var hbar = []byte("---\n") // Write writes buf to the writer b. // The only errors returned are ones encountered // while writing to the underlying output stream. // func (b *Writer) Write(buf []byte) (n int, err error) { defer handlePanic(&err, "Write") // split text into cells n = 0 for i, ch := range buf { if b.endChar == 0 { // outside escape switch ch { case '\t', '\v', '\n', '\f': // end of cell b.append(buf[n:i]) b.updateWidth() n = i + 1 // ch consumed ncells := b.terminateCell(ch == '\t') if ch == '\n' || ch == '\f' { // terminate line b.addLine() if ch == '\f' || ncells == 1 { // A '\f' always forces a flush. Otherwise, if the previous // line has only one cell which does not have an impact on // the formatting of the following lines (the last cell per // line is ignored by format()), thus we can flush the // Writer contents. 
if err = b.Flush(); err != nil { return } if ch == '\f' && b.flags&Debug != 0 { // indicate section break b.write0(hbar) } } } case Escape: // start of escaped sequence b.append(buf[n:i]) b.updateWidth() n = i if b.flags&StripEscape != 0 { n++ // strip Escape } b.startEscape(Escape) case ColorStart: b.append(buf[n:i]) b.updateWidth() n = i b.startEscape(ch) case '<', '&': // possibly an html tag/entity if b.flags&FilterHTML != 0 { // begin of tag/entity b.append(buf[n:i]) b.updateWidth() n = i b.startEscape(ch) } } } else { // inside escape if ch == b.endChar { // end of tag/entity j := i + 1 if ch == Escape && b.flags&StripEscape != 0 { j = i // strip Escape } b.append(buf[n:j]) n = i + 1 // ch consumed b.endEscape() } } } // append leftover text b.append(buf[n:]) n = len(buf) return } // NewWriter allocates and initializes a new tabwriter.Writer. // The parameters are the same as for the Init function. // func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags) } ================================================ FILE: cmd/tabwriter/tabwriter_test.go ================================================ // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tabwriter_test import ( "io" "testing" . 
"text/tabwriter" ) type buffer struct { a []byte } func (b *buffer) init(n int) { b.a = make([]byte, 0, n) } func (b *buffer) clear() { b.a = b.a[0:0] } func (b *buffer) Write(buf []byte) (written int, err error) { n := len(b.a) m := len(buf) if n+m <= cap(b.a) { b.a = b.a[0 : n+m] for i := 0; i < m; i++ { b.a[n+i] = buf[i] } } else { panic("buffer.Write: buffer too small") } return len(buf), nil } func (b *buffer) String() string { return string(b.a) } func write(t *testing.T, testname string, w *Writer, src string) { written, err := io.WriteString(w, src) if err != nil { t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err) } if written != len(src) { t.Errorf("--- test: %s\n--- src:\n%q\n--- written = %d, len(src) = %d\n", testname, src, written, len(src)) } } func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) { err := w.Flush() if err != nil { t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err) } res := b.String() if res != expected { t.Errorf("--- test: %s\n--- src:\n%q\n--- found:\n%q\n--- expected:\n%q\n", testname, src, res, expected) } } func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) { var b buffer b.init(1000) var w Writer w.Init(&b, minwidth, tabwidth, padding, padchar, flags) // write all at once title := testname + " (written all at once)" b.clear() write(t, title, &w, src) verify(t, title, &w, &b, src, expected) // write byte-by-byte title = testname + " (written byte-by-byte)" b.clear() for i := 0; i < len(src); i++ { write(t, title, &w, src[i:i+1]) } verify(t, title, &w, &b, src, expected) // write using Fibonacci slice sizes title = testname + " (written in fibonacci slices)" b.clear() for i, d := 0, 0; i < len(src); { write(t, title, &w, src[i:i+d]) i, d = i+d, d+1 if i+d > len(src) { d = len(src) - i } } verify(t, title, &w, &b, src, expected) } var tests = []struct { testname 
string minwidth, tabwidth, padding int padchar byte flags uint src, expected string }{ { "1a", 8, 0, 1, '.', 0, "", "", }, { "1a debug", 8, 0, 1, '.', Debug, "", "", }, { "1b esc stripped", 8, 0, 1, '.', StripEscape, "\xff\xff", "", }, { "1b esc", 8, 0, 1, '.', 0, "\xff\xff", "\xff\xff", }, { "1c esc stripped", 8, 0, 1, '.', StripEscape, "\xff\t\xff", "\t", }, { "1c esc", 8, 0, 1, '.', 0, "\xff\t\xff", "\xff\t\xff", }, { "1d esc stripped", 8, 0, 1, '.', StripEscape, "\xff\"foo\t\n\tbar\"\xff", "\"foo\t\n\tbar\"", }, { "1d esc", 8, 0, 1, '.', 0, "\xff\"foo\t\n\tbar\"\xff", "\xff\"foo\t\n\tbar\"\xff", }, { "1e esc stripped", 8, 0, 1, '.', StripEscape, "abc\xff\tdef", // unterminated escape "abc\tdef", }, { "1e esc", 8, 0, 1, '.', 0, "abc\xff\tdef", // unterminated escape "abc\xff\tdef", }, { "2", 8, 0, 1, '.', 0, "\n\n\n", "\n\n\n", }, { "3", 8, 0, 1, '.', 0, "a\nb\nc", "a\nb\nc", }, { "4a", 8, 0, 1, '.', 0, "\t", // '\t' terminates an empty cell on last line - nothing to print "", }, { "4b", 8, 0, 1, '.', AlignRight, "\t", // '\t' terminates an empty cell on last line - nothing to print "", }, { "5", 8, 0, 1, '.', 0, "*\t*", "*.......*", }, { "5b", 8, 0, 1, '.', 0, "*\t*\n", "*.......*\n", }, { "5c", 8, 0, 1, '.', 0, "*\t*\t", "*.......*", }, { "5c debug", 8, 0, 1, '.', Debug, "*\t*\t", "*.......|*", }, { "5d", 8, 0, 1, '.', AlignRight, "*\t*\t", ".......**", }, { "6", 8, 0, 1, '.', 0, "\t\n", "........\n", }, { "7a", 8, 0, 1, '.', 0, "a) foo", "a) foo", }, { "7b", 8, 0, 1, ' ', 0, "b) foo\tbar", "b) foo bar", }, { "7c", 8, 0, 1, '.', 0, "c) foo\tbar\t", "c) foo..bar", }, { "7d", 8, 0, 1, '.', 0, "d) foo\tbar\n", "d) foo..bar\n", }, { "7e", 8, 0, 1, '.', 0, "e) foo\tbar\t\n", "e) foo..bar.....\n", }, { "7f", 8, 0, 1, '.', FilterHTML, "f) f<o\tbar\t\n", "f) f<o..bar.....\n", }, { "7g", 8, 0, 1, '.', FilterHTML, "g) f<o\tbar\t non-terminated entity &", "g) f<o..bar..... 
non-terminated entity &", }, { "7g debug", 8, 0, 1, '.', FilterHTML | Debug, "g) f<o\tbar\t non-terminated entity &", "g) f<o..|bar.....| non-terminated entity &", }, { "8", 8, 0, 1, '*', 0, "Hello, world!\n", "Hello, world!\n", }, { "9a", 1, 0, 0, '.', 0, "1\t2\t3\t4\n" + "11\t222\t3333\t44444\n", "1.2..3...4\n" + "11222333344444\n", }, { "9b", 1, 0, 0, '.', FilterHTML, "1\t2\t3\t4\n" + // \f inside HTML is ignored "11\t222\t3333\t44444\n", "1.2..3...4\n" + "11222333344444\n", }, { "9c", 1, 0, 0, '.', 0, "1\t2\t3\t4\f" + // \f causes a newline and flush "11\t222\t3333\t44444\n", "1234\n" + "11222333344444\n", }, { "9c debug", 1, 0, 0, '.', Debug, "1\t2\t3\t4\f" + // \f causes a newline and flush "11\t222\t3333\t44444\n", "1|2|3|4\n" + "---\n" + "11|222|3333|44444\n", }, { "10a", 5, 0, 0, '.', 0, "1\t2\t3\t4\n", "1....2....3....4\n", }, { "10b", 5, 0, 0, '.', 0, "1\t2\t3\t4\t\n", "1....2....3....4....\n", }, { "11", 8, 0, 1, '.', 0, "本\tb\tc\n" + "aa\t\u672c\u672c\u672c\tcccc\tddddd\n" + "aaa\tbbbb\n", "本.......b.......c\n" + "aa......本本本.....cccc....ddddd\n" + "aaa.....bbbb\n", }, { "12a", 8, 0, 1, ' ', AlignRight, "a\tè\tc\t\n" + "aa\tèèè\tcccc\tddddd\t\n" + "aaa\tèèèè\t\n", " a è c\n" + " aa èèè cccc ddddd\n" + " aaa èèèè\n", }, { "12b", 2, 0, 0, ' ', 0, "a\tb\tc\n" + "aa\tbbb\tcccc\n" + "aaa\tbbbb\n", "a b c\n" + "aa bbbcccc\n" + "aaabbbb\n", }, { "12c", 8, 0, 1, '_', 0, "a\tb\tc\n" + "aa\tbbb\tcccc\n" + "aaa\tbbbb\n", "a_______b_______c\n" + "aa______bbb_____cccc\n" + "aaa_____bbbb\n", }, { "13a", 4, 0, 1, '-', 0, "4444\t日本語\t22\t1\t333\n" + "999999999\t22\n" + "7\t22\n" + "\t\t\t88888888\n" + "\n" + "666666\t666666\t666666\t4444\n" + "1\t1\t999999999\t0000000000\n", "4444------日本語-22--1---333\n" + "999999999-22\n" + "7---------22\n" + "------------------88888888\n" + "\n" + "666666-666666-666666----4444\n" + "1------1------999999999-0000000000\n", }, { "13b", 4, 0, 3, '.', 0, "4444\t333\t22\t1\t333\n" + "999999999\t22\n" + "7\t22\n" + "\t\t\t88888888\n" + 
"\n" + "666666\t666666\t666666\t4444\n" + "1\t1\t999999999\t0000000000\n", "4444........333...22...1...333\n" + "999999999...22\n" + "7...........22\n" + "....................88888888\n" + "\n" + "666666...666666...666666......4444\n" + "1........1........999999999...0000000000\n", }, { "13c", 8, 8, 1, '\t', FilterHTML, "4444\t333\t22\t1\t333\n" + "999999999\t22\n" + "7\t22\n" + "\t\t\t88888888\n" + "\n" + "666666\t666666\t666666\t4444\n" + "1\t1\t999999999\t0000000000\n", "4444\t\t333\t22\t1\t333\n" + "999999999\t22\n" + "7\t\t22\n" + "\t\t\t\t88888888\n" + "\n" + "666666\t666666\t666666\t\t4444\n" + "1\t1\t999999999\t0000000000\n", }, { "14", 1, 0, 2, ' ', AlignRight, ".0\t.3\t2.4\t-5.1\t\n" + "23.0\t12345678.9\t2.4\t-989.4\t\n" + "5.1\t12.0\t2.4\t-7.0\t\n" + ".0\t0.0\t332.0\t8908.0\t\n" + ".0\t-.3\t456.4\t22.1\t\n" + ".0\t1.2\t44.4\t-13.3\t\t", " .0 .3 2.4 -5.1\n" + " 23.0 12345678.9 2.4 -989.4\n" + " 5.1 12.0 2.4 -7.0\n" + " .0 0.0 332.0 8908.0\n" + " .0 -.3 456.4 22.1\n" + " .0 1.2 44.4 -13.3", }, { "14 debug", 1, 0, 2, ' ', AlignRight | Debug, ".0\t.3\t2.4\t-5.1\t\n" + "23.0\t12345678.9\t2.4\t-989.4\t\n" + "5.1\t12.0\t2.4\t-7.0\t\n" + ".0\t0.0\t332.0\t8908.0\t\n" + ".0\t-.3\t456.4\t22.1\t\n" + ".0\t1.2\t44.4\t-13.3\t\t", " .0| .3| 2.4| -5.1|\n" + " 23.0| 12345678.9| 2.4| -989.4|\n" + " 5.1| 12.0| 2.4| -7.0|\n" + " .0| 0.0| 332.0| 8908.0|\n" + " .0| -.3| 456.4| 22.1|\n" + " .0| 1.2| 44.4| -13.3|", }, { "15a", 4, 0, 0, '.', 0, "a\t\tb", "a.......b", }, { "15b", 4, 0, 0, '.', DiscardEmptyColumns, "a\t\tb", // htabs - do not discard column "a.......b", }, { "15c", 4, 0, 0, '.', DiscardEmptyColumns, "a\v\vb", "a...b", }, { "15d", 4, 0, 0, '.', AlignRight | DiscardEmptyColumns, "a\v\vb", "...ab", }, { "16a", 100, 100, 0, '\t', 0, "a\tb\t\td\n" + "a\tb\t\td\te\n" + "a\n" + "a\tb\tc\td\n" + "a\tb\tc\td\te\n", "a\tb\t\td\n" + "a\tb\t\td\te\n" + "a\n" + "a\tb\tc\td\n" + "a\tb\tc\td\te\n", }, { "16b", 100, 100, 0, '\t', DiscardEmptyColumns, "a\vb\v\vd\n" + 
"a\vb\v\vd\ve\n" + "a\n" + "a\vb\vc\vd\n" + "a\vb\vc\vd\ve\n", "a\tb\td\n" + "a\tb\td\te\n" + "a\n" + "a\tb\tc\td\n" + "a\tb\tc\td\te\n", }, { "16b debug", 100, 100, 0, '\t', DiscardEmptyColumns | Debug, "a\vb\v\vd\n" + "a\vb\v\vd\ve\n" + "a\n" + "a\vb\vc\vd\n" + "a\vb\vc\vd\ve\n", "a\t|b\t||d\n" + "a\t|b\t||d\t|e\n" + "a\n" + "a\t|b\t|c\t|d\n" + "a\t|b\t|c\t|d\t|e\n", }, { "16c", 100, 100, 0, '\t', DiscardEmptyColumns, "a\tb\t\td\n" + // hard tabs - do not discard column "a\tb\t\td\te\n" + "a\n" + "a\tb\tc\td\n" + "a\tb\tc\td\te\n", "a\tb\t\td\n" + "a\tb\t\td\te\n" + "a\n" + "a\tb\tc\td\n" + "a\tb\tc\td\te\n", }, { "16c debug", 100, 100, 0, '\t', DiscardEmptyColumns | Debug, "a\tb\t\td\n" + // hard tabs - do not discard column "a\tb\t\td\te\n" + "a\n" + "a\tb\tc\td\n" + "a\tb\tc\td\te\n", "a\t|b\t|\t|d\n" + "a\t|b\t|\t|d\t|e\n" + "a\n" + "a\t|b\t|c\t|d\n" + "a\t|b\t|c\t|d\t|e\n", }, } func Test(t *testing.T) { for _, e := range tests { check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected) } } type panicWriter struct{} func (panicWriter) Write([]byte) (int, error) { panic("cannot write") } func wantPanicString(t *testing.T, want string) { if e := recover(); e != nil { got, ok := e.(string) switch { case !ok: t.Errorf("got %v (%T), want panic string", e, e) case got != want: t.Errorf("wrong panic message: got %q, want %q", got, want) } } } func TestPanicDuringFlush(t *testing.T) { defer wantPanicString(t, "tabwriter: panic during Flush") var p panicWriter w := new(Writer) w.Init(p, 0, 0, 5, ' ', 0) io.WriteString(w, "a") w.Flush() t.Errorf("failed to panic during Flush") } func TestPanicDuringWrite(t *testing.T) { defer wantPanicString(t, "tabwriter: panic during Write") var p panicWriter w := new(Writer) w.Init(p, 0, 0, 5, ' ', 0) io.WriteString(w, "a\n\n") // the second \n triggers a call to w.Write and thus a panic t.Errorf("failed to panic during Write") } ================================================ FILE: cmd/tree.go 
================================================ package cmd import ( "fmt" "sort" "strings" "unicode" "github.com/fatih/color" "github.com/sahib/brig/client" ) var ( treeRunePipe = "│" treeRuneTri = "├" treeRuneBar = "──" treeRuneCorner = "└" ) type treeNode struct { name string order []*treeNode children map[string]*treeNode isLast bool parent *treeNode depth int entry client.StatInfo } type treeCfg struct { showPin bool format func(n *treeNode) string } // This is a very stripped down version of util.Trie.Insert() // but with support for ordering the elements. func (n *treeNode) Insert(entry client.StatInfo) { parts := strings.Split(entry.Path, "/") if len(parts) > 0 && parts[0] == "" { parts = parts[1:] } curr := n currParts := []string{""} for depth, name := range parts { if curr.children == nil { curr.children = make(map[string]*treeNode) } currParts = append(currParts, name) currPath := strings.Join(currParts, "/") child, ok := curr.children[name] if !ok { childEntry := entry if currPath != entry.Path { childEntry = client.StatInfo{ IsPinned: false, Path: currPath, } } child = &treeNode{ name: name, depth: depth + 1, entry: childEntry, } child.isLast = true if len(curr.order) > 0 { curr.order[len(curr.order)-1].isLast = false } child.parent = curr curr.children[name] = child curr.order = append(curr.order, child) } curr = child } } func (n *treeNode) Len() int { return len(n.order) } func (n *treeNode) Swap(i, j int) { n.order[i], n.order[j] = n.order[j], n.order[i] // This is not very clever, but works and is obvious: n.order[i].isLast = i == len(n.order)-1 n.order[j].isLast = j == len(n.order)-1 } func (n *treeNode) Less(i, j int) bool { // Sort case insensitive. 
iRunes := []rune(n.order[i].name) jRunes := []rune(n.order[j].name) for idx := 0; idx < len(iRunes) && idx < len(jRunes); idx++ { ir := iRunes[idx] jr := jRunes[idx] lir := unicode.ToLower(ir) ljr := unicode.ToLower(jr) if lir != ljr { return lir < ljr } if ir != jr { return ir < jr } } return false } func (n *treeNode) Print(cfg *treeCfg) { parents := make([]*treeNode, n.depth) curr := n sort.Sort(n) // You could do probably go upwards and print to // a string buffer for performance, but this is probably // not necessary/critical here. for i := 0; i < n.depth; i++ { parents[n.depth-i-1] = curr curr = curr.parent } for i := 0; i < n.depth; i++ { if i == n.depth-1 { if n.isLast { fmt.Printf("%s", treeRuneCorner) } else { fmt.Printf("%s", treeRuneTri) } } else { if parents[i].isLast { fmt.Printf("%s ", " ") } else { fmt.Printf("%s ", treeRunePipe) } } } // Default to an auto-formatter: format := cfg.format if format == nil { format = func(n *treeNode) string { switch { case n.name == "/": return color.MagentaString("•") case n.entry.IsDir: return " " + color.GreenString(n.name+"/") } return " " + n.name } } prefix := treeRuneBar if n.name == "/" { prefix = "" } formatted := format(n) pinState := "" if cfg.showPin && n.entry.IsPinned { pinState = " " + pinStateToSymbol(n.entry.IsPinned, n.entry.IsExplicit) } fmt.Printf("%s%s%s\n", prefix, formatted, pinState) for _, child := range n.order { child.Print(cfg) } } func showTree(entries []client.StatInfo, cfg *treeCfg) { root := &treeNode{name: "/"} nfiles, ndirs := 0, 0 hasRoot := false for _, entry := range entries { if entry.Path == "/" { root.entry = entry hasRoot = true } else { root.Insert(entry) } if entry.IsDir { ndirs++ } else { nfiles++ } } if !hasRoot { root.entry = client.StatInfo{ Path: "/", } } root.Print(cfg) // Speak understandable english: dirLabel := "directories" if ndirs == 1 { dirLabel = "directory" } fileLabel := "files" if nfiles == 1 { fileLabel = "file" } fmt.Printf("\n%d %s, %d %s\n", ndirs, 
dirLabel, nfiles, fileLabel) } ================================================ FILE: cmd/util.go ================================================ package cmd import ( "bytes" "context" "fmt" "io" "io/ioutil" "math/rand" "net/url" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "text/template" "time" "github.com/fatih/color" "github.com/sahib/brig/client" "github.com/sahib/brig/defaults" "github.com/sahib/brig/util" "github.com/sahib/config" log "github.com/sirupsen/logrus" "github.com/urfave/cli" ) var ( // backend delivers overly descriptive error messages including // the string below. Simply filter this info: rpcErrPattern = regexp.MustCompile(`\s*server/capnp/local_api.capnp.*rpc exception:\s*`) ) // ExitCode is an error that maps the error interface to a specific error // message and a unix exit code type ExitCode struct { Code int Message string } func (err ExitCode) Error() string { return err.Message } func mustAbsPath(path string) string { absPath, err := filepath.Abs(path) if err != nil { fmt.Printf("Failed to get absolute repo path: %v", err) os.Exit(1) } return absPath } func yesify(val bool) string { if val { return color.GreenString("yes") } return color.RedString("no") } func checkmarkify(val bool) string { if val { return color.GreenString("✔") } return "" } // guessRepoFolder tries to find the repository path by using a number of // sources. This helper may call exit when it fails to get the path. func guessRepoFolder(ctx *cli.Context) (string, error) { if ctx.GlobalIsSet("repo") { // No guessing needed, follow user wish. return ctx.GlobalString("repo"), nil } guessLocations := []string{ // TODO: For now just one. 
".", } var lastError error for _, guessLocation := range guessLocations { repoFolder := mustAbsPath(guessLocation) if _, err := os.Stat(filepath.Join(repoFolder, "config.yml")); err != nil { lastError = err continue } return repoFolder, nil } return "", lastError } func openConfig(folder string) (*config.Config, error) { configPath := filepath.Join(folder, "config.yml") cfg, err := defaults.OpenMigratedConfig(configPath) if err != nil { return nil, fmt.Errorf("could not find config: %v", err) } return cfg, nil } func guessDaemonURL(ctx *cli.Context) (string, error) { if ctx.GlobalIsSet("url") { // No guessing needed, follow user wish. return ctx.GlobalString("url"), nil } folder, err := guessRepoFolder(ctx) if err != nil { log.Warnf("note: I don't know where the repository is or cannot read it.") log.Warnf(" I will continue with default values, cross fingers.") log.Warnf(" We recommend to set BRIG_PATH or pass --repo always.") log.Warnf(" Alternatively you can cd to your repository.") return ctx.GlobalString("url"), err } cfg, err := openConfig(folder) if err != nil { // Assume default: return ctx.GlobalString("url"), nil } return cfg.String("daemon.url"), nil } func guessFreeDaemonURL(ctx *cli.Context, owner string) (string, error) { if ctx.GlobalIsSet("url") { // No guessing needed, follow user wish. return ctx.GlobalString("url"), nil } defaultURL := defaults.DaemonDefaultURL() u, err := url.Parse(defaultURL) if err != nil { // this is a programming error panic("invalid hard-coded default daemon url") } switch u.Scheme { case "unix": // Distinguish the path, so that we can have // several daemon on a single system. v := u.Query() v.Add("id", strings.ReplaceAll(owner, "/", "_")) u.RawQuery = v.Encode() return u.String(), nil case "tcp": // Do a best effort by searching for a free port // and use that for the brig repository. // This might be racy, but at least try it. 
port := util.FindFreePort() return fmt.Sprintf("tcp://127.0.0.1:%d", port), nil default: return "", fmt.Errorf("default url has unknown ") } } func prefixSlash(s string) string { if !strings.HasPrefix(s, "/") { return "/" + s } return s } type cmdHandlerWithClient func(ctx *cli.Context, ctl *client.Client) error func getExecutablePath() (string, error) { // NOTE: This might not work on other platforms. // In this case we fall back to LookPath(). exePath, err := os.Readlink("/proc/self/exe") if err != nil { return exec.LookPath("brig") } return filepath.Clean(exePath), nil } func startDaemon(ctx *cli.Context, repoPath, daemonURL string) (*client.Client, error) { stat, err := os.Stat(repoPath) if err != nil { return nil, err } if !stat.IsDir() { return nil, fmt.Errorf("»%s« is not a directory", repoPath) } exePath, err := getExecutablePath() if err != nil { return nil, err } logVerbose(ctx, "using executable path: %s", exePath) logVerbose( ctx, "No Daemon running at %s. Starting daemon from binary: %s", daemonURL, exePath, ) daemonArgs := []string{ "--repo", repoPath, "--url", daemonURL, "daemon", "launch", } argString := fmt.Sprintf("'%s'", strings.Join(daemonArgs, "' '")) logVerbose(ctx, "Starting daemon as: %s %s", exePath, argString) proc := exec.Command(exePath, daemonArgs...) // #nosec proc.Env = append(proc.Env, fmt.Sprintf("PATH=%s", os.Getenv("PATH"))) if err := proc.Start(); err != nil { log.Infof("Failed to start the daemon: %v", err) return nil, err } // This will likely suffice for most cases: time.Sleep(500 * time.Millisecond) warningPrinted := false for i := 0; i < 500; i++ { ctl, err := client.Dial(context.Background(), daemonURL) if err != nil { // Only print this warning once... 
if !warningPrinted && i >= 100 {
				log.Warnf("waiting a bit long for daemon to bootup...")
				warningPrinted = true
			}

			time.Sleep(50 * time.Millisecond)
			continue
		}

		return ctl, nil
	}

	// TYPO FIX: »took to long« → »took too long«; also lowercased,
	// since Go error strings should not be capitalized.
	return nil, fmt.Errorf("daemon could not be started or took too long")
}

// isDaemonRunning reports whether a daemon answers on the URL guessed
// from the context, using a 5 second dial timeout.
func isDaemonRunning(ctx *cli.Context) (bool, error) {
	daemonURL, err := guessDaemonURL(ctx)
	if err != nil {
		return false, err
	}

	tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	ctl, err := client.Dial(tctx, daemonURL)
	if err != nil {
		// Not reachable is not an error here - just "not running".
		return false, nil
	}

	defer ctl.Close()
	return true, nil
}

// withDaemon wraps `handler` so it receives a connected daemon client.
// If `startNew` is true, a daemon is booted on demand.
func withDaemon(handler cmdHandlerWithClient, startNew bool) cli.ActionFunc {
	return func(ctx *cli.Context) error {
		daemonURL, _ := guessDaemonURL(ctx)
		if startNew {
			logVerbose(ctx, "Using url %s to check for running daemon.", daemonURL)
		} else {
			logVerbose(ctx, "Using url %s to connect to existing daemon.", daemonURL)
		}

		// Check if the daemon is running already:
		ctl, err := client.Dial(context.Background(), daemonURL)
		if err == nil {
			defer ctl.Close()
			return handler(ctx, ctl)
		}

		if !startNew {
			// Daemon was not running and we may not start a new one.
return ExitCode{DaemonNotResponding, "Daemon not running"} } // Start the server & pass the password: folder, err := guessRepoFolder(ctx) if err != nil { return ExitCode{ BadArgs, fmt.Sprintf("could not guess folder: %v", err), } } logVerbose(ctx, "starting new daemon in background, on folder '%s'", folder) ctl, err = startDaemon(ctx, folder, daemonURL) if err != nil { return ExitCode{ DaemonNotResponding, fmt.Sprintf("Unable to start daemon: %v", err), } } // Run the actual handler: defer ctl.Close() return handler(ctx, ctl) } } type checkFunc func(ctx *cli.Context) int func withArgCheck(checker checkFunc, handler cli.ActionFunc) cli.ActionFunc { return func(ctx *cli.Context) error { if checker(ctx) != Success { os.Exit(BadArgs) } return handler(ctx) } } func prettyPrintError(err error) string { return rpcErrPattern.ReplaceAllString(err.Error(), " ") } func needAtLeast(min int) checkFunc { return func(ctx *cli.Context) int { if ctx.NArg() < min { if min == 1 { log.Warningf("Need at least %d argument.", min) } else { log.Warningf("Need at least %d arguments.", min) } if err := cli.ShowCommandHelp(ctx, ctx.Command.Name); err != nil { log.Warningf("Failed to display --help: %v", err) } return BadArgs } return Success } } func isNonEmptyDir(dir string) (bool, error) { fd, err := os.Open(dir) // #nosec if err != nil && os.IsNotExist(err) { return false, nil } if err != nil { return false, err } names, err := fd.Readdirnames(-1) if err != nil { return false, err } // dumb heuristic: if there's stuff in there, // assume we shouldn't init over. return len(names) >= 1, nil } // tempFileWithSuffix works the same as ioutil.TempFile(), // but allows for the addition of a suffix to the filepath. // This has the nice side effect that some editors can recognize // the filetype based on the ending and provide you syntax highlighting. 
// (this is used in edit() below) func tempFileWithSuffix(dir, prefix, suffix string) (f *os.File, err error) { if dir == "" { dir = os.TempDir() } for i := 0; i < 10000; i++ { mid := strconv.Itoa(rand.Int()) // #nosec name := filepath.Join(dir, prefix+mid+suffix) f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if os.IsExist(err) { continue } break } return } // editToPath opens up $EDITOR with `data` and saves the edited data // to a temporary path that is then returned. func editToPath(data []byte, suffix string) (string, error) { editor := os.Getenv("EDITOR") if editor == "" { // It makes my heart bleed, but assume that vi is too hard // for the majority I've met & that might use brig. editor = "nano" } fd, err := tempFileWithSuffix("", "brig-cmd-buffer-", suffix) if err != nil { return "", err } doDelete := false // Make sure it gets cleaned up. defer func() { if doDelete { if err := os.Remove(fd.Name()); err != nil { fmt.Printf("Failed to remove temp file: %v\n", err) } } if err := fd.Close(); err != nil { fmt.Printf("Failed to close file: %v\n", err) } }() if _, err := fd.Write(data); err != nil { return "", err } // Launch editor and hook it up with all necessary fds: cmd := exec.Command(editor, fd.Name()) // #nosec cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { doDelete = true return "", fmt.Errorf("Running $EDITOR (%s) failed: %v", editor, err) } if _, err := fd.Seek(0, io.SeekStart); err != nil { doDelete = true return "", err } return fd.Name(), nil } // edit opens up $EDITOR with `data` and returns the edited data. 
func edit(data []byte, suffix string) ([]byte, error) { tempPath, err := editToPath(data, suffix) if err != nil { return nil, err } defer func() { if err := os.Remove(tempPath); err != nil { fmt.Printf("Failed to remove temp file: %v\n", err) } }() newData, err := ioutil.ReadFile(tempPath) // #nosec if err != nil { return nil, err } // Some editors might add a trailing newline: return bytes.TrimRight(newData, "\n"), nil } // parseDuration tries to convert the string `s` to // a duration in seconds (+ fractions). // It uses time.ParseDuration() internally, but allows // whole numbers which are counted as seconds. func parseDuration(s string) (float64, error) { sec, err := strconv.ParseFloat(s, 64) if err == nil { return sec, nil } dur, err := time.ParseDuration(s) if err != nil { return 0.0, err } return float64(dur) / float64(time.Second), nil } func readFormatTemplate(ctx *cli.Context) (*template.Template, error) { if ctx.IsSet("format") { source := ctx.String("format") + "\n" tmpl, err := template.New("format").Parse(source) if err != nil { return nil, err } return tmpl, nil } return nil, nil } func pinStateToSymbol(isPinned, isExplicit bool) string { if isPinned { colorFn := color.CyanString if isExplicit { colorFn = color.MagentaString } return colorFn("✔") } return "" } func yesOrNo(v bool) string { if v { return color.GreenString("yes") } return color.RedString("no") } type logWriter struct{ prefix string } func (lw *logWriter) Write(buf []byte) (int, error) { log.Infof("%s: %s", lw.prefix, string(bytes.TrimSpace(buf))) return len(buf), nil } ================================================ FILE: cmd/vcs_handlers.go ================================================ package cmd import ( "fmt" "os" "path" "path/filepath" "sort" "strings" "time" "github.com/sahib/brig/cmd/tabwriter" "github.com/fatih/color" "github.com/sahib/brig/client" "github.com/urfave/cli" ) func handleReset(ctx *cli.Context, ctl *client.Client) error { force := ctx.Bool("force") rev := 
ctx.Args().First() path := "" if len(ctx.Args()) > 1 { path = ctx.Args().Get(1) } if err := ctl.Reset(path, rev, force); err != nil { return ExitCode{UnknownError, fmt.Sprintf("reset: %v", err)} } return nil } func commitName(cmt *client.Commit) string { if cmt == nil { return "" } if len(cmt.Tags) > 0 { return strings.ToUpper(cmt.Tags[0]) } return cmt.Hash.ShortB58() } func handleHistory(ctx *cli.Context, ctl *client.Client) error { path := ctx.Args().First() history, err := ctl.History(path) if err != nil { return ExitCode{UnknownError, fmt.Sprintf("history: %v", err)} } if _, err := ctl.Stat(path); err != nil { fmt.Printf("%s %s", color.YellowString("WARNING:"), `This file is not part of this commit, but there's still history for it. Most likely this file was moved or removed in the past. `) } tabW := tabwriter.NewWriter( os.Stdout, 0, 0, 2, ' ', tabwriter.StripEscape, ) containsMoves := false for _, entry := range history { for _, detail := range entry.Mask { if detail == "moved" { containsMoves = true break } } if containsMoves { break } } if len(history) != 0 { if containsMoves { fmt.Fprintf(tabW, "CHANGE\tFROM\tTO\tHOW\tWHEN\tPIN\t\n") } else { fmt.Fprintf(tabW, "CHANGE\tFROM\tTO\t\tWHEN\tPIN\t\n") } } for _, entry := range history { what := "" printLine := true for _, detail := range entry.Mask { // If it was moved, let's display what moved. if detail == "moved" { src := entry.WasPreviouslyAt dst := entry.Path if entry.MovedTo != "" { dst = entry.MovedTo } what = fmt.Sprintf( "%s → %s", color.RedString(src), color.RedString(dst), ) } // Only display empty changes if nothing happened. 
if detail == "none" && !ctx.Bool("empty") { printLine = false } } if !printLine { continue } changeDesc := color.YellowString(strings.Join(entry.Mask, ", ")) when := color.MagentaString(entry.Head.Date.Format(time.UnixDate)) fmt.Fprintf( tabW, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", changeDesc, color.CyanString(commitName(entry.Next)), color.GreenString(commitName(entry.Head)), what, when, pinStateToSymbol(entry.IsPinned, entry.IsExplicit), entry.Head.Msg, ) } return tabW.Flush() } // makePathAbbrev tries to abbreviate the `dst` path if // both are in the same directory. func makePathAbbrev(srcNd, dstNd client.StatInfo) string { src, dst := srcNd.Path, dstNd.Path if path.Dir(src) == path.Dir(dst) { dstBase := path.Base(dst) if dstNd.IsDir { return dstBase + "/" } return dstBase } relPath, err := filepath.Rel(path.Dir(src), dst) if err != nil { fmt.Println("Failed to get relative path: ", err) if dstNd.IsDir { return dst + "/" } return dst } // We could also possibly check here if relPath is longer than dst // and only display the relative version then. But being consistent // is more valuable here I think. 
if dstNd.IsDir { return relPath + "/" } return relPath } func suffixIfDir(nd *treeNode) string { if nd.entry.IsDir { return nd.name + "/" } return nd.name } const ( diffTypeNone = iota diffTypeAdded diffTypeRemoved diffTypeMissing diffTypeMoved diffTypeIgnored diffTypeConflict diffTypeMerged ) type diffEntry struct { typ int pair client.DiffPair } // Called to format each name in the resulting tree: func printDiffTreeLineFormatter(types map[string]diffEntry, n *treeNode) string { if n.name == "/" { return color.MagentaString("•") } if diffEntry, ok := types[n.entry.Path]; ok { switch diffEntry.typ { case diffTypeAdded: return color.GreenString(" + " + "▩ ← " + suffixIfDir(n)) case diffTypeRemoved: return color.RedString(" - " + suffixIfDir(n) + " ← ▩") case diffTypeMissing: return color.MagentaString(" _ " + suffixIfDir(n) + " → ▩") case diffTypeIgnored: return color.YellowString(" * " + suffixIfDir(n)) case diffTypeMoved: srcPath := makePathAbbrev(diffEntry.pair.Dst, diffEntry.pair.Src) dstBase := path.Base(diffEntry.pair.Dst.Path) if diffEntry.pair.Src.IsDir { dstBase += "/" } return color.CyanString(fmt.Sprintf(" %s ↔ %s", dstBase, srcPath)) case diffTypeMerged: dstPath := makePathAbbrev(diffEntry.pair.Dst, diffEntry.pair.Src) srcBase := path.Base(diffEntry.pair.Src.Path) if diffEntry.pair.Src.IsDir { srcBase += "/" } // Attempt to figure out which way merge should go // based on modification times. // This information was available at resolver time in the PairDiff // but server returns simplified PairDiff without modification masks. 
srcModTime := diffEntry.pair.Src.ModTime dstModTime := diffEntry.pair.Dst.ModTime var mergeSymbol string = color.MagentaString("→") if srcModTime.After(dstModTime) { mergeSymbol = color.GreenString("←") } return color.WhiteString(fmt.Sprintf(" %s %s %s ", dstPath, mergeSymbol, srcBase)) case diffTypeConflict: dstPath := makePathAbbrev(diffEntry.pair.Dst, diffEntry.pair.Src) srcBase := path.Base(diffEntry.pair.Src.Path) if diffEntry.pair.Src.IsDir { srcBase += "/" } return color.MagentaString(fmt.Sprintf(" %s ⚡%s", dstPath, srcBase)) } } return n.name } func printDiffTree(diff *client.Diff, printMissing bool) { entries := []client.StatInfo{} types := make(map[string]diffEntry) // Singular types: for _, info := range diff.Added { types[info.Path] = diffEntry{typ: diffTypeAdded} entries = append(entries, info) } for _, info := range diff.Removed { types[info.Path] = diffEntry{typ: diffTypeRemoved} entries = append(entries, info) } if printMissing { for _, info := range diff.Missing { types[info.Path] = diffEntry{typ: diffTypeMissing} entries = append(entries, info) } } for _, info := range diff.Ignored { types[info.Path] = diffEntry{typ: diffTypeIgnored} entries = append(entries, info) } // Pair types: for _, pair := range diff.Moved { types[pair.Dst.Path] = diffEntry{ typ: diffTypeMoved, pair: pair, } entries = append(entries, pair.Dst) } for _, pair := range diff.Conflict { types[pair.Dst.Path] = diffEntry{ typ: diffTypeConflict, pair: pair, } entries = append(entries, pair.Dst) } for _, pair := range diff.Merged { types[pair.Dst.Path] = diffEntry{ typ: diffTypeMerged, pair: pair, } entries = append(entries, pair.Dst) } if len(entries) == 0 { // Nothing to show: return } sort.Slice(entries, func(i, j int) bool { return entries[i].Path < entries[j].Path }) // Render the tree: showTree(entries, &treeCfg{ format: func(n *treeNode) string { return printDiffTreeLineFormatter(types, n) }, showPin: false, }) } func isEmptyDiff(diff *client.Diff) bool { return 0 == 0+ 
len(diff.Added)+ len(diff.Conflict)+ len(diff.Ignored)+ len(diff.Merged)+ len(diff.Missing)+ len(diff.Moved)+ len(diff.Removed) } func printDiff(diff *client.Diff, printMissing bool) { simpleSection := func(heading string, infos []client.StatInfo) { if len(infos) == 0 { return } fmt.Println(heading) for _, info := range infos { path := info.Path if info.IsDir { path += "/" } fmt.Printf(" %s\n", path) } fmt.Println() } pairSection := func(heading, symbol string, infos []client.DiffPair) { if len(infos) == 0 { return } fmt.Println(heading) for _, pair := range infos { srcPath := pair.Src.Path if pair.Src.IsDir { srcPath += "/" } dstPath := pair.Dst.Path if pair.Dst.IsDir { dstPath += "/" } if pair.Src.Path != pair.Dst.Path { fmt.Printf(" %s %s %s\n", dstPath, symbol, srcPath) } else { fmt.Printf(" %s %s\n", symbol, srcPath) } } fmt.Println() } var addedAtRemote []client.DiffPair for _, src := range diff.Added { var pair client.DiffPair pair.Dst.Path = "▩" pair.Src = src addedAtRemote = append(addedAtRemote, pair) } pairSection(color.GreenString("Added:"), "←", addedAtRemote) simpleSection(color.YellowString("Ignored:"), diff.Ignored) var removedAtRemote []client.DiffPair for _, dst := range diff.Removed { var pair client.DiffPair pair.Dst = dst pair.Src.Path = "▩" removedAtRemote = append(removedAtRemote, pair) } pairSection(color.RedString("Removed:"), "←", removedAtRemote) // split diff.Merged to changedLocally and changedRemotely arrays var changedLocally, changedRemotely []client.DiffPair for _, pair := range diff.Merged { srcModTime := pair.Src.ModTime dstModTime := pair.Dst.ModTime if srcModTime.After(dstModTime) { changedRemotely = append(changedRemotely, pair) } else { changedLocally = append(changedLocally, pair) } } if printMissing { var missedAtRemote []client.DiffPair for _, dst := range diff.Missing { var pair client.DiffPair pair.Dst = dst pair.Src.Path = "▩" missedAtRemote = append(missedAtRemote, pair) } pairSection(color.RedString("Missing:"), "→", 
missedAtRemote) } pairSection(color.CyanString("Moved:"), "↔", diff.Moved) pairSection(color.WhiteString("Changed Locally:"), "→", changedLocally) pairSection(color.WhiteString("Changed Remotely:"), "←", changedRemotely) pairSection(color.MagentaString("Conflicts:"), "⚡", diff.Conflict) } func handleDiff(ctx *cli.Context, ctl *client.Client) error { if ctx.NArg() > 4 { fmt.Println("More than four arguments can't be handled.") } self, err := ctl.Whoami() if err != nil { return err } localName := self.CurrentUser remoteName := self.CurrentUser remoteRev := "CURR" localRev := "CURR" nArgs := ctx.NArg() if nArgs == 0 { // Special case: When typing brig diff we want to show // the diff from our CURR to HEAD only. localRev = "HEAD" } if ctx.Bool("self") { switch { case nArgs >= 2: localRev = ctx.Args().Get(1) fallthrough case nArgs >= 1: remoteRev = ctx.Args().Get(0) } } else { switch { case nArgs >= 4: localRev = ctx.Args().Get(3) fallthrough case nArgs >= 3: remoteRev = ctx.Args().Get(2) fallthrough case nArgs >= 2: localName = ctx.Args().Get(1) fallthrough case nArgs >= 1: remoteName = ctx.Args().Get(0) } } needFetch := !ctx.Bool("offline") diff, err := ctl.MakeDiff(localName, remoteName, localRev, remoteRev, needFetch) if err != nil { return ExitCode{UnknownError, fmt.Sprintf("diff: %v", err)} } printMissing := ctx.Bool("missing") if ctx.Bool("list") { printDiff(diff, printMissing) } else { printDiffTree(diff, printMissing) } return nil } func handleFetch(ctx *cli.Context, ctl *client.Client) error { who := ctx.Args().First() return ctl.Fetch(who) } func handleSync(ctx *cli.Context, ctl *client.Client) error { if len(ctx.Args()) > 0 { return handleSyncSingle(ctx, ctl, ctx.Args().First()) } remotes, err := ctl.RemoteLs() if err != nil { return err } for _, rmt := range remotes { _, err := ctl.RemotePing(rmt.Name) if err != nil { fmt.Printf("Cannot reach %s..\n", rmt.Name) continue } fmt.Printf("Syncing with `%s`...\n", rmt.Name) if err := handleSyncSingle(ctx, ctl, 
rmt.Name); err != nil {
			return err
		}
	}

	return nil
}

// handleSyncSingle syncs with one remote and prints the resulting
// diff (unless --quiet was given).
func handleSyncSingle(ctx *cli.Context, ctl *client.Client, remoteName string) error {
	needFetch := true
	if ctx.Bool("no-fetch") {
		needFetch = false
	}

	// BUGFIX: the --quiet check previously happened *before* ctl.Sync(),
	// which made »sync --quiet« skip the sync entirely. The flag should
	// only suppress the output, not the operation itself.
	diff, err := ctl.Sync(remoteName, needFetch)
	if err != nil {
		return err
	}

	if ctx.Bool("quiet") {
		return nil
	}

	if isEmptyDiff(diff) {
		fmt.Println("Nothing changed.")
		return nil
	}

	printDiff(diff, false)
	return nil
}

// handleStatus shows what changed between HEAD and CURR of the
// currently viewed user.
func handleStatus(ctx *cli.Context, ctl *client.Client) error {
	self, err := ctl.Whoami()
	if err != nil {
		return err
	}

	curr := self.CurrentUser
	diff, err := ctl.MakeDiff(curr, curr, "HEAD", "CURR", false)
	if err != nil {
		return err
	}

	if ctx.Bool("tree") {
		printDiffTree(diff, false)
	} else {
		printDiff(diff, false)
	}

	return nil
}

// handleBecome switches the current view to another user's data
// (or back to the owner with --self).
func handleBecome(ctx *cli.Context, ctl *client.Client) error {
	becomeSelf := ctx.Bool("self")
	if !becomeSelf && ctx.NArg() < 1 {
		return fmt.Errorf("become needs at least one argument without -s")
	}

	whoami, err := ctl.Whoami()
	if err != nil {
		return err
	}

	who := ctx.Args().First()
	if becomeSelf {
		who = whoami.Owner
	}

	if whoami.CurrentUser == who {
		fmt.Printf("You are already %s.\n", color.GreenString(who))
		return nil
	}

	if err := ctl.Become(who); err != nil {
		return err
	}

	suffix := "Everything is read only."
	if who == whoami.Owner {
		suffix = "Welcome back!"
	}

	fmt.Printf(
		"You are viewing %s's data now. 
%s\n", color.GreenString(who), suffix, ) return nil } func handleCommit(ctx *cli.Context, ctl *client.Client) error { var msg string // Build the message: if ctx.IsSet("message") { msg = ctx.String("message") } else if len(ctx.Args()) >= 1 { msg = strings.Join(ctx.Args(), " ") } else { msg = "manual commit" } // Send the commit: if err := ctl.MakeCommit(msg); err != nil { return ExitCode{UnknownError, fmt.Sprintf("commit: %v", err)} } return nil } func handleTag(ctx *cli.Context, ctl *client.Client) error { if ctx.Bool("delete") { name := ctx.Args().Get(0) if err := ctl.Untag(name); err != nil { return ExitCode{ UnknownError, fmt.Sprintf("untag: %v", err), } } } else { if len(ctx.Args()) < 2 { return ExitCode{BadArgs, "tag needs at least two arguments"} } rev := ctx.Args().Get(0) name := ctx.Args().Get(1) if err := ctl.Tag(rev, name); err != nil { return ExitCode{ UnknownError, fmt.Sprintf("tag: %v", err), } } } return nil } func handleLog(ctx *cli.Context, ctl *client.Client) error { entries, err := ctl.Log() if err != nil { return ExitCode{UnknownError, fmt.Sprintf("commit: %v", err)} } tmpl, err := readFormatTemplate(ctx) if err != nil { return err } for _, entry := range entries { if tmpl != nil { if err := tmpl.Execute(os.Stdout, entry); err != nil { return err } continue } tags := "" isCurr := false if len(entry.Tags) > 0 { tags = fmt.Sprintf(" (%s)", strings.Join(entry.Tags, ", ")) for _, tag := range entry.Tags { if tag == "curr" { isCurr = true break } } } msg := entry.Msg if msg == "" { msg = color.RedString("•") } entry.Hash.ShortB58() commitHash := entry.Hash.ShortB58() if isCurr { commitHash = " - " } fmt.Printf( "%s %s %s%s\n", color.GreenString(commitHash), color.YellowString(entry.Date.Format(time.UnixDate)), msg, color.CyanString(tags), ) } return nil } ================================================ FILE: defaults/defaults.go ================================================ package defaults import ( "os" e "github.com/pkg/errors" 
"github.com/sahib/config" ) // CurrentVersion is the current version of brig's config const CurrentVersion = 0 // Defaults is the default validation for brig var Defaults = DefaultsV0 // OpenMigratedConfig takes the config.yml at path and loads it. // If required, it also migrates the config structure to the newest // version - brig can always rely on the latest config keys to be present. func OpenMigratedConfig(path string) (*config.Config, error) { fd, err := os.Open(path) // #nosec if err != nil { return nil, e.Wrapf(err, "failed to open config path %s", path) } defer fd.Close() // NOTE: Add here any migrations with mgr.Add if needed. mgr := config.NewMigrater(CurrentVersion, config.StrictnessPanic) mgr.Add(0, nil, DefaultsV0) cfg, err := mgr.Migrate(config.NewYamlDecoder(fd)) if err != nil { return nil, e.Wrap(err, "failed to migrate or open") } return cfg, nil } ================================================ FILE: defaults/defaults_v0.go ================================================ package defaults import ( "errors" "net/url" "runtime" "github.com/sahib/config" ) // DaemonDefaultURL returns the default URL for the current OS. func DaemonDefaultURL() string { // If the platform supports unix sockets, // we should make use of it. switch runtime.GOOS { case "linux", "darwin": // See "man 7 unix" - we use an abstract unix domain socket. // This means there is no socket file on the file system. // (other tools use unix:@/path, but Go does not support that notation) // This also means that there are no user rights on the socket file. // If you need this, specify the url in the config. 
return "unix:/tmp/brig.socket?abstract=true" default: return "tcp://127.0.0.1:6666" } } func urlValidator(val interface{}) error { s, ok := val.(string) if !ok { return errors.New("url is not an string") } _, err := url.Parse(s) return err } // DefaultsV0 is the default config validation for brig var DefaultsV0 = config.DefaultMapping{ "daemon": config.DefaultMapping{ "url": config.DefaultEntry{ Default: DaemonDefaultURL(), NeedsRestart: true, Docs: "URL of the daemon process.", Validator: urlValidator, }, "ipfs_path_or_url": config.DefaultEntry{ Default: "", NeedsRestart: true, Docs: "URL or path to the IPFS repository you want to use.", }, "enable_pprof": config.DefaultEntry{ Default: true, NeedsRestart: true, Docs: "Enable a ppropf profile server on startup (see »brig d p --help«)", }, }, "events": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: true, NeedsRestart: false, Docs: "Wether we should handle incoming events and publish auto update events.", }, "recv_interval": config.DefaultEntry{ Default: "100ms", NeedsRestart: false, Docs: "Time window in which events are buffered before handling them.", }, "recv_max_events_per_second": config.DefaultEntry{ Default: 0.5, NeedsRestart: false, Docs: "How many incoming events per second to process at max.", }, "send_interval": config.DefaultEntry{ Default: "200ms", NeedsRestart: false, Docs: "Time window in which events are buffered before sending them.", }, "send_max_events_per_second": config.DefaultEntry{ Default: 5.0, NeedsRestart: false, Docs: "How many outgoing events per second to send out at max", }, }, "gateway": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "Wether the gateway should be running. 
Will start when enabled.", }, "port": config.DefaultEntry{ Default: 6001, NeedsRestart: false, Docs: "On what port the gateway runs on.", }, "ui": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: true, NeedsRestart: false, Docs: "Enable the UI. This does not affect the /get endpoint.", }, "debug_mode": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "Enable debug mode (load resources from filesystem).", }, }, "auth": config.DefaultMapping{ "anon_allowed": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "Wether a login is required.", }, "anon_user": config.DefaultEntry{ Default: "anon", NeedsRestart: false, Docs: "What user to copy settings (folder, rights etc.) from.", }, "session-encryption-key": config.DefaultEntry{ Default: "", NeedsRestart: true, Docs: "Encryption key for session cookies. Generated when left empty.", }, "session-authentication-key": config.DefaultEntry{ Default: "", NeedsRestart: true, Docs: "Authentication key for session cookies. Generated when left empty.", }, "session-csrf-key": config.DefaultEntry{ Default: "", NeedsRestart: true, Docs: "Key used for CSRF protection. Generated if empty.", }, }, }, "fs": config.DefaultMapping{ "sync": config.DefaultMapping{ "ignore_removed": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "Do not remove what the remote removed.", }, "ignore_moved": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "Do not move what the remote moved", }, "pin_added": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "Do not pin files which were added at the remote", }, "conflict_strategy": config.DefaultEntry{ Default: "marker", NeedsRestart: false, Validator: config.EnumValidator( "marker", "ignore", "embrace", ), Docs: `What strategy to apply in case of conflicts: * marker: Create a conflict file with the remote's version. * ignore: Ignore the remote version completely and keep our version. 
* embrace: Take the remote version and replace ours with it. `, }, }, "pre_cache": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: "pre-cache files up-on pinning.", }, }, "pagecache": config.DefaultMapping{ "max_memory": config.DefaultEntry{ Default: "1G", NeedsRestart: true, Docs: "Consume at max this amount of memory for the pagecache", }, "l2compress": config.DefaultEntry{ Default: true, NeedsRestart: true, Docs: "Compress swapped pages over max_memory before going to disk", }, }, "repin": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: true, NeedsRestart: false, Docs: "Perform repinning to reclaim space (see »brig pin repin --help«)", }, "interval": config.DefaultEntry{ Default: "15m", NeedsRestart: false, Docs: "In what time interval to trigger repinning automatically.", Validator: config.DurationValidator(), }, "quota": config.DefaultEntry{ Default: "5GB", NeedsRestart: false, Docs: `Maximum stored amount of pinned files to have. If the quota limit is hit, old versions of a file are unpinned first on the next repin. Biggest file first. `, }, "min_depth": config.DefaultEntry{ Default: 1, NeedsRestart: false, Docs: `Keep at least »n« versions of a pinned file, even if this would exceed the quota.`, }, "max_depth": config.DefaultEntry{ Default: 10, NeedsRestart: false, Docs: `Keep at max »n« versions of a pinned file and remove it even if it does not exceed quota.`, }, "pin_unpinned": config.DefaultEntry{ Default: false, NeedsRestart: false, Docs: `Pin unpinned files: * 'true' if you want maximum permitted mirroring * 'false' if you want to save traffic If a file version »n« is such that (min_depth <= »n« < max_depth), then the repinner will pin such version if pin_unpinned is set to true. Otherwise, it will keep the file unpinned, i.e. not cached at the backend. 
`, }, }, "autocommit": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: true, NeedsRestart: false, Docs: "Wether to make automatic commits in a fixed interval.", }, "interval": config.DefaultEntry{ Default: "5m", NeedsRestart: false, Docs: "In what interval to make automatic commits.", Validator: config.DurationValidator(), }, }, }, "repo": config.DefaultMapping{ "current_user": config.DefaultEntry{ Default: "", NeedsRestart: false, Docs: "The repository owner that is published to the outside.", }, "autogc": config.DefaultMapping{ "enabled": config.DefaultEntry{ Default: true, NeedsRestart: false, Docs: "Wether to make automatic commits in a fixed interval.", }, "interval": config.DefaultEntry{ Default: "60m", NeedsRestart: false, Docs: "In what interval to make automatic commits.", Validator: config.DurationValidator(), }, }, }, "mounts": config.DefaultMapping{ // This key stands for the fstab name entry: "__many__": config.DefaultMapping{ "path": config.DefaultEntry{ Default: "", NeedsRestart: true, Docs: "The place where the mount path can be found.", }, "read_only": config.DefaultEntry{ Default: false, NeedsRestart: true, Docs: "Wether this mount should be done read-only.", }, "offline": config.DefaultEntry{ Default: false, NeedsRestart: true, Docs: "Error out on remote files early if set true.", }, "root": config.DefaultEntry{ Default: "/", NeedsRestart: true, Docs: "The virtual root of the mount.", }, }, }, } ================================================ FILE: docs/.gitignore ================================================ _build/ ================================================ FILE: docs/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = brig SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". 
help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: docs/_static/css/custom.css ================================================ span.strikethrough { text-decoration: line-through; } a { color: #1866bc !important; } #navbar a { color: #18bc9c !important; } .alert-info { background-color: #5cb7f5a1; color: #333; } .alert-warning { background-color: #f39c1281; color: #333; } pre { color: #4e5858; } .form-control { padding: 1px 2px; height: 35px; margin-top: 5px; } .navbar-brand { font-weight: 900; } .caption-text { font-weight: 900; } #sidebar > li { display: block; } #sidebar { padding-left: 10px; position: relative; } /* Make the ``brig`` markup in page color */ .pre { color: #1866bc !important; color: #18bc9c !important; } /* Maket the selected item in the sidebar a little darker */ a.current { background-color: #00000011; } tbody { text-align: left !important; } ================================================ FILE: docs/asciinema/1_init.json ================================================ { "version": 1, "width": 119, "height": 29, "duration": 27.933128, "command": null, "title": null, "env": { "TERM": "xterm-256color", "SHELL": "/bin/zsh" }, "stdout": [ [ 0.244236, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 2.8e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007" ], [ 0.000153, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 6e-05, "\u001b[?1h\u001b=" ], [ 0.000183, "\u001b[?2004h" ], [ 1.163009, "\u001b[4mm\u001b[24m" ], [ 0.168255, "\b\u001b[24m\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[0m\u001b[39m" ], [ 0.127064, 
"\b\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[1m\u001b[31md\u001b[0m\u001b[39m" ], [ 0.08503, "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.077231, "\b\b\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mk\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[32mr\u001b[39m" ], [ 0.091295, " " ], [ 0.118571, "r" ], [ 0.054292, "e" ], [ 0.048684, "p" ], [ 0.142053, "o" ], [ 0.105353, " " ], [ 0.190366, "&" ], [ 0.148164, "&" ], [ 0.098179, " " ], [ 0.108353, "\u001b[4mc\u001b[24m" ], [ 0.082543, "\b\u001b[24m\u001b[32mc\u001b[32md\u001b[39m" ], [ 0.066756, " " ], [ 0.402253, "r" ], [ 0.058028, "e" ], [ 0.070225, "p" ], [ 0.139698, "o" ], [ 0.175843, "\u001b[?1l\u001b>" ], [ 0.007316, "\u001b[?2004l\r\r\n" ], [ 0.002185, "\u001b]2;mkdir repo && cd repo\u0007\u001b]1;mkdir\u0007" ], [ 0.004325, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000382, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007" ], [ 0.000119, "\u001b]1;..rig/docs/repo\u0007" ], [ 0.000282, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 0.000451, "\u001b[?1h\u001b=" ], [ 0.001252, "\u001b[?2004h" ], [ 0.533999, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.099738, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.094967, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.100018, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.060099, " " ], [ 0.380891, "i" ], [ 0.221134, "n" ], [ 0.147805, "i" ], [ 0.041873, "t" ], [ 0.076952, " " ], [ 0.085267, "s" ], [ 0.061555, "a" ], [ 0.042689, "h" ], [ 0.150819, "i" ], [ 0.071275, "b" ], [ 0.438737, "@" ], [ 0.118104, "o" ], [ 0.169356, "n" ], [ 0.388506, "\b \b" ], [ 0.226276, "w" ], [ 0.136792, "\b \b" ], [ 0.140039, "\b \b" ], [ 0.100199, "w" ], [ 0.074795, "a" ], [ 0.089204, "l" ], [ 0.130848, "d" ], [ 0.253754, "." 
], [ 0.112072, "d" ], [ 0.047147, "e" ], [ 0.221917, "/" ], [ 0.861882, "l" ], [ 0.245981, "a" ], [ 0.150465, "p" ], [ 0.178583, "t" ], [ 0.064085, "o" ], [ 0.149899, "p" ], [ 0.167082, "\u001b[?1l\u001b>" ], [ 0.004906, "\u001b[?2004l\r\r\n" ], [ 0.001659, "\u001b]2;brig init sahib@wald.de/laptop\u0007\u001b]1;brig\u0007" ], [ 0.122907, "\u001b[32m18.02.2018/16:03:09 ⚐\u001b[0m No Daemon running. Starting daemon from binary: /home/sahib/go/bin/brig\r\n" ], [ 0.202807, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m \b" ], [ 1.232412, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m\u0000" ], [ 0.000696, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m\u0000" ], [ 0.30993, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m\u0000\u0000" ], [ 0.000123, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m\u0000\u0000" ], [ 0.149752, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m\u0000\u0000\u0000" ], [ 0.000538, "\u001b[J\u001b[2K\r\u001b[31m⊠ New passphrase: \u001b[0m\u0000\u0000\u0000" ], [ 0.116645, "\u001b[J\u001b[2K\r" ], [ 8.2e-05, "\u001b[31m⊠ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000" ], [ 0.002332, "\u001b[J\u001b[2K\r\u001b[35m⊟ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000" ], [ 0.21385, "\u001b[J\u001b[2K\r" ], [ 0.000168, "\u001b[35m⊟ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000" ], [ 0.000549, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000" ], [ 0.128442, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.00057, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.116187, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.000762, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.192273, "\u001b[J\u001b[2K\r\u001b[32m⚿ New 
passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.364106, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.000363, "\u001b[J\u001b[2K\r\u001b[32m⚿ New passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\r\n" ], [ 0.00064, "Well done! Please re-type your password now for safety:\r\n" ], [ 0.000402, "\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m \b" ], [ 0.380539, "\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000" ], [ 0.000573, "\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000" ], [ 0.29352, "\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000\u0000\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000\u0000" ], [ 0.150925, "\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000\u0000\u0000" ], [ 0.114661, "\u001b[J\u001b[2K\r\u001b[31m⊠ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000" ], [ 0.0005, "\u001b[J\u001b[2K\r\u001b[35m⊟ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000" ], [ 0.213871, "\u001b[J\u001b[2K\r\u001b[35m⊟ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000" ], [ 0.000542, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000" ], [ 0.129238, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.000605, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.121982, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.001004, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.101944, 
"\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.001069, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000" ], [ 0.494948, "\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001b[J\u001b[2K\r\u001b[32m⚿ Retype passphrase: \u001b[0m\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\r\n" ], [ 0.00067, "Estimated time needed to crack password: \u001b[34m12.0 months\u001b[0m\r\n\u001b[J\u001b[2K\r" ], [ 3.187785, "\r\n _____ / /\\ ___ / /\\ \r\n / /::\\ / /::\\ / /\\ / /:/_\r\n / /:/\\:\\ / /:/\\:\\ / /:/ / /:/ /\\ \r\n / /:/~/::\\ / /:/~/:/ /__/::\\ / /:/_/::\\ \r\n /__/:/ /:/\\:| /__/:/ /:/___ \\__\\/\\:\\__ /__/:/__\\/\\:\\\r\n \\ \\:\\/:/~/:/ \\ \\:\\/:::::/ \\ \\:\\/\\ \\ \\:\\ /~~/:/\r\n \\ \\::/ /:/ \\ \\::/~~~~ \\__\\::/ \\ \\:\\ /:/\r\n \\ \\:\\/:/ \\ \\:\\ /__/:/ \\ \\:\\/:/\r\n \\ \\::/ \\ \\:\\ \\__\\/ \\ \\::/\r\n \\__\\/ \\__\\/ \\__\\/\r\n\r\n\r\n A new file README.md was automatically added.\r\n Use 'brig cat README.md' to view it & get started.\r\n\r\n\r\n" ], [ 0.002162, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000148, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007\u001b]1;..rig/docs/repo\u0007" ], [ 5.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 0.000107, "\u001b[?1h\u001b=" ], [ 0.000278, "\u001b[?2004h" ], [ 0.916781, "\u001b[1m\u001b[31mn\u001b[0m\u001b[39m" ], [ 0.16261, "\b\u001b[1m\u001b[31mn\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.078733, "\b\b\u001b[1m\u001b[31mn\u001b[1m\u001b[31mr\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" ], [ 0.112002, "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mg\u001b[0m\u001b[39m" ], [ 0.06899, " " ], [ 0.171648, "\b" ], [ 0.248889, "\b\b\b\b\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \b\b\b\b" ], [ 0.270981, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 
0.130296, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.110821, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.120156, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.064046, " " ], [ 0.535157, "\u001b[4ml\u001b[24m" ], [ 0.112487, "\b\u001b[24mls" ], [ 0.568157, "\u001b[?1l\u001b>" ], [ 0.00422, "\u001b[?2004l\r\r\n" ], [ 0.00208, "\u001b]2;brig ls\u0007\u001b]1;brig\u0007" ], [ 0.160871, "SIZE MODTIME PATH PIN \r\n886 B Feb 18 16:03:17 \u001b[37m/README.md\u001b[0m \u001b[36m🖈\u001b[0m \r\n" ], [ 0.001331, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.4e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007" ], [ 1.2e-05, "\u001b]1;..rig/docs/repo\u0007" ], [ 6.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 7.4e-05, "\u001b[?1h\u001b=" ], [ 0.000196, "\u001b[?2004h" ], [ 1.016178, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.105196, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.10579, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.09966, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.129177, " " ], [ 0.179915, "\u001b[4mc\u001b[24m" ], [ 0.058107, "\b\u001b[24mca" ], [ 0.146678, "t" ], [ 0.126798, " " ], [ 0.176763, "R" ], [ 0.070224, "E" ], [ 0.134468, "A" ], [ 0.131393, "D" ], [ 0.13735, "M" ], [ 0.091651, "E" ], [ 0.188663, "." 
], [ 0.214387, "m" ], [ 0.098478, "d" ], [ 0.198813, "\u001b[?1l\u001b>" ], [ 0.006238, "\u001b[?2004l\r\r\n" ], [ 0.002035, "\u001b]2;brig cat README.md\u0007\u001b]1;brig\u0007" ], [ 0.122483, "Welcome to brig!\r\n\r\nHere's what you can do next:\r\n\r\n • Add a few remotes to sync with (See 'brig remote add -h')\r\n • Mount your data somewhere convinient (See 'brig mount -h')\r\n • Have a relaxing day exploring brig's features.\r\n\r\nPlease remember that brig is software in it's very early stages,\r\nand will currently eat your data with near-certainty.\r\n\r\nIf you're done with this README, you can easily remove it:\r\n\r\n $ brig rm README.md\r\n\r\n" ], [ 0.001463, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000111, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007\u001b]1;..rig/docs/repo\u0007" ], [ 3.6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 6.9e-05, "\u001b[?1h\u001b=" ], [ 0.00018, "\u001b[?2004h" ], [ 1.523065, "\u001b[?2004l\r\r\n" ] ] } ================================================ FILE: docs/asciinema/1_init_with_pwm.json ================================================ {"version": 2, "width": 172, "height": 42, "timestamp": 1542382930, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}, "title": "brig init (with password manager)"} [0.26352, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.26403, "o", "\u001b]2;sahib@werkbank: /tmp\u0007\u001b]1;/tmp\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b=\u001b[?2004h"] [0.739097, "o", "\u001b[4mm\u001b[24m"] [0.92474, "o", "\b\u001b[24m\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[0m\u001b[39m"] [1.031303, "o", "\b\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[1m\u001b[31md\u001b[0m\u001b[39m"] [1.153579, "o", "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [1.230782, "o", "\b\b\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mk\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[32mr\u001b[39m"] [1.310904, 
"o", " "] [1.579276, "o", "\u001b[4mr\u001b[24m"] [1.642426, "o", "\b\u001b[24mre"] [1.712467, "o", "p"] [1.860411, "o", "o"] [1.993894, "o", " "] [3.034364, "o", "&"] [3.155707, "o", "&"] [3.263644, "o", " "] [3.395157, "o", "\u001b[1m\u001b[31mc\u001b[0m\u001b[39m"] [3.462327, "o", "\b\u001b[0m\u001b[32mc\u001b[32md\u001b[39m"] [3.539217, "o", " "] [3.645307, "o", "\u001b[4mr\u001b[24m"] [3.707485, "o", "\b\u001b[24mre"] [3.772131, "o", "p"] [3.95697, "o", "o"] [4.725546, "o", "\u001b[?1l\u001b>"] [4.734682, "o", "\u001b[?2004l\r\r\n"] [4.737525, "o", "\u001b]2;mkdir repo && cd repo\u0007\u001b]1;mkdir\u0007"] [4.742294, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [4.742882, "o", "\u001b]2;sahib@werkbank: /tmp/repo\u0007"] [4.743388, "o", "\u001b]1;/tmp/repo\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [4.744018, "o", "\u001b[?1h\u001b="] [4.745864, "o", "\u001b[?2004h"] [5.92003, "o", "\u001b[1m\u001b[31mp\u001b[0m\u001b[39m"] [6.056376, "o", "\b\u001b[1m\u001b[31mp\u001b[1m\u001b[31ma\u001b[0m\u001b[39m"] [6.124477, "o", "\b\b\u001b[1m\u001b[31mp\u001b[1m\u001b[31ma\u001b[1m\u001b[31ms\u001b[0m\u001b[39m"] [6.290954, "o", "\b\b\b\u001b[0m\u001b[32mp\u001b[0m\u001b[32ma\u001b[0m\u001b[32ms\u001b[32ms\u001b[39m"] [6.399263, "o", " "] [7.291905, "o", "g"] [7.360923, "o", "e"] [7.422143, "o", "n"] [7.499221, "o", "e"] [7.544024, "o", "r"] [7.653468, "o", "a"] [7.775172, "o", "t"] [7.826824, "o", "e"] [7.905244, "o", " "] [8.187297, "o", "b"] [8.307228, "o", "r"] [8.406541, "o", "i"] [8.503768, "o", "g"] [8.74212, "o", "/"] [8.926867, "o", "r"] [9.007904, "o", "e"] [9.047081, "o", "p"] [9.200125, "o", "o"] [9.764218, "o", "/"] [9.933345, "o", "s"] [9.984954, "o", "a"] [10.128667, "o", "h"] [10.30151, "o", "i"] [10.396692, "o", "b"] [10.772247, "o", "@"] [11.629835, "o", "w"] [11.71424, "o", "a"] [11.820773, "o", "l"] [11.951813, "o", "d"] [12.115615, "o", "."] [12.210083, "o", "d"] [12.266417, "o", "e"] [12.876332, "o", " "] [13.199804, 
"o", "\b"] [14.158845, "o", "/"] [14.369763, "o", ";"] [14.48393, "o", "\u001b[1m\u001b[31ma\u001b[0m\u001b[39m"] [14.799511, "o", "\b\u001b[0m\u001b[39m \b"] [14.928714, "o", "\b \b"] [15.09851, "o", "l"] [15.216493, "o", "a"] [15.303913, "o", "p"] [15.434103, "o", "t"] [15.513938, "o", "o"] [15.672982, "o", "p"] [15.795216, "o", " "] [15.997295, "o", "-"] [16.210899, "o", "n"] [16.289418, "o", " "] [16.457636, "o", "2"] [16.59324, "o", "0"] [17.222134, "o", "\u001b[?1l\u001b>"] [17.227172, "o", "\u001b[?2004l"] [17.227446, "o", "\r\r\n"] [17.229032, "o", "\u001b]2;pass generate brig/repo/sahib@wald.de/laptop -n 20\u0007"] [17.22927, "o", "\u001b]1;pass\u0007"] [17.25874, "o", "mkdir: Verzeichnis '/home/sahib/.password-store/brig/repo/sahib@wald.de' angelegt\r\n"] [17.273215, "o", "\u001b[1mThe generated password for \u001b[4mbrig/repo/sahib@wald.de/laptop\u001b[24m is:\u001b[0m\r\n\u001b[1m\u001b[93mwRQUbpoYCCwZIJxkn7VV\u001b[0m\r\n"] [17.273828, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [17.273942, "o", "\u001b]2;sahib@werkbank: /tmp/repo\u0007"] [17.273967, "o", "\u001b]1;/tmp/repo\u0007"] [17.274064, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [17.274153, "o", "\u001b[?1h\u001b="] [17.274413, "o", "\u001b[?2004h"] [18.856296, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [18.940748, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [19.061012, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [19.159053, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [19.233445, "o", " "] [21.190964, "o", "i"] [21.386602, "o", "n"] [21.538857, "o", "i"] [21.609502, "o", "t"] [21.683962, "o", " "] [22.683931, "o", " "] [22.997219, "o", "\b"] [23.738281, "o", "s"] [23.799633, "o", "a"] [23.859625, "o", "h"] [24.023621, "o", "i"] [24.106901, "o", "b"] [24.445686, "o", "@"] [24.668167, "o", "w"] [24.747857, "o", "a"] [24.836444, "o", 
"l"] [24.964705, "o", "d"] [25.107596, "o", "."] [25.213753, "o", "d"] [25.304183, "o", "e"] [25.855981, "o", "/"] [26.067438, "o", "l"] [26.19953, "o", "a"] [26.324293, "o", "p"] [26.537115, "o", "t"] [26.626544, "o", "o"] [26.82602, "o", "p"] [26.984607, "o", " "] [27.63086, "o", "-"] [27.910446, "o", "w"] [28.066763, "o", " "] [28.24338, "o", "\u001b[33m'\u001b[39m"] [28.476716, "o", "\b\u001b[33m'\u001b[33mp\u001b[39m"] [28.548911, "o", "\b\u001b[33mp\u001b[33ma\u001b[39m"] [28.591751, "o", "\b\u001b[33ma\u001b[33ms\u001b[39m"] [28.74535, "o", "\b\u001b[33ms\u001b[33ms\u001b[39m"] [28.821941, "o", "\b\u001b[33ms\u001b[33m \u001b[39m"] [31.700107, "o", "\u001b[40D\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[1m\u001b[31mg\u001b[1m\u001b[31m \u001b[1m\u001b[31mi\u001b[1m\u001b[31mn\u001b[1m\u001b[31mi\u001b[1m\u001b[31mt\u001b[1m\u001b[31m \u001b[1m\u001b[31ms\u001b[1m\u001b[31ma\u001b[1m\u001b[31mh\u001b[1m\u001b[31mi\u001b[1m\u001b[31mb\u001b[1m\u001b[31m@\u001b[1m\u001b[31mw\u001b[1m\u001b[31ma\u001b[1m\u001b[31ml\u001b[1m\u001b[31md\u001b[1m\u001b[31m.\u001b[1m\u001b[31md\u001b[1m\u001b[31me\u001b[1m\u001b[31m/\u001b[1m\u001b[31ml\u001b[1m\u001b[31ma\u001b[1m\u001b[31mp\u001b[1m\u001b[31mt\u001b[1m\u001b[31mo\u001b[1m\u001b[31mp\u001b[1m\u001b[31m \u001b[1m\u001b[31m-\u001b[1m\u001b[31mw\u001b[1m\u001b[31m \u001b[1m\u001b[31m'\u001b[1m\u001b[31mp\u001b[1m\u001b[31ma\u001b[1m\u001b[31ms\u001b[1m\u001b[31ms\u001b[1m\u001b[31m \u001b[1m\u001b[31mbrig/repo/sahib@wald.de/laptop\u001b[0m\u001b[27m\u001b[39m"] [32.642035, "o", "\u001b[70D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mi\u001b[0m\u001b[39mn\u001b[0m\u001b[39mi\u001b[0m\u001b[39mt\u001b[0m\u001b[39m 
\u001b[0m\u001b[39ms\u001b[0m\u001b[39ma\u001b[0m\u001b[39mh\u001b[0m\u001b[39mi\u001b[0m\u001b[39mb\u001b[0m\u001b[39m@\u001b[0m\u001b[39mw\u001b[0m\u001b[39ma\u001b[0m\u001b[39ml\u001b[0m\u001b[39md\u001b[0m\u001b[39m.\u001b[0m\u001b[39md\u001b[0m\u001b[39me\u001b[0m\u001b[39m/\u001b[0m\u001b[39ml\u001b[0m\u001b[39ma\u001b[0m\u001b[39mp\u001b[0m\u001b[39mt\u001b[0m\u001b[39mo\u001b[0m\u001b[39mp\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39mw\u001b[0m\u001b[39m \u001b[0m\u001b[33m'\u001b[0m\u001b[33mp\u001b[0m\u001b[33ma\u001b[0m\u001b[33ms\u001b[0m\u001b[33ms\u001b[0m\u001b[33m \u001b[0m\u001b[33mb\u001b[0m\u001b[33mr\u001b[0m\u001b[33mi\u001b[0m\u001b[33mg\u001b[0m\u001b[33m/\u001b[0m\u001b[33mr\u001b[0m\u001b[33me\u001b[0m\u001b[33mp\u001b[0m\u001b[33mo\u001b[0m\u001b[33m/\u001b[0m\u001b[33ms\u001b[0m\u001b[33ma\u001b[0m\u001b[33mh\u001b[0m\u001b[33mi\u001b[0m\u001b[33mb\u001b[0m\u001b[33m@\u001b[0m\u001b[33mw\u001b[0m\u001b[33ma\u001b[0m\u001b[33ml\u001b[0m\u001b[33md\u001b[0m\u001b[33m.\u001b[0m\u001b[33md\u001b[0m\u001b[33me\u001b[0m\u001b[33m/\u001b[0m\u001b[33ml\u001b[0m\u001b[33ma\u001b[0m\u001b[33mp\u001b[0m\u001b[33mt\u001b[0m\u001b[33mo\u001b[0m\u001b[33mp\u001b[33m'\u001b[39m"] [33.456599, "o", "\u001b[?1l\u001b>"] [33.458004, "o", "\u001b[?2004l\r\r\n"] [33.458685, "o", "\u001b]2;brig init sahib@wald.de/laptop -w 'pass brig/repo/sahib@wald.de/laptop'\u0007\u001b]1;brig\u0007"] [33.843016, "o", "Guessed folder for init: /tmp/repo\r\n"] [36.537341, "o", "\r\n _____ / /\\ ___ / /\\ \r\n / /::\\ / /::\\ / /\\ / /:/_\r\n / /:/\\:\\ / /:/\\:\\ / /:/ / /:/ /\\ \r\n / /:/~/::\\ / /:/~/:/ /__/::\\ / /:/_/::\\ \r\n /__/:/ /:/\\:| /__/:/ /:/___ \\__\\/\\:\\__ /__/:/__\\/\\:\\\r\n \\ \\:\\/:/~/:/ \\ \\:\\/:::::/ \\ \\:\\/\\ \\ \\:\\ /~~/:/\r\n \\ \\::/ /:/ \\ \\::/~~~~ \\__\\::/ \\ \\:\\ /:/\r\n \\ \\:\\/:/ \\ \\:\\ /__/:/ \\ \\:\\/:/\r\n \\ \\::/ \\ \\:\\ \\__\\/ \\ \\::/\r\n \\__\\/ \\__\\/ \\__\\/\r\n\r\n\r\n A new file README.md was 
automatically added.\r\n Use 'brig cat README.md' to view it & get started.\r\n\r\n\r\n"] [36.543616, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [36.543888, "o", "\u001b]2;sahib@werkbank: /tmp/repo\u0007\u001b]1;/tmp/repo\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b="] [36.544159, "o", "\u001b[?2004h"] [39.488223, "o", "\u001b[?2004l\r\r\n"] ================================================ FILE: docs/asciinema/2_adding.json ================================================ { "version": 1, "width": 119, "height": 29, "duration": 21.32594, "command": null, "title": null, "env": { "TERM": "xterm-256color", "SHELL": "/bin/zsh" }, "stdout": [ [ 0.25386, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 2.7e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007\u001b]1;..rig/docs/repo\u0007" ], [ 0.000118, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 9.8e-05, "\u001b[?1h\u001b=" ], [ 0.000186, "\u001b[?2004h" ], [ 0.453974, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.105389, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.085149, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.092634, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.053191, " " ], [ 0.120726, "s" ], [ 0.138693, "t" ], [ 0.104973, "a" ], [ 0.113068, "g" ], [ 0.033543, "e" ], [ 0.086594, " " ], [ 0.108291, "\u001b[4mm\u001b[24m" ], [ 0.162977, "\b\u001b[4mm\u001b[4mu\u001b[24m" ], [ 0.112732, "\b\u001b[4mu\u001b[4ms\u001b[24m" ], [ 0.234469, "\b\u001b[4ms\u001b[4mic.mp3\u001b[24m " ], [ 0.725932, "\u001b[?1l\u001b>" ], [ 0.005389, "\u001b[?2004l\r\r\n" ], [ 0.002298, "\u001b]2;brig stage music.mp3\u0007\u001b]1;brig\u0007" ], [ 0.462899, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 4.1e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007" ], [ 1.1e-05, "\u001b]1;..rig/docs/repo\u0007" ], [ 6.7e-05, 
"\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 0.0001, "\u001b[?1h\u001b=" ], [ 0.000226, "\u001b[?2004h" ], [ 0.688362, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.115037, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.088531, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.101543, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.066626, " " ], [ 0.109212, "\u001b[4ml\u001b[24m" ], [ 0.125075, "\b\u001b[24mls" ], [ 0.105769, "\u001b[?1l\u001b>" ], [ 0.006356, "\u001b[?2004l\r\r\n" ], [ 0.001889, "\u001b]2;brig ls\u0007\u001b]1;brig\u0007" ], [ 0.181364, "SIZE MODTIME " ], [ 0.00013, "PATH PIN \r\n886 B Feb 18 16:03:17 \u001b[37m/README.md\u001b[0m \u001b[36m🖈\u001b[0m \r\n\u001b[33m13 MB\u001b[0m Feb 18 16:05:45 \u001b[37m/music.mp3\u001b[0m \u001b[36m🖈\u001b[0m \r\n" ], [ 0.001523, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000107, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007" ], [ 1.2e-05, "\u001b]1;..rig/docs/repo\u0007" ], [ 8.5e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 0.000104, "\u001b[?1h\u001b=" ], [ 0.000267, "\u001b[?2004h" ], [ 0.618057, "\u001b[32mbrig\u001b[39m ls" ], [ 0.228909, "\b\b \b\b" ], [ 0.192473, "t" ], [ 0.144214, "r" ], [ 0.038708, "e" ], [ 0.165568, "e" ], [ 0.182023, "\u001b[?1l\u001b>" ], [ 0.004345, "\u001b[?2004l\r\r\n" ], [ 0.001945, "\u001b]2;brig tree\u0007\u001b]1;brig\u0007" ], [ 0.11428, "\u001b[35m•\u001b[0m \u001b[36m🖈\u001b[0m\r\n├── music.mp3 \u001b[36m🖈\u001b[0m\r\n└── README.md \u001b[36m🖈\u001b[0m\r\n\r\n1 directory, 2 files\r\n" ], [ 0.001242, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8.5e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007" ], [ 1.3e-05, "\u001b]1;..rig/docs/repo\u0007" ], [ 7.3e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 8.3e-05, "\u001b[?1h\u001b=" ], [ 0.000189, "\u001b[?2004h" ], [ 
1.986702, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.139791, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.061285, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.1216, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.059354, " " ], [ 1.141574, "\u001b[4mc\u001b[24m" ], [ 0.049557, "\b\u001b[24mca" ], [ 0.159666, "t" ], [ 0.068815, " " ], [ 0.146648, "\u001b[4mm\u001b[24m" ], [ 0.155988, "\b\u001b[4mm\u001b[4mu\u001b[24m" ], [ 0.079945, "\b\u001b[4mu\u001b[4ms\u001b[24m" ], [ 0.091685, "\b\u001b[4ms\u001b[4mi\u001b[24m" ], [ 0.075616, "\b\u001b[4mi\u001b[4mc\u001b[24m" ], [ 0.139947, "\b\u001b[4mc\u001b[4m.\u001b[24m" ], [ 0.180603, "\b\u001b[4m.\u001b[4mm\u001b[24m" ], [ 0.163774, "\b\u001b[4mm\u001b[4mp\u001b[24m" ], [ 0.06169, "\b\u001b[4mp\u001b[4m3\u001b[24m" ], [ 0.646438, " " ], [ 0.192937, "|" ], [ 0.128012, " " ], [ 0.355076, "\u001b[4mm\u001b[24m" ], [ 0.185119, "\b\u001b[24m\u001b[1m\u001b[31mm\u001b[1m\u001b[31mp\u001b[0m\u001b[39m" ], [ 0.079648, "\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mp\u001b[32mv\u001b[39m" ], [ 0.069737, " " ], [ 0.098894, "-" ], [ 0.442838, "\u001b[?1l\u001b>\u001b[?2004l\r\r\n\u001b]2;brig cat music.mp3 | mpv -\u0007\u001b]1;brig\u0007" ], [ 0.119644, "\u001b[0mPlaying: -\r\n\u001b[0m" ], [ 0.006226, "\u001b[0m[file] Reading from stdin...\r\n\u001b[0m" ], [ 0.019648, "\u001b[0;33m[ffmpeg/demuxer] mp3: invalid concatenated file detected - using bitrate for duration\r\n\u001b[0m" ], [ 0.00472, "\u001b[0m (+) Audio --aid=1 (mp3 2ch 44100Hz)\r\n" ], [ 8.2e-05, "\u001b[0m\u001b[0mFile tags:\r\n" ], [ 2.1e-05, "\u001b[0m\u001b[0m Artist: Epica\r\n\u001b[0m" ], [ 1.7e-05, "\u001b[0m Album: The Classical Conspiracy: Live in Miskolc, Hungary\r\n\u001b[0m\u001b[0m Album_Artist: Epica\r\n" ], [ 1.8e-05, "\u001b[0m\u001b[0m Title: The Imperial March\r\n\u001b[0m\u001b[0m Track: 8/18\r\n" ], [ 1.3e-05, "\u001b[0m" ], 
[ 0.004403, "\u001b[0mAO: [pulse] 44100Hz stereo 2ch s16\r\n\u001b[0m" ], [ 0.015081, "\r\u001b[K\u001b[0mA: 00:00:00 / 00:00:00 Cache: 10s+5MB\r\u001b[0m" ], [ 0.675774, "\r\u001b[K\u001b[0mA: 00:00:00 / 00:00:00 Cache: 9s+5MB\r\u001b[0m" ], [ 0.203763, "\r\u001b[K\u001b[0mA: 00:00:00 / 00:00:00 Cache: 10s+5MB\r\u001b[0m" ], [ 0.203299, "\r\u001b[K\u001b[0mA: 00:00:01 / 00:00:00 Cache: 10s+5MB\r\u001b[0m" ], [ 0.385072, "\r\u001b[K\u001b[0mA: 00:00:01 / 00:00:00 Cache: 9s+5MB\r\u001b[0m" ], [ 0.126352, "\r\u001b[K\u001b[0mA: 00:00:01 / 00:00:00 Cache: 10s+5MB\r\u001b[0m" ], [ 0.404842, "\r\u001b[K\u001b[0mA: 00:00:02 / 00:00:00 Cache: 10s+5MB\r\u001b[0m" ], [ 0.202959, "\r\u001b[K\u001b[0mA: 00:00:02 / 00:00:00 (1%) Cache: 10s+5MB\r\u001b[0m" ], [ 0.813401, "\r\u001b[K\u001b[0mA: 00:00:03 / 00:00:00 (1%) Cache: 10s+5MB\r\u001b[0m" ], [ 0.407844, "\r\u001b[K\u001b[0mA: 00:00:03 / 00:00:00 (1%) Cache: 9s+5MB\r\u001b[0m" ], [ 0.079101, "\r\u001b[K\u001b[0mA: 00:00:03 / 00:00:00 (1%) Cache: 10s+5MB\r\u001b[0m" ], [ 0.126073, "\r\u001b[K\u001b[0mA: 00:00:03 / 00:00:00 (2%) Cache: 10s+5MB\r\u001b[0m" ], [ 0.40592, "\r\u001b[K\u001b[0mA: 00:00:04 / 00:00:00 (2%) Cache: 10s+5MB\r\u001b[0m" ], [ 1.015693, "\r\u001b[K\u001b[0mA: 00:00:05 / 00:00:00 (2%) Cache: 10s+5MB\r\u001b[0m" ], [ 0.609984, "\r\u001b[K\u001b[0mA: 00:00:05 / 00:00:00 (3%) Cache: 9s+5MB\r\u001b[0m" ], [ 0.079399, "\r\u001b[K\u001b[0mA: 00:00:05 / 00:00:00 (3%) Cache: 10s+5MB\r\u001b[0m" ], [ 0.254169, "^C" ], [ 0.006464, "\r\n\u001b[0m\r\n\u001b[0m" ], [ 0.000298, "\u001b[0m\r\n\u001b[0m\u001b[0mExiting... 
(Quit)\r\n\u001b[0m" ], [ 0.022616, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000604, "\u001b]2;sahib@werkbank: ~/dev/brig/docs/repo\u0007" ], [ 0.000122, "\u001b]1;..rig/docs/repo\u0007" ], [ 0.001088, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b=" ], [ 0.001661, "\u001b[?2004h" ], [ 1.256398, "\u001b[?2004l\r\r\n" ] ] } ================================================ FILE: docs/asciinema/3_coreutils.json ================================================ { "version": 1, "width": 119, "height": 29, "duration": 44.439839, "command": null, "title": null, "env": { "TERM": "xterm-256color", "SHELL": "/bin/zsh" }, "stdout": [ [ 0.207775, "b" ], [ 0.03976, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000112, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007" ], [ 0.000121, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 6.6e-05, "\u001b[?1h\u001b=" ], [ 8.2e-05, "\u001b[?2004h" ], [ 0.001595, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.104775, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.103078, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.171794, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.084843, " " ], [ 0.111081, "\u001b[4mm\u001b[24m" ], [ 0.2013, "\b\u001b[24mmk" ], [ 0.213333, "d" ], [ 0.116987, "i" ], [ 0.096428, "r" ], [ 0.108839, " " ], [ 0.615141, "s" ], [ 0.112813, "u" ], [ 0.093527, "b" ], [ 0.458499, "\u001b[?1l\u001b>" ], [ 0.006789, "\u001b[?2004l\r\r\n" ], [ 0.0028, "\u001b]2;brig mkdir sub\u0007\u001b]1;brig\u0007" ], [ 0.112551, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.4e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 5.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 7.3e-05, "\u001b[?1h\u001b=" ], [ 0.000193, "\u001b[?2004h" ], [ 
1.144128, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.151721, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.099278, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.113866, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.071336, " " ], [ 0.12183, "\u001b[4mc\u001b[24m" ], [ 0.085112, "\b\u001b[24mcp" ], [ 0.076426, " " ], [ 0.118376, "\u001b[4mm\u001b[24m" ], [ 0.150092, "\b\u001b[24mmu" ], [ 0.061787, "s" ], [ 0.111346, "i" ], [ 0.102251, "c" ], [ 0.134833, "." ], [ 0.208191, "m" ], [ 0.234634, "p" ], [ 0.096577, "3" ], [ 0.163669, " " ], [ 0.315213, "s" ], [ 0.110484, "u" ], [ 0.097823, "b" ], [ 0.185982, " " ], [ 0.3964, "\b" ], [ 0.377085, "/" ], [ 0.556364, "\b \b" ], [ 0.255609, "\u001b[?1l\u001b>" ], [ 0.007065, "\u001b[?2004l\r\r\n" ], [ 0.002288, "\u001b]2;brig cp music.mp3 sub\u0007\u001b]1;brig\u0007" ], [ 0.116393, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6.1e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.4e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 6e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 6.4e-05, "\u001b[?1h\u001b=" ], [ 0.000187, "\u001b[?2004h" ], [ 1.240271, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.1047, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.102508, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.088436, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.086373, " " ], [ 0.138829, "t" ], [ 0.166321, "r" ], [ 0.074024, "e" ], [ 0.177903, "e" ], [ 0.169713, "\u001b[?1l\u001b>" ], [ 0.003955, "\u001b[?2004l\r\r\n" ], [ 0.001864, "\u001b]2;brig tree\u0007\u001b]1;brig\u0007" ], [ 0.123081, "\u001b[35m•\u001b[0m \u001b[36m🖈\u001b[0m\r\n├── README.md \u001b[36m🖈\u001b[0m\r\n" ], [ 9e-05, "└──\u001b[32msub\u001b[0m \u001b[36m🖈\u001b[0m\r\n └── 
music.mp3 \u001b[36m🖈\u001b[0m\r\n\r\n2 directories, 3 files\r\n" ], [ 0.001329, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.5e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.2e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 6.1e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ " ], [ 1.1e-05, "\u001b[K" ], [ 7.3e-05, "\u001b[?1h\u001b=" ], [ 0.00019, "\u001b[?2004h" ], [ 2.752774, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.218615, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.15312, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.47546, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.091406, " " ], [ 0.851159, "\u001b[4mi\u001b[24m" ], [ 0.153946, "\b\u001b[4mi\u001b[4mn\u001b[24m" ], [ 0.113271, "\b\b\u001b[24mi\u001b[24mnf" ], [ 0.070447, "o" ], [ 0.064971, " " ], [ 0.216765, "R" ], [ 0.069948, "E" ], [ 0.089236, "A" ], [ 0.136061, "D" ], [ 0.12703, "M" ], [ 0.08556, "E" ], [ 0.179354, "." 
], [ 0.223757, "m" ], [ 0.09138, "d" ], [ 0.121202, "\u001b[?1l\u001b>" ], [ 0.00509, "\u001b[?2004l\r\r\n" ], [ 0.001736, "\u001b]2;brig info README.md\u0007\u001b]1;brig\u0007" ], [ 0.117613, "ATTR VALUE" ], [ 3.4e-05, " \r\n\u001b[37mPath\u001b[0m /README.md \r\n\u001b[37mType\u001b[0m file \r\n\u001b[37mSize\u001b[0m 886 B \r\n\u001b[37mHash\u001b[0m SEfXUBbsLgBV1KQtogr9N3WxLuNWYACVoyZbqqDvrCYWgRusPSMTBcfXgveekZh6qNBEYxaFPXZq6mncayPg5yc55z8rp \r\n\u001b[37mInode\u001b[0m " ], [ 6.5e-05, "4 " ], [ 4.7e-05, " \r\n\u001b[37mPinned\u001b[0m \u001b[32myes\u001b[0m \r\n\u001b[37mModTime\u001b[0m 2018-02-18T16:03:17+01:00 \r\n\u001b[37mContent\u001b[0m QmRD1CzUEBg8BMTArBJymVHs8MVZAYAHi2tzA1UxXqf2qp " ], [ 4.5e-05, " \r\n" ], [ 0.001377, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.7e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.2e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 5.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 8.6e-05, "\u001b[?1h\u001b=" ], [ 0.000226, "\u001b[?2004h" ], [ 2.408108, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.112957, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.110455, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.097714, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.243174, "\b\b\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[1m\u001b[31mg\u001b[1m\u001b[31me\u001b[0m\u001b[39m" ], [ 0.341565, "\b\b\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \b" ], [ 0.084114, " " ], [ 0.060533, "\u001b[4me\u001b[24m" ], [ 0.103896, "\b\u001b[24med" ], [ 0.049514, "i" ], [ 0.172611, "t" ], [ 0.090499, " " ], [ 0.237876, "R" ], [ 0.073149, "E" ], [ 0.131375, "A" ], [ 0.111012, "D" ], [ 0.146386, "M" ], [ 0.088129, "E" ], [ 0.165009, "." 
], [ 0.216731, "m" ], [ 0.083528, "d" ], [ 1.476078, "\b\b" ], [ 0.264974, "\b\b\b\b\b\b\b" ], [ 0.575857, "\b" ], [ 0.294863, "\b\u001b[P\u001b[10C \u001b[11D" ], [ 0.198429, "\b\u001b[P\u001b[10C \u001b[11D" ], [ 0.169154, "t README.md\u001b[10D" ], [ 0.118885, "i README.md\u001b[10D" ], [ 0.214733, "\u001b[?1l\u001b>" ], [ 0.005064, "\u001b[?2004l\r\r\n" ], [ 0.001899, "\u001b]2;brig edti README.md\u0007\u001b]1;brig\u0007" ], [ 0.103904, "`\u001b[31medti\u001b[0m` is not a valid command. Did you maybe mean `\u001b[32medit\u001b[0m`?\r\n" ], [ 0.001328, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 7.2e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.3e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 5.9e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 7e-05, "\u001b[?1h\u001b=" ], [ 0.000196, "\u001b[?2004h" ], [ 0.869697, "\u001b[32mbrig\u001b[39m edti README.md" ], [ 0.269395, "\u001b[19D" ], [ 0.134928, "\u001b[5C" ], [ 0.176458, "\u001b[5C" ], [ 0.380294, "\b" ], [ 0.37218, "\b\u001b[P\u001b[10C \u001b[11D" ], [ 0.147017, "\b\u001b[P\u001b[10C \u001b[11D" ], [ 0.163162, "i README.md\u001b[10D" ], [ 0.081396, "t README.md\u001b[10D" ], [ 0.73976, "\u001b[?1l\u001b>" ], [ 0.005398, "\u001b[?2004l\r\r\n" ], [ 0.001748, "\u001b]2;brig edit README.md\u0007\u001b]1;brig\u0007" ], [ 0.127872, "\u001b[?1000h\u001b[?2004h\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\u001b[?2004h" ], [ 0.000239, "\u001b[1;29r\u001b[?12h\u001b[?12l\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[29;1H\"/tmp/brig-cmd-buffer-1263340949723681570.md\"" ], [ 5.3e-05, " 15L, 443C" ], [ 0.012203, "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c\u001b]10;?\u0007\u001b]11;?\u0007" ], [ 0.002133, "\u001b[1;1HWelcome to brig!\r\n\r\nHere's what you can do next:\u001b[5;5H•\u001b[5;7HAdd a few remotes to sync with (See 'brig remote add -h')\r\n •\u001b[6;7HMount your data somewhere convinient (See 'brig mount -h')\r\n •\u001b[7;7HHave 
a relaxing day exploring brig's features.\r\n\r\nPlease remember that brig is software in it's very early stages,\r\nand will currently eat your data with near-certainty.\r\n\r\nIf you're done with this README, you can easily remove it:\u001b[14;5H$ brig rm README.md\r\n\r\n\u001b[94m~ \u001b[17;1H~ \u001b[18;1H~ \u001b[19;1H~ \u001b[20;1H~ " ], [ 3e-05, " \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ \u001b[27;1H~ \u001b[28;1H~ " ], [ 9e-06, " \u001b[m\u001b[29;102H1,1\u001b[9CAlles\u001b[1;1H\u001b[?25h" ], [ 0.011277, "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\" ], [ 0.002856, "\u001b[?1000l\u001b[?1006h\u001b[?1002h\u001b[?1006l\u001b[?1002l\u001b[?1006h\u001b[?1002h\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1HWelcome to brig!\r\n\r\nHere's what you can do next:\u001b[5;5H•\u001b[5;7HAdd a few remotes to sync with (See 'brig remote add -h')\r\n •\u001b[6;7HMount your data somewhere convinient (See 'brig mount -h')\r\n •\u001b[7;7HHave a relaxing day exploring brig's features.\r\n\r\nPlease remember that brig is software in it's very early stages,\r\nand will currently eat your data with near-certainty.\r\n\r\nIf you're done with this README, you can easily remove it:\u001b[14;5H$ brig rm README.md\r\n\r\n\u001b[94m~ \u001b[17;1H~ \u001b[18;1H~ \u001b[19;1H~ " ], [ 5.3e-05, " \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ \u001b[27;1H~ " ], [ 2.9e-05, " \u001b[28;1H~ \u001b[m\u001b[29;102H1,1\u001b[9CAlles\r\"/tmp/brig-cmd-buffer-1263340949723681570.md\" 15L, 443C\u001b[1;1H\u001b[?25h" ], [ 0.61525, "\u001b[?25l\u001b[29;92Hj\u001b[1;1H" ], [ 0.000361, "\u001b[29;92H \u001b[2;1H\u001b[29;102H2,0-1\u001b[2;1H\u001b[?25h" ], [ 0.307898, "\u001b[?25l\u001b[29;92HG\u001b[2;1H" ], [ 
0.000155, "\u001b[29;92H \u001b[15;1H\u001b[29;102H15,0-1\u001b[15;1H\u001b[?25h" ], [ 0.548891, "\u001b[?25l\u001b[29;92Ho\u001b[15;1H" ], [ 0.000101, "\u001b[29;92H \u001b[16;1H" ], [ 0.000179, "\u001b[29;1H\u001b[1m-- EINFÜGEN --\u001b[m\u001b[29;15H\u001b[K\u001b[29;102H16,1\u001b[8CAlles" ], [ 0.001464, "\u001b[16;1H\u001b[K\u001b[16;1H\u001b[?25h" ], [ 0.195215, "\u001b[?25l\u001b[17;1H\u001b[K\u001b[29;103H7\u001b[17;1H\u001b[?25h" ], [ 0.411704, "\u001b[?25lI\u001b[29;105H2\u001b[17;2H\u001b[?25h" ], [ 0.234594, "\u001b[?25l'\u001b[29;105H3\u001b[17;3H\u001b[?25h" ], [ 0.122519, "\u001b[?25lv\u001b[29;105H4\u001b[17;4H\u001b[?25h" ], [ 0.066522, "\u001b[?25le\u001b[29;105H5\u001b[17;5H\u001b[?25h" ], [ 0.112979, "\u001b[?25l\u001b[29;105H6\u001b[17;6H\u001b[?25h" ], [ 0.312782, "\u001b[?25le\u001b[29;105H7\u001b[17;7H\u001b[?25h" ], [ 0.154464, "\u001b[?25ld\u001b[29;105H8\u001b[17;8H\u001b[?25h" ], [ 0.168844, "\u001b[?25li\u001b[29;105H9\u001b[17;9H\u001b[?25h" ], [ 0.090198, "\u001b[?25lt\u001b[29;105H10\u001b[17;10H\u001b[?25h" ], [ 0.089237, "\u001b[?25le\u001b[29;106H1\u001b[17;11H\u001b[?25h" ], [ 0.129701, "\u001b[?25lr\u001b[29;106H2\u001b[17;12H\u001b[?25h" ], [ 0.439249, "\u001b[?25l\u001b[17;11H\u001b[K\u001b[29;106H1\u001b[17;11H\u001b[?25h" ], [ 0.080156, "\u001b[?25ld\u001b[29;106H2\u001b[17;12H\u001b[?25h" ], [ 0.091909, "\u001b[?25l\u001b[29;106H3\u001b[17;13H\u001b[?25h" ], [ 0.168699, "\u001b[?25ly\u001b[29;106H4\u001b[17;14H\u001b[?25h" ], [ 0.104238, "\u001b[?25le\u001b[29;106H5\u001b[17;15H\u001b[?25h" ], [ 0.220856, "\u001b[?25l\u001b[17;14H\u001b[K\u001b[29;106H4\u001b[17;14H\u001b[?25h" ], [ 0.14326, "\u001b[?25l\u001b[17;13H\u001b[K\u001b[29;106H3\u001b[17;13H\u001b[?25h" ], [ 0.142547, "\u001b[?25li\u001b[29;106H4\u001b[17;14H\u001b[?25h" ], [ 0.089015, "\u001b[?25lt\u001b[29;106H5\u001b[17;15H\u001b[?25h" ], [ 0.101241, "\u001b[?25l.\u001b[29;106H6\u001b[17;16H\u001b[?25h" ], [ 0.148249, 
"\u001b[29;1H\u001b[K\u001b[17;15H\u001b[?25l\u001b[29;92H^[\u001b[17;15H" ], [ 0.100582, "\u001b[29;92H \u001b[17;16H" ], [ 0.000733, "\u001b[29;102H17,15\u001b[7CAlles\u001b[17;15H\u001b[?25h" ], [ 0.18371, "\u001b[?25l\u001b[29;92H:\u001b[17;15H\u001b[29;92H\u001b[K\u001b[29;1H:\u001b[?2004h\u001b[?25h" ], [ 0.067401, "w\u001b[?25l\u001b[?25h" ], [ 0.068066, "q" ], [ 1.9e-05, "\u001b[?25l\u001b[?25h" ], [ 0.061677, "\r" ], [ 4.3e-05, "\u001b[?25l\u001b[?1006l\u001b[?1002l\u001b[?2004l" ], [ 3.8e-05, "\"/tmp/brig-cmd-buffer-1263340949723681570.md\"" ], [ 8e-05, " 17L, 460C geschrieben" ], [ 0.000507, "\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?25h\u001b[?1049l\u001b[23;0;0t" ], [ 0.135995, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 0.000115, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.9e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 0.000112, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 0.000149, "\u001b[?1h\u001b=" ], [ 0.000335, "\u001b[?2004h" ], [ 1.145903, "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" ], [ 0.128894, "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" ], [ 0.085092, "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" ], [ 0.104495, "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m" ], [ 0.07748, " " ], [ 0.150049, "\u001b[4mc\u001b[24m" ], [ 0.065485, "\b\u001b[24mca" ], [ 0.151361, "t" ], [ 0.07143, " " ], [ 0.798179, "R" ], [ 0.049034, "E" ], [ 0.148152, "A" ], [ 0.101871, "D" ], [ 0.147676, "M" ], [ 0.08569, "E" ], [ 0.501451, "." 
], [ 0.181451, "m" ], [ 0.090745, "d" ], [ 0.115721, "\u001b[?1l\u001b>" ], [ 0.005991, "\u001b[?2004l\r\r\n" ], [ 0.001998, "\u001b]2;brig cat README.md\u0007\u001b]1;brig\u0007" ], [ 0.113793, "Welcome to brig!\r\n\r\nHere's what you can do next:\r\n\r\n • Add a few remotes to sync with (See 'brig remote add -h')\r\n • Mount your data somewhere convinient (See 'brig mount -h')\r\n • Have a relaxing day exploring brig's features.\r\n\r\nPlease remember that brig is software in it's very early stages,\r\nand will currently eat your data with near-certainty.\r\n\r\nIf you're done with this README, you can easily remove it:\r\n\r\n $ brig rm README.md\r\n\r\n\r\nI've edited it.\r\n" ], [ 0.00158, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 8.5e-05, "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.4e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 7.7e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 7.7e-05, "\u001b[?1h\u001b=" ], [ 0.000193, "\u001b[?2004h" ], [ 1.198824, "\u001b[32mbrig\u001b[39m cat README.md" ], [ 0.183351, "\u001b[13Dedit README.md" ], [ 0.414961, "\u001b[12Dti\u001b[10C" ], [ 0.409659, "\u001b[14Dinfo\u001b[10C" ], [ 0.534112, "\u001b[?1l\u001b>" ], [ 0.005484, "\u001b[?2004l\r\r\n" ], [ 0.001893, "\u001b]2;brig info README.md\u0007\u001b]1;brig\u0007" ], [ 0.117828, "ATTR VALUE " ], [ 2.7e-05, " \r\n\u001b[37mPath\u001b[0m /README.md \r\n\u001b[37mType\u001b[0m file \r\n\u001b[37mSize\u001b[0m 460 B \r\n\u001b[37mHash\u001b[0m SEfXUEBxWHqPU8J2cn4mg2bBoB48Wrpba2DUwsyuGaDc2nYfgBLZ6S3Bs4iY4kiCUJfa2KWSNx8gEBh2FMbh2D1yCNxst \r\n\u001b[37mInode\u001b[0m 4 \r\n\u001b[37mPinned\u001b[0m" ], [ 1.3e-05, " \u001b[32myes\u001b[0m " ], [ 1e-05, " \r\n\u001b[37mModTime\u001b[0m 2018-02-18T21:51:40+01:00 " ], [ 9e-06, " \r\n" ], [ 1e-05, "\u001b[37mContent\u001b[0m QmQMGt4QGSGhBXnNAHD3pYymmyxjV7dS7nax3F4wCf2Uut " ], [ 1e-05, " \r\n" ], [ 0.001378, "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" ], [ 6.7e-05, 
"\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007" ], [ 1.6e-05, "\u001b]1;~/dev/brig/docs\u0007" ], [ 6.4e-05, "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K" ], [ 7.7e-05, "\u001b[?1h\u001b=" ], [ 0.000187, "\u001b[?2004h" ], [ 1.715623, "\u001b[?2004l\r\r\n" ] ] } ================================================ FILE: docs/asciinema/4_mount.json ================================================ {"version": 2, "width": 119, "height": 29, "timestamp": 1519811299, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}} [0.259676, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.259761, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [0.259893, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [0.260008, "o", "\u001b[?1h\u001b="] [0.260247, "o", "\u001b[?2004h"] [0.682887, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [0.801996, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [0.891365, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [0.986864, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [1.037332, "o", " "] [1.149093, "o", "\u001b[4ml\u001b[24m"] [1.267448, "o", "\b\u001b[24mls"] [1.513916, "o", "\u001b[?1l\u001b>"] [1.518176, "o", "\u001b[?2004l\r\r\n"] [1.520369, "o", "\u001b]2;brig ls\u0007\u001b]1;brig\u0007"] [1.631909, "o", "SIZE MODTIME "] [1.632051, "o", " PATH PIN \r\n886 B Feb 28 10:45:21 \u001b[37m/README.md\u001b[0m \u001b[36m🖈\u001b[0m \r\n\u001b[33m13 MB\u001b[0m Feb 28 10:46:48 \u001b[32m/sub\u001b[0m \u001b[36m🖈\u001b[0m \r\n"] [1.633428, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [1.63352, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [1.633546, "o", "\u001b]1;~/dev/brig/docs\u0007"] [1.633623, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [1.633823, "o", "\u001b[?1h\u001b="] [1.633919, "o", "\u001b[?2004h"] [2.603338, "o", 
"\u001b[4mm\u001b[24m"] [2.796509, "o", "\b\u001b[24m\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[0m\u001b[39m"] [2.907083, "o", "\b\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[1m\u001b[31md\u001b[0m\u001b[39m"] [2.996988, "o", "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [3.074828, "o", "\b\b\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mk\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[32mr\u001b[39m"] [3.15677, "o", " "] [3.247834, "o", "\u001b[4m/\u001b[24m"] [3.646365, "o", "\b\u001b[4m/\u001b[4mt\u001b[24m"] [3.74645, "o", "\b\u001b[4mt\u001b[4mm\u001b[24m"] [3.948814, "o", "\b\u001b[4mm\u001b[4mp\u001b[24m"] [4.289082, "o", "\b\u001b[4mp\u001b[4m/\u001b[24m"] [4.576632, "o", "\b\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp\u001b[24m/m"] [4.75591, "o", "o"] [4.931808, "o", "u"] [5.100663, "o", "n"] [5.192704, "o", "t"] [5.461691, "o", "\u001b[?1l\u001b>"] [5.466679, "o", "\u001b[?2004l\r\r\n"] [5.468092, "o", "\u001b]2;mkdir /tmp/mount\u0007"] [5.468757, "o", "\u001b]1;mkdir\u0007"] [5.474787, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [5.475931, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [5.476488, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b="] [5.477887, "o", "\u001b[?2004h"] [8.033329, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [8.134549, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [8.228743, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [8.321856, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [8.365702, "o", " "] [8.637156, "o", "\u001b[4mm\u001b[24m"] [8.942413, "o", "\b\u001b[24mmo"] [9.131874, "o", "u"] [9.297628, "o", "n"] [9.389473, "o", "t"] [9.460171, "o", " "] [9.532387, "o", "\u001b[4m/\u001b[24m"] [9.647129, "o", "\b\u001b[4m/\u001b[4mt\u001b[24m"] [9.742548, "o", "\b\u001b[4mt\u001b[4mm\u001b[24m"] 
[10.265361, "o", "\b\u001b[4mm\u001b[4mp\u001b[24m"] [10.523571, "o", "\b\u001b[4mp\u001b[4m/\u001b[24m"] [10.764126, "o", "\b\u001b[4m/\u001b[4mm\u001b[24m"] [10.940644, "o", "\b\u001b[4mm\u001b[4mo\u001b[24m"] [11.11384, "o", "\b\u001b[4mo\u001b[4mu\u001b[24m"] [11.274619, "o", "\b\u001b[4mu\u001b[4mn\u001b[24m"] [11.336741, "o", "\b\u001b[4mn\u001b[4mt\u001b[24m"] [11.501596, "o", "\u001b[?1l\u001b>"] [11.5099, "o", "\u001b[?2004l\r\r\n"] [11.511935, "o", "\u001b]2;brig mount /tmp/mount\u0007\u001b]1;brig\u0007"] [11.630285, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [11.63042, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [11.630489, "o", "\u001b]1;~/dev/brig/docs\u0007"] [11.630544, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [11.630595, "o", "\u001b[?1h\u001b="] [11.630847, "o", "\u001b[?2004h"] [11.783534, "o", "\u001b[4mc\u001b[24m"] [11.846931, "o", "\b\u001b[24m\u001b[32mc\u001b[32md\u001b[39m"] [11.874495, "o", " "] [12.0069, "o", "\u001b[4m/\u001b[24m"] [12.139978, "o", "\b\u001b[4m/\u001b[4mt\u001b[24m"] [12.262404, "o", "\b\u001b[4mt\u001b[4mm\u001b[24m"] [12.368633, "o", "\b\u001b[4mm\u001b[4mp\u001b[1m\u001b[4m/\u001b[0m\u001b[24m"] [12.526367, "o", "\b\u001b[0m\u001b[4m/\u001b[4mm\u001b[24m"] [12.718498, "o", "\b\u001b[4mm\u001b[4mo\u001b[24m"] [12.890811, "o", "\b\u001b[4mo\u001b[4mu\u001b[24m"] [12.979988, "o", "\b\u001b[4mu\u001b[4mnt\u001b[1m\u001b[4m/\u001b[0m\u001b[24m"] [13.185149, "o", "\b\b\u001b[4mt\u001b[24m\u001b[0m\u001b[24m \b"] [13.186246, "o", "\u001b[?1l\u001b>"] [13.191054, "o", "\u001b[?2004l\r\r\n"] [13.192883, "o", "\u001b]2;cd /tmp/mount\u0007\u001b]1;cd\u0007"] [13.193619, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [13.193984, "o", "\u001b]2;sahib@werkbank: /tmp/mount\u0007\u001b]1;/tmp/mount\u0007"] [13.194845, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b="] [13.195642, "o", "\u001b[?2004h"] [13.358323, "o", "\u001b[32ml\u001b[39m"] 
[13.500286, "o", "\b\u001b[32ml\u001b[32ms\u001b[39m"] [13.623484, "o", "\u001b[?1l\u001b>"] [13.626994, "o", "\u001b[?2004l\r\r\n"] [13.628768, "o", "\u001b]2;ls --color=tty\u0007\u001b]1;ls\u0007"] [13.640118, "o", "\u001b[0m\u001b[35mREADME.md\u001b[0m \u001b[1msub\u001b[0m\r\n\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\u001b]2;sahib@werkbank: /tmp/mount\u0007\u001b]1;/tmp/mount\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ "] [13.640771, "o", "\u001b[K"] [13.641637, "o", "\u001b[?1h\u001b="] [13.642956, "o", "\u001b[?2004h"] [15.590359, "o", " "] [15.871315, "o", "\b"] [16.642904, "o", "\u001b[1m\u001b[31mv\u001b[0m\u001b[39m"] [16.831186, "o", "\b\u001b[0m\u001b[32mv\u001b[32mi\u001b[39m"] [16.939098, "o", " "] [17.6128, "o", "n"] [17.666004, "o", "e"] [17.810336, "o", "w"] [17.917136, "o", "-"] [18.016204, "o", "f"] [18.101805, "o", "i"] [18.250022, "o", "l"] [18.312769, "o", "e"] [18.473484, "o", "\u001b[?1l\u001b>"] [18.482016, "o", "\u001b[?2004l\r\r\n"] [18.484593, "o", "\u001b]2;nvim new-file\u0007"] [18.485713, "o", "\u001b]1;vi\u0007"] [18.692978, "o", "\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\u001b[H\u001b[2J\u001b[?2004h\u001b[?1004h\u001b[8;29;119t\u001b[r\u001b[1;1H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \r\n \r\n \r\n \r\n \r\n \r\n \r\n "] [18.693131, "o", " \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "] [18.693209, "o", " \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "] [18.693313, "o", " \r\n \r\n \r\n \r\n \u001b[H\u001b[?25h"] [18.693852, "o", "\u001b[?25l\u001b[2 q\u001b[2 q \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "] [18.69393, "o", " \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "] [18.693987, "o", " \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "] [18.694043, "o", " \r\n \r\n \r\n \u001b[H\u001b[28B\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m\"new-file\"\u001b[C[Neue\u001b[CDatei]\u001b[?25h"] [18.755878, "o", "\u001b[?25l\u001b[H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;254;128;25m buffers 
\u001b(B\u001b[m\u001b[38;2;254;128;25m\u001b[48;2;168;153;132m \u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m new-file \u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;40;40;40m \u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;40;40;40m ◀\u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;40;40;40m◀\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m [new-file] \u001b(B\u001b[m\u001b[38;2;254;128;25m\u001b[48;2;168;153;132m◀\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;254;128;25m tabs \r\n\u001b(B\u001b[m\u001b[38;2;124;111;100m\u001b[48;2;40;40;40m 1 \r\n\u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;40;40;40m~ \r\n~ \r\n~ \r\n~ "] [18.756006, "o", " \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ "] [18.756032, "o", " \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ \r\n~ "] [18.756049, "o", " \r\n~ \r\n~ \r\n~ \r\n~ \r\n\u001b[?25h"] [18.760143, "o", "\u001b[?25l"] [18.776267, "o", "\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m \u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132mNORMAL\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m » SPELL [HUN-DE-DE-FRAMI] \u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;80;73;69m▶\u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;60;56;54m▶\u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;60;56;54m new-file \u001b(B\u001b[m\u001b[38;2;60;56;54m\u001b[48;2;60;56;54m◀\u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;60;56;54m◀\u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;80;73;69m utf-8[BOM][unix] ◀\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m 100% \u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m␊ 0/1 ㏑\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m : 1 \u001b(B\u001b[m\u001b[38;2;254;128;25m\u001b[48;2;168;153;132m◀\u001b(B\u001b[m\u001b[38;2;251;73;52m\u001b[48;2;254;128;25m◀\r\n\u001b]0;/tmp/mount/new-file\u0007\u001b[?25h"] [18.77748, "o", "\u001b[?25l\u001b[2;5H\u001b[?25h"] [19.345027, "o", 
"\u001b[?25l\u001b[29;109H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mi\u001b[2;5H\u001b[?25h"] [19.345442, "o", "\u001b[?25l\u001b[6 q\u001b[29;109H \u001b[2;5H\u001b[?25h"] [19.346299, "o", "\u001b[?25l\r\u001b[27B\u001b(B\u001b[0;1m\u001b[38;2;250;189;47m\u001b[48;2;40;40;40m-- EINFÜGEN --\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[29;15H\u001b[?25h"] [19.377501, "o", "\u001b[?25l\r\u001b[A\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m \u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152mINSERT\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m » SPELL [HUN-DE-DE-FRAMI] \u001b(B\u001b[m\u001b[38;2;131;165;152m\u001b[48;2;80;73;69m▶\u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;80;73;69m▶\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;80;73;69m new-file \u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;80;73;69m◀◀\u001b[18C\u001b(B\u001b[m\u001b[38;2;131;165;152m\u001b[48;2;80;73;69m◀\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m 100% \u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m␊ 0/1 ㏑\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m : 1 \u001b(B\u001b[m\u001b[38;2;254;128;25m\u001b[48;2;131;165;152m◀\u001b[2;5H\u001b[?25h"] [19.901032, "o", "\u001b[?25l\u001b[1;20H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m+ \u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;40;40;40m \u001b[2;5H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mT\u001b[28;37H\u001b(B\u001b[m\u001b[38;2;131;165;152m\u001b[48;2;80;73;69m new-file[+] \u001b[31C\u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[9C\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;6H\u001b[?25h"] [19.905544, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mh\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;7H\u001b[?25h"] [20.080393, "o", 
"\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mi\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;8H\u001b[?25h"] [20.164795, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ms\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m5\u001b[2;9H\u001b[?25h"] [20.258114, "o", "\u001b[?25l\b\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mThis\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m6\u001b[2;10H\u001b[?25h"] [20.673937, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mi\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;11H\u001b[?25h"] [21.163759, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ms\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m8\u001b[2;12H\u001b[?25h"] [21.317033, "o", "\u001b[?25l\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mis\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;13H\u001b[?25h"] [21.403983, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ma\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m10\u001b[2;14H\u001b[?25h"] [21.515109, "o", "\u001b[?25l\u001b[28;116H1\u001b[2;15H\u001b[?25h"] [21.579911, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mf\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;16H\u001b[?25h"] [21.919128, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;17H\u001b[?25h"] [21.959223, "o", 
"\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;18H\u001b[?25h"] [22.191207, "o", "\u001b[?25l\u001b[2 q\u001b[6 q\u001b[?25h"] [22.205546, "o", "\u001b[?25l\b\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[2;17H\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;17H\u001b[?25h"] [22.349697, "o", "\u001b[?25l\u001b[2 q\u001b[6 q\u001b[?25h"] [22.359876, "o", "\u001b[?25l\b\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[2;16H\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;16H\u001b[?25h"] [22.486515, "o", "\u001b[?25l\u001b[2 q\u001b[6 q\u001b[?25h"] [22.497949, "o", "\u001b[?25l\b\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[2;15H\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;15H\u001b[?25h"] [22.683688, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;16H\u001b[?25h"] [22.738237, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;17H\u001b[?25h"] [22.890398, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mw\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;18H\u001b[?25h"] [23.014604, "o", "\u001b[?25l\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mnew\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m5\u001b[2;19H\u001b[?25h"] [23.051557, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mf\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m6\u001b[2;20H\u001b[?25h"] [23.166211, "o", 
"\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mi\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;21H\u001b[?25h"] [23.304776, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ml\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m8\u001b[2;22H\u001b[?25h"] [23.324634, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;23H\u001b[?25h"] [23.404199, "o", "\u001b[?25l\b\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mfile\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m20\u001b[2;24H\u001b[?25h"] [23.523839, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ma\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;25H\u001b[?25h"] [23.647926, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mdn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;27H\u001b[?25h"] [23.865008, "o", "\u001b[?25l\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40madn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;28H\u001b[?25h"] [24.140954, "o", "\u001b[?25l\u001b[2 q\u001b[6 q\u001b[?25h"] [24.158755, "o", "\u001b[?25l\b\b\b\b\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40madn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;27H\u001b[?25h"] [24.294178, "o", "\u001b[?25l\u001b[2 q\u001b[6 q\u001b[?25h"] [24.30896, "o", "\u001b[?25l"] [24.309298, "o", "\b\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[2;26H\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;26H"] [24.309498, "o", "\u001b[?25h"] [24.429442, "o", "\u001b[?25l\u001b[2 q\u001b[6 
q\u001b[?25h"] [24.448041, "o", "\u001b[?25l\b\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[2;25H\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;25H\u001b[?25h"] [24.621783, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;26H\u001b[?25h"] [24.69289, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40md\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;27H\u001b[?25h"] [24.75518, "o", "\u001b[?25l\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mand\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;28H\u001b[?25h"] [24.882117, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mI\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m5\u001b[2;29H\u001b[?25h"] [24.990957, "o", "\u001b[?25l\u001b[28;116H6\u001b[2;30H\u001b[?25h"] [25.147616, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mc\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;31H\u001b[?25h"] [25.259868, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ma\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m8\u001b[2;32H\u001b[?25h"] [25.338547, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;33H\u001b[?25h"] [25.563801, "o", "\u001b[?25l\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mcan\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m30\u001b[2;34H\u001b[?25h"] [25.781546, "o", 
"\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mu\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;35H\u001b[?25h"] [25.811621, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ms\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;36H\u001b[?25h"] [25.971726, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;37H\u001b[?25h"] [26.056187, "o", "\u001b[?25l\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40muse\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;38H\u001b[?25h"] [26.157084, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mt\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m5\u001b[2;39H\u001b[?25h"] [26.216725, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mh\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m6\u001b[2;40H\u001b[?25h"] [26.33611, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mi\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;41H\u001b[?25h"] [26.402646, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ms\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m8\u001b[2;42H\u001b[?25h"] [26.469983, "o", "\u001b[?25l\b\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mthis\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;43H\u001b[?25h"] [26.604867, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mf\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m40\u001b[2;44H\u001b[?25h"] [26.688028, "o", 
"\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mo\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;45H\u001b[?25h"] [26.831965, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ml\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;46H\u001b[?25h"] [26.876067, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40md\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;47H\u001b[?25h"] [26.960031, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;48H\u001b[?25h"] [27.028809, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mr\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m5\u001b[2;49H\u001b[?25h"] [27.101686, "o", "\u001b[?25l\u001b[6D\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mfolder\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m6\u001b[2;50H\u001b[?25h"] [27.345086, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mj\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;51H\u001b[?25h"] [27.524854, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mu\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m8\u001b[2;52H\u001b[?25h"] [27.608849, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ms\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;53H\u001b[?25h"] [27.732593, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mt\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m50\u001b[2;54H\u001b[?25h"] [27.818347, "o", 
"\u001b[?25l\u001b[28;116H1\u001b[2;55H\u001b[?25h"] [27.896692, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ml\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;56H\u001b[?25h"] [28.032423, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mi\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;57H\u001b[?25h"] [28.1877, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mk\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;58H\u001b[?25h"] [28.263534, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m5\u001b[2;59H\u001b[?25h"] [28.355656, "o", "\u001b[?25l\b\b\b\b\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mlike\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m6\u001b[2;60H\u001b[?25h"] [28.484556, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ma\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;61H\u001b[?25h"] [28.565738, "o", "\u001b[?25l\u001b[28;116H8\u001b[2;62H\u001b[?25h"] [28.672064, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mn\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;63H\u001b[?25h"] [28.866181, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mo\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m60\u001b[2;64H\u001b[?25h"] [28.935765, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mr\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;65H\u001b[?25h"] [29.071917, "o", 
"\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mm\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;66H\u001b[?25h"] [29.189156, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ma\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m3\u001b[2;67H\u001b[?25h"] [29.254533, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ml\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m4\u001b[2;68H\u001b[?25h"] [29.345343, "o", "\u001b[?25l\u001b[28;116H5\u001b[2;69H\u001b[?25h"] [30.022506, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mf\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m6\u001b[2;70H\u001b[?25h"] [30.09986, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mo\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m7\u001b[2;71H\u001b[?25h"] [30.266437, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40ml\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m8\u001b[2;72H\u001b[?25h"] [30.290487, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40md\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m9\u001b[2;73H\u001b[?25h"] [30.393706, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40me\u001b[28;115H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m70\u001b[2;74H\u001b[?25h"] [30.451108, "o", "\u001b[?25l\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mr\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m1\u001b[2;75H\u001b[?25h"] [30.645571, "o", 
"\u001b[?25l\u001b[6D\u001b(B\u001b[0;4m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40mfolder\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m.\u001b[28;116H\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;131;165;152m2\u001b[2;76H\u001b[?25h"] [30.860939, "o", "\u001b[?25l\r\u001b[27B\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \r\u001b[29;1H\u001b[2 q\u001b[?25h"] [30.915162, "o", "\u001b[?25l\u001b[A\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m \u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132mNORMAL\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m » SPELL [HUN-DE-DE-FRAMI] \u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;80;73;69m▶\u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;60;56;54m▶\u001b(B\u001b[m\u001b[38;2;131;165;152m\u001b[48;2;60;56;54m new-file[+] \u001b(B\u001b[m\u001b[38;2;60;56;54m\u001b[48;2;60;56;54m◀\u001b(B\u001b[m\u001b[38;2;80;73;69m\u001b[48;2;60;56;54m◀\u001b[18C\u001b(B\u001b[m\u001b[38;2;168;153;132m\u001b[48;2;80;73;69m◀\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m 100% \u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m␊ 1/1 ㏑\u001b(B\u001b[m\u001b[38;2;40;40;40m\u001b[48;2;168;153;132m : 71 \u001b(B\u001b[m\u001b[38;2;254;128;25m\u001b[48;2;168;153;132m◀\u001b[2;75H\u001b[?25h"] [31.136268, "o", "\u001b[?25l\u001b[29;109H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m:\u001b[2;75H\u001b[?25h"] [31.136405, "o", "\u001b[?25l\u001b[29;109H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m \u001b[29;109H\r\u001b[29;1H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m:\u001b[2 q\u001b[?25h"] [31.224948, "o", "\u001b[?25lw\u001b[?25h"] [31.286773, "o", "\u001b[?25lq\u001b[?25h"] [31.42495, "o", "\u001b[?25l\r\u001b[29;1H\u001b[?25h"] [31.427199, "o", "\u001b[?25l\u001b[2 q\"new-file\"\u001b[?25h"] [31.670283, "o", "\u001b[?25l\u001b[C[Neu]\u001b[C1L,\u001b[C72C\u001b[Cgeschrieben\u001b[?25h"] 
[31.677007, "o", "\u001b[?25l"] [31.6881, "o", "\u001b[?25h"] [31.717445, "o", "\u001b[?25l\r\u001b[29;1H\u001b[H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m\u001b[M\u001b[28B\u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;251;73;52mFehler beim Ausführen von \"VimLeave Autokommandos für \"*\"\":\u001b[?25h\u001b[?25l\u001b[H\u001b(B\u001b[m\u001b[38;2;235;219;178m\u001b[48;2;40;40;40m\u001b[M\u001b[29;60H\r\u001b[29;1H\u001b(B\u001b[0;1m\u001b[38;2;40;40;40m\u001b[48;2;251;73;52mE488: Überschüssige Zeichen\u001b[?25h"] [31.717957, "o", "\u001b[?25l\r\u001b[29;1H\u001b[2 q\u001b(B\u001b[m\u001b[?25h\u001b[?1l\u001b>\u001b[?1049l\u001b[23;0;0t\u001b[?2004l\u001b[?1004l\u001b[?25h"] [31.799109, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\u001b]2;sahib@werkbank: /tmp/mount\u0007\u001b]1;/tmp/mount\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b=\u001b[?2004h"] [33.189103, "o", "\u001b[32ml\u001b[39m"] [33.316672, "o", "\b\u001b[32ml\u001b[32ms\u001b[39m"] [33.618794, "o", "\u001b[?1l\u001b>"] [33.622899, "o", "\u001b[?2004l\r\r\n"] [33.625006, "o", "\u001b]2;ls --color=tty\u0007\u001b]1;ls\u0007"] [33.634291, "o", "\u001b[0m\u001b[35mnew-file\u001b[0m \u001b[35mREADME.md\u001b[0m \u001b[1msub\u001b[0m\r\n"] [33.635036, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [33.63557, "o", "\u001b]2;sahib@werkbank: /tmp/mount\u0007"] [33.635961, "o", "\u001b]1;/tmp/mount\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [33.636845, "o", "\u001b[?1h\u001b="] [33.637693, "o", "\u001b[?2004h"] [34.15552, "o", "\u001b[1m\u001b[31mc\u001b[0m\u001b[39m"] [34.286797, "o", "\b\u001b[1m\u001b[31mc\u001b[1m\u001b[31ma\u001b[0m\u001b[39m"] [34.455531, "o", "\b\b\u001b[0m\u001b[32mc\u001b[0m\u001b[32ma\u001b[32mt\u001b[39m"] [34.566448, "o", " "] [35.046696, "o", "\u001b[4mn\u001b[24m"] [35.112191, "o", "\b\u001b[4mn\u001b[4me\u001b[24m"] [35.273926, "o", "\b\u001b[4me\u001b[4mw\u001b[24m"] [35.414208, "o", 
"\b\u001b[4mw\u001b[4m-\u001b[24m"] [35.529747, "o", "\b\u001b[4m-\u001b[4mf\u001b[24m"] [35.627323, "o", "\b\u001b[4mf\u001b[4mi\u001b[24m"] [35.779769, "o", "\b\u001b[4mi\u001b[4ml\u001b[24m"] [35.883377, "o", "\b\u001b[4ml\u001b[4me\u001b[24m"] [36.375857, "o", "\u001b[?1l\u001b>"] [36.381644, "o", "\u001b[?2004l\r\r\n"] [36.383975, "o", "\u001b]2;cat new-file\u0007\u001b]1;cat\u0007"] [36.402338, "o", "This is a new file and I can use this folder just like a normal folder.\r\n"] [36.407302, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [36.408527, "o", "\u001b]2;sahib@werkbank: /tmp/mount\u0007\u001b]1;/tmp/mount\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [36.40922, "o", "\u001b[?1h\u001b="] [36.411242, "o", "\u001b[?2004h"] [37.968678, "o", "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m"] [38.166901, "o", "\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mp\u001b[0m\u001b[39m"] [38.246176, "o", "\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mp\u001b[32mv\u001b[39m"] [38.350516, "o", " "] [38.465578, "o", "\u001b[4ms\u001b[24m"] [38.566672, "o", "\b\u001b[4ms\u001b[4mu\u001b[24m"] [38.690564, "o", "\b\u001b[4mu\u001b[4mb\u001b[24m"] [38.876413, "o", "\b\u001b[4mb\u001b[4m/\u001b[24m"] [39.302298, "o", "\b\u001b[4m/\u001b[4mmusic.mp3\u001b[24m "] [39.993006, "o", "\u001b[?1l\u001b>"] [39.998325, "o", "\u001b[?2004l\r\r\n"] [40.000757, "o", "\u001b]2;mpv sub/music.mp3\u0007\u001b]1;mpv\u0007"] [40.280961, "o", "\u001b="] [40.286502, "o", "\u001b[0mPlaying: sub/music.mp3\r\n\u001b[0m"] [40.30279, "o", "\u001b[0;33m[ffmpeg/demuxer] mp3: invalid concatenated file detected - using bitrate for duration\r\n\u001b[0m"] [40.303922, "o", "\u001b[0;33m[ffmpeg/demuxer] mp3: Estimating duration from bitrate, this may be inaccurate\r\n\u001b[0m"] [40.306491, "o", "\u001b[0m (+) Audio --aid=1 (mp3 2ch 44100Hz)\r\n\u001b[0m\u001b[0mFile tags:\r\n\u001b[0m"] [40.306638, "o", "\u001b[0m Artist: Epica\r\n\u001b[0m\u001b[0m Album: The Classical Conspiracy: Live in 
Miskolc, Hungary\r\n\u001b[0m\u001b[0m Album_Artist: Epica\r\n\u001b[0m\u001b[0m Title: The Imperial March\r\n\u001b[0m\u001b[0m Track: 8/18\r\n\u001b[0m"] [40.310502, "o", "\u001b[0mAO: [pulse] 44100Hz stereo 2ch s16\r\n\u001b[0m"] [40.336663, "o", "\r\u001b[K\u001b[0mA: 00:00:00 / 00:06:50 (0%) Cache: 10s+1MB\r\u001b[0m"] [40.450885, "o", "\r\u001b[K\u001b[0mA: 00:00:00 / 00:06:50 (0%) Cache: 10s+6MB\r\u001b[0m"] [40.525866, "o", "\r\u001b[K\u001b[0m"] [40.525925, "o", "A: 00:00:00 / 00:06:50 (0%) Cache: 10s+11MB\r\u001b[0m"] [40.575935, "o", "\r\u001b[K\u001b[0mA: 00:00:00 / 00:06:50 (0%) Cache: 10s+12MB\r\u001b[0m"] [41.337249, "o", "\r\u001b[K\u001b[0mA: 00:00:01 / 00:06:50 (0%) Cache: 10s+12MB\r\u001b[0m"] [42.315624, "o", "\r\u001b[K\u001b[0mA: 00:00:02 / 00:06:50 (0%) Cache: 10s+12MB\r\u001b[0m"] [43.335133, "o", "\r\u001b[K\u001b[0mA: 00:00:03 / 00:06:50 (0%) Cache: 10s+12MB\r\u001b[0m"] [44.356615, "o", "\r\u001b[K\u001b[0mA: 00:00:04 / 00:06:50 (0%) Cache: 10s+12MB\r\u001b[0m"] [44.42698, "o", "\r\u001b[K\u001b[0mA: 00:00:04 / 00:06:50 (1%) Cache: 10s+12MB\r\u001b[0m"] [44.561352, "o", "\r\u001b[K\u001b[0m"] [44.56157, "o", "A: 00:00:04 / 00:06:50 (1%) Cache: 9s+12MB\r\u001b[0m"] [44.631238, "o", "\r\u001b[K\u001b[0mA: 00:00:04 / 00:06:50 (1%) Cache: 10s+12MB\r\u001b[0m"] [44.790759, "o", "\u001b>"] [44.801467, "o", "\r\n\u001b[0m\r\n\u001b[0m\u001b[0m\r\n\u001b[0m\u001b[0mExiting... 
(Quit)\r\n\u001b[0m"] [44.82915, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [44.829887, "o", "\u001b]2;sahib@werkbank: /tmp/mount\u0007\u001b]1;/tmp/mount\u0007"] [44.830358, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [44.830797, "o", "\u001b[?1h\u001b="] [44.831683, "o", "\u001b[?2004h"] [45.582474, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [45.738395, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [45.836404, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [45.920141, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [45.992154, "o", " "] [46.078312, "o", "l"] [46.169958, "o", "s"] [46.259206, "o", "\u001b[?1l\u001b>"] [46.267116, "o", "\u001b[?2004l\r\r\n"] [46.269603, "o", "\u001b]2;brig ls\u0007"] [46.270458, "o", "\u001b]1;brig\u0007"] [46.466591, "o", "SIZE MODTIME PATH PIN \r\n886 B Feb 28 10:45:21 \u001b[37m/README.md\u001b[0m \u001b[36m🖈\u001b[0m \r\n72 B Feb 28 10:48:51 \u001b[37m/new-file\u001b[0m \u001b[36m🖈\u001b[0m \r\n\u001b[33m13 MB\u001b[0m Feb 28 10:46:48"] [46.466663, "o", " \u001b[32m/sub\u001b[0m \u001b[36m🖈\u001b[0m \r\n"] [46.468235, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [46.468348, "o", "\u001b]2;sahib@werkbank: /tmp/mount\u0007"] [46.468429, "o", "\u001b]1;/tmp/mount\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [46.468519, "o", "\u001b[?1h\u001b="] [46.468726, "o", "\u001b[?2004h"] ================================================ FILE: docs/asciinema/5_commits.json ================================================ {"version": 2, "width": 119, "height": 29, "timestamp": 1519811965, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}} [0.252493, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.252647, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [0.252698, "o", 
"\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [0.252849, "o", "\u001b[?1h\u001b="] [0.253042, "o", "\u001b[?2004h"] [1.089292, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [1.199596, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [1.295245, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [1.394617, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [1.475068, "o", " "] [1.532962, "o", "\u001b[4ml\u001b[24m"] [1.689091, "o", "\b\u001b[4ml\u001b[4mo\u001b[24m"] [1.764881, "o", "\b\u001b[4mo\u001b[4mg\u001b[24m"] [2.164452, "o", "\u001b[?1l\u001b>"] [2.168482, "o", "\u001b[?2004l\b\b\b\u001b[24ml\u001b[24mo\u001b[24mg\r\r\n"] [2.171031, "o", "\u001b]2;brig log\u0007"] [2.17135, "o", "\u001b]1;brig\u0007"] [2.32099, "o", "\u001b[32mSEfXUD64dE\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m \u001b[31m•\u001b[0m\u001b[36m (curr)\u001b[0m\r\n"] [2.32113, "o", "\u001b[32mSEfXUCU47p\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added initial README.md\u001b[36m (head)\u001b[0m\r\n\u001b[32mSEfXUCEaXL\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m initial commit\u001b[36m (init)\u001b[0m\r\n"] [2.322517, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [2.322672, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [2.322731, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [2.322798, "o", "\u001b[?1h\u001b="] [2.323008, "o", "\u001b[?2004h"] [3.60464, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [3.701166, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [3.806198, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [3.908073, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [3.981747, "o", " "] [4.083177, "o", "\u001b[4mc\u001b[24m"] [4.180388, "o", "\b\u001b[4mc\u001b[4mo\u001b[24m"] [4.34891, "o", 
"\b\u001b[4mo\u001b[4mm\u001b[24m"] [4.471983, "o", "\b\b\b\u001b[24mc\u001b[24mo\u001b[24mmm"] [4.662754, "o", "i"] [4.794937, "o", "t"] [4.828483, "o", " "] [4.91971, "o", "-"] [5.12147, "o", "m"] [5.232306, "o", " "] [5.36732, "o", "\u001b[33m'\u001b[39m"] [6.367657, "o", "\b\u001b[33m'\u001b[33mA\u001b[39m"] [6.542322, "o", "\b\u001b[33mA\u001b[33md\u001b[39m"] [6.703172, "o", "\b\u001b[33md\u001b[33md\u001b[39m"] [6.98262, "o", "\b\u001b[33md\u001b[33me\u001b[39m"] [7.114576, "o", "\b\u001b[33me\u001b[33md\u001b[39m"] [7.226658, "o", "\b\u001b[33md\u001b[33m \u001b[39m"] [7.400424, "o", "\b\u001b[33m \u001b[33md\u001b[39m"] [7.48366, "o", "\b\u001b[33md\u001b[33ma\u001b[39m"] [7.601921, "o", "\b\u001b[33ma\u001b[33mr\u001b[39m"] [7.741708, "o", "\b\u001b[33mr\u001b[33mt\u001b[39m"] [7.830802, "o", "\b\u001b[33mt\u001b[33mh\u001b[39m"] [7.915606, "o", "\b\u001b[33mh\u001b[33m \u001b[39m"] [8.040754, "o", "\b\u001b[33m \u001b[33mv\u001b[39m"] [8.142824, "o", "\b\u001b[33mv\u001b[33ma\u001b[39m"] [8.275411, "o", "\b\u001b[33ma\u001b[33md\u001b[39m"] [8.392729, "o", "\b\u001b[33md\u001b[33me\u001b[39m"] [8.481706, "o", "\b\u001b[33me\u001b[33mr\u001b[39m"] [8.867549, "o", "\b\u001b[33mr\u001b[33m \u001b[39m"] [9.017732, "o", "\b\u001b[33m \u001b[33mm\u001b[39m"] [9.201591, "o", "\b\u001b[33mm\u001b[33mu\u001b[39m"] [9.306462, "o", "\b\u001b[33mu\u001b[33ms\u001b[39m"] [9.421788, "o", "\b\u001b[33ms\u001b[33mi\u001b[39m"] [9.482797, "o", "\b\u001b[33mi\u001b[33mc\u001b[39m"] [9.746346, "o", "\b\u001b[33mc\u001b[33m'\u001b[39m"] [10.36415, "o", "\u001b[?1l\u001b>"] [10.370844, "o", "\u001b[?2004l\r\r\n"] [10.374473, "o", "\u001b]2;brig commit -m 'Added darth vader music'\u0007\u001b]1;brig\u0007"] [10.562116, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [10.562253, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [10.562287, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [10.562385, "o", 
"\u001b[?1h\u001b="] [10.562584, "o", "\u001b[?2004h"] [11.482654, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [11.595748, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [11.713421, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [11.801969, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [11.871499, "o", " "] [11.986051, "o", "\u001b[4ml\u001b[24m"] [12.130566, "o", "\b\u001b[4ml\u001b[4mo\u001b[24m"] [12.208039, "o", "\b\u001b[4mo\u001b[4mg\u001b[24m"] [12.3702, "o", "\u001b[?1l\u001b>"] [12.375073, "o", "\u001b[?2004l\b\b\b\u001b[24ml\u001b[24mo\u001b[24mg\r\r\n"] [12.377147, "o", "\u001b]2;brig log\u0007\u001b]1;brig\u0007"] [12.483055, "o", "\u001b[32mSEfXUBgTGT\u001b[0m \u001b[33mFeb 28 10:59:36\u001b[0m \u001b[31m•\u001b[0m\u001b[36m (curr)\u001b[0m\r\n"] [12.483199, "o", "\u001b[32mSEfXUCjYxA\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added darth vader music\u001b[36m (head)\u001b[0m\r\n\u001b[32mSEfXUCU47p\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added initial README.md\u001b[36m\u001b[0m\r\n\u001b[32mSEfXUCEaXL\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m initial commit\u001b[36m (init)\u001b[0m\r\n"] [12.484754, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [12.484868, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [12.484981, "o", "\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [12.485059, "o", "\u001b[?1h\u001b="] [12.48524, "o", "\u001b[?2004h"] [15.116498, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [15.286419, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [15.394661, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [15.484396, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [15.570743, "o", " "] [15.667128, "o", "\u001b[4me\u001b[24m"] [15.794115, "o", 
"\b\u001b[24med"] [15.83337, "o", "i"] [16.711928, "o", "t"] [16.792473, "o", " "] [16.945941, "o", "R"] [17.022294, "o", "E"] [17.072731, "o", "A"] [17.247167, "o", "D"] [17.41645, "o", "M"] [17.546609, "o", "E"] [17.745329, "o", "."] [17.940659, "o", "m"] [18.027964, "o", "d"] [18.145754, "o", "\u001b[?1l\u001b>"] [18.151185, "o", "\u001b[?2004l\r\r\n"] [18.15313, "o", "\u001b]2;brig edit README.md\u0007\u001b]1;brig\u0007"] [18.311358, "o", "\u001b[?1000h\u001b[?2004h\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\u001b[?2004h"] [18.311704, "o", "\u001b[1;29r\u001b[?12h\u001b[?12l\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[29;1H\"/tmp/brig-cmd-buffer-7867430388356292876.md\""] [18.311802, "o", " 15L, 443C"] [18.327957, "o", "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c\u001b]10;?\u0007\u001b]11;?\u0007"] [18.330073, "o", "\u001b[1;1HWelcome to brig!\r\n\r\nHere's what you can do next:\u001b[5;5H•\u001b[5;7HAdd a few remotes to sync with (See 'brig remote add -h')\r\n •\u001b[6;7HMount your data somewhere convinient (See 'brig mount -h')\r\n •\u001b[7;7HHave a relaxing day exploring brig's features.\r\n\r\nPlease remember that brig is software in it's very early stages,\r\nand will currently eat your data with near-certainty.\r\n\r\nIf you're done with this README, you can easily remove it:\u001b[14;5H$ brig rm README.md\r\n\r\n\u001b[94m~ \u001b[17;1H~ \u001b[18;1H~ \u001b[19;1H~ \u001b[20;1H~ "] [18.330257, "o", " \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ \u001b[27;1H~ \u001b[28;1H~ "] [18.330339, "o", " \u001b[m\u001b[29;102H1,1\u001b[9CAlles\u001b[1;1H\u001b[?25h"] [18.358891, "o", "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\"] [18.374291, "o", 
"\u001b[?1000l\u001b[?1006h\u001b[?1002h\u001b[?1006l\u001b[?1002l\u001b[?1006h\u001b[?1002h\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1HWelcome to brig!\r\n\r\nHere's what you can do next:\u001b[5;5H•\u001b[5;7HAdd a few remotes to sync with (See 'brig remote add -h')\r\n •\u001b[6;7HMount your data somewhere convinient (See 'brig mount -h')\r\n •\u001b[7;7HHave a relaxing day exploring brig's features.\r\n\r\nPlease remember that brig is software in it's very early stages,\r\nand will currently eat your data with near-certainty.\r\n\r\nIf you're done with this README, you can easily remove it:\u001b[14;5H$ brig rm README.md\r\n\r\n\u001b[94m~ \u001b[17;1H~ \u001b[18;1H~ \u001b[19;1H~ "] [18.374432, "o", " \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ \u001b[27;1H~ "] [18.374725, "o", " \u001b[28;1H~ \u001b[m\u001b[29;102H1,1\u001b[9CAlles\r\"/tmp/brig-cmd-buffer-7867430388356292876.md\" 15L, 443C\u001b[1;1H\u001b[?25h"] [18.829942, "o", "\u001b[?25l\u001b[29;92HG\u001b[1;1H"] [18.830099, "o", "\u001b[29;92H \u001b[15;1H\u001b[29;103H5,0-1\u001b[15;1H\u001b[?25h"] [19.244409, "o", "\u001b[?25l\u001b[29;92Ho\u001b[15;1H"] [19.244943, "o", "\u001b[29;92H \u001b[16;1H\u001b[29;1H\u001b[1m-- EINFÜGEN --\u001b[m\u001b[29;15H\u001b[K\u001b[29;102H16,1\u001b[8CAlles"] [19.245973, "o", "\u001b[16;1H\u001b[K\u001b[16;1H\u001b[?25h"] [19.448015, "o", "\u001b[?25l\u001b[17;1H\u001b[K\u001b[29;103H7\u001b[17;1H\u001b[?25h"] [20.560184, "o", "\u001b[?25lA\u001b[29;105H2\u001b[17;2H\u001b[?25h"] [20.692311, "o", "\u001b[?25ln\u001b[29;105H3\u001b[17;3H\u001b[?25h"] [20.872813, "o", "\u001b[?25lo\u001b[29;105H4\u001b[17;4H\u001b[?25h"] [21.177969, "o", "\u001b[?25l\u001b[17;3H\u001b[K\u001b[29;105H3\u001b[17;3H\u001b[?25h"] [21.326674, "o", "\u001b[?25l\u001b[17;2H\u001b[K\u001b[29;105H2\u001b[17;2H\u001b[?25h"] [21.482987, "o", 
"\u001b[?25l\u001b[17;1H\u001b[K\u001b[29;105H1\u001b[17;1H\u001b[?25h"] [21.639899, "o", "\u001b[?25l\u001b[94m~ \u001b[m\u001b[29;103H6\u001b[16;1H\u001b[?25h"] [21.84207, "o", "\u001b[?25lO\u001b[29;105H2\u001b[16;2H\u001b[?25h"] [22.336138, "o", "\u001b[?25l\u001b[16;1H\u001b[K\u001b[29;105H1\u001b[16;1H\u001b[?25h"] [22.556694, "o", "\u001b[?25lO\u001b[29;105H2\u001b[16;2H\u001b[?25h"] [22.716563, "o", "\u001b[?25ln\u001b[29;105H3\u001b[16;3H\u001b[?25h"] [22.807098, "o", "\u001b[?25le\u001b[29;105H4\u001b[16;4H\u001b[?25h"] [22.853901, "o", "\u001b[?25l\u001b[29;105H5\u001b[16;5H\u001b[?25h"] [22.927145, "o", "\u001b[?25lm\u001b[29;105H6\u001b[16;6H\u001b[?25h"] [23.078599, "o", "\u001b[?25lo\u001b[29;105H7\u001b[16;7H\u001b[?25h"] [23.147662, "o", "\u001b[?25lr\u001b[29;105H8\u001b[16;8H\u001b[?25h"] [23.213428, "o", "\u001b[?25le\u001b[29;105H9\u001b[16;9H\u001b[?25h"] [23.237209, "o", "\u001b[?25l\u001b[29;105H10\u001b[16;10H\u001b[?25h"] [23.367679, "o", "\u001b[?25le\u001b[29;106H1\u001b[16;11H\u001b[?25h"] [23.506297, "o", "\u001b[?25ld\u001b[29;106H2\u001b[16;12H\u001b[?25h"] [23.561789, "o", "\u001b[?25li\u001b[29;106H3\u001b[16;13H\u001b[?25h"] [23.732131, "o", "\u001b[?25lt\u001b[29;106H4\u001b[16;14H\u001b[?25h"] [23.89776, "o", "\u001b[?25l.\u001b[29;106H5\u001b[16;15H\u001b[?25h"] [24.121988, "o", "\u001b[29;1H\u001b[K\u001b[16;14H\u001b[?25l\u001b[29;92H^[\u001b[16;14H"] [24.222361, "o", "\u001b[29;92H \u001b[16;15H"] [24.223161, "o", "\u001b[29;102H16,14\u001b[7CAlles\u001b[16;14H\u001b[?25h"] [24.480728, "o", "\u001b[?25l\u001b[29;92H:\u001b[16;14H\u001b[29;92H\u001b[K\u001b[29;1H:"] [24.480865, "o", "\u001b[?2004h\u001b[?25h"] [24.616449, "o", "w\u001b[?25l\u001b[?25h"] [24.680356, "o", "q\u001b[?25l\u001b[?25h"] [24.782547, "o", "\r\u001b[?25l"] [24.782777, "o", "\u001b[?1006l\u001b[?1002l\u001b[?2004l\"/tmp/brig-cmd-buffer-7867430388356292876.md\""] [24.783558, "o", " 16L, 458C geschrieben"] [24.785684, "o", 
"\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?25h\u001b[?1049l\u001b[23;0;0t"] [24.976962, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [24.977439, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [24.977573, "o", "\u001b]1;~/dev/brig/docs\u0007"] [24.977854, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [24.978174, "o", "\u001b[?1h\u001b="] [24.97931, "o", "\u001b[?2004h"] [26.39299, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [26.903388, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [27.043634, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [27.143328, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [27.220485, "o", " "] [27.432396, "o", "\u001b[4mm\u001b[24m"] [27.536401, "o", "\b\u001b[24mmv"] [27.604616, "o", " "] [28.387822, "o", "s"] [28.457765, "o", "u"] [28.599949, "o", "b"] [29.092484, "o", "/"] [29.516744, "o", "m"] [29.716171, "o", "u"] [29.805969, "o", "s"] [29.894391, "o", "i"] [29.986773, "o", "c"] [30.122947, "o", "."] [30.30213, "o", "m"] [30.46432, "o", "p"] [30.537035, "o", "3"] [30.6676, "o", " "] [31.453307, "o", "s"] [31.522437, "o", "u"] [31.647459, "o", "b"] [32.02632, "o", "/"] [32.528259, "o", "e"] [32.628939, "o", "l"] [32.781576, "o", "s"] [32.932367, "o", "e"] [33.02803, "o", "."] [33.198452, "o", "m"] [33.394547, "o", "p"] [33.498707, "o", "3"] [33.72191, "o", "\u001b[?1l\u001b>"] [33.728505, "o", "\u001b[?2004l\r\r\n"] [33.730485, "o", "\u001b]2;brig mv sub/music.mp3 sub/else.mp3\u0007\u001b]1;brig\u0007"] [33.852268, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [33.852394, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [33.852468, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [33.852541, "o", "\u001b[?1h\u001b="] [33.852729, "o", "\u001b[?2004h"] [34.937603, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] 
[35.090575, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [35.180064, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [35.303067, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [35.386245, "o", " "] [35.509001, "o", "d"] [35.628174, "o", "i"] [35.711723, "o", "f"] [35.837246, "o", "f"] [36.037302, "o", "\u001b[?1l\u001b>"] [36.04126, "o", "\u001b[?2004l\r\r\n"] [36.043317, "o", "\u001b]2;brig diff\u0007\u001b]1;brig\u0007"] [36.175276, "o", "\u001b[35m•\u001b[0m\r\n├──\u001b[36m README.md ⇄ README.md\u001b[0m\r\n└──sub\r\n └──\u001b[34m else.mp3 → music.mp3\u001b[0m\r\n\r\n0 directories, 3 files\r\n"] [36.176742, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [36.176848, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [36.176922, "o", "\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [36.177004, "o", "\u001b[?1h\u001b="] [36.177226, "o", "\u001b[?2004h"] [39.463756, "o", "\u001b[?2004l\r\r\n"] ================================================ FILE: docs/asciinema/6_history.json ================================================ {"version": 2, "width": 119, "height": 29, "timestamp": 1519815271, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}} [0.247164, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.247332, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [0.2474, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [0.247473, "o", "\u001b[?1h\u001b="] [0.247687, "o", "\u001b[?2004h"] [0.758319, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [0.903542, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [0.991215, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [1.097336, "o", "\b\u001b[1m\u001b[31mi\u001b[1m\u001b[31mh\u001b[0m\u001b[39m"] [1.234071, "o", " "] [1.604765, 
"o", "\b"] [1.742115, "o", "\b\b\u001b[1m\u001b[31mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b"] [1.819036, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [1.874837, "o", " "] [2.066405, "o", "h"] [2.230528, "o", "i"] [2.308608, "o", "s"] [2.441739, "o", "t"] [2.556275, "o", "o"] [2.631017, "o", "r"] [2.782399, "o", "y"] [2.849719, "o", " "] [2.992586, "o", "n"] [3.076498, "o", "e"] [3.244405, "o", "w"] [3.343333, "o", "-"] [3.443824, "o", "f"] [3.517283, "o", "i"] [3.661094, "o", "l"] [3.760592, "o", "e"] [3.867321, "o", "\u001b[?1l\u001b>"] [3.873454, "o", "\u001b[?2004l\r\r\n"] [3.875831, "o", "\u001b]2;brig history new-file\u0007\u001b]1;brig\u0007"] [3.994567, "o", "CHANGE FROM TO WHEN \r\n\u001b[33madded\u001b[0m"] [3.994631, "o", " \u001b[36mSEfXUCU47p\u001b[0m \u001b[32mHEAD\u001b[0m \u001b[35mFeb 28 10:45:21\u001b[0m \r\n"] [3.996106, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [3.996207, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [3.996274, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [3.996336, "o", "\u001b[?1h\u001b="] [3.996539, "o", "\u001b[?2004h"] [4.612566, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [4.750317, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [4.85455, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [4.948061, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [5.035556, "o", " "] [5.136146, "o", "\u001b[4me\u001b[24m"] [5.266322, "o", "\b\u001b[24med"] [5.368632, "o", "i"] [5.518402, "o", "t"] [5.592434, "o", " "] [5.747638, "o", "n"] [5.811149, "o", "e"] [5.988417, "o", "w"] [6.102252, "o", "-"] [6.216878, "o", "f"] [6.308529, "o", "i"] [6.462365, "o", "l"] [6.568562, "o", "e"] [6.966448, "o", "\u001b[?1l\u001b>"] [6.972753, "o", "\u001b[?2004l\r\r\n"] [6.974692, "o", "\u001b]2;brig edit 
new-file\u0007\u001b]1;brig\u0007"] [7.098775, "o", "\u001b[?1000h\u001b[?2004h\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\u001b[?2004h"] [7.099102, "o", "\u001b[1;29r\u001b[?12h\u001b[?12l\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[29;1H\"/tmp/brig-cmd-buffer-1040861570544051988\""] [7.099183, "o", " 1L, 72C"] [7.102658, "o", "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c\u001b]10;?\u0007\u001b]11;?\u0007"] [7.102931, "o", "\u001b[1;1HThis is a new file and I can use this folder just like a normal folder.\r\n\u001b[94m~ \u001b[3;1H~ \u001b[4;1H~ \u001b[5;1H~ \u001b[6;1H~ \u001b[7;1H~ \u001b[8;1H~ \u001b[9;1H~ "] [7.103048, "o", " \u001b[10;1H~ \u001b[11;1H~ \u001b[12;1H~ \u001b[13;1H~ \u001b[14;1H~ \u001b[15;1H~ \u001b[16;1H~ \u001b[17;1H~ "] [7.103117, "o", " \u001b[18;1H~ \u001b[19;1H~ \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ "] [7.103178, "o", " \u001b[26;1H~ \u001b[27;1H~ \u001b[28;1H~ \u001b[m\u001b[29;102H1,1\u001b[9CAlles\u001b[1;1H\u001b[?25h"] [7.115208, "o", "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\"] [7.118422, "o", "\u001b[?1000l\u001b[?1006h\u001b[?1002h\u001b[?1006l\u001b[?1002l\u001b[?1006h\u001b[?1002h\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1HThis is a new file and I can use this folder just like a normal folder.\r\n\u001b[94m~ \u001b[3;1H~ \u001b[4;1H~ \u001b[5;1H~ \u001b[6;1H~ \u001b[7;1H~ \u001b[8;1H~ "] [7.119068, "o", " \u001b[9;1H~ \u001b[10;1H~ \u001b[11;1H~ \u001b[12;1H~ \u001b[13;1H~ \u001b[14;1H~ \u001b[15;1H~ \u001b[16;1H~ \u001b"] [7.119231, "o", "[17;1H~ \u001b[18;1H~ \u001b[19;1H~ \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ "] [7.11934, "o", " \u001b[26;1H~ \u001b[27;1H~ \u001b[28;1H~ 
\u001b[m\u001b[29;102H1,1\u001b[9CAlles\r\"/tmp/brig-cmd-buffer-1040861570544051988\" 1L, 72C\u001b[1;1H\u001b[?25h"] [8.390161, "o", "\u001b[?25l\u001b[29;92Ho\u001b[1;1H\u001b[29;92H \u001b[2;1H\u001b[29;1H\u001b[1m-- EINFÜGEN --\u001b[m\u001b[29;15H\u001b[K\u001b[29;102H2,1\u001b[9CAlles"] [8.390573, "o", "\u001b[2;1H\u001b[K\u001b[2;1H\u001b[?25h"] [8.7854, "o", "\u001b[?25lI\u001b[29;104H2\u001b[2;2H\u001b[?25h"] [8.936335, "o", "\u001b[?25l\u001b[29;104H3\u001b[2;3H\u001b[?25h"] [9.210809, "o", "\u001b[?25la\u001b[29;104H4\u001b[2;4H\u001b[?25h"] [9.316575, "o", "\u001b[?25ld\u001b[29;104H5\u001b[2;5H\u001b[?25h"] [9.485026, "o", "\u001b[?25ld\u001b[29;104H6\u001b[2;6H\u001b[?25h"] [9.589198, "o", "\u001b[?25le\u001b[29;104H7\u001b[2;7H\u001b[?25h"] [9.68663, "o", "\u001b[?25ld\u001b[29;104H8\u001b[2;8H\u001b[?25h"] [9.762494, "o", "\u001b[?25l\u001b[29;104H9\u001b[2;9H\u001b[?25h"] [9.928953, "o", "\u001b[?25la\u001b[29;104H10\u001b[2;10H\u001b[?25h"] [10.009957, "o", "\u001b[?25ln\u001b[29;105H1\u001b[2;11H\u001b[?25h"] [10.181435, "o", "\u001b[?25lo\u001b[29;105H2\u001b[2;12H\u001b[?25h"] [10.277076, "o", "\u001b[?25lt\u001b[29;105H3\u001b[2;13H\u001b[?25h"] [10.380755, "o", "\u001b[?25lh\u001b[29;105H4\u001b[2;14H\u001b[?25h"] [10.467393, "o", "\u001b[?25le\u001b[29;105H5\u001b[2;15H\u001b[?25h"] [10.515522, "o", "\u001b[?25lr\u001b[29;105H6\u001b[2;16H\u001b[?25h"] [10.599308, "o", "\u001b[?25l\u001b[29;105H7\u001b[2;17H\u001b[?25h"] [10.678216, "o", "\u001b[?25ll\u001b[29;105H8\u001b[2;18H\u001b[?25h"] [10.812556, "o", "\u001b[?25li\u001b[29;105H9\u001b[2;19H\u001b[?25h"] [10.972619, "o", "\u001b[?25ln\u001b[29;104H20\u001b[2;20H\u001b[?25h"] [11.036455, "o", "\u001b[?25le\u001b[29;105H1\u001b[2;21H\u001b[?25h"] [11.140062, "o", "\u001b[?25l\u001b[29;105H2\u001b[2;22H\u001b[?25h"] [11.219498, "o", "\u001b[?25lt\u001b[29;105H3\u001b[2;23H\u001b[?25h"] [11.330255, "o", "\u001b[?25lo\u001b[29;105H4\u001b[2;24H\u001b[?25h"] [11.394367, "o", 
"\u001b[?25l\u001b[29;105H5\u001b[2;25H\u001b[?25h"] [11.487906, "o", "\u001b[?25ls\u001b[29;105H6\u001b[2;26H\u001b[?25h"] [11.550727, "o", "\u001b[?25lh\u001b[29;105H7\u001b[2;27H\u001b[?25h"] [11.714439, "o", "\u001b[?25lo\u001b[29;105H8\u001b[2;28H\u001b[?25h"] [11.82308, "o", "\u001b[?25lw\u001b[29;105H9\u001b[2;29H\u001b[?25h"] [11.911686, "o", "\u001b[?25l\u001b[29;104H30\u001b[2;30H\u001b[?25h"] [12.008968, "o", "\u001b[?25lt\u001b[29;105H1\u001b[2;31H\u001b[?25h"] [12.089608, "o", "\u001b[?25lh\u001b[29;105H2\u001b[2;32H\u001b[?25h"] [12.179902, "o", "\u001b[?25le\u001b[29;105H3\u001b[2;33H\u001b[?25h"] [12.216415, "o", "\u001b[?25l\u001b[29;105H4\u001b[2;34H\u001b[?25h"] [12.425471, "o", "\u001b[?25lh\u001b[29;105H5\u001b[2;35H\u001b[?25h"] [12.555247, "o", "\u001b[?25li\u001b[29;105H6\u001b[2;36H\u001b[?25h"] [12.661093, "o", "\u001b[?25ls\u001b[29;105H7\u001b[2;37H\u001b[?25h"] [12.798374, "o", "\u001b[?25ly\u001b[29;105H8\u001b[2;38H\u001b[?25h"] [12.891934, "o", "\u001b[?25lo\u001b[29;105H9\u001b[2;39H\u001b[?25h"] [12.999356, "o", "\u001b[?25lr\u001b[29;104H40\u001b[2;40H\u001b[?25h"] [13.307156, "o", "\u001b[?25l\u001b[2;39H\u001b[K\u001b[29;104H39\u001b[2;39H\u001b[?25h"] [13.446672, "o", "\u001b[?25l\u001b[2;38H\u001b[K\u001b[29;105H8\u001b[2;38H\u001b[?25h"] [13.583738, "o", "\u001b[?25l\u001b[2;37H\u001b[K\u001b[29;105H7\u001b[2;37H\u001b[?25h"] [13.680682, "o", "\u001b[?25lt\u001b[29;105H8\u001b[2;38H\u001b[?25h"] [13.774376, "o", "\u001b[?25lo\u001b[29;105H9\u001b[2;39H\u001b[?25h"] [13.861644, "o", "\u001b[?25lr\u001b[29;104H40\u001b[2;40H\u001b[?25h"] [14.015369, "o", "\u001b[?25ly\u001b[29;105H1\u001b[2;41H\u001b[?25h"] [14.086862, "o", "\u001b[?25l\u001b[29;105H2\u001b[2;42H\u001b[?25h"] [14.235689, "o", "\u001b[?25lf\u001b[29;105H3\u001b[2;43H\u001b[?25h"] [14.935863, "o", "\u001b[?25le\u001b[29;105H4\u001b[2;44H\u001b[?25h"] [15.011571, "o", "\u001b[?25la\u001b[29;105H5\u001b[2;45H\u001b[?25h"] [15.144528, "o", 
"\u001b[?25lt\u001b[29;105H6\u001b[2;46H\u001b[?25h"] [15.26693, "o", "\u001b[?25lu\u001b[29;105H7\u001b[2;47H\u001b[?25h"] [15.337705, "o", "\u001b[?25lr\u001b[29;105H8\u001b[2;48H\u001b[?25h"] [15.408419, "o", "\u001b[?25le\u001b[29;105H9\u001b[2;49H\u001b[?25h"] [15.520857, "o", "\u001b[?25l.\u001b[29;104H50\u001b[2;50H\u001b[?25h"] [15.684168, "o", "\u001b[29;1H\u001b[K\u001b[2;49H\u001b[?25l\u001b[29;92H^[\u001b[2;49H"] [15.784483, "o", "\u001b[29;92H \u001b[2;50H"] [15.785392, "o", "\u001b[29;102H2,49\u001b[8CAlles\u001b[2;49H\u001b[?25h"] [15.965731, "o", "\u001b[?25l\u001b[29;92H:\u001b[2;49H"] [15.965848, "o", "\u001b[29;92H\u001b[K\u001b[29;1H:\u001b[?2004h\u001b[?25h"] [16.0631, "o", "w\u001b[?25l\u001b[?25h"] [16.130332, "o", "q\u001b[?25l\u001b[?25h"] [16.237935, "o", "\r"] [16.238532, "o", "\u001b[?25l\u001b[?1006l\u001b[?1002l\u001b[?2004l\"/tmp/brig-cmd-buffer-1040861570544051988\""] [16.239785, "o", " 2L, 122C geschrieben"] [16.242119, "o", "\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?25h\u001b[?1049l\u001b[23;0;0t"] [16.483625, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [16.483851, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ "] [16.484182, "o", "\u001b[K\u001b[?1h\u001b="] [16.484373, "o", "\u001b[?2004h"] [16.649917, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [16.74022, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [16.863539, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [16.933108, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [17.02806, "o", " "] [17.139172, "o", "\u001b[4mc\u001b[24m"] [17.196635, "o", "\b\u001b[4mc\u001b[4mo\u001b[24m"] [17.369386, "o", "\b\u001b[4mo\u001b[4mm\u001b[24m"] [17.506413, "o", "\b\b\b\u001b[24mc\u001b[24mo\u001b[24mmm"] [17.70377, "o", "i"] [17.781604, "o", "t"] [17.844497, "o", " "] 
[17.932295, "o", "-"] [18.147048, "o", "m"] [19.411942, "o", " "] [19.579929, "o", "\u001b[33m'\u001b[39m"] [20.289417, "o", "\b\u001b[33m'\u001b[33me\u001b[39m"] [20.429284, "o", "\b\u001b[33me\u001b[33md\u001b[39m"] [20.564702, "o", "\b\u001b[33md\u001b[33mi\u001b[39m"] [20.64266, "o", "\b\u001b[33mi\u001b[33mt\u001b[39m"] [20.694368, "o", "\b\u001b[33mt\u001b[33me\u001b[39m"] [20.818051, "o", "\b\u001b[33me\u001b[33md\u001b[39m"] [20.908198, "o", "\b\u001b[33md\u001b[33m \u001b[39m"] [20.970043, "o", "\b\u001b[33m \u001b[33mn\u001b[39m"] [21.013369, "o", "\b\u001b[33mn\u001b[33me\u001b[39m"] [21.171105, "o", "\b\u001b[33me\u001b[33mw\u001b[39m"] [21.240077, "o", "\b\u001b[33mw\u001b[33m-\u001b[39m"] [21.33241, "o", "\b\u001b[33m-\u001b[33mf\u001b[39m"] [21.421937, "o", "\b\u001b[33mf\u001b[33mi\u001b[39m"] [21.581226, "o", "\b\u001b[33mi\u001b[33ml\u001b[39m"] [21.694969, "o", "\b\u001b[33ml\u001b[33me\u001b[39m"] [21.839741, "o", "\b\u001b[33me\u001b[33m'\u001b[39m"] [22.007146, "o", "\u001b[?1l\u001b>"] [22.013129, "o", "\u001b[?2004l\r\r\n"] [22.01528, "o", "\u001b]2;brig commit -m 'edited new-file'\u0007\u001b]1;brig\u0007"] [22.139023, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [22.139152, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [22.13918, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [22.139295, "o", "\u001b[?1h\u001b="] [22.13949, "o", "\u001b[?2004h"] [23.020604, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [23.13374, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [23.26246, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [23.335487, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [23.437349, "o", " "] [23.917417, "o", "\u001b[4ml\u001b[24m"] [24.065659, "o", "\b\u001b[4ml\u001b[4mo\u001b[24m"] [24.14673, "o", "\b\u001b[4mo\u001b[4mg\u001b[24m"] [24.248101, "o", 
"\u001b[?1l\u001b>"] [24.253336, "o", "\u001b[?2004l\b\b\b\u001b[24ml\u001b[24mo\u001b[24mg\r\r\n"] [24.255808, "o", "\u001b]2;brig log\u0007\u001b]1;brig\u0007"] [24.368164, "o", "\u001b[32mSEfXUERkEf\u001b[0m \u001b[33mFeb 28 11:54:53\u001b[0m \u001b[31m•\u001b[0m\u001b[36m (curr)\u001b[0m\r\n"] [24.368292, "o", "\u001b[32mSEfXUBmLmQ\u001b[0m \u001b[33mFeb 28 10:59:36\u001b[0m edited new-file\u001b[36m (head)\u001b[0m\r\n\u001b[32mSEfXUCjYxA\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added darth vader music\u001b[36m\u001b[0m\r\n\u001b[32mSEfXUCU47p\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added initial README.md\u001b[36m\u001b[0m\r\n"] [24.368379, "o", "\u001b[32mSEfXUCEaXL\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m initial commit\u001b[36m (init)\u001b[0m\r\n"] [24.369869, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [24.370075, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [24.370148, "o", "\u001b[?1h\u001b="] [24.370345, "o", "\u001b[?2004h"] [26.14913, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [26.246472, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [26.346298, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [26.437241, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [30.638228, "o", " "] [30.201934, "o", "h"] [30.329335, "o", "i"] [30.411966, "o", "s"] [30.557041, "o", "y"] [30.647071, "o", "o"] [30.764317, "o", "r"] [31.0818, "o", "\b \b"] [31.228267, "o", "\b \b"] [31.376425, "o", "\b \b"] [31.504246, "o", "t"] [31.605135, "o", "o"] [31.697515, "o", "r"] [31.868937, "o", "y"] [31.949671, "o", " "] [32.481463, "o", "n"] [32.540945, "o", "e"] [32.706198, "o", "w"] [32.780756, "o", "-"] [32.893561, "o", "f"] [32.984879, "o", "i"] [33.131834, "o", "l"] [33.235133, "o", "e"] [33.29174, "o", "\u001b[?1l\u001b>"] [33.298203, "o", 
"\u001b[?2004l\r\r\n"] [33.30009, "o", "\u001b]2;brig history new-file\u0007\u001b]1;brig\u0007"] [33.411816, "o", "CHANGE FROM TO WHEN \r\n\u001b[33mmodified\u001b[0m \u001b[36mSEfXUCjYxA\u001b[0m \u001b[32mHEAD\u001b[0m \u001b[35mFeb 28 10:59:36\u001b[0m \r\n\u001b[33madded\u001b[0m"] [33.411883, "o", " \u001b[36mSEfXUCU47p\u001b[0m \u001b[32mSEfXUCjYxA\u001b[0m \u001b[35mFeb 28 10:45:21\u001b[0m \r\n"] [33.413372, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [33.413489, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [33.413564, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [33.413654, "o", "\u001b[?1h\u001b="] [33.413868, "o", "\u001b[?2004h"] [34.337473, "o", "\u001b[32mbrig\u001b[39m history new-file"] [34.823467, "o", "\b\b\b\b \b\b\b\b"] [35.02909, "o", "\b\b\b\b \b\b\b\b"] [35.651468, "o", "\u001b[8D \b\b\b\b\b\b\b"] [35.930863, "o", "\u001b[4mr\u001b[24m"] [35.992925, "o", "\b\u001b[4mr\u001b[4me\u001b[24m"] [36.177469, "o", "\b\b\u001b[24mr\u001b[24mes"] [36.343523, "o", "e"] [36.425736, "o", "t"] [36.475568, "o", " "] [37.198725, "o", "n"] [37.297678, "o", "e"] [37.66047, "o", "\b \b"] [37.811151, "o", "\b \b"] [38.581713, "o", "H"] [38.674511, "o", "E"] [38.720165, "o", "A"] [38.849624, "o", "D"] [39.521639, "o", "^"] [39.629115, "o", " "] [39.97663, "o", "n"] [40.071728, "o", "e"] [40.241242, "o", "w"] [40.336104, "o", "-"] [40.44449, "o", "f"] [40.546932, "o", "i"] [40.701172, "o", "l"] [40.796942, "o", "e"] [40.88402, "o", "\u001b[?1l\u001b>"] [40.890725, "o", "\u001b[?2004l\r\r\n"] [40.892491, "o", "\u001b]2;brig reset HEAD^ new-file\u0007\u001b]1;brig\u0007"] [41.011663, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [41.011779, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [41.011852, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [41.011931, "o", "\u001b[?1h\u001b="] [41.012152, "o", "\u001b[?2004h"] [43.678464, "o", 
"\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [43.771458, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [43.869942, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [43.974751, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [44.052256, "o", " "] [44.319182, "o", "\u001b[4mc\u001b[24m"] [44.389519, "o", "\b\u001b[24mca"] [44.537355, "o", "t"] [44.605275, "o", " "] [45.401779, "o", "n"] [45.476219, "o", "e"] [45.63769, "o", "w"] [45.742511, "o", "-"] [45.841412, "o", "f"] [45.935062, "o", "i"] [46.094286, "o", "l"] [46.199279, "o", "e"] [46.29546, "o", "\u001b[?1l\u001b>"] [46.301738, "o", "\u001b[?2004l\r\r\n"] [46.303773, "o", "\u001b]2;brig cat new-file\u0007\u001b]1;brig\u0007"] [46.462339, "o", "This is a new file and I can use this folder just like a normal folder.\r\n"] [46.465781, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [46.465999, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [46.466058, "o", "\u001b[?1h\u001b="] [46.466267, "o", "\u001b[?2004h"] [50.068751, "o", "\u001b[?2004l\r\r\n"] ================================================ FILE: docs/asciinema/7_remotes.json ================================================ {"version": 2, "width": 119, "height": 29, "timestamp": 1519833254, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}, "title": "brig remote"} [0.256979, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.257127, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [0.257195, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [0.257294, "o", "\u001b[?1h\u001b="] [0.257515, "o", "\u001b[?2004h"] [0.452491, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [0.592882, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [0.657491, "o", 
"\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [0.768666, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [0.836203, "o", " "] [0.935009, "o", "w"] [1.089043, "o", "h"] [1.16401, "o", "o"] [1.309524, "o", "a"] [1.374321, "o", "m"] [1.554943, "o", "i"] [1.864501, "o", "\u001b[?1l\u001b>"] [1.868891, "o", "\u001b[?2004l\r\r\n"] [1.871071, "o", "\u001b]2;brig whoami\u0007\u001b]1;brig\u0007"] [2.029988, "o", "- Name: \u001b[33msahib@wald.de/laptop\u001b[0m\r\n Fingerprint: QmXdjMyoLSYzE5r2v1LFZFkgDtNjtcLZZuoHesg3temwLH:SEfXUDYVaZuHryzkXmQvsD35qYz9iBZwnpnRVQvwJjB5yWu2Ygqzbu23CCWUyvk7GVtmwg5m5j93b4S9QNhNs7UXmkPid\r\n"] [2.031508, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [2.031622, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [2.031701, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [2.031773, "o", "\u001b[?1h\u001b="] [2.03198, "o", "\u001b[?2004h"] [3.57628, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [3.690324, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [3.806995, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [3.880188, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [3.961119, "o", " "] [4.064671, "o", "n"] [4.144889, "o", "e"] [4.249377, "o", "t"] [4.299546, "o", " "] [4.433507, "o", "\u001b[4ml\u001b[24m"] [4.59375, "o", "\b\u001b[4ml\u001b[4mo\u001b[24m"] [4.750709, "o", "\b\b\u001b[24ml\u001b[24moc"] [4.794775, "o", "a"] [4.943057, "o", "t"] [5.005801, "o", "e"] [5.036716, "o", " "] [5.521982, "o", "\u001b[4ma\u001b[24m"] [5.66573, "o", "\b\u001b[24mal"] [5.849781, "o", "i"] [5.949514, "o", "c"] [5.993024, "o", "e"] [6.615229, "o", "\u001b[?1l\u001b>"] [6.624865, "o", "\u001b[?2004l\r\r\n"] [6.627092, "o", "\u001b]2;brig net locate alice\u0007\u001b]1;brig\u0007"] [7.24608, "o", 
"Scanning. \r"] [7.746166, "o", "Scanning.. \r"] [8.246136, "o", "Scanning... \r"] [8.746117, "o", "Scanning.... \r"] [9.246041, "o", "Scanning.....\r"] [9.746112, "o", "Scanning. \r"] [10.246042, "o", "Scanning.. \r"] [10.746183, "o", "Scanning... \r"] [11.246097, "o", "Scanning.... \r"] [11.746129, "o", "Scanning.....\r"] [12.246214, "o", "Scanning. \r"] [12.746018, "o", "Scanning.. \r"] [13.246042, "o", "Scanning... \r"] [13.746093, "o", "Scanning.... \r"] [14.246131, "o", "Scanning.....\r"] [14.746065, "o", "Scanning. \r"] [15.246104, "o", "Scanning.. \r"] [15.746047, "o", "Scanning... \r"] [16.246048, "o", "Scanning.... \r"] [16.746052, "o", "Scanning.....\r"] [16.776321, "o", "NAME TYPE FINGERPRINT\r\nalice@wonderland.lit/container email \u001b[32mQmUzLSHCKUNDow8YS6tEZAPzKYVqoG8oYj41ioN1YgHWCb:SEfXUDA157kQoqRc6EvwRy3v3kFdTJh2HXcWso7HBGJVMi7WhCC476Gp6BHMQKtZiVuJqc4o43RZ1dz3dPeJEgLyMcRTB\u001b[0m\r\n"] [18.54981, "o", "^C"] [18.555759, "o", "\r\n"] [18.556274, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [18.556666, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [18.557775, "o", "\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b="] [18.558285, "o", "\u001b[?2004h"] [19.449127, "o", "\u001b[1m\u001b[31mn\u001b[0m\u001b[39m"] [19.548774, "o", "\b\u001b[1m\u001b[31mn\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [19.644313, "o", "\b\b\u001b[1m\u001b[31mn\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [19.732335, "o", "\b\u001b[1m\u001b[31mi\u001b[1m\u001b[31mg\u001b[0m\u001b[39m"] [19.796273, "o", " "] [20.355635, "o", "\b\b\b\b\b\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \b\b\b\b"] [20.610364, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [20.713316, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [20.842937, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [20.943321, "o", 
"\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [21.056141, "o", " "] [21.20604, "o", "\u001b[4mr\u001b[24m"] [21.269933, "o", "\b\u001b[4mr\u001b[4me\u001b[24m"] [21.303008, "o", "\b\b\u001b[24mr\u001b[24mem"] [21.466296, "o", "o"] [21.515036, "o", "t"] [21.558255, "o", "e"] [21.595261, "o", " "] [21.701607, "o", "\u001b[4ml\u001b[24m"] [21.790604, "o", "\b\u001b[24mls"] [21.875449, "o", "\u001b[?1l\u001b>"] [21.880974, "o", "\u001b[?2004l\r\r\n"] [21.883036, "o", "\u001b]2;brig remote ls\u0007\u001b]1;brig\u0007"] [22.033551, "o", "None yet. Use `brig remote add ` to add some.\r\n"] [22.034945, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [22.035119, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [22.03519, "o", "\u001b]1;~/dev/brig/docs\u0007"] [22.035295, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [22.035476, "o", "\u001b[?1h\u001b="] [22.035744, "o", "\u001b[?2004h"] [23.350038, "o", "\u001b[32mbrig\u001b[39m remote ls"] [23.797034, "o", "\b\b \b\b"] [24.100147, "o", "\u001b[4ma\u001b[24m"] [24.191926, "o", "\b\u001b[24mad"] [24.372643, "o", "d"] [24.466264, "o", " "] [25.414876, "o", "\u001b[4ma\u001b[24m"] [25.522413, "o", "\b\u001b[24mal"] [25.687799, "o", "i"] [25.748435, "o", "c"] [25.806366, "o", "e"] [25.863034, "o", " "] [29.196254, "o", "\u001b[22D\u001b[39mb\u001b[39mr\u001b[39mi\u001b[39mg\u001b[18C\u001b[7mQmUzLSHCKUNDow8YS6tEZAPzKYVqoG8oYj41ioN1YgHWCb:SEfXUDA157kQoqRc6EvwRy3v3kFdTJh2HXcWso7HBGJVMi7W\u001b[7mh\u001b[7mCC476Gp6BHMQKtZiVuJqc4o43RZ1dz3dPeJEgLyMcRTB\u001b[27m\u001b[K"] [30.112026, "o", 
"\u001b[A\u001b[43D\u001b[32mb\u001b[32mr\u001b[32mi\u001b[32mg\u001b[39m\u001b[18C\u001b[27mQ\u001b[27mm\u001b[27mU\u001b[27mz\u001b[27mL\u001b[27mS\u001b[27mH\u001b[27mC\u001b[27mK\u001b[27mU\u001b[27mN\u001b[27mD\u001b[27mo\u001b[27mw\u001b[27m8\u001b[27mY\u001b[27mS\u001b[27m6\u001b[27mt\u001b[27mE\u001b[27mZ\u001b[27mA\u001b[27mP\u001b[27mz\u001b[27mK\u001b[27mY\u001b[27mV\u001b[27mq\u001b[27mo\u001b[27mG\u001b[27m8\u001b[27mo\u001b[27mY\u001b[27mj\u001b[27m4\u001b[27m1\u001b[27mi\u001b[27mo\u001b[27mN\u001b[27m1\u001b[27mY\u001b[27mg\u001b[27mH\u001b[27mW\u001b[27mC\u001b[27mb\u001b[27m:\u001b[27mS\u001b[27mE\u001b[27mf\u001b[27mX\u001b[27mU\u001b[27mD\u001b[27mA\u001b[27m1\u001b[27m5\u001b[27m7\u001b[27mk\u001b[27mQ\u001b[27mo\u001b[27mq\u001b[27mR\u001b[27mc\u001b[27m6\u001b[27mE\u001b[27mv\u001b[27mw\u001b[27mR\u001b[27my\u001b[27m3\u001b[27mv\u001b[27m3\u001b[27mk\u001b[27mF\u001b[27md\u001b[27mT\u001b[27mJ\u001b[27mh\u001b[27m2\u001b[27mH\u001b[27mX\u001b[27mc\u001b[27mW\u001b[27ms\u001b[27mo\u001b[27m7\u001b[27mH\u001b[27mB\u001b[27mG\u001b[27mJ\u001b[27mV\u001b[27mM\u001b[27mi\u001b[27m7\u001b[27mWh\u001b[27mC\u001b[27mC\u001b[27m4\u001b[27m7\u001b[27m6\u001b[27mG\u001b[27mp\u001b[27m6\u001b[27mB\u001b[27mH\u001b[27mM\u001b[27mQ\u001b[27mK\u001b[27mt\u001b[27mZ\u001b[27mi\u001b[27mV\u001b[27mu\u001b[27mJ\u001b[27mq\u001b[27mc\u001b[27m4\u001b[27mo\u001b[27m4\u001b[27m3\u001b[27mR\u001b[27mZ\u001b[27m1\u001b[27md\u001b[27mz\u001b[27m3\u001b[27md\u001b[27mP\u001b[27me\u001b[27mJ\u001b[27mE\u001b[27mg\u001b[27mL\u001b[27my\u001b[27mM\u001b[27mc\u001b[27mR\u001b[27mT\u001b[27mB"] [30.112723, "o", "\u001b[?1l\u001b>"] [30.121712, "o", "\u001b[?2004l\r\r\n"] [30.123729, "o", "\u001b]2;brig remote add alice \u0007\u001b]1;brig\u0007"] [30.245746, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [30.245919, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [30.24613, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ 
\u001b[K"] [30.246223, "o", "\u001b[?1h\u001b="] [30.246451, "o", "\u001b[?2004h"] [31.932036, "o", "\u001b[32mbrig\u001b[39m remote add alice QmUzLSHCKUNDow8YS6tEZAPzKYVqoG8oYj41ioN1YgHWCb:SEfXUDA157kQoqRc6EvwRy3v3kFdTJh2HXcWso7HBGJVMi7WhCC476Gp6BHMQKtZiVuJqc4o43RZ1dz3dPeJEgLyMcRTB\u001b[K"] [32.081344, "o", "\u001b[A\u001b[31Dls\u001b[K\u001b[1B\r\u001b[K\u001b[A\u001b[16C"] [32.635824, "o", "\u001b[?1l\u001b>"] [32.641717, "o", "\u001b[?2004l\u001b[1B\r"] [32.643823, "o", "\u001b]2;brig remote ls\u0007\u001b]1;brig\u0007"] [32.753559, "o", "- Name: alice\r\n Fingerprint: QmUzLSHCKUNDow8YS6tEZAPzKYVqoG8oYj41ioN1YgHWCb:SEfXUDA157kQoqRc6EvwRy3v3kFdTJh2HXcWso7HBGJVMi7WhCC476Gp6BHMQKtZiVuJqc4o43RZ1dz3dPeJEgLyMcRTB\r\n\r\n"] [32.755086, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [32.755201, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [32.755269, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [32.755388, "o", "\u001b[?1h\u001b="] [32.755607, "o", "\u001b[?2004h"] [34.857157, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [34.964859, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [35.066833, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [35.147421, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [35.21895, "o", " "] [35.530163, "o", "\u001b[4mr\u001b[24m"] [35.592531, "o", "\b\u001b[4mr\u001b[4me\u001b[24m"] [35.64326, "o", "\b\b\u001b[24mr\u001b[24mem"] [35.818233, "o", "o"] [35.86627, "o", "t"] [35.909441, "o", "e"] [35.960071, "o", " "] [36.225287, "o", "\u001b[4me\u001b[24m"] [36.330022, "o", "\b\u001b[24med"] [36.445383, "o", "i"] [36.537795, "o", "t"] [36.657444, "o", "\u001b[?1l\u001b>"] [36.663347, "o", "\u001b[?2004l\r\r\n"] [36.665361, "o", "\u001b]2;brig remote edit\u0007\u001b]1;brig\u0007"] [36.809897, "o", 
"\u001b[?1000h\u001b[?2004h\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\u001b[?2004h"] [36.810124, "o", "\u001b[1;29r\u001b[?12h\u001b[?12l\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[29;1H\"/tmp/brig-cmd-buffer-7541559781143048777yml\""] [36.810195, "o", " 2L, 170C"] [36.811364, "o", "\u001b[2;1H▽\u001b[6n\u001b[2;1H \u001b[1;1H\u001b[>c\u001b]10;?\u0007\u001b]11;?\u0007"] [36.811624, "o", "\u001b[1;1H- Name: alice\r\n Fingerprint: QmUzLSHCKUNDow8YS6tEZAPzKYVqoG8oYj41ioN1YgHWCb:SEfXUDA157kQoqRc6EvwRy3v3kFdTJh2HXcWso7HBGJVMi7WhCC476Gp66\u001b[3;1HBHMQKtZiVuJqc4o43RZ1dz3dPeJEgLyMcRTB\r\n\u001b[94m~ \u001b[5;1H~ \u001b[6;1H~ \u001b[7;1H~ \u001b[8;1H~ \u001b[9;1H~ \u001b[10;1H~ "] [36.811691, "o", " \u001b[11;1H~ \u001b[12;1H~ \u001b[13;1H~ \u001b[14;1H~ \u001b[15;1H~ \u001b[16;1H~ \u001b[17;1H~ \u001b[18;1H~ "] [36.811752, "o", " \u001b[19;1H~ \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ \u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ "] [36.811809, "o", " \u001b[27;1H~ \u001b[28;1H~ \u001b[m\u001b[29;102H1,1\u001b[9CAlles\u001b[1;1H\u001b[?25h"] [36.821729, "o", "\u001bP+q436f\u001b\\\u001bP+q6b75\u001b\\\u001bP+q6b64\u001b\\\u001bP+q6b72\u001b\\\u001bP+q6b6c\u001b\\\u001bP+q2332\u001b\\\u001bP+q2334\u001b\\\u001bP+q2569\u001b\\\u001bP+q2a37\u001b\\\u001bP+q6b31\u001b\\"] [36.824519, "o", "\u001b[?1000l\u001b[?1006h\u001b[?1002h\u001b[?1006l\u001b[?1002l\u001b[?1006h\u001b[?1002h\u001b[27m\u001b[23m\u001b[29m\u001b[m\u001b[H\u001b[2J\u001b[?25l\u001b[1;1H- Name: alice\r\n Fingerprint: QmUzLSHCKUNDow8YS6tEZAPzKYVqoG8oYj41ioN1YgHWCb:SEfXUDA157kQoqRc6EvwRy3v3kFdTJh2HXcWso7HBGJVMi7WhCC476Gp66\u001b[3;1HBHMQKtZiVuJqc4o43RZ1dz3dPeJEgLyMcRTB\r\n\u001b[94m~ \u001b[5;1H~ \u001b[6;1H~ \u001b[7;1H~ \u001b[8;1H~ \u001b[9;1H~ \u001b[1"] [36.824903, "o", "0;1H~ \u001b[11;1H~ \u001b[12;1H~ \u001b[13;1H~ \u001b[14;1H~ \u001b[15;1H~ \u001b[16;1H~ \u001b[17;1H~ \u001b[18;1H~ "] [36.825159, "o", " \u001b[19;1H~ \u001b[20;1H~ \u001b[21;1H~ \u001b[22;1H~ 
\u001b[23;1H~ \u001b[24;1H~ \u001b[25;1H~ \u001b[26;1H~ "] [36.825392, "o", " \u001b[27;1H~ \u001b[28;1H~ \u001b[m\u001b[29;102H1,1\u001b[9CAlles\r\"/tmp/brig-cmd-buffer-7541559781143048777yml\" 2L, 170C\u001b[1;1H\u001b[?25h"] [37.72463, "o", "\u001b[?25l\u001b[29;92HO\u001b[1;1H"] [37.724801, "o", "\u001b[29;92H \u001b[1;1H\u001b[29;1H\u001b[1m-- EINFÜGEN --\u001b[m\u001b[29;15H\u001b[K\u001b[29;102H1,1\u001b[9CAlles"] [37.725003, "o", "\u001b[1;28r\u001b[1;1H\u001b[L\u001b[1;29r"] [37.725116, "o", "\u001b[29;102H\u001b[K\u001b[29;102H1,1\u001b[9CAlles\u001b[1;1H\u001b[?25h"] [38.227707, "o", "\u001b[?25l#\u001b[29;104H2\u001b[1;2H\u001b[?25h"] [38.338907, "o", "\u001b[?25l\u001b[29;104H3\u001b[1;3H\u001b[?25h"] [38.530171, "o", "\u001b[?25lY\u001b[29;104H4\u001b[1;4H\u001b[?25h"] [38.666247, "o", "\u001b[?25lo\u001b[29;104H5\u001b[1;5H\u001b[?25h"] [38.829278, "o", "\u001b[?25lu\u001b[29;104H6\u001b[1;6H\u001b[?25h"] [38.915063, "o", "\u001b[?25l\u001b[29;104H7\u001b[1;7H\u001b[?25h"] [39.028541, "o", "\u001b[?25lc\u001b[29;104H8\u001b[1;8H\u001b[?25h"] [39.105496, "o", "\u001b[?25lo\u001b[29;104H9\u001b[1;9H\u001b[?25h"] [39.261918, "o", "\u001b[?25lu\u001b[29;104H10\u001b[1;10H\u001b[?25h"] [39.427131, "o", "\u001b[?25ll\u001b[29;105H1\u001b[1;11H\u001b[?25h"] [39.509225, "o", "\u001b[?25ld\u001b[29;105H2\u001b[1;12H\u001b[?25h"] [39.598817, "o", "\u001b[?25l\u001b[29;105H3\u001b[1;13H\u001b[?25h"] [39.661772, "o", "\u001b[?25la\u001b[29;105H4\u001b[1;14H\u001b[?25h"] [40.286289, "o", "\u001b[?25ll\u001b[29;105H5\u001b[1;15H\u001b[?25h"] [40.445697, "o", "\u001b[?25ls\u001b[29;105H6\u001b[1;16H\u001b[?25h"] [40.581195, "o", "\u001b[?25lo\u001b[29;105H7\u001b[1;17H\u001b[?25h"] [40.693541, "o", "\u001b[?25l\u001b[29;105H8\u001b[1;18H\u001b[?25h"] [40.787179, "o", "\u001b[?25le\u001b[29;105H9\u001b[1;19H\u001b[?25h"] [40.916071, "o", "\u001b[?25ld\u001b[29;104H20\u001b[1;20H\u001b[?25h"] [41.055248, "o", "\u001b[?25li\u001b[29;105H1\u001b[1;21H\u001b[?25h"] 
[41.122993, "o", "\u001b[?25lt\u001b[29;105H2\u001b[1;22H\u001b[?25h"] [41.224271, "o", "\u001b[?25l\u001b[29;105H3\u001b[1;23H\u001b[?25h"] [41.297735, "o", "\u001b[?25lt\u001b[29;105H4\u001b[1;24H\u001b[?25h"] [41.374971, "o", "\u001b[?25lh\u001b[29;105H5\u001b[1;25H\u001b[?25h"] [41.499302, "o", "\u001b[?25le\u001b[29;105H6\u001b[1;26H\u001b[?25h"] [41.541186, "o", "\u001b[?25l\u001b[29;105H7\u001b[1;27H\u001b[?25h"] [41.660885, "o", "\u001b[?25ll\u001b[29;105H8\u001b[1;28H\u001b[?25h"] [41.801246, "o", "\u001b[?25li\u001b[29;105H9\u001b[1;29H\u001b[?25h"] [41.856822, "o", "\u001b[?25ls\u001b[29;104H30\u001b[1;30H\u001b[?25h"] [42.001292, "o", "\u001b[?25lt\u001b[29;105H1\u001b[1;31H\u001b[?25h"] [42.047968, "o", "\u001b[?25l\u001b[29;105H2\u001b[1;32H\u001b[?25h"] [42.143845, "o", "\u001b[?25li\u001b[29;105H3\u001b[1;33H\u001b[?25h"] [42.309877, "o", "\u001b[?25ln\u001b[29;105H4\u001b[1;34H\u001b[?25h"] [42.392342, "o", "\u001b[?25l\u001b[29;105H5\u001b[1;35H\u001b[?25h"] [42.592242, "o", "\u001b[?25lt\u001b[29;105H6\u001b[1;36H\u001b[?25h"] [42.668149, "o", "\u001b[?25lh\u001b[29;105H7\u001b[1;37H\u001b[?25h"] [42.746122, "o", "\u001b[?25le\u001b[29;105H8\u001b[1;38H\u001b[?25h"] [42.828254, "o", "\u001b[?25l\u001b[29;105H9\u001b[1;39H\u001b[?25h"] [43.003693, "o", "\u001b[?25le\u001b[29;104H40\u001b[1;40H\u001b[?25h"] [43.154363, "o", "\u001b[?25ld\u001b[29;105H1\u001b[1;41H\u001b[?25h"] [43.263772, "o", "\u001b[?25li\u001b[29;105H2\u001b[1;42H\u001b[?25h"] [43.367632, "o", "\u001b[?25lt\u001b[29;105H3\u001b[1;43H\u001b[?25h"] [43.507165, "o", "\u001b[?25lo\u001b[29;105H4\u001b[1;44H\u001b[?25h"] [43.57823, "o", "\u001b[?25lr\u001b[29;105H5\u001b[1;45H\u001b[?25h"] [43.693052, "o", "\u001b[?25l\u001b[29;105H6\u001b[1;46H\u001b[?25h"] [43.939149, "o", "\u001b[?25lo\u001b[29;105H7\u001b[1;47H\u001b[?25h"] [44.019491, "o", "\u001b[?25lf\u001b[29;105H8\u001b[1;48H\u001b[?25h"] [44.112539, "o", "\u001b[?25l\u001b[29;105H9\u001b[1;49H\u001b[?25h"] [44.20118, "o", 
"\u001b[?25lt\u001b[29;104H50\u001b[1;50H\u001b[?25h"] [44.2878, "o", "\u001b[?25lo\u001b[29;105H1\u001b[1;51H\u001b[?25h"] [44.42828, "o", "\u001b[?25lu\u001b[29;105H2\u001b[1;52H\u001b[?25h"] [44.684537, "o", "\u001b[?25l\u001b[1;51H\u001b[K\u001b[29;105H1\u001b[1;51H\u001b[?25h"] [44.830548, "o", "\u001b[?25l\u001b[1;50H\u001b[K\u001b[29;105H0\u001b[1;50H\u001b[?25h"] [44.970737, "o", "\u001b[?25l\u001b[1;49H\u001b[K\u001b[29;104H49\u001b[1;49H\u001b[?25h"] [45.046065, "o", "\u001b[?25ly\u001b[29;104H50\u001b[1;50H\u001b[?25h"] [45.133149, "o", "\u001b[?25lo\u001b[29;105H1\u001b[1;51H\u001b[?25h"] [45.287311, "o", "\u001b[?25lu\u001b[29;105H2\u001b[1;52H\u001b[?25h"] [45.328195, "o", "\u001b[?25lr\u001b[29;105H3\u001b[1;53H\u001b[?25h"] [45.402768, "o", "\u001b[?25l\u001b[29;105H4\u001b[1;54H\u001b[?25h"] [45.498703, "o", "\u001b[?25lc\u001b[29;105H5\u001b[1;55H\u001b[?25h"] [45.537462, "o", "\u001b[?25lh\u001b[29;105H6\u001b[1;56H\u001b[?25h"] [45.718158, "o", "\u001b[?25lo\u001b[29;105H7\u001b[1;57H\u001b[?25h"] [45.8428, "o", "\u001b[?25li\u001b[29;105H8\u001b[1;58H\u001b[?25h"] [45.905946, "o", "\u001b[?25lc\u001b[29;105H9\u001b[1;59H\u001b[?25h"] [45.953682, "o", "\u001b[?25le\u001b[29;104H60\u001b[1;60H\u001b[?25h"] [46.081636, "o", "\u001b[?25l.\u001b[29;105H1\u001b[1;61H\u001b[?25h"] [46.267888, "o", "\u001b[?25l.\u001b[29;105H2\u001b[1;62H\u001b[?25h"] [46.410391, "o", "\u001b[?25l.\u001b[29;105H3\u001b[1;63H\u001b[?25h"] [46.586319, "o", "\u001b[29;1H\u001b[K\u001b[1;62H\u001b[?25l\u001b[29;92H^[\u001b[1;62H"] [46.686908, "o", "\u001b[29;92H \u001b[1;63H"] [46.688225, "o", "\u001b[29;102H1,62\u001b[8CAlles\u001b[1;62H\u001b[?25h"] [46.886607, "o", "\u001b[?25l\u001b[29;92H:\u001b[1;62H\u001b[29;92H\u001b[K\u001b[29;1H:"] [46.886749, "o", "\u001b[?2004h\u001b[?25h"] [46.978696, "o", "w\u001b[?25l\u001b[?25h"] [47.035528, "o", "q\u001b[?25l\u001b[?25h"] [47.105184, "o", "\r"] [47.105636, "o", 
"\u001b[?25l\u001b[?1006l\u001b[?1002l\u001b[?2004l\"/tmp/brig-cmd-buffer-7541559781143048777yml\""] [47.106097, "o", " 3L, 233C geschrieben"] [47.108146, "o", "\r\r\r\n\u001b[?2004l\u001b[?1l\u001b>\u001b[?25h\u001b[?1049l\u001b[23;0;0t"] [47.117848, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [47.118393, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [47.118661, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [47.118889, "o", "\u001b[?1h\u001b="] [47.119552, "o", "\u001b[?2004h"] [47.797722, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [47.92976, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [48.010327, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [48.120604, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [48.19344, "o", " "] [48.40232, "o", "n"] [48.485116, "o", "e"] [48.582485, "o", "t"] [48.636681, "o", " "] [48.710048, "o", "\u001b[4ml\u001b[24m"] [48.858827, "o", "\b\u001b[24mli"] [48.950673, "o", "s"] [49.079882, "o", "t"] [49.184659, "o", "\u001b[?1l\u001b>"] [49.189848, "o", "\u001b[?2004l\r\r\n"] [49.192088, "o", "\u001b]2;brig net list\u0007\u001b]1;brig\u0007"] [49.350062, "o", "NAME ADDR ROUNDTRIP LASTSEEN \r\nalice QmUzLSHCK 0s \u001b[32m✔ Feb 28 16:54:59\u001b[0m "] [49.350218, "o", "\r\n"] [49.351703, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [49.351808, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [49.351835, "o", "\u001b]1;~/dev/brig/docs\u0007"] [49.351916, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [49.352019, "o", "\u001b[?1h\u001b="] [49.352226, "o", "\u001b[?2004h"] [52.42199, "o", "\u001b[?2004l\r\r\n"] ================================================ FILE: docs/asciinema/8_sync.json ================================================ {"version": 2, "width": 119, "height": 29, "timestamp": 1519834569, "env": 
{"SHELL": "/bin/zsh", "TERM": "xterm-256color"}, "title": "brig sync"} [0.265949, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.266115, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [0.266291, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b="] [0.266474, "o", "\u001b[?2004h"] [0.900166, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [1.020164, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [1.114403, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [1.205482, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [1.285285, "o", " "] [1.371649, "o", "n"] [1.450098, "o", "e"] [1.551298, "o", "t"] [1.621169, "o", " "] [1.730383, "o", "\u001b[4ml\u001b[24m"] [1.866582, "o", "\b\u001b[24mli"] [1.963829, "o", "s"] [2.085691, "o", "t"] [2.256785, "o", "\u001b[?1l\u001b>"] [2.263479, "o", "\u001b[?2004l\r\r\n"] [2.266598, "o", "\u001b]2;brig net list\u0007"] [2.273975, "o", "\u001b]1;brig\u0007"] [2.428703, "o", "NAME ADDR ROUNDTRIP LASTSEEN \r\nalice QmUzLSHCK 1ms \u001b[32m✔ Feb 28 17:16:09\u001b[0m \r\n"] [2.430672, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [2.430901, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [2.431006, "o", "\u001b[?1h\u001b="] [2.431162, "o", "\u001b[?2004h"] [4.013274, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [4.158376, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [4.26778, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [4.387579, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [4.505512, "o", " "] [4.725358, "o", "d"] [4.846141, "o", "i"] [4.919277, "o", "f"] [5.060555, "o", "f"] [5.119819, "o", " "] [5.235577, "o", 
"\u001b[4ma\u001b[24m"] [5.362352, "o", "\b\u001b[24mal"] [5.514241, "o", "i"] [5.602878, "o", "c"] [5.668968, "o", "e"] [5.933904, "o", "\u001b[?1l\u001b>"] [5.938148, "o", "\u001b[?2004l\r\r\n"] [5.94028, "o", "\u001b]2;brig diff alice\u0007\u001b]1;brig\u0007"] [6.125933, "o", "\u001b[35m•\u001b[0m\r\n├"] [6.126184, "o", "──\u001b[32m + greetings_from_alice\u001b[0m\r\n├──\u001b[31m - new-file\u001b[0m\r\n├──\u001b[36m README.md ⇄ README.md\u001b[0m\r\n└──\u001b[31m - sub\u001b[0m\r\n └──\u001b[31m - music.mp3\u001b[0m\r\n\r\n1 directory, 4 files\r\n"] [6.127905, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [6.127995, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007"] [6.128022, "o", "\u001b]1;~/dev/brig/docs\u0007"] [6.128098, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [6.128194, "o", "\u001b[?1h\u001b="] [6.128434, "o", "\u001b[?2004h"] [7.746912, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [7.859958, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [7.979952, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [8.088853, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [8.174836, "o", " "] [8.975584, "o", "s"] [9.18399, "o", "y"] [9.343424, "o", "n"] [9.460907, "o", "c"] [9.550074, "o", " "] [9.684063, "o", "\u001b[4ma\u001b[24m"] [9.816329, "o", "\b\u001b[24mal"] [9.971113, "o", "i"] [10.076632, "o", "c"] [10.141374, "o", "e"] [10.418895, "o", "\u001b[?1l\u001b>"] [10.426116, "o", "\u001b[?2004l\r\r\n"] [10.428601, "o", "\u001b]2;brig sync alice\u0007\u001b]1;brig\u0007"] [10.855741, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [10.85592, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [10.856032, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [10.856137, "o", "\u001b[?1h\u001b="] [10.856417, "o", "\u001b[?2004h"] [12.084895, "o", 
"\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [12.233591, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [12.318774, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [12.447816, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [12.534781, "o", " "] [13.184579, "o", "\u001b[4ml\u001b[24m"] [13.382836, "o", "\b\u001b[4ml\u001b[4mo\u001b[24m"] [13.521385, "o", "\b\u001b[4mo\u001b[4mg\u001b[24m"] [13.689946, "o", "\u001b[?1l\u001b>"] [13.691892, "o", "\u001b[?2004l\b\b\b\u001b[24ml\u001b[24mo\u001b[24mg\r\r\n"] [13.696934, "o", "\u001b]2;brig log\u0007"] [13.700079, "o", "\u001b]1;brig\u0007"] [13.88577, "o", "\u001b[32mSEfXUB9STL\u001b[0m \u001b[33mFeb 28 17:16:20\u001b[0m \u001b[31m•\u001b[0m\u001b[36m (curr)\u001b[0m\r\n"] [13.885898, "o", "\u001b[32mSEfXUEP9pV\u001b[0m \u001b[33mFeb 28 11:54:53\u001b[0m Merge with alice\u001b[36m (head)\u001b[0m\r\n\u001b[32mSEfXUBmLmQ\u001b[0m \u001b[33mFeb 28 10:59:36\u001b[0m edited new-file\u001b[36m\u001b[0m\r\n\u001b[32mSEfXUCjYxA\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added darth vader music\u001b[36m\u001b[0m\r\n"] [13.885923, "o", "\u001b[32mSEfXUCU47p\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m Added initial README.md\u001b[36m\u001b[0m\r\n"] [13.885971, "o", "\u001b[32mSEfXUCEaXL\u001b[0m \u001b[33mFeb 28 10:45:21\u001b[0m initial commit\u001b[36m (init)\u001b[0m\r\n"] [13.887705, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [13.887892, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [13.887972, "o", "\u001b[?1h\u001b="] [13.888183, "o", "\u001b[?2004h"] [16.401928, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [16.515067, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [16.621212, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [16.721487, 
"o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [17.42544, "o", " "] [17.566868, "o", "\u001b[4ml\u001b[24m"] [17.683992, "o", "\b\u001b[24mls"] [17.875134, "o", "\u001b[?1l\u001b>"] [17.879444, "o", "\u001b[?2004l\r\r\n"] [17.881636, "o", "\u001b]2;brig ls\u0007\u001b]1;brig\u0007"] [18.095557, "o", "SIZE MODTIME PATH PIN \r\n458 B Feb 28 10:59:50 \u001b[37m/README.md\u001b[0m "] [18.095745, "o", " \u001b[36m🖈\u001b[0m \r\n78 B Feb 28 17:16:20 \u001b[37m/greetings_from_alice\u001b[0m \u001b[36m🖈\u001b[0m \r\n122 B Feb 28 11:54:47 \u001b[37m/new-file\u001b[0m \u001b[36m🖈\u001b[0m \r\n\u001b[33m26 MB\u001b[0m Feb 28 10:46:48 \u001b[32m/sub\u001b[0m \u001b[36m🖈\u001b[0m \r\n"] [18.097525, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [18.097646, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [18.097753, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ "] [18.097859, "o", "\u001b[K"] [18.097977, "o", "\u001b[?1h\u001b="] [18.098284, "o", "\u001b[?2004h"] [19.043151, "o", "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [19.178064, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [19.265344, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [19.37568, "o", "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [19.45086, "o", " "] [19.677479, "o", "\u001b[4mc\u001b[24m"] [19.758973, "o", "\b\u001b[24mca"] [19.899614, "o", "t"] [19.969866, "o", " "] [22.205985, "o", "\u001b[9D\u001b[39mb\u001b[39mr\u001b[39mi\u001b[39mg\u001b[5C\u001b[7m/greetings_from_alice\u001b[27m"] [23.043074, "o", 
"\u001b[30D\u001b[32mb\u001b[32mr\u001b[32mi\u001b[32mg\u001b[39m\u001b[5C\u001b[27m/\u001b[27mg\u001b[27mr\u001b[27me\u001b[27me\u001b[27mt\u001b[27mi\u001b[27mn\u001b[27mg\u001b[27ms\u001b[27m_\u001b[27mf\u001b[27mr\u001b[27mo\u001b[27mm\u001b[27m_\u001b[27ma\u001b[27ml\u001b[27mi\u001b[27mc\u001b[27me"] [23.043568, "o", "\u001b[?1l\u001b>"] [23.050362, "o", "\u001b[?2004l\r\r\n"] [23.052589, "o", "\u001b]2;brig cat /greetings_from_alice\u0007\u001b]1;brig\u0007"] [23.223224, "o", "This files comes from alice! Hi there!\r\n"] [23.224979, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [23.225084, "o", "\u001b]2;sahib@werkbank: ~/dev/brig/docs\u0007\u001b]1;~/dev/brig/docs\u0007"] [23.225227, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [23.225277, "o", "\u001b[?1h\u001b="] [23.225477, "o", "\u001b[?2004h"] [25.956984, "o", "\u001b[?2004l\r\r\n"] ================================================ FILE: docs/asciinema/9_pin.json ================================================ {"version": 2, "width": 211, "height": 54, "timestamp": 1523915438, "env": {"SHELL": "/bin/zsh", "TERM": "xterm-256color"}, "title": "brig pin"} [0.30112, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [0.301615, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [0.3017, "o", "\u001b[?1h\u001b="] [0.301905, "o", "\u001b[?2004h"] [0.838565, "o", "\u001b[4mb\u001b[24m"] [0.951131, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [1.039016, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [1.132231, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [1.210328, "o", " "] [1.313066, "o", "\u001b[4ml\u001b[24m"] [1.400234, "o", "\b\u001b[24mls"] [1.71358, "o", "\u001b[?1l\u001b>"] [1.718911, "o", "\u001b[?2004l\r\r\n"] [1.72261, "o", "\u001b]2;brig ls\u0007\u001b]1;brig\u0007"] [1.946186, "o", "SIZE MODTIME USER PATH PIN \r\n649 B Apr 16 
19:04:01 \u001b[32malice\u001b[0m "] [1.946235, "o", "\u001b[37m/README.md\u001b[0m \u001b[36m🖈\u001b[0m \r\n649 B Apr 16 23:04:58 \u001b[32mbob\u001b[0m \u001b[37m/README.md.conflict.0\u001b[0m \r\n16 B Apr 16 23:04:58 \u001b[32mbob\u001b[0m \u001b[37m/greetings_from_bob\u001b[0m \u001b[36m🖈\u001b[0m \r\n0 B Apr 16 19:06:32 \u001b[32malice\u001b[0m \u001b[37m/new-file\u001b[0m \u001b[36m🖈\u001b[0m \r\n"] [1.946464, "o", "0 B Apr 16 19:06:51 \u001b[32malice\u001b[0m \u001b[32m/sub\u001b[0m \r\n"] [1.947875, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [1.947981, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007"] [1.948001, "o", "\u001b]1;~/dev/brig\u0007"] [1.948121, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [1.948181, "o", "\u001b[?1h\u001b="] [1.948385, "o", "\u001b[?2004h"] [3.11197, "o", "\u001b[4mb\u001b[24m"] [3.241792, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [3.342311, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [3.437441, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [3.506383, "o", " "] [3.695927, "o", "p"] [3.862273, "o", "i"] [4.035358, "o", "n"] [4.113841, "o", " "] [4.243842, "o", "\u001b[4mn\u001b[24m"] [4.292819, "o", "\b\u001b[4mn\u001b[4me\u001b[24m"] [4.46478, "o", "\b\b\u001b[24mn\u001b[24mew"] [4.563586, "o", "-"] [4.66959, "o", "f"] [4.756648, "o", "i"] [4.912451, "o", "l"] [4.997947, "o", "e"] [5.161257, "o", "\u001b[?1l\u001b>"] [5.176626, "o", "\u001b[?2004l\r\r\n"] [5.17932, "o", "\u001b]2;brig pin new-file\u0007\u001b]1;brig\u0007"] [5.353915, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [5.354034, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007"] [5.354255, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K\u001b[?1h\u001b="] [5.354405, "o", "\u001b[?2004h"] [5.908839, "o", "\u001b[4mb\u001b[24m"] [6.051314, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [6.104361, "o", 
"\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [6.224113, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [6.25579, "o", " "] [6.353504, "o", "\u001b[4ml\u001b[24m"] [6.443741, "o", "\b\u001b[24mls"] [6.647207, "o", "\u001b[?1l\u001b>"] [6.653213, "o", "\u001b[?2004l\r\r\n"] [6.65678, "o", "\u001b]2;brig ls\u0007\u001b]1;brig\u0007"] [6.841293, "o", "SIZE MODTIME USER PATH PIN \r\n649 B Apr 16 19:04:01 \u001b[32malice\u001b[0m \u001b[37m/README.md\u001b[0m "] [6.841343, "o", " \u001b[36m🖈\u001b[0m \r\n649 B Apr 16 23:04:58 \u001b[32mbob\u001b[0m \u001b[37m/README.md.conflict.0\u001b[0m \r\n16 B Apr 16 23:04:58 \u001b[32mbob\u001b[0m \u001b[37m/greetings_from_bob\u001b[0m \u001b[36m🖈\u001b[0m \r\n0 B Apr 16 19:06:32 \u001b[32malice\u001b[0m \u001b[37m/new-file\u001b[0m \u001b[35m🖈\u001b[0m \r\n"] [6.841357, "o", "0 B Apr 16 19:06:51 \u001b[32malice\u001b[0m \u001b[32m/sub\u001b[0m "] [6.842937, "o", " \r\n"] [6.843057, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [6.843133, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007"] [6.843185, "o", "\u001b]1;~/dev/brig\u0007"] [6.843235, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [6.843335, "o", "\u001b[?1h\u001b="] [6.843541, "o", "\u001b[?2004h"] [8.067965, "o", "\u001b[4mb\u001b[24m"] [8.257929, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [8.386808, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [8.490726, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [8.574755, "o", " "] [9.357392, "o", "p"] [9.579199, "o", "i"] [9.77668, "o", "n"] [10.012016, "o", " "] [10.622499, "o", "\u001b[4mr\u001b[24m"] [10.728788, "o", "\b\u001b[24mrm"] [10.801391, "o", " "] [11.527164, "o", "g"] [11.750003, "o", "r"] [11.845988, "o", "e"] [12.906214, "o", "e"] [13.082861, "o", "t"] [13.201274, "o", "i"] [13.364602, "o", "n"] [13.417331, "o", "g"] [13.61018, "o", "s"] [13.814561, "o", "_"] [13.915141, "o", 
"f"] [14.06849, "o", "r"] [14.138404, "o", "o"] [14.289591, "o", "m"] [14.508238, "o", "_"] [14.687799, "o", "b"] [14.758077, "o", "o"] [14.86678, "o", "b"] [15.159533, "o", "\u001b[?1l\u001b>"] [15.195429, "o", "\u001b[?2004l"] [15.196049, "o", "\r\r\n"] [15.200924, "o", "\u001b]2;brig pin rm greetings_from_bob\u0007"] [15.202633, "o", "\u001b]1;brig\u0007"] [15.417428, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [15.417661, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [15.417747, "o", "\u001b[?1h\u001b="] [15.417951, "o", "\u001b[?2004h"] [15.962116, "o", "\u001b[4mb\u001b[24m"] [16.083125, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [16.170519, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [16.260992, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [16.298779, "o", " "] [16.420998, "o", "\u001b[4ml\u001b[24m"] [16.508748, "o", "\b\u001b[24mls"] [16.636075, "o", "\u001b[?1l\u001b>"] [16.64118, "o", "\u001b[?2004l"] [16.641957, "o", "\r\r\n"] [16.646004, "o", "\u001b]2;brig ls\u0007\u001b]1;brig\u0007"] [16.831002, "o", "SIZE MODTIME USER PATH PIN \r\n649 B Apr 16 19:04:01 \u001b[32malice\u001b[0m"] [16.831261, "o", " \u001b[37m/README.md\u001b[0m \u001b[36m🖈\u001b[0m \r\n649 B Apr 16 23:04:58 \u001b[32mbob\u001b[0m \u001b[37m/README.md.conflict.0\u001b[0m \r\n16 B Apr 16 23:04:58 \u001b[32mbob\u001b[0m \u001b[37m/greetings_from_bob\u001b[0m \r\n0 B Apr 16 19:06:32 \u001b[32malice\u001b[0m \u001b[37m/new-file\u001b[0m \u001b[35m🖈\u001b[0m \r\n0 B Apr 16 19:06:51 \u001b[32malice\u001b[0m \u001b[32m/sub\u001b[0m \r\n"] [16.832584, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [16.832682, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007"] [16.832708, "o", "\u001b]1;~/dev/brig\u0007"] [16.832772, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [16.832864, "o", "\u001b[?1h\u001b="] [16.833068, 
"o", "\u001b[?2004h"] [18.59478, "o", "\u001b[4mb\u001b[24m"] [18.715649, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [18.852998, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [18.967583, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [19.0639, "o", " "] [20.2069, "o", "g"] [20.395469, "o", "c"] [20.53824, "o", " "] [21.301441, "o", "\u001b[?1l\u001b>"] [21.307104, "o", "\u001b[?2004l\r\r\n"] [21.311895, "o", "\u001b]2;brig gc\u0007\u001b]1;brig\u0007"] [21.502859, "o", "CONTENT HASH OWNER \r\n\u001b[37mQmUVb1XejZP6ENNzLgSmtobfMMALXc2GNGDTq3Q2ZXKyHN\u001b[0m \u001b[31mQmUVb1XejZ\u001b[0m \u001b[36malice\u001b[0m \r\n\u001b[37mQmUVb1XejZP6ENNzLgSmtobfMMALXc2GNGDTq3Q2ZXKyHN\u001b[0m "] [21.502908, "o", "\u001b[31mQmUVb1XejZ\u001b[0m \u001b[36mbob\u001b[0m \r\n"] [21.504292, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [21.504399, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007"] [21.504475, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [21.504573, "o", "\u001b[?1h\u001b="] [21.50493, "o", "\u001b[?2004h"] [23.327619, "o", "\u001b[4mb\u001b[24m"] [23.472565, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [23.606627, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [23.688013, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [23.772934, "o", " "] [23.901943, "o", "\u001b[4mc\u001b[24m"] [23.967451, "o", "\b\u001b[4mc\u001b[4ma\u001b[24m"] [24.107524, "o", "\b\u001b[4ma\u001b[4mt\u001b[24m"] [24.209351, "o", "\b\b\b\u001b[24mc\u001b[24ma\u001b[24mt "] [24.355964, "o", "g"] [24.518603, "o", "r"] [24.58132, "o", "e"] [24.758268, "o", "e"] [24.962315, "o", "t"] [25.059056, "o", "i"] [25.211339, "o", "n"] [25.25484, "o", "g"] [25.426998, "o", "s"] [25.750634, "o", "_"] [25.874119, "o", "f"] [26.013535, "o", "r"] [26.089798, "o", "o"] [26.251522, "o", "m"] [26.465614, "o", "_"] [26.608815, "o", "b"] [26.682756, "o", 
"o"] [26.768963, "o", "b"] [26.902155, "o", "\u001b[?1l\u001b>"] [26.908719, "o", "\u001b[?2004l\r\r\n"] [26.911227, "o", "\u001b]2;brig cat greetings_from_bob\u0007\u001b]1;brig\u0007"] [27.10136, "o", "Hello from Bob!"] [27.101676, "o", "\r\n"] [27.106601, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [27.106983, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [27.107104, "o", "\u001b[?1h\u001b="] [27.107364, "o", "\u001b[?2004h"] [28.818037, "o", "\u001b[4mb\u001b[24m"] [29.724845, "o", "\b\u001b[24m\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m"] [29.89817, "o", "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [30.07675, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31m-\u001b[0m\u001b[39m"] [30.309077, "o", "\b\u001b[1m\u001b[31m-\u001b[1m\u001b[31mb\u001b[0m\u001b[39m"] [30.402236, "o", "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mr\u001b[0m\u001b[39m"] [30.492736, "o", "\b\u001b[1m\u001b[31mr\u001b[1m\u001b[31mi\u001b[0m\u001b[39m"] [30.582505, "o", "\b\b\b\b\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mb\u001b[0m\u001b[32m-\u001b[0m\u001b[32mb\u001b[0m\u001b[32mr\u001b[0m\u001b[32mi\u001b[32mg\u001b[39m"] [30.652536, "o", " "] [31.687412, "o", "\u001b[4md\u001b[24m"] [31.740827, "o", "\b\u001b[24mda"] [31.855845, "o", "e"] [31.950756, "o", "m"] [32.118484, "o", "o"] [32.282845, "o", "n"] [32.383776, "o", " "] [32.448042, "o", "q"] [32.519802, "o", "u"] [32.703519, "o", "i"] [32.71348, "o", "o"] [32.794122, "o", "t"] [33.108008, "o", "\b \b"] [33.244054, "o", "\b \b"] [33.439531, "o", "t"] [33.591015, "o", "\u001b[?1l\u001b>"] [33.598721, "o", "\u001b[?2004l\r\r\n"] [33.601482, "o", "\u001b]2;brig -p 6667 daemon quit\u0007\u001b]1;bob-brig\u0007"] [33.791989, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [33.792127, "o", "\u001b]2;sahib@werkbank: 
~/dev/brig\u0007\u001b]1;~/dev/brig\u0007"] [33.792189, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [33.79226, "o", "\u001b[?1h\u001b="] [33.792466, "o", "\u001b[?2004h"] [35.253852, "o", "\u001b[32mbob-brig\u001b[39m daemon quit"] [35.462572, "o", "\u001b[20D\u001b[32mb\u001b[32mr\u001b[32mi\u001b[32mg\u001b[39m\u001b[39m \u001b[39mc\u001b[39ma\u001b[39mt greetings_from_bob"] [35.946018, "o", "\u001b[22Dgc \u001b[19D"] [36.768454, "o", "\u001b[?1l\u001b>"] [36.776677, "o", "\u001b[?2004l\r\r\n"] [36.778738, "o", "\u001b]2;brig gc\u0007\u001b]1;brig\u0007"] [36.95629, "o", "CONTENT HASH OWNER \r\n\u001b[37mQmUVb1XejZP6ENNzLgSmtobfMMALXc2GNGDTq3Q2ZXKyHN\u001b[0m \u001b[31mQmUVb1XejZ\u001b[0m \u001b[36malice\u001b[0m \r\n\u001b[37mQmUVb1XejZP6ENNzLgSmtobfMMALXc2GNGDTq3Q2ZXKyHN\u001b[0m"] [36.956342, "o", " \u001b[31mQmUVb1XejZ\u001b[0m \u001b[36mbob\u001b[0m \r\n"] [36.957825, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [36.957921, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007"] [36.957939, "o", "\u001b]1;~/dev/brig\u0007"] [36.95801, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [36.958121, "o", "\u001b[?1h\u001b="] [36.958343, "o", "\u001b[?2004h"] [37.86989, "o", "\u001b[4mb\u001b[24m"] [37.975293, "o", "\b\u001b[4mb\u001b[4mr\u001b[24m"] [38.06847, "o", "\b\b\u001b[4mb\u001b[4mr\u001b[4mi\u001b[24m"] [38.16544, "o", "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mr\u001b[24m\u001b[32mi\u001b[32mg\u001b[39m"] [38.581496, "o", " "] [38.986283, "o", "gc "] [39.402656, "o", "\b\b\b \b\b"] [39.492823, "o", "\u001b[4mc\u001b[24m"] [39.56276, "o", "\b\u001b[4mc\u001b[4ma\u001b[24m"] [39.735627, "o", "\b\u001b[4ma\u001b[4mt\u001b[24m"] [39.823745, "o", "\b\b\b\u001b[24mc\u001b[24ma\u001b[24mt "] [39.936504, "o", "greetings_from_bob"] [40.746588, "o", "\u001b[?1l\u001b>"] [40.755815, "o", "\u001b[?2004l\r\r\n"] [40.759163, "o", "\u001b]2;brig cat greetings_from_bob\u0007\u001b]1;brig\u0007"] [42.622192, "o", "\r\n"] 
[42.782638, "o", "\r\n"] [42.952416, "o", "\r\n"] [43.167917, "o", "\r\n"] [43.649776, "o", "^C"] [43.659072, "o", "\r\n\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ "] [43.660243, "o", "\u001b[K"] [43.661344, "o", "\u001b[?1h\u001b="] [43.668262, "o", "\u001b[?2004h"] [44.189165, "o", "\u001b[1m\u001b[30m#\u001b[0m\u001b[39m"] [44.301126, "o", "\b\u001b[1m\u001b[30m#\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [44.585404, "o", "\b\b\u001b[1m\u001b[30m#\u001b[1m\u001b[30m \u001b[1m\u001b[30mN\u001b[0m\u001b[39m"] [45.196173, "o", "\b\u001b[1m\u001b[30mN\u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [46.24694, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [46.33817, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30mh\u001b[0m\u001b[39m"] [46.434192, "o", "\b\u001b[1m\u001b[30mh\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [46.447753, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [46.594595, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [46.707113, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30mg\u001b[0m\u001b[39m"] [46.809953, "o", "\b\u001b[1m\u001b[30mg\u001b[1m\u001b[30m.\u001b[0m\u001b[39m"] [46.927586, "o", "\b\u001b[1m\u001b[30m.\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [47.11099, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mT\u001b[0m\u001b[39m"] [47.211523, "o", "\b\u001b[1m\u001b[30mT\u001b[1m\u001b[30mh\u001b[0m\u001b[39m"] [47.279516, "o", "\b\u001b[1m\u001b[30mh\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [47.357467, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [47.428878, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mf\u001b[0m\u001b[39m"] [47.533471, "o", "\b\u001b[1m\u001b[30mf\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [47.680787, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30ml\u001b[0m\u001b[39m"] [47.727994, "o", 
"\b\u001b[1m\u001b[30ml\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [47.825724, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [47.910826, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mw\u001b[0m\u001b[39m"] [47.969872, "o", "\b\u001b[1m\u001b[30mw\u001b[1m\u001b[30ma\u001b[0m\u001b[39m"] [48.07492, "o", "\b\u001b[1m\u001b[30ma\u001b[1m\u001b[30ms\u001b[0m\u001b[39m"] [48.119166, "o", "\b\u001b[1m\u001b[30ms\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [48.245168, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mc\u001b[0m\u001b[39m"] [48.345909, "o", "\b\u001b[1m\u001b[30mc\u001b[1m\u001b[30ml\u001b[0m\u001b[39m"] [48.45448, "o", "\b\u001b[1m\u001b[30ml\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [48.525043, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30ma\u001b[0m\u001b[39m"] [48.598672, "o", "\b\u001b[1m\u001b[30ma\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [48.773518, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [48.854201, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30md\u001b[0m\u001b[39m"] [48.916214, "o", "\b\u001b[1m\u001b[30md\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [49.069317, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mu\u001b[0m\u001b[39m"] [49.479573, "o", "\b\u001b[1m\u001b[30mu\u001b[1m\u001b[30mp\u001b[0m\u001b[39m"] [49.608735, "o", "\b\u001b[1m\u001b[30mp\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [49.895406, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [49.996553, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30mh\u001b[0m\u001b[39m"] [50.392309, "o", "\b\b\u001b[1m\u001b[30mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b"] [50.526275, "o", "\b\b\u001b[1m\u001b[30m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b"] [50.712234, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mb\u001b[0m\u001b[39m"] [50.808566, "o", "\b\u001b[1m\u001b[30mb\u001b[1m\u001b[30my\u001b[0m\u001b[39m"] [50.884775, "o", "\b\u001b[1m\u001b[30my\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [51.053816, "o", "\b\u001b[1m\u001b[30m 
\u001b[1m\u001b[30mg\u001b[0m\u001b[39m"] [51.263847, "o", "\b\u001b[1m\u001b[30mg\u001b[1m\u001b[30mc\u001b[0m\u001b[39m"] [51.453123, "o", "\b\u001b[1m\u001b[30mc\u001b[1m\u001b[30m.\u001b[0m\u001b[39m"] [51.773268, "o", "\u001b[?1l\u001b>"] [51.776168, "o", "\u001b[?2004l\r\r\n"] [51.779073, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [51.780013, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007"] [51.780404, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [51.780798, "o", "\u001b[?1h\u001b="] [51.782864, "o", "\u001b[?2004h"] [52.500575, "o", "\u001b[1m\u001b[30m#\u001b[0m\u001b[39m"] [52.615768, "o", "\b\u001b[1m\u001b[30m#\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [54.416078, "o", "\b\b\u001b[1m\u001b[30m#\u001b[1m\u001b[30m \u001b[1m\u001b[30mA\u001b[0m\u001b[39m"] [54.61427, "o", "\b\u001b[1m\u001b[30mA\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [54.697741, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30md\u001b[0m\u001b[39m"] [54.773046, "o", "\b\u001b[1m\u001b[30md\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [54.930814, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30ms\u001b[0m\u001b[39m"] [55.041847, "o", "\b\u001b[1m\u001b[30ms\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [55.208351, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [55.244913, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30mc\u001b[0m\u001b[39m"] [55.306743, "o", "\b\u001b[1m\u001b[30mc\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [55.347446, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [55.496123, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mw\u001b[0m\u001b[39m"] [55.647841, "o", "\b\u001b[1m\u001b[30mw\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [55.718209, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [55.863195, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mq\u001b[0m\u001b[39m"] [55.95291, "o", "\b\u001b[1m\u001b[30mq\u001b[1m\u001b[30mu\u001b[0m\u001b[39m"] [56.111735, "o", 
"\b\u001b[1m\u001b[30mu\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [56.195887, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [56.265323, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [56.448391, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [56.56379, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30mh\u001b[0m\u001b[39m"] [56.612261, "o", "\b\u001b[1m\u001b[30mh\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [56.699109, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [57.362277, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [57.522546, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [57.587003, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30ms\u001b[0m\u001b[39m"] [57.715618, "o", "\b\u001b[1m\u001b[30ms\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [57.83796, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30ma\u001b[0m\u001b[39m"] [57.915041, "o", "\b\u001b[1m\u001b[30ma\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [57.98336, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30mc\u001b[0m\u001b[39m"] [58.052785, "o", "\b\u001b[1m\u001b[30mc\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [58.129937, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [58.237867, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [58.367743, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mf\u001b[0m\u001b[39m"] [58.45754, "o", "\b\u001b[1m\u001b[30mf\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [58.589279, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mb\u001b[0m\u001b[39m"] [58.673972, "o", "\b\u001b[1m\u001b[30mb\u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [58.769003, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mb\u001b[0m\u001b[39m"] [58.898781, "o", "\b\u001b[1m\u001b[30mb\u001b[1m\u001b[30m,\u001b[0m\u001b[39m"] [58.994803, "o", "\b\u001b[1m\u001b[30m,\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [59.157135, "o", "\b\u001b[1m\u001b[30m 
\u001b[1m\u001b[30mw\u001b[0m\u001b[39m"] [59.332243, "o", "\b\u001b[1m\u001b[30mw\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [59.415563, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [59.492388, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mc\u001b[0m\u001b[39m"] [59.563192, "o", "\b\u001b[1m\u001b[30mc\u001b[1m\u001b[30ma\u001b[0m\u001b[39m"] [59.669657, "o", "\b\u001b[1m\u001b[30ma\u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [59.788831, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [60.30644, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30m'\u001b[0m\u001b[39m"] [60.893462, "o", "\b\b\u001b[1m\u001b[30mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b"] [61.020872, "o", "\b\b\u001b[1m\u001b[30mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b"] [61.203948, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30m'\u001b[0m\u001b[39m"] [61.323915, "o", "\b\u001b[1m\u001b[30m'\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [61.506352, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [61.700781, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mr\u001b[0m\u001b[39m"] [61.780133, "o", "\b\u001b[1m\u001b[30mr\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [61.925858, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [62.05761, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30mr\u001b[0m\u001b[39m"] [62.198445, "o", "\b\u001b[1m\u001b[30mr\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [62.30557, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [62.443085, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30mv\u001b[0m\u001b[39m"] [62.51056, "o", "\b\u001b[1m\u001b[30mv\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [62.596494, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [62.92443, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [63.095964, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [63.199589, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30m 
\u001b[0m\u001b[39m"] [63.296012, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mf\u001b[0m\u001b[39m"] [63.435393, "o", "\b\u001b[1m\u001b[30mf\u001b[1m\u001b[30mr\u001b[0m\u001b[39m"] [63.478495, "o", "\b\u001b[1m\u001b[30mr\u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [63.635312, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mm\u001b[0m\u001b[39m"] [63.73524, "o", "\b\u001b[1m\u001b[30mm\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [64.245442, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mn\u001b[0m\u001b[39m"] [64.323737, "o", "\b\u001b[1m\u001b[30mn\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [64.42844, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30mt\u001b[0m\u001b[39m"] [64.573704, "o", "\b\u001b[1m\u001b[30mt\u001b[1m\u001b[30mw\u001b[0m\u001b[39m"] [64.641164, "o", "\b\u001b[1m\u001b[30mw\u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [64.775938, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mr\u001b[0m\u001b[39m"] [64.824792, "o", "\b\u001b[1m\u001b[30mr\u001b[1m\u001b[30mk\u001b[0m\u001b[39m"] [65.100005, "o", "\b\u001b[1m\u001b[30mk\u001b[1m\u001b[30m.\u001b[0m\u001b[39m"] [65.324098, "o", "\u001b[?1l\u001b>"] [65.328114, "o", "\u001b[?2004l\r\r\n"] [65.330857, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [65.331904, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [65.332884, "o", "\u001b[?1h\u001b="] [65.33457, "o", "\u001b[?2004h"] [66.221421, "o", "\u001b[1m\u001b[30m#\u001b[0m\u001b[39m"] [66.339489, "o", "\b\u001b[1m\u001b[30m#\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [66.443802, "o", "\b\b\u001b[1m\u001b[30m#\u001b[1m\u001b[30m \u001b[1m\u001b[30ml\u001b[0m\u001b[39m"] [66.602202, "o", "\b\u001b[1m\u001b[30ml\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [67.462451, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30mk\u001b[0m\u001b[39m"] [67.57586, "o", "\b\u001b[1m\u001b[30mk\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [67.659472, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m 
\u001b[0m\u001b[39m"] [67.920771, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mw\u001b[0m\u001b[39m"] [68.070952, "o", "\b\u001b[1m\u001b[30mw\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [68.15706, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [68.239042, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30md\u001b[0m\u001b[39m"] [68.330446, "o", "\b\u001b[1m\u001b[30md\u001b[1m\u001b[30mi\u001b[0m\u001b[39m"] [68.423091, "o", "\b\u001b[1m\u001b[30mi\u001b[1m\u001b[30md\u001b[0m\u001b[39m"] [68.538495, "o", "\b\u001b[1m\u001b[30md\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [68.656006, "o", "\b\u001b[1m\u001b[30m \u001b[1m\u001b[30mb\u001b[0m\u001b[39m"] [68.740476, "o", "\b\u001b[1m\u001b[30mb\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [68.869553, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30mf\u001b[0m\u001b[39m"] [69.035796, "o", "\b\u001b[1m\u001b[30mf\u001b[1m\u001b[30mo\u001b[0m\u001b[39m"] [69.169299, "o", "\b\u001b[1m\u001b[30mo\u001b[1m\u001b[30mr\u001b[0m\u001b[39m"] [69.244378, "o", "\b\u001b[1m\u001b[30mr\u001b[1m\u001b[30me\u001b[0m\u001b[39m"] [69.311449, "o", "\b\u001b[1m\u001b[30me\u001b[1m\u001b[30m.\u001b[0m\u001b[39m"] [69.504344, "o", "\b\u001b[1m\u001b[30m.\u001b[1m\u001b[30m \u001b[0m\u001b[39m"] [69.947017, "o", "\b\b\u001b[1m\u001b[30m.\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b"] [70.356133, "o", "\u001b[?1l\u001b>"] [70.358766, "o", "\u001b[?2004l\r\r\n"] [70.361407, "o", "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r"] [70.361888, "o", "\u001b]2;sahib@werkbank: ~/dev/brig\u0007\u001b]1;~/dev/brig\u0007"] [70.362171, "o", "\r\u001b[0m\u001b[27m\u001b[24m\u001b[Jλ \u001b[K"] [70.362785, "o", "\u001b[?1h\u001b="] [70.365401, "o", "\u001b[?2004h"] [71.101157, "o", "\u001b[?2004l\r\r\n"] ================================================ FILE: docs/conf.py ================================================ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # brig documentation build configuration file, created by # 
sphinx-quickstart on Sun Dec 24 00:44:21 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.mathjax', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.githubpages', 'sphinxcontrib.fulltoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'brig' copyright = '2016-2020, Chris Pahl' author = 'Chris Pahl' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # The short X.Y version. 
def get_version_from_git(): try: proc = subprocess.run( ["git", "tag", "--sort", "v:refname"], capture_output=True, text=True, check=True, ) tag_list = [tag.strip() for tag in proc.stdout.split('\n') if tag] return tag_list[-1].strip() except Exception as e: print("-- note: could not read version from git: {}".format(e)) return 'v0.0.0' # The full version, including alpha/beta/rc tags. release = get_version_from_git() version = release # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'talk/*.rst'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' try: import sphinx_bootstrap_theme html_theme = 'bootstrap' html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() except ImportError: print("-- no »sphinx_bootstrap_theme« is installed.") print("-- install it via requirements.txt, if you want it.") print("-- falling back to default theme.") html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { # Navigation bar title. 
(Default: ``project`` value) 'navbar_title': "brig", # Tab name for entire site. (Default: "Site") 'navbar_site_name': "Table of Contents", # A list of tuples containing pages or urls to link to. # Valid tuples should be in the following forms: # (name, page) # a link to a page # (name, "/aa/bb", 1) # a link to an arbitrary relative url # (name, "http://example.com", True) # arbitrary absolute url # Note the "1" or "True" value above as the third argument to indicate # an arbitrary url. 'navbar_links': [ ("GitHub", "https://github.com/sahib/brig", True), ("Build Status", "https://travis-ci.org/sahib/brig", True), ], # Render the next and previous page links in navbar. (Default: true) 'navbar_sidebarrel': False, # Render the current pages TOC in the navbar. (Default: true) 'navbar_pagenav': False, # Tab name for the current pages TOC. (Default: "Page") 'navbar_pagenav_name': "Page", # Global TOC depth for "site" navbar tab. (Default: 1) # Switching to -1 shows all levels. 'globaltoc_depth': 2, # Include hidden TOCs in Site navbar? # # Note: If this is "false", you cannot have mixed ``:hidden:`` and # non-hidden ``toctree`` directives in the same page, or else the build # will break. # # Values: "true" (default) or "false" 'globaltoc_includehidden': "true", # HTML navbar class (Default: "navbar") to attach to
element. # For black navbar, do "navbar navbar-inverse" 'navbar_class': "navbar", # Fix navigation bar to top of page? # Values: "true" (default) or "false" 'navbar_fixed_top': "true", # Location of link to source. # Options are "nav" (default), "footer" or anything else to exclude. 'source_link_position': "footer", # Bootswatch (http://bootswatch.com/) theme. # # Options are nothing (default) or the name of a valid theme # such as "cosmo" or "sandstone". # # The set of valid themes depend on the version of Bootstrap # that's used (the next config option). # # Currently, the supported themes are: # - Bootstrap 2: https://bootswatch.com/2 # - Bootstrap 3: https://bootswatch.com/3 'bootswatch_theme': "flatly", # Choose Bootstrap version. # Values: "3" (default) or "2" (in quotes) 'bootstrap_version': "3", } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { 'tutorial/*': [ 'localtoc.html', ], 'quickstart*': [ 'localtoc.html', ], 'installation*': [ 'localtoc.html', ], 'faq*': [ 'localtoc.html', ], 'roadmap*': [ 'localtoc.html', ], 'feature*': [ 'localtoc.html', ], 'contributing*': [ 'localtoc.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'brigdoc' # # -- Options for LaTeX output --------------------------------------------- # # latex_elements = { # # The paper size ('letterpaper' or 'a4paper'). # # # # 'papersize': 'letterpaper', # # # The font size ('10pt', '11pt' or '12pt'). 
# # # # 'pointsize': '10pt', # # # Additional stuff for the LaTeX preamble. # # # # 'preamble': '', # # # Latex figure (float) alignment # # # # 'figure_align': 'htbp', # } # # # Grouping the document tree into LaTeX files. List of tuples # # (source start file, target name, title, # # author, documentclass [howto, manual, or own class]). # latex_documents = [ # (master_doc, 'brig.tex', 'brig Documentation', # 'Chris Pahl', 'manual'), # ] # # # # -- Options for manual page output --------------------------------------- # # # One entry per manual page. List of tuples # # (source start file, name, description, authors, manual section). # man_pages = [ # (master_doc, 'brig', 'brig Documentation', # [author], 1) # ] # # # # -- Options for Texinfo output ------------------------------------------- # # # Grouping the document tree into Texinfo files. List of tuples # # (source start file, target name, title, author, # # dir menu entry, description, category) # texinfo_documents = [ # (master_doc, 'brig', 'brig Documentation', # author, 'brig', 'One line description of project.', # 'Miscellaneous'), # ] def setup(app): # app.add_stylesheet('css/custom.css') app.add_css_file('css/custom.css') ================================================ FILE: docs/contributing.rst ================================================ How to contribute ================= Something we would be especially interested in are *experience reports:* We want you to try out the current state of the software and write down the following: - Was it easy to get »brig« running? - Was it easy to understand its concepts? - What is your intended usecase for it? Could you make it work? - If no, what's missing in your opinion to make the usecase possible? - Anything else that you feel free to share. Those reports should be posted as GitHub issue. They will help us to develop brig further in the "bigger picture". Also, the developer of this software is currently doing all of this is in his free time. 
If you're willing to offer any financial support feel free to contact me. What to improve --------------- We try to open a ticket for anything that can be worked on right now. The following general improvements are of course also greatly appreciated: - Bug reports & fixes. - Documentation improvements. - Writing more and better tests. - Porting to other platforms. Workflow -------- Please adhere to the general `GitHub workflow`_, i.e. fork the repository, make your changes and open a pull request that can be discussed. .. _`GitHub workflow`: https://help.github.com/articles/about-pull-requests Here's a small checklist before publishing your pull request: * Did you ``go fmt`` all code? * Does your code style fit with the rest of the code base? * Did you run ``task lint``? * Did you write tests if necessary? * Did you consider if changes to the docs are necessary? * Did you check if you need to add something to CHANGELOG.md? Thank you for your contribution. ================================================ FILE: docs/faq.rst ================================================ Frequently Asked Questions ========================== General questions ----------------- 1. Why is the software named ``brig``? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is named after the ship with the same name. When we named it, we thought it's a good name for the following reasons: - A ``brig`` is a very lightweight and fast ship. - It was commonly used to transport small amounts of goods. - A ship operates on streams (sorry 😛) - The name is short and somewhat similar to ``git``. - It gives you a few nautical metaphors and a logo for free. - Words like »bright«, »brigade« and many others start with it. Truth be told, only half of the two name givers thought it's a good name, but I still kinda like it. 2. Who develops it? ~~~~~~~~~~~~~~~~~~~ Although this documentation sometimes speaks of »we«, the only developer is currently `Chris Pahl `_.
He writes it entirely in his free time, mostly during commuting with the train. Technical questions ------------------- 1. How is the encryption working? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A stream is chunked into equal sized blocks that are encrypted in GCM mode using AES-256. Additionally ChaCha20 (with Poly1305) is currently supported but it might be removed soon. The overall file format is somewhat similar to NaCL secretboxes, but it is more tailored to supporting efficient seeking. The current default is ``ChaCha20``, although machines with the ``aes-ni`` instruction set might yield significant higher throughput. The source of the `encryption layer can be found here `_. Here's a basic overview over the format: .. image:: _static/format-encryption.svg :width: 66% :align: center The key of each file is currently being derived from the content hash of the file (See also `Convergent Encryption `_). If the content changes later, the key does not change since the key is only generated once during the first staging of the file. Please refer to the implementation for all implementation details for now. No security audits of the implementation have been done yet, therefore I'd appreciate every pair of eyes. Especially while everything is still in flux and won't harm any users. 2. Is there compression implemented? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Yes. The compression is being done before encryption and is only enabled if the file looks compression-worthy. The »worthiness« is determined by looking at its header to guess a mime-type. Depending on the mime-type either ``snappy`` or ``lz4`` is selected or no compression is added at all. The source of the `compression layer can be found here `_. Here's a basic overview over the format: .. image:: _static/format-compression.svg :width: 66% :align: center 3. What hash algorithms are used? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Two algorithms are used: * ``SHA256`` is used by ``IPFS`` for every backend hash. 
* ``SHA3-256`` is used as general purpose hash for everything ``brig`` internal (Content and Tree hash). Each hash is encoded as `multihash `_. For output purposes this representation is encoded additionally in ``base58``. Therefore, all hashes that start with ``W1`` are ``sha3-256`` hashes while the ones starting with ``Qm`` are ``sha256`` hashes. Keep in mind that ``base58`` is case-sensitive. 4. What kind of deduplication is currently used? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is currently only possible to deduplicate between individual versions of a file. And there also only the portion before the modification. ``IPFS`` implements deduplication, but it is circumvented by encrypting blocks before giving them over to the backend. Implementing a more proper and informed deduplication is one of the long term goals, which require more thorough interaction with ``IPFS``. It is also possible to do some basic deduplication purely on ``brig`` side since we have more info on the file than ``IPFS`` has. 5. How fast is the I/O when using ``brig``? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here are some rather outdated graphs where you can get a rough feeling how fast it can be. There are a few rules of thumb with mostly obvious content: * It it goes over the network, it's the network speed plus a smaller constant overhead. * If it comes over FUSE, it is quite a bit slower than over ``brig cat``. * If you do not use compression, writing and reading will be faster. The graphs below only measure in-memory performance compared to a ``dd`` like speed (see the »baseline« line). .. image:: _static/movie_read.svg :width: 66% .. image:: _static/movie_write.svg :width: 66% Your mileage may vary and you better do your own benchmarks for now. .. todo:: Explain/Update those graphs. ================================================ FILE: docs/features.rst ================================================ .. _features-page: Features ======== .. 
note:: The features below are actually available, but before version 1.0.0 we won't give any guarantees regarding stability or edge cases. Your mileage may vary currently. Encryption and compression built-in ----------------------------------- * All data is encrypted during storage and transport using AES-256 in GCM mode. * Optional compression algorithm is selected based on the file type. * Hints can be given to change the default algorithm for certain or all files. * Keys are stored as part of the metadata during synchronisation. Easy Version control -------------------- * Simplified ``git``-like version control only limited by your storage space. * Synchronization algorithm that can handle `moved files `_ and `empty directories `_ and files. * Auto-updating facility that will sync on any change. * Configurable conflict handling. Separation between data and metadata ------------------------------------ * Your data does not need to be stored on the device you are currently using. * Pin the data you want to use to your local storage. Every repository acts as a cache of all the files you have access to. * Keep a range of versions cached locally and delete older versions if they exceed a quota. Truly Distributed ----------------- * No central server at all. All infrastructure is based on IPFS. * Still, central architectures can be built with ``brig``. * Simple user identification and discovery. * You do not store data you don't want to store. Pin what you need, fetch the rest from the network on request. FUSE filesystem --------------- * FUSE filesystem mirrors your data to a local directory. * Allows your normal tools to work seamlessly with brig. * Mounts can be persisted to stay where they are. * Not high performance, but fast enough for daily usage. Gateway and Web UI ------------------ * Gateway to share normal HTTP/S links with other users. * Simple UI provided to execute the typical tasks without the need of a command line. * User and rights management included. ..
image:: _static/gateway-files.png :alt: Gateway files view :width: 66% 100% Open-Source ---------------- * Completely free software under the terms of the ``AGPL``. * Development driven by the community. * Written in Go and Elm. --------- Comparison with other tools =========================== When showing ``brig`` (or any other software in general) to someone the first question is usually something like *»But isn't there already X?«* and sometimes even *»Why don't you just contribute to other projects?«*. This section tries to find an answer to both questions. The answer will obviously be biased, so take it with a fair grain of salt. Yes, there is other software in this world. But this is always a matter of trade offs the author of each individual package has chosen. One application might not run on your platform, the next might not be secure enough for your needs, the other one is proprietary or has something else that does not fit your liking. I won't go into an exhaustive list of competitors, but more highlight the things that are special in ``brig`` and cannot be done easily in other systems. I said »competitors« earlier, which is a silly term, since I don't see this as a competition. For me it's more about giving the user a choice and improving by adapting good ideas from other implementations. Let's list a few of those »competitors« to give you an impression about the place of ``brig`` in the world: * `Syncthing `_: Probably conceptually the nearest relative. Also a peer-to-peer based filesystem, but with its own protocol. Focus seems to be on ease-of-use and general high quality usability. Does not have strong versioning. Excellent tool and battle tested. * `Resilio `_: Proprietary solution based on BitTorrent. Seems to focus on performance and enterprise level resilience. Being proprietary is a show stopper for me. * `Perkeep `_: Not focused on files, but on storing personal »objects«. Would be probably more interesting as a backend for ``brig``. 
* `Upspin `_: A global name system that glues together filesystems and other data storage. Could be also a backend for ``brig`` and is not directly targeted to end users. * `Bazil `_: Basically ``brig`` minus IPFS. While apparently discontinued it seems to have a great deal of common features with ``brig``. The same author also maintains the FUSE bindings of FUSE and his writeups helped me writing the FUSE implementation of ``brig``. Thank you very much for your work `@tv42 `_! * `Git LFS `_: The large file storage extension to ``git``. Similar to ``brig``'s pinning in the sense that large files are replaced with links that will be fetched from a LFS server. * `git annex `_: Extension to ``git`` that tracks filenames and metadata instead of file content. Has a great deal of powerful features but can be a bit intimidating to the end users since it does not seem to focus much on usability. Features like the number of minimum copies a file must have before you can delete it are still on ``brig``'s roadmap. There are probably more. Some of these inspired quite a bit how ``brig`` looks today. So what are the unique features of ``brig`` that you would not get easily with other tools? * **Pinning:** The fact that not all data needs to be on the same machine as the ``brig`` daemon opens up interesting possibilities. Also the ability of repinning is something I did not see in other tools. * **Strong versioning of big files:** High level versioning that is comparable to ``git``, but simplified and meant for whole-file version control (and not for individual diffs). Of course there are drawbacks. Choosing ``brig`` currently means using software that is not in widespread use. It did not go through a security audit. It is by far not as efficient as other tools in all use cases. But many of the current hurdles are solvable and it's just a matter of time. The best advice I can give you: Try it out and see if it fits your use case. 
If it doesn't, I'm happy to hear from you and wish you all the best with another tool. ================================================ FILE: docs/index.rst ================================================ ``brig`` - decentralized & secure synchronization ================================================= .. image:: _static/logo.png :width: 50% :align: center What is ``brig``? ----------------- ``brig`` is a distributed & secure file synchronization tool with version control. It is based on IPFS, written in Go and will feel familiar to ``git`` users. Think of it as a swiss army knife for file synchronization or as a peer to peer alternative to *Dropbox.* **Key feature highlights:** * Encryption of data during storage and transport, plus optional compression on the fly. * Simplified ``git`` version control only limited by your storage space. * Synchronization algorithm that can handle moved files and empty directories and files. * Your data does not need to be stored on the device you are currently using. * FUSE filesystem that feels like a normal sync folder. * No central server at all. Still, central architectures can be built with ``brig``. * Gateway and Web based UI to share normal HTTP/S links with other users. * Auto-updating facility that will sync on any change. * Completely free software under the terms of the ``AGPL``. * ... Please refer to the :ref:`features-page` for more details. If you want a visual hint how ``brig`` looks on the commandline, refer to the :ref:`quickstart`. What is ``brig`` not? --------------------- ``brig`` tries to focus on being conceptually simple, by hiding a lot of complicated details regarding storage and security. Therefore the end result is hopefully easy and pleasant to use, while being secure by default. Since ``brig`` is a »general purpose« tool for file synchronization it of course cannot excel in all areas.
It won't replace high performance network file systems and should not be used when you are in need of high throughput - at least not at the moment. I have questions! ----------------- Please ask in one of those places: * `GitHub Issue Tracker `_: All things like bug reports or feature requests. * The matrix chat room ``#brig`` on ``matrix.org``. Just `pick a client `_ and join the room or click `this link `_ directly. Current Status -------------- **This software is in active development and probably not suited for production use yet!** But to get it in a stable state, it is **essential** that people play around with it. Consider this is as an open beta phase. Also don't take anything granted for now, everything might change wildly before version ``1.0.0``. With that being said, ``brig`` is near a somewhat usable state where you can play around with it quite well. All aforementioned features do work, besides possibly being a little harder to use than ideally possible. A lot of work is currently going into stabilizing the current feature set. At this moment ``brig`` is **only tested on Linux**. Porting and testing efforts are welcome. Other platforms should be able to compile, but there are currently not guarantees that it will work. Table of Contents ----------------- .. toctree:: :maxdepth: 2 :caption: Installation: installation.rst .. toctree:: :maxdepth: 2 :caption: User manual tutorial/intro.rst tutorial/init.rst tutorial/coreutils.rst tutorial/mounts.rst tutorial/remotes.rst tutorial/vcs.rst tutorial/pinning.rst tutorial/gateway.rst tutorial/config.rst .. toctree:: :maxdepth: 2 :caption: Additional resources quickstart.rst faq.rst features.rst .. toctree:: :maxdepth: 2 :caption: Development roadmap.rst contributing.rst ================================================ FILE: docs/installation.rst ================================================ Installation ------------ We provide pre-compiled binaries on every release. 
``brig`` comes to your computer as a single binary that includes everything you need. See here for the release list: https://github.com/sahib/brig/releases Just download the binary for your platform, unpack it and put it somewhere in your ``$PATH`` (for example ``/usr/local/bin``). If you trust us well enough, you can also use this online installer to download the latest stable ``brig`` binary to your current working directory: .. code-block:: bash $ bash <(curl -s https://raw.githubusercontent.com/sahib/brig/master/scripts/install.sh) Specific distributions ---------------------- Some distributions can install ``brig`` directly via their package manager. Those are currently: * Arch Linux (`PKGBUILD `_; builds ``develop`` branch) Compiling yourself ------------------ If you use a platform we don't provide binaries for or if you want to use a development version, you're going to have to compile ``brig`` yourself. But don't worry, that's quite easy. We do not have many dependencies. You only need two things: The programming language *Go* and the version control system ``git``. Step 0: Installing Go ~~~~~~~~~~~~~~~~~~~~~ This is only required if you don't already have ``Go`` installed. Please consult your package manager for that. .. warning:: ``brig`` only works with a newer version of Go (>= 1.10). The version in your package manager might be too outdated, if you're on e.g. Debian. Make sure it's rather up to date! If it's too old you can always use tools like ``gvm`` to get a more recent version. If you did not do that, you are going to need to install ``Go``. `Refer here `_ for possible ways of doing so. Remember to set the ``GOPATH`` environment variable to a place where you'd like to have your ``.go`` sources being placed. For example you can put this in your ``.bashrc``: ..
code:: bash # Place the go sources in a "go" directory inside your home directory: export GOPATH=~/go # This is needed for the go toolchain: export GOBIN="$GOPATH/bin" # Make sure that our shell finds the go binaries: export PATH="$GOPATH/bin:$PATH" By choosing to have the ``GOPATH`` in your home directory you're not required to have ``sudo`` permissions later on. You also need to have ``git`` `installed `_ for the next step. Step 1: Compile & Install ``brig`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This step requires setting ``GOPATH``, as discussed in the previous section. .. code:: bash $ go get -d -v -u github.com/sahib/brig # Download the sources. $ cd $GOPATH/src/github.com/sahib/brig # Go to the source directory. $ ./scripts/install-task.sh # Install the build system. $ task # Build the binary. Execution might take a few minutes though because all of ``brig`` is being compiled during the ``task`` step - this also includes the download of all dependencies. If you cannot or want to install ``git`` for some reason, you can `manually download a zip `_ from GitHub and place its contents into ``$GOPATH/src/github.com/sahib/brig``. In this case, you can skip the ``go get`` step. Step 2: Test if the installation is working ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If everything worked, there will be a ``brig`` binary in ``$GOBIN``. .. code:: bash $ brig help If above command prints out documentation on how to use the program's commandline switches then the installation worked. Happy file shipping! Setting up IPFS --------------- ``brig`` requires a running *IPFS* daemon. While ``brig`` has ways to do install a IPFS daemon for you, it is preferable to install it via your package manager or via the official way: https://docs.ipfs.io/introduction/install ----- Continue with :ref:`getting_started` or directly go to :ref:`quickstart` if you just need a refresh on the details. 
================================================ FILE: docs/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build set SPHINXPROJ=brig if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd ================================================ FILE: docs/quickstart.rst ================================================ .. warning:: The examples below are slightly outdated and will be revisited at some point. All commands should still work, but the output might be a little different now. Please refer to the :ref:`getting_started` guide for a more up-to-date version. .. _quickstart: Quickstart ========== This does not really explain the philosophy behind ``brig``, but gives a good idea what the tool is able to do and how it's supposed to be used. Users familiar to ``git`` should be able to grok most of the commands intuitively. 1. Init ------- Before you can do anything with ``brig`` you need to create a repository. During this step, also your online identity will be created. So make sure to use a sane username (``sahib@wald.de``) and resource (``laptop``). .. raw:: html As an alternative to entering your password manually, you can use an existing password manager: .. raw:: html 2. Adding files --------------- Before synchronizing them, you need to *stage* them. 
The files will be stored encrypted (and possibly compressed) in blobs on your hard disks. .. raw:: html 3. Coreutils ------------ ``brig`` provides implementations of most file related core utils like ``mv``, ``cp``, ``rm``, ``mkdir`` or ``cat``. Handling of files should thus feel familiar for users that know the command line. .. raw:: html 4. Mounting ----------- For daily use and for use with other tools you might prefer a folder that contains the file you gave to ``brig``. This can be done via the built-in FUSE layer. .. raw:: html .. note:: Some built-in commands provided by brig are faster. ``brig cp`` for example only copies metadata, while the real ``cp`` will copy the whole file. If you wish to always have the mount when ``brig`` is running, you should look into :ref:`permanent-mounts`. 5. Commits ---------- At its heart, ``brig`` is very similar to ``git`` and also supports versioning via commits. In contrast to ``git`` however, there are no branches and you can't go back in history -- you can only bring the history back up front. .. raw:: html 6. History ---------- Each file (and directory) maintains a history of the operations that were done to this file. .. raw:: html 7. Discovery & Remotes ---------------------- In order to sync with your buddies, you need to add their *fingerprint* as remotes. How do you get their fingerprint? In the best case by using a separate side channel like telephone, encrypted email or similar. But ``brig`` can assist finding remotes via the ``brig net locate`` command. .. raw:: html .. note:: You should **always** verify the fingerprint is really the one of your buddy. ``brig`` cannot do this for you. 8. Sync & Diff -------------- Once both parties have set up each other as remotes, we can easily view and sync with their data. .. raw:: html 9. Pinning ---------- By default ``brig`` will only keep the most recent files. All other files will be marked for deletion after a certain timeframe. This is done via *Pins*. 
If a file is pinned, it won't get deleted. If you don't need a file in local storage, you can also unpin it. On the next access ``brig`` will try to load it again from a peer that provides it (if possible). .. raw:: html ================================================ FILE: docs/requirements.txt ================================================ sphinx_bootstrap_theme==0.6.5 sphinxcontrib-fulltoc==1.2.0 ================================================ FILE: docs/roadmap.rst ================================================ Roadmap ======= This document lists the improvements that can be done to ``brig`` and (if possible) when. All features below are not guaranteed to be implemented and can be seen more as possible improvements that might change during implementation. Also it should be noted that each feature is only an idea and not a fleshed out implementation plan. Bug fixes and minor changes in handling are not included since this document is only for »big picture« ideas. Also excluded are stability/performance improvements, documentation and testing work, since this is part of the »normal« development. Current state ------------- The first real release (0.3.0 »Galloping Galapagos«) was released on the 7th December 2018. It includes all basic features and is working somewhat. The original goals were met: - Stable command line interface. - Git-like version control - User discovery - User authentication - Fuse filesystem For day-to-day use there are quite some other features that make brig easier to use and capable of forming a Dropbox-like backend out of several nodes. **There will be no stability guarantees before version 1.0.0.** Future ------ Those features should be considered after releasing the first prototype. A certain amount of first user input should be collected to see if the direction we're going is valid. .. 
role:: strikethrough * **Gateway:** :strikethrough:`Provide a built-in (and optional) http server, that can »bridge« between the internal ipfs network and people that use a regular browser. Instances that run on a public server can then provide hyperlinks of files to non-brig users.` *Done as of version 0.3.0.* * **Config profiles:** Make it easy to configure brig in a way to serve either as thin client or as archival node. Archival nodes can be used in cases where a brig network spans over computers that lie in a different timezone. The archival node would accumulate all changes and repositories would see it as some sort of "blessed repository" which holds the latest and greatest state. * **Automatic syncing:** :strikethrough:`Automatically publish changes after a short amount of time. If an instance modified some file other nodes are notified and can decide to pull the change.` *Done as of version 0.4.0.* * **Intelligent pinning strategies:** :strikethrough:`By default only the most recent layer of files are being kept. This is very basic and can't be configured currently. Some users might only want to have only the last few used files pinned, archive instances might want to pin almost everything up to a certain depth.` *Done as of version 0.4.0 (see repinning)* * *Improve read/write performance:* Big files are currently hold in memory completely by the fuse layer (when doing a flush). This is suboptimal and needs more intelligent handling and out-of-memory caching of writes. Also, the network performance is often very low and ridden by network errors and timeouts. This can be tackled since IPFS v0.4.19 supports an --offline switch to error out early if a file is not available locally. * *More automated authentication scheme:* E-Mail-like usernames could be used to verify a user without exchanging fingerprints. This could be done by e.g. 
sending an activation code to the email of an user (assuming the brig name is the same as his email), which the brig daemon on his side could read and send back. * *Format and read fingerprint from QR-Code:* Fingerprints are hard to read and not so easy to transfer and verify. QR-Code could be a solution here, since we could easily snap a picture with a phone camera or print it on a business card. Far Future ---------- Those features are also important, but require some more in-depth research or more work and are not the highest priority currently. * **Port to other platforms:** Especially Windows and eventually Android. This relies on external help, since I'm neither capable of porting it, nor really a fan of both operating systems. * **Implement alternative to FUSE:** FUSE currently only works on Linux and is therefore not usable outside of that. Windows has something similar (called Dokan_). Alternatively we could also go on by implementing a WebDAV server, which can also be mounted. * **Implement the encryption in IPFS:** Having the encryption/compression layer in brig effectively disables the usage of deduplication. This is unfortunate and could be mitigated by either implementing deduplication ourselves or moving to a block based encryption scheme. .. _dokan: https://github.com/keybase/kbfs/tree/master/dokan * **Ensure N-Copies:** It should be possible to define a minimum amount of copies a file has to have on different peers. This could be maybe incorporated into the pinning concept. If a user wants to remove a file, brig should warn him if he would violate the min-copies rule. This idea is shamelessly stolen from ``git-annex``. ================================================ FILE: docs/talk/Makefile ================================================ all: hovercraft -N -s index.rst ================================================ FILE: docs/talk/demo.rst ================================================ 0. 
Preparation ============== - Windows: Chrome Incognito (slides, presenter console), Monitor Settings, Terminal (docker, ipfs, hovercraft), Terminal (empty) ----- - setxkbmap us && xmodmap ~/.xmodmaprc - check sound. - Check that docker is running. - Check that no other brig instance is up. - Check: /tmp/{repo,mount} is empty. - Do a "bob-brig ls and bob-brig rmt ls" to do some pre-caching. - Source autocompletion. 1. Init ======= Usage is very close to ``git``. .. code-block:: bash $ mkdir repo && cd repo # Create a new repository in here: # Command started einen daemon im Hintergrund! $ brig init alice@wonderland.de/laptop # Anschaut was brig so angestellt hat: $ ls # Dann schauen wir mal ob man die Datei ausgeben kann: $ brig cat README.md 2. Adding files =============== .. code-block:: bash $ brig stage ~/music.mp3 $ brig ls # Pfadnamen, virtueller root. $ brig tree $ brig cat music.mp3 | mpv - 3. Coreutils ============ .. code-block:: bash $ brig mkdir sub $ brig cp music.mp3 sub $ brig tree # ähnlich zu `stat` unter linux: $ brig info README.md $ brig edti README.md $ brig edit README.md # Hash hat sich nach Edit-Vorgang geändert: $ brig info README.md # Man kann sich ansehen was für daten ipfs dann speichert: $ ipfs cat -> garbled bullshit. 4. Mounting =========== .. code-block:: bash $ mkdir /tmp/mount $ ls /tmp/mount # Empty. $ brig mount /tmp/mount # Ta-da, alle dateien die man sonst so hat sind auch hier vorhanden: $ nautilus /tmp/mount # Man kann ganz normal dateien editieren: $ vi /tmp/mount/new-file $ brig ls # Noch nicht sehr performant, aber sowas geht schon: $ cp ~/rrd.mkv /tmp/mount $ mpv /tmp/mount/rrd.mkv 5. Commits ========== .. code-block:: bash $ brig log $ brig diff $ brig commit -m 'Added darth vader' $ brig log $ brig edit README.md $ brig mv sub/music.mp3 sub/else.mp3 $ brig diff # Should print mergeable and moved file. 6. History ========== (optional) .. 
code-block:: bash # Etwas anders als git: kein diff an sich: $ brig history new-file $ brig edit new-file $ brig commit -m 'edited new-file' $ brig reset HEAD^ new-file $ brig cat new-file 7. Discovery & Remotes ====================== .. code-block:: bash # bob läuft in einem container auf dem gleichen computer: $ bob-brig ls $ brig whoami # Erst ausführen, dauert etwas: $ brig net locate bob $ brig remote add $(bob-brig whoami -f) $ bob-brig remote add $(brig whoami -f) $ brig remote ls $ brig remote edit 8 Sync & Diff ============= .. code-block:: bash $ brig remote ls $ brig diff bob $ brig sync bob $ brig log $ brig ls 9 Pinning ========= .. code-block:: bash $ brig pin rm # geht. $ brig gc $ brig cat # geht. $ $ brig gc $ brig cat ...blocks... 10 Misc ======= .. code-block:: bash $ brig $ brig help stage $ brig docs $ brig bug ================================================ FILE: docs/talk/index.rst ================================================ :title: brig :author: Chris Pahl :css: style.css :data-transition-duration: 350 :data-perspective: 5000 .. role:: white-bg .. role:: title-logo .. role:: strike .. role:: donald .. role:: github .. role:: www .. role:: rtd .. role:: underline .. role:: small ---- .. image:: images/logo.png :title-logo:`»brig«` :white-bg:`Ein Werkzeug zur sicheren und verteilten` :white-bg:`Dateisynchronisation` .. note:: - Meine schwer versehrten Damen und Herren - Dies ist eine Projektvorstellung. - Ihr werdet heute zu Versuchskaninchen ausgebildet. - "Unverständlichste Folie" - Begriffserklärung des Titels. - Unterscheidung: Synchronisieren / Austauschen - "sicher" ist schwammig - "dezentral" heißt ohne zentralen Server (wie git) - Name: Zweimaster, wendig, leichtgewichtig, verteilt Datenströme. ---- Um was geht's? ============== | * Einführung * Das Problem * Demo * Was hab ich da grad gesehen? * Hilfe! * :strike:`Applaus!` Fragen? .. note:: - Demo nimmt ca. 50% Zeit ein, wird also nicht so trocken. 
- Viel Terminal, wenig Bling-Bling. Also sehr technisch orientiert. - Viel Stoff für 45 minuten, muss schnell reden, ihr werdet am Ende abgefragt. Fragen bitte erst gegen Schluss stellen, bei der Demo könnt ihr aber gern was fragen wenn ihr was nicht versteht oder seht. ---- Wer ist'n das? ============== .. note:: - Aus dieser Hochschule. - Vollzeit München. - Open Source Entwickler (rmlint) - Wer mehr über mich wissen will, darf gern nachher fragen. - Wer ich bin, ist ja eigentlich unwichtig… Darum geht's in dem Vortrag auch nicht. | **Chris Pahl.** | | :small:`Wer mehr über mich wissen will:` :small:`https://sahib.github.io` ---- Es war einmal… ============== | .. note:: - Dann mal rein ins Thema... - Umfrage: Wer benutzt... * Dropbox oder andere Cloud Storage Anbieter (OneDrive, Google Drive) * ownCloud oder nextCloud * Syncthing, git-annex, resilio * Was selbst gebasteltetes? * git Ihr seht schon: Es gib so einige Tools die man benutzen kann und alle haben unterschiedliche Stärken. Dropbox ist wohl eins der am meisten genutzten Tools. .. image:: images/dropbox.png ---- Das Problem =========== .. note:: - Ihr erwartet jetzt sicherlich, dass ich euch sage was schlecht an Dropbox ist. - Erstmal nicht so viel, es ist sehr einfach benutzbar und meist verfügbar -> verbreitet. - zentral, unsicher by default (Zusatzsoftware Boxcryptor), us unternehmen, proprietär. - Zusammenarbeit über Dropbox (zB an Quelltext) funktioniert nicht wirklich. - Dateiaustausch ist eine Art Babel: Jeder benutzt was anderes. - Am längsten dauert der Handshake bis man sich auf's Tool geeinigt hat. - Siehe comic. .. image:: images/xkcd-file-transfer.png :width: 75% ---- Was ist das Ziel? ================= | .. image:: images/donald.png :align: center :width: 50% | :donald:`MAKE FILE SYNCING GREAT AGAIN!` .. note:: Und das machen wir ganz offensichtlich indem wir eine Mauer um die Cloud bauen. :) It will be tremendous. Great stuff. ---- Geht das auch detaillierter? 
============================ Dinge die Dateiaustausch sein :underline:`sollte`: | * Einfach. * Sicher. * Schnell. * Versioniert. * Dezentral. * Frei. .. note:: Viele Buzzwords. Und viele davon widersprechen sich auch noch. Aber wir reden ja von einer idealen Lösung. Spruch: "Ein Tool das alles kann, kann nichts richtig gut" * Einfach: User Discovery, FUSE Filesystem, ist kompatibel, nervt nicht. * Sicher: Daten sind by default stets verschlüsselt. * Schnell: Eigentlich optional, aber Video Streaming ist nett. * Versioniert: git junkie, Zusammenarbeit wird möglich, keine revisions filenames mehr. * Dezentral: Datenhoheit. Dropbox hinterlässt ein schlechten Nachgeschmack. * Frei: Im Sinne von GPL. Sichere Software muss frei sein - andere Gründe zählen natürlich auch. ---- Aber, aber… =========== **Ja**, es gibt schon einige dezentrale Tools. .. image:: images/other-tools.png :width: 120% :class: inline * Anderer Fokus. * Andere Basis. * Andere Features. * Gleiches Ziel. :small:`(Siehe: https://brig.readtheodocs.org/comparison.html)` .. note:: - So Vergleichdiskussion sind müßig und können den ganzen Tag dauern, ohne dass am Ende was dabei rauskommt... - Resilio ist proprietär, Syncthing fokusiert sich weniger auf Versionierung mehr auf Usability, git-annex genau andersrum. - Mein Tool macht aber auch einige Dinge anders, die nicht direkt vergleichbar sind. Jetzt machen wir hier gedanklich mal einen Cut. ----- IPFS ==== .. image:: images/ipfs.png »Inter-Planetary-File-System« .. note:: - Ist wie beim Trinken: Man braucht eine gute Basis. - Interplanetary Filesystem. Das ist wörtlich zu verstehen. - Das ganze soll eine Art dezentrale, sichere versionierte Alternative zum heutigen Internet werden. Jeder Nutzer ist Server und Client zugleich und hat eine eindeutige ID. ---- Was kann das so? ================ | .. code-block:: bash $ echo 'Hallo Augsburg!' | ipfs add added QmbLr7bEQkC85EEGEmQk42dLz25VBy2L6iHyZQu | .. 
code-block:: bash $ ipfs cat QmbLr7bEQkC85EEGEmQk42dLz25VBy2L6iHyZQu Hallo Augsburg! .. note:: Vorteil: Ganz ohne zentralen Server. Nachteil: Kann bereits zum filesharing benutzt werden, aber nur sehr rudiemntär. | .. code-block:: bash $ ipfs id -f '\n' QmeLNNcryy9Ky1dXnfnEPaDQ2KuJ6yafaSRZssjQ83ie84 ---- »brig« ====== .. image:: images/tux.png :class: img-tux :width: 25% .. image:: images/gopher.png :class: img-gopher :width: 33% * Hash Nanny für ipfs. * In ``Go`` geschrieben. * **Zielgruppe:** Linux User. Erstmal. | Entwicklungsgeschichte: ----------------------- * Ende 2015: *Masterprojekt.* * Ende 2016: *Pausiert.* * Ende 2017: *Hobbyprojekt.* * **Erster Beta Release heute!** .. note:: - Betonung auf Hash Nanny. - Das ist das erste "beta" release (0.1.0-beta) - WELTPREMIERE! - Mit sehr viel Vorsicht benutzen. - Alles kann sich auserdem noch ändern. - Release early, release often. ---- :class: small-list Kurz gesagt: Fokus ================== - Balance zwischen Sicherheit und Usability. - Effizienz ist nett, aber kein primärer Fokus. - Kompatibilität zu gewohnten Konzepten. - Komplexität hinter einfachen Interface. .. note:: Natürlich kann kein Tool gleichzeitig einfach zu benutzen, sicher und effizient sein. Es soll eine Balance zwischen Benutzbarkeit und Sicherheit geben - die Effizienz (hat zumindest momentan) eher drunter gelitten. Siehe Demo. ---- Demo ---- .. code-block:: bash $ brig mv raiders twix # sonst ändert sich aber nix. ---- :class: small-list Workflow ======== .. note:: - Synchronisieren kleines Ein mal Eins * Initialer Setup :small:`(nur einmal)` * Remotes finden & hinzufügen :small:`(nur einmal)` * Diff anzeigen :small:`(optional)` * Synchronisieren * Konflikte beheben :small:`(eventuell)` .. image:: images/workflow.png :width: 50% :class: workflow ---- :data-rotate: 0 Disclaimer: Sicherheit? ======================= .. note:: Ich hab ziemlich oft schon das Wort "sicher" benutzt. 
Wenn ich sagen würde, dass »brig« sicher ist, dann heißt das eigentlich nur dass ich beim Schreiben der Software die Absicht hatte, sichere Software zu schreiben. Es kommt auf die Angriffsvektoren an. Und selbst wenn ich das geschafft hätte, dann kann man das Tool sicher benutzen, aber jemand könnte immer noch an deinen ungelockten PC gehen... (uvm) Übertragung und Speicherung ist sicher gemacht, aber man könnte zb derzeit trotzdem mit wenig Mühe herausfinden wer mit wem kommuniziert. Philosophie ist allgemein: Ein Schloss, dass man nur unter Mühe öffnen kann, benutzt kaum einer. | .. image:: images/xkcd-security.png :width: 110% ---- :data-rotate: 90 Dezentralität ============= .. note:: - Was heißt jetzt eigentlich dezentral? - Problem: Beide müssen zur selben Zeit online sein. - Braucht evtl. archiv instanz. - funktioniert besser oder schlechter abhaengig vom Usecase: - Austausch von Folien und Notizen zwischen Studenten und Professoren: gut. - Einseitiges Herunterladen von Formularen bei einer Behoerde: schlecht. .. image:: images/map.png :width: 120% ---- :class: small-list :data-rotate: 180 Nutzermanagement ================ …existiert nicht. | .. note:: - ...Ist nicht wirklich vorhanden. - Es gibt keine registrierten Nutzer. - Zwei Nutzer können den selben Displaynamen haben! - Aber nicht den selben Fingerprint. - Email bzw. Jabber ID ähnlich. | .. image:: images/id.png :width: 100% ---- :data-rotate: 270 Versionierung ------------- .. image:: images/mona.png :width: 100% .. note:: - brig = git - diff - versionierung hilft im Alltag, aber git ist normal nicht tauglich dafür. ---- :data-rotate: 360 Pinning ======= .. note:: - Pinning Beispiel: Musik ordner auf Handy und Server. - Mechanismus um Sachen wie "Speichere alle Dateiversionen der 30 letzten Tage" zu implementieren. - Komplette Separation von Daten und Metadaten. | | .. image:: images/pin.png :width: 40% ---- Roadmap ======= :class: small-list - Selektives Sharing. - Gateway für Hyperlinks. 
- Archive Instances. - Autosync und vieles mehr... - Performance, mehr Dokumentation... | .. image:: images/binocs.png :class: future :width: 40% | **Hauptproblem:** Nur ein Entwickler. .. note:: ... und der arbeitet nen Vollzeitjob. Keine gute Basis für eine stabile Weiterentwicklung. Features die noch kommen sollen: - Knoten, die automatisch synchroniseren (als »blessed repo« wie bei git) - Fingerprints als QR Code - Mobile Version mit simplen Dateibrowser. - Verbessertes User-Management. ---- :id: help-slide Hilfe? Erwünscht. ================= .. note:: Problem: Man macht ein Release und kriegt 20 Feature Requests, mit teils total widersprüchlichen Anforderungen. Das artet in Feature-itis aus (-> Wollmilchsau) Am Ende steht man mit eine Software da, die Kaffee kochen kann, dafür aber nur so mittel und dessen Name mit "j" beginnt. (Müsst ihr mal drauf aufpassen... jDownloader, jQuery, java) Experience Reports: - Fokus auf Problemen, nicht auf Lösungen. - Was ihr tun wolltet - Was ihr eigentlich gemacht/erwartet habt - Warum das nicht so ganz funktioniert hat - Bitte dafür ein issue report auf github aufmachen. Mithilfe via **Experience Reports.** 1. Was wolltet ihr machen? 2. Was habt ihr versucht? 3. Warum ging es nicht? .. image:: images/wollmilchsau.png :class: wollmilchsau :width: 45% | Und sonst? ---------- - Bug reports. :small:`(brig bug)` - Pull requests mit Fixes. - **Keine** Feature Requests! ---- Probem gelöst? ============== .. note:: Sagt ihr es mir... - Ja, die Lösung ist also ganz einfach... man schreibt einfach ein Tool das alles richtig macht, jeder nutzt das und gut ist. - Abe ja, sagt ihr es mir: Waere so ein Tool hilfreich fuer manche von euch? - Zugegeben: Es ist noch nicht so einfach benutzbar dank Terminal, aber das kann ja noch werden. Geht ja erstmal um die technische Basis. - Die Zeit wird es zeigen. .. image:: images/xkcd-standards.png :width: 110% ---- Letzte Worte ============= .. 
note:: Wer Fragen oder Anmerkungen jetzt abgeben will, der möges dies nun tun oder für immer schweigen. Ansonsten könnt ihr mich natürlich auch einfach auf dem Gang ansprechen. Slides sind online auf der online schedule. | :rtd:`http://brig.rtfd.org` :github:`github.com/sahib/brig` :www:`http://sahib.github.io/brig/public` | *Fragen?* ================================================ FILE: docs/talk/requirements.txt ================================================ argh==0.26.2 docutils==0.14 hovercraft==2.5 lxml==4.6.2 pathtools==0.1.2 Pygments==2.2.0 PyYAML>=4.2b1 svg.path==2.2 watchdog==0.8.3 ================================================ FILE: docs/talk/style.css ================================================ @import url(http://fonts.googleapis.com/css?family=Vollkorn); body { background-image: url(images/noise.png); background-color: #0094d0; color: #eee; } a { color: #aaaaee; } ul { font-family: Vollkorn; font-size: 45px; font-style: normal; font-variant: normal; font-weight: 400; display: block; margin: auto; margin-bottom: 10px; width: fit-content; } ol { font-family: Vollkorn; font-size: 30px; font-style: normal; font-variant: normal; font-weight: 400; display: block; margin: auto; margin-bottom: 10px; width: fit-content; min-width: 25vw; } h1, h2, h3 { display: block; margin: auto; margin-bottom: 30px; width: fit-content; padding-left: 5px; padding-right: 10px; padding-top: 10px; background-color: white; color: #124255; font-family: Vollkorn; font-size: 60px; font-style: normal; font-variant: normal; font-weight: 700; } h2 { font-size: 40px; } img { display: block; margin: auto; } p { font-family: Vollkorn; font-size: 44px; font-style: normal; font-variant: normal; font-weight: 400; line-height: 50px; display: block; margin: auto; margin-top: 3vh; margin-bottom: 3vh; width: fit-content; } blockquote { font-family: Vollkorn; font-size: 40px; font-style: normal; font-variant: normal; font-weight: 400; line-height: 30px; } pre { font-family: Fira Code; 
font-size: 25px; font-style:normal; font-weight: 400; line-height: 40px; padding: 20px; /* background-color: #000; border-color: #000; */ border-style: solid; border-radius: 10px; display: block; margin-right: 30vw; min-width: 40vw; color: #0094d0; background-color: #fff; border-color: #fff; } /* Special roles */ .slide-number { ;font-family: Vollkorn; font-style: italic; font-size: 100px; font-weight: 700; opacity: 0.3; } .title-logo { font-size: 100px; font-weight: 700; display: block; text-align: center; } .donald { font-variant: small-caps; font-weight: 700; font-size: 40px; padding-left: 12px; padding-right: 12px; padding-top: 12px; padding-bottom: 5px; border-width: 12px; border-style: solid; border-color: white; } .white-bg { background-color: white; color: #124255; font-size: 40px; font-weight: 700; padding-left: 5px; padding-right: 5px; padding-top: 5px; display: block; margin: auto; margin-bottom: 10px; width: fit-content; } .strike { text-decoration: line-through; } .github:before { background-image: url(images/github.png); background-size: 1em 1em; background-repeat: no-repeat; display: inline-block; width: 1em; height: 1em; position: relative; top: 0.2em; padding-right: 0.3em; content: " "; } .www:before { background-image: url(images/www.png); background-size: 1em 1em; background-repeat: no-repeat; display: inline-block; width: 1em; height: 1em; position: relative; top: 0.2em; padding-right: 0.3em; content: " "; } .rtd:before { background-image: url(images/rtd.png); background-size: 1em 1em; background-repeat: no-repeat; display: inline-block; width: 1em; height: 1em; position: relative; top: 0.2em; padding-right: 0.3em; content: " "; } .workflow { position: fixed; bottom: -15vh; right: 0vw; } .wollmilchsau { position: fixed; bottom: 5vh; right: -12vw; } .img-tux { position: fixed; bottom: 10vh; right: -12vw; } .img-gopher { position: fixed; top: 10vh; left: -12vw; } .small-list > ul { font-size: 40px; } .help { font-size: 12px; } .small { font-size: 
32px; font-style: italic; } .underline { text-decoration: underline; font-style: italic; } ================================================ FILE: docs/tutorial/config.rst ================================================ .. _configurations: Configuration ------------- As mentioned earlier, we can use the built-in configuration system to configure many aspects of ``brig`` functionality to our liking. Every config entry of ``brig`` consists of 5 values: * Key - always a dotted, hierarchical path like ``fs.sync.ignore_moved``. * Value - some value that is validated depending on the key. * Default - The default value. * Documentation - A short description of what this entry can do for you. * Needs restart - A boolean indicating whether you have to restart the service to take effect. When you type ``brig cfg`` you will see all keys with the aforementioned entries: .. code-block:: bash $ brig config ls [...] fs.sync.ignore_moved: false (default) Default: false Documentation: Do not move what the remote moved Needs restart: no [...] Additionally, we of course support the usual operations: .. code-block:: bash $ brig config get repo.password_command pass brig/repo/password $ brig config set repo.password_command "pass brig/repo/my-password" Profiles ~~~~~~~~ .. todo:: Implement configuration profiles. ================================================ FILE: docs/tutorial/coreutils.rst ================================================ Adding & Viewing files ---------------------- Now let's add some files to ``brig``. We do this by using ``brig stage``. It's called ``stage`` because all files first get added to a staging area. If you want, and are able to remember that easier, you can also use ``brig add``. .. 
code-block:: bash $ echo "Hello World" > /tmp/hello.world $ brig stage /tmp/hello.world $ brig cat hello.world Hello World $ brig ls SIZE MODTIME PATH PIN 986 B Mon Mar 4 23:04:07 CET 2019 /README.md ✔ 12 B Mon Mar 4 23:04:23 CET 2019 /hello.world ✔ This adds the content of ``/tmp/hello.world`` to a new file in ``brig`` called ``/hello.world``. The name was automatically chosen from looking at the base name of the added file. All files in ``brig`` have their own name, possibly differing from the content of the file they originally came from. Of course, you can also add whole directories. .. note:: ``brig`` always copies the data. If you happen to change the original file, the change will not propagate to the file in ``brig``. You have to re-stage it to reflect the change. If you want to use a different name, you can simply pass the new name as second argument to ``stage``: .. code-block:: bash $ brig stage /tmp/hello.world /hallo.welt You also previously saw ``brig cat`` which can be used to get the content of a file again. ``brig ls`` in contrast shows you a list of currently existing files, including their size, last modification time, path and pin state [#]_. One useful feature of ``brig cat`` is that you can output directories as well. When specifying a directory as path, a ``.tar`` archive is being outputted. You can use that easily to store whole directories on your disk or archive in order to send it to some client for example: .. code-block:: bash # Create a tar from root and unpack it to the current directory. $ brig cat | tar xfv - # Create .tar.gz out of the /photos directory. $ brig cat photos | gzip -f > photos.tar.gz .. [#] Pinning and pin states are explained in :ref:`pinning-section` and are not important for now. Coreutils --------- You probably already noticed that a lot of commands you'd type in a terminal on a normal day have a sibling as ``brig`` command. Here is a short overview of the available commands: .. 
code-block:: bash $ brig mkdir photos $ brig touch photos/me.png $ brig tree • ✔ ├── hello.world ✔ ├── photos/ ✔ │ └── me.png ✔ └── README.md ✔ 2 directories, 2 files $ brig cp photos/me.png photos/moi.png $ brig mv photos/me.png photos/ich.png # NOTE: There is no "-r" switch. Directories are always deleted recursively. $ brig rm photos Please refer to ``brig help `` for more information about those. They work in most cases like their counterparts. Also note that there is no ``brig cd`` currently. All paths must be absolute. Hints - Configuring encryption & compression -------------------------------------------- Oftentimes you might not want to encrypt all files. A typical use case would be to have a ``/public`` folder where you put in files to share with your friends. Probably there are some freely available files in there, you got from some corners of the internet (for example your excellent meme collection). Those files don't need encryption and probably not even compression. If you want to exclude the ``/public`` folder from both you can give ``brig`` a hint: .. code-block:: bash # let's assume /public exists already: $ brig hints set /public --compression none --encryption none $ brig hints PATH ENCRYPTION COMPRESSION / aes256gcm guess /public none none As you might notice, there is already one hint set by default for the root directory. If you want to change the global defaults, you can simply modify this one. Below you see the hint you just created. This however does not change any existing files. It just tells ``brig`` »next time you modify those files, please use those algorithms«. If you want to make sure the files are changed to use the algorithm you set, then you can use the ``stage --recode`` command: .. code-block:: bash $ brig stage --recode /public If you do this, you can observe a small change when looking at the ``IsRaw`` attribute of the file's info: .. code-block:: bash # This was 'true' before the recode. 
$ brig info --format '{{ .IsRaw }}' /public/cat-meme.png false The ``IsRaw`` attribute tells you if you could download this file by its hash from an IPFS gateway. If its true, ``brig`` does not touch it at all. This is an useful attribute you want to share a file with your non-tech friends who prefer to click on a regular HTTP URL: You can just point them a [IPFS gateway](https://docs.ipfs.io/concepts/ipfs-gateway). Available encryption algorithms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: The ``THROUGHPUT`` numbers shows the relative, average performance compared to ``none``. Your mileage may vary a lot. Those number should serve as rough guideline and were obtained by the built-in ``briog debug iobench`` utility using the ``fuse-{read,write}-mem`` benchmark. If you want the details you can run the benchmarks yourself. As you can see from the numbers, the additional encoding by brig does not make things substantially slower. If you wonder how some benchmark are faster than ``none``: Compression compacts the stream heavily (if the data is well compressible). Therefore less bytes need to be transferred and encrpyted or decrypted. Quite surprisingly, in some cases compression can make things faster. Also note that this was measured without caching. If no data is modified your operating system will likely cache data for you and speed up things. +----------------+----------------------------------------------------------------------------+--------------------+-----------------+ | NAME | DESCRIPTION | READ THROUGHPUT | WRITE THROGHPUT | +================+============================================================================+====================+=================+ | ``aes256-gcm`` | The default. AES with 256 bit key in GCM cipher mode. Fast on modern CPUs. | 80-85% | 85-95% | +----------------+----------------------------------------------------------------------------+--------------------+-----------------+ | ``chacha20`` | Streaming cipher with Poly1305 MAC. 
Good for old CPUs without AES-NI. | 70-85% | 80-90% | +----------------+----------------------------------------------------------------------------+--------------------+-----------------+ | ``none`` | Disables encryption. Fast, but only good for public files. | 100% | 100% | +----------------+----------------------------------------------------------------------------+--------------------+-----------------+ Available compression algorithms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------+----------------------------------------------------------------------------+-------------------+---------------------+ | NAME | DESCRIPTION | READ THROUGHPUT | WRITE THROUGHPUT | +================+============================================================================+===================+=====================+ | ``snappy`` | High throughput, relative low compression ratio. | 80-105% | 95-130% | +----------------+----------------------------------------------------------------------------+-------------------+---------------------+ | ``lz4`` | Middle throughput, slightly higher compression ratio than snappy. | 77-93% | 85-105% | +----------------+----------------------------------------------------------------------------+-------------------+---------------------+ | ``zstd`` | Low throughput, highest compression ratio. | 55-95% | 35-100% | +----------------+----------------------------------------------------------------------------+-------------------+---------------------+ | ``guess`` | Chooses suitable algorithm based on file ending, size and mime type. | ``-`` | ``-`` | +----------------+----------------------------------------------------------------------------+-------------------+---------------------+ | ``none`` | Disables compression. 
| 100% | 100% | +----------------+----------------------------------------------------------------------------+-------------------+---------------------+ ================================================ FILE: docs/tutorial/gateway.rst ================================================ Using the gateway / UI ---------------------- Gateway Screenshots ~~~~~~~~~~~~~~~~~~~ The gateway UI consists of several tabs, which are briefly shown below to give you a short impression of it. Login screen ^^^^^^^^^^^^ Allows you to login. You can also come back here to change the user. It is also possible to login anonymously, as you will see below. .. image:: ../_static/gateway-login.png :alt: Gateway login screen :width: 66% File Browser ^^^^^^^^^^^^ The main view. Lists the directory tree and file attributes. Allows for modification, uploading and everything what you'd expect. .. image:: ../_static/gateway-files.png :alt: Gateway files view :width: 66% Changelog View ^^^^^^^^^^^^^^ A list of commits. You are able to jump back to a specific commit. .. image:: ../_static/gateway-changelog.png :alt: Gateway changelog view :width: 66% Trashbin ^^^^^^^^ A list of deleted files. If you deleted something you will be able to get it back here. .. image:: ../_static/gateway-trashbin.png :alt: Gateway trashbin view :width: 66% Remote List ^^^^^^^^^^^ If your user is privileged enough, you can see and edit the list of remotes and adjust settings in it. .. image:: ../_static/gateway-remotes.png :alt: Gateway remotes view :width: 66% Remote Add Dialog ^^^^^^^^^^^^^^^^^ A sample dialog. The UI uses many of them. .. image:: ../_static/gateway-add-remote.png :alt: Gateway add remote view :width: 66% --------- Introduction ~~~~~~~~~~~~ Many users will not run ``brig`` themselves, so you won't be able to ``brig sync`` with them. Chances are that you still want to send or present them your files without too much hassle. 
``brig`` features a *Gateway* to HTTP(S), which comes particularly handy if you happen to run a public server and/or want to provide a GUI to your users. It also includes an easy to use UI that is enabled by default. Before you do anything, you need to a »user« to your gateway. This user is different than remotes and describes what credentials can be used to access the gateway. You can add add a new user like this: .. code-block:: bash $ brig gateway user add admin my-password # or shorter: # brig gw u a admin my-password $ brig gateway user list NAME FOLDERS admin / The gateway is disabled by default. If you want to start it, use this command: .. code-block:: bash $ brig gateway start Without further configuration, this will create a HTTP (**not HTTPS!**) server on port ``6001``, which can be used already. If you access it under ``http://localhost:6001`` you will see a login mask where you can log yourself in with the credentials you entered earlier. If you'd like to use another port than ``6001``, you can do so by setting the respective config key: .. code-block:: bash $ brig cfg set gateway.port 7777 .. note:: You can always check the status of the gateway: .. code-block:: bash $ brig gateway status This will also print helpful diagnostics if something might be wrong. The gateway can be stopped anytime with the following command: .. code-block:: bash $ brig gateway stop There is also a small helper that will print you a nice hyperlink to a certain file called ``brig gateway url``: .. code-block:: bash $ brig gateway url README.md http://localhost:6001/get/README.md Folder management ~~~~~~~~~~~~~~~~~ You probably do not want to offer your files to everyone that have a link. Therefore you can restrict access to a few folders (``/public`` for example) for individual users. By default a user is allowed to see everything. If you want a user that can only access the ``/public`` folder simply add him as follows: .. 
code-block:: bash $ brig gw user add my-new-user /public Now only the files in ``/public`` (and including ``/public`` itself) are accessible from the gateway. User right management ~~~~~~~~~~~~~~~~~~~~~ We already discussed the adding of a user above. There is a little more to that though. You can add users with different rights. In total there are 5 different rights currently: * **fs.view**: View and list all files. * **fs.edit**: Edit and create new files. * **fs.download**: Download file content. * **remotes.view**: View the remotes tab. * **remotes.edit**: Edit the remotes tab. When you add users you can give a new user a comma separated list of rights via the ``-r`` switch: .. code-block:: bash $ brig gw user add my-new-user -r 'remotes.view,remotes.edit' For your convenience there are a bunch of presets which will do the work for you in 99% of the cases: * ``--role-admin, -a``: Add this user as admin (short for »-r 'fs.view,fs.edit,fs.download,remotes.view,remotes.edit'«) * ``--role-editor, -b``: Add this user as collaborator (short for »-r 'fs.view,fs.edit,fs.download,remotes.view'«) * ``--role-collaborator, -c``: Add this user as collaborator (short for »-r 'fs.view,fs.edit,fs.download'«) * ``--role-viewer, -d``: Add this user as viewer (short for »-r 'fs.view,fs.download'«) * ``--role-link-only, -e``: Add this user as linker (short for »-r 'fs.download'«) Running the gateway with HTTPS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, we run with ``http`` only. If you want to expose the gateway under a domain to the internet you should secure it with ``https``. Since brig's gateway might is likely not the only service you want to expose we recommend a webserver like `Caddy`_ (which is great software in general!) that automatically fetches certificates and reverse-proxies traffic to the gateway. A minimal ``Caddyfile`` can look like this: .. code-block:: # Assumption: # brig gateway runs on the same server as Caddy # and is reachable under localhost:6001. 
your.domain.org { route /gateway/* { uri strip_prefix /gateway reverse_proxy http://localhost:6001 } } With this setup, your gateway would be reachable under ``https://your.domain.org/gateway``. You can of course choose a different route or even a sub-domain. Maybe you also want to setup compression or require a client certificate. Refer to the Caddy documentation for more information. .. _Caddy: https://caddyserver.com/docs/getting-started Allowing anonymous access ~~~~~~~~~~~~~~~~~~~~~~~~~ If you want to run a public gateway (for example for a group of friends), then you might want to enable anonymous access. In this mode you will be logged in right away to the gateway without facing the login screen. You still have the option to go to the login screen and become another user. You can enable the anonymous mode like this: .. code-block:: bash $ brig cfg set gateway.auth.anon_allowed true Additionally you have to create an ``anon`` user. This allows you to define what rights the anonymous users have and what folders they may access: .. code-block:: bash # Give the anonymous users only access to /public and don't let them modify anything: $ brig gw u add anon anon --role-viewer /public If you want to change the name of the ``anon`` user to something else (for whatever reason) you can do so by setting the ``auth.anon_user`` variable. You also have to re-add the user above with the new name. .. code-block:: bash $ brig cfg set gateway.auth.anon_user some_other_anon_name_that_is_not_used ================================================ FILE: docs/tutorial/init.rst ================================================ Creating a repository --------------------- You need a central place where ``brig`` stores its metadata. This place is called a »repository« or short »repo«. This is not the place, where your files are stored. Those are copied (if you did setup IPFS in a normal way) to ``~/.ipfs``. 
Keep in mind that ``brig`` will copy files and thus will never modify the original files on your hard drive. By creating a new repository you also generate your identity, under which your buddies can later **find** and **authenticate** you. But enough of the mere theory, let's get started: .. code-block:: bash # Create a place where we store our metadata. # The repository is created by default in the current working directory. # (This can be changed via `brig --repo`) $ mkdir repo && cd repo $ brig init ali@woods.org/desktop -w 'echo my-password' _____ / /\ ___ / /\ / /::\ / /::\ / /\ / /:/_ / /:/\:\ / /:/\:\ / /:/ / /:/ /\ / /:/~/::\ / /:/~/:/ /__/::\ / /:/_/::\ /__/:/ /:/\:| /__/:/ /:/___ \__\/\:\__ /__/:/__\/\:\ \ \:\/:/~/:/ \ \:\/:::::/ \ \:\/\ \ \:\ /~~/:/ \ \::/ /:/ \ \::/~~~~ \__\::/ \ \:\ /:/ \ \:\/:/ \ \:\ /__/:/ \ \:\/:/ \ \::/ \ \:\ \__\/ \ \::/ \__\/ \__\/ \__\/ A new file README.md was automatically added. Use 'brig cat README.md' to view it & get started. $ ls config.yml gateway immutable.yml keyring metadata README.md remotes.yml The name you specified after the ``init`` is the name that will be shown to other users and by which you are searchable in the network. See :ref:`about_names` for more details on the subject. Once the ``init`` ran successfully there will be a daemon process running in the background. Every other ``brig`` commands will communicate with it via a local network socket. If the daemon does not run yet, it will be started for you in the background without you noticing. .. note:: If no IPFS daemon is running, ``brig`` will start one for you. If you don't have ``ipfs`` installed, it will even install and set it up for you. By default, ``brig init`` will also set some default options that help ``brig`` to run a bit smoother. If you do not want those, please add ``--no-ipfs-optimization`` to the ``init`` command above. .. 
_about_names: Choosing and finding names ~~~~~~~~~~~~~~~~~~~~~~~~~~ You might wonder what the name you pass to ``init`` is actually for. As previously noted, there is no real restriction for choosing a name, so all of the following are indeed valid names: - ``ali`` - ``ali@woods.org`` - ``ali@woods.org/desktop`` - ``ali/desktop`` It's however recommended to choose a name that is formatted like a XMPP/Jabber-ID. Those IDs can look like plain emails, but can optionally have a »resource« part as suffix (separated by a »/« like ``desktop``). Choosing such a name has two advantages: - Other peers can find you by only specifying parts of your name. Imagine all of the *Smith* family members use ``brig``, then they'd possibly those names: * ``dad@smith.org/desktop`` * ``mom@smith.org/tablet`` * ``son@smith.org/laptop`` When ``dad`` now sets up ``brig`` on his server, he can use ``brig net locate -m domain 'smith.org'`` to get all fingerprints of all family members. Note however that ``brig net locate`` **is not secure**. Its purpose is solely discovery, but is not able to verify that the fingerprints really correspond to the persons they claim to be. This due to the distributed nature of ``brig`` where there is no central or federated authority that coordinate user name registrations. So it is perfectly possible that one name can be taken by several repositories - only the fingerprint is unique. - Later development of ``brig`` might interpret the user name and domain as email and might use your email account for verification purposes. Having a resource part is optional, but can help if you have several instances of ``brig`` on your machines. i.e. one user name could be ``dad@smith.org/desktop`` and the other ``dad@smith.org/server``. Running the daemon and viewing logs ----------------------------------- The following sections are not a required read. They are useful to keep in mind, but in the ideal case you're don't even need to think about the daemon. 
As discussed before, the daemon is being started on demand in the background. Subsequent commands will then use the daemon. For debugging purposes it can be useful to run in the daemon in the foreground. You can do this with the ``brig daemon`` commands: .. code-block:: bash # Make sure no prior daemon is running: $ brig daemon quit # Start the daemon in the foreground and log to stdout: $ brig daemon launch -s If you want to quit the instance, either just hit CTRL-C or type ``brig daemon quit`` into another terminal window. Logging ~~~~~~~ Unless you pass the ``-s`` (``--log-to-stdout`` flag) as above, all logs are being piped to the system log. You can follow the log like this: .. code-block:: bash # Follow the actual daemon log: $ journalctl -ft brig This assumes you're using a ``systemd``-based distribution. If not, refer to the documentation of your syslog daemon. Using several repositories in parallel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It can be useful to run more than one instance of the ``brig`` daemon in parallel. Either for testing purposes or as actual production configuration. In order for the ``brig`` client to know what daemon to talk to, you have to be specific about the repository (``--repo``) path. Here is an example: .. code-block:: bash # Be explicit $ brig --repo /tmp/ali init ali -x --ipfs-path ~/.ipfs $ brig --repo /tmp/bob init bob -x --ipfs-path ~/.ipfs2 # Since you specified --repo we know what daemon to talk to. # You can also set BRIG_PATH for the same effect: $ BRIG_PATH=/tmp/ali brig ls # Add some alias to your .bashrc to save you some typing: $ alias brig-ali="brig --repo /tmp/ali" $ alias brig-bob="brig --repo /tmp/bob" # Now you can use them normally, # e.g. by adding them as remotes each: $ brig-ali remote add bob $(brig-bob whoami -f) $ brig-bob remote add ali $(brig-ali whoami -f) .. note:: It is possible to have several repositories per IPFS instances. 
Since things might get confusing though when it comes to pinning, it is recommended to have several IPFS daemons running in this case. This is done via the ``--ipfs-port`` flag in the example above. Locking the repository. ----------------------- The repository on disk is not encrypted. If you plan on moving the repository to somewhere else, e.g. by copying it onto an USB stick and physically moving it somewhere else you should always consider to first create an encrypted archive out of it and unpack it on the target machine. ``brig`` has a built-in helper for this. Please refer to ``brig pack-repo --help`` and ``brig unpack-repo --help``. ================================================ FILE: docs/tutorial/intro.rst ================================================ .. _getting_started: Getting started ================ This guide will walk you through the steps of synchronizing your first files over ``brig``. You will learn about the concepts behind it along the way. Most of the steps here will include working in a terminal, since this is the primary way to interact with ``brig``. Once setup you have to choice to use a browser application though. Precursor: The help system -------------------------- Before we dive in, we go over a few things that will make your life easier along the way. ``brig`` has some built-in helpers to serve as support for your memory. If you're not interested in that you can skip right to the next section. But please check those help texts before asking questions. Built-in reference documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Every command offers detailed built-in help, which you can view using the ``brig help`` command. This often usage examples too: .. code-block:: bash $ brig help stage NAME: brig stage - Add a local file to the storage USAGE: brig stage [command options] ( []|--stdin ) CATEGORY: WORKING TREE COMMANDS DESCRIPTION: Read a local file (given by »local-path«) and try to read it. 
This is the conceptual equivalent of »git add«. [...] EXAMPLES: $ brig stage file.png # gets added as /file.png $ brig stage file.png /photos/me.png # gets added as /photos/me.png $ cat file.png | brig stage --stdin /file.png # gets added as /file.png OPTIONS: --stdin, -i Read data from stdin Shell autocompletion ~~~~~~~~~~~~~~~~~~~~ .. warning:: The shell autocompletion is still under development. It might still yield weird results and the usability needs to be improved definitely. Any help welcome! If you don't like to remember the exact name of each command, you can use the provided autocompletion. For this to work you have to insert this at the end of your ``.bashrc``: .. code-block:: bash source $GOPATH/src/github.com/sahib/brig/autocomplete/bash_autocomplete Or if you happen to use ``zsh``, append this to your ``.zshrc``: .. code-block:: bash source $GOPATH/src/github.com/sahib/brig/autocomplete/zsh_autocomplete After starting a new shell you should be able to autocomplete most commands. Try this for example by typing ``brig remote ``. Other shells are not supported right now sadly. Open the online documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By typing ``brig docs`` you'll get a tab opened in your default browser with this domain loaded. Please stop typing ``brig docs`` into Google and save some energy. Reporting bugs ~~~~~~~~~~~~~~~ If you need to report a bug (thank you!) you can use a built-in utility to do that. It will gather all relevant information, create a report and open a tab with the *GitHub* issue tracker in a browser for you. Only thing left for you is to fill out some questions in the report and include anything you think is relevant. .. code-block:: bash $ brig bug To actually create the issue you sadly need an *GitHub* `account `_. If you don't have internet or do not want to sign up, you can still generate a bug report template via ``brig bug -s``. 
================================================ FILE: docs/tutorial/mounts.rst ================================================ Mounting repositories --------------------- Using commands like ``brig cp`` might not feel very seamless, especially when being used to tools like file browsers. And indeed, those commands are only supposed to serve as a low-level way of interacting with ``brig`` and as way for scripting own, more elaborate workflows. For your daily workflow it is far easier to mount all files known to ``brig`` to a directory of your choice and use it with the tools you are used to. To accomplish that ``brig`` supports a FUSE filesystem that can be controlled via the ``mount`` and ``fstab`` commands. Let's look at ``brig mount``: .. code-block:: bash $ mkdir ~/data $ brig mount ~/data $ cd ~/data $ cat hello-world Hello World $ echo 'Salut le monde!' > salut-monde.txt # There is no difference between brig's "virtual view" # and the contents of the mount: $ brig cat salut-monde.txt Salut le monde! You can use this directory like a normal one, but check for the CAVEATS below. You can have any number of mounts. This proves especially useful when only mounting a subdirectory (let's say we have a directory called ``/Public``) with the ``--root`` option of ``brig mount`` and mounting all other files as read only (``--readonly``). .. code-block:: bash $ brig mount ~/data --readonly $ brig mkdir /writable $ brig touch /writable/please-edit-me $ mkdir ~/rw-data $ brig mount ~/rw-data --root /writable $ echo 'writable?' > ~/data/test read-only file system: ~/data/test $ echo 'writable!' > ~/rw-data/test $ cat ~/rw-data/test writable! An existing mount can be removed again with ``brig unmount ``: .. code-block:: bash $ brig unmount ~/data $ brig unmount ~/rw-data $ brig rm writable Remote access ~~~~~~~~~~~~~ Working with remote data does often not work extremely well with the file abstraction that does not play well with timeouts. 
This often causes applications to hang for indefinite times, since they are not most of the time not build for data that might not be delivered immediately. For this very common case we have the ``--offline`` flag. It will error out immediately on files that are not in our local cache: .. code-block:: bash $ brig mount /tmp/mount --offline # Or with fstab: $ brig fstab add some-mount /tmp/mount --offline If you have a remote file you want to read, you can do this to make it cached locally: .. code-block:: bash $ brig cat /remote-file > /dev/null After ``brig cat`` run, you should be able to view the file normally in the mount. .. _permanent-mounts: Making mounts permanent ~~~~~~~~~~~~~~~~~~~~~~~ All mounts that are created via ``brig mount`` will be gone after a daemon restart. If you a typical set of mounts, you can persist them with the ``brig fstab`` facility: .. code-block:: bash $ brig fstab add tmp_rw_mount /tmp/rw-mount $ brig fstab add tmp_ro_mount /tmp/ro-mount -r $ brig fstab NAME PATH READ_ONLY ROOT ACTIVE tmp_ro_mount /tmp/ro-mount yes / tmp_rw_mount /tmp/rw-mount no / $ brig fstab apply $ brig fstab NAME PATH READ_ONLY ROOT ACTIVE tmp_ro_mount /tmp/ro-mount yes / ✔ tmp_rw_mount /tmp/rw-mount no / ✔ $ brig fstab apply -u NAME PATH READ_ONLY ROOT ACTIVE tmp_ro_mount /tmp/ro-mount yes / tmp_rw_mount /tmp/rw-mount no / Et Voilà, all mounts will be created and mounted once you enter ``brig fstab apply`` or restart the daemon. The opposite can be achieved by executing ``brig fstab apply --unmount``. *CAVEATS:* The FUSE filesystem is not yet perfect and somewhat experimental. Keep those points in mind: - **Performance:** Writing to FUSE is currently somewhat *memory and CPU intensive*. Generally, reading should be fast enough for most basic use cases, but also is not enough for high performance needs. If you need to edit a file many times, it is recommended to copy the file somewhere to your local storage (e.g. 
``brig cat the_file > /tmp/the_file``), edit it there and save it back for syncing purpose. Future releases will work on optimizing the performance. - **Timeouts:** Although it tries not to look like one, we're operating on a networking filesystem. Every file you access might come from a different computer. If no other machine can serve this file we might block for a long time, causing application hangs and general slowness. This is a problem that still needs a proper solution and leaves much to be desired in the current implementation. ================================================ FILE: docs/tutorial/pinning.rst ================================================ .. _pinning-section: Pinning ------- How can we control what files are stored locally and which should be retrieved from the network? You can do this by **pinning** each file or directory you want to keep locally. Normally, files that are not pinned may be cleaned up from time to time, that means they are evaded from the local cache and need to be fetched again from the network when being accessed again. Since you still have the metadata for this file, you won't notice the difference beside some possible network lag. When you pin a file however, it will not be garbage collected and stays in your local cache until unpinned. ``brig`` knows of two types of pins: **Explicit** and **implicit**. - **Implicit pins:** This kind of pin is created automatically by ``brig`` and cannot be created by the user. In the command line output it is always shows as blue pin. Implicit pins are created by ``brig`` whenever you create a new file, or update the contents of a file. Implicit pins are managed by ``brig`` and as you will see later, it might decide to save you some space by unpinning old versions. - **Explicit pins:** This kind of pin is created by the user explicitly (hence the name) and is never done by ``brig`` automatically. 
It has the same effect as an implicit pin, but cannot be removed again by ``brig``, unless explicitly unpinned by the user. This is a good way of telling ``brig`` to never unpin this specific version. Use this with care, since it is easy to forget about explicit pins. When syncing with somebody, all files retrieved by them are by default **not pinned**. If you want to keep them for longer, make sure to pin them explicitly. Garbage collection ~~~~~~~~~~~~~~~~~~ Strongly related to pinning is garbage collection. Whenever you need to clean up some space, you can just type ``brig gc`` to remove all unpinned files from the cache. By default, the garbage collector is also run once every hour. You can change this interval by setting ``brig config set repo.autogc.interval`` to ``30m`` for example. You can also disable this automatic garbage collection by issuing ``brig config set repo.autogc.enabled false``. Repinning ~~~~~~~~~ Repinning allows you to control how many versions of each file you want to store and/or how much space you want to store at most. The repinning feature is controlled by the following configuration variables: - **fs.repin.quota**: Maximum amount of data to store in a repository. - **fs.repin.min_depth**: Keep this many versions definitely pinned. Trumps quota. - **fs.repin.max_depth**: Unpin versions beyond this depth definitely. Trumps quota. - **fs.repin.enabled**: Wether we should allow the repinning to run at all. - **fs.repin.interval**: How much time to wait between calling repinning automatically. Normally repinning will run for you every 15 minutes. You can also trigger it manually: .. code-block:: bash $ brig pin repin By default, ``brig`` will keep 1 version definitely (**fs.repin.min_depth**) and delete all versions starting with the 10th (**fs.repin.max_depth**). The default quota (**fs.repin.quota**) is 5GB. If repin detects files that need to be unpinned, then it will first unpin all files that are beyond the max depth setting. 
If this is not sufficient to stay under the quota, it will delete old versions, layer by layer starting with the biggest version first. ================================================ FILE: docs/tutorial/remotes.rst ================================================ Remotes ------- Until now, all our operations were tied only to our local computer. But ``brig`` is a synchronization tool and that would be hardly very useful without supporting other peers. We call other peers »remotes« similar to the term used in the ``git`` world. A remote consists of three things: - **A human readable name:** This name can be choose by the user and can take pretty much any form, but we recommend to sticking for a form that resembles an extended email [#]_ like »ali@woods.org/desktop«. This name is **not** guaranteed to be unique! In theory everyone could take it and it is therefore only used for display purposes. There is no central place where users are registered. - **A unique fingerprint:** This serves both as address for a certain repository and as certificate of identity. It is long and hard to remember, which is the reason why ``brig`` offers to loosely link a human readable to it. - **A bunch of settings and state:** ``brig`` knows about every remote if it is online and/or authenticated. Additionally you can set a few remote-specific configuration settings like automatic updating. .. [#] To be more exact, it resembles an `XMPP or Jabber-ID `_. If we want to find out what *our own* name and fingerprint is, we can use the ``brig whoami`` command to ask a very existential questions: .. code-block:: bash # NOTE: The hash will look different for you: $ brig whoami ali@woods.org/desktop QmTTJbkfG267gidFKfDTV4j1c843z4tkUG93Hw8r6kZ17a:W1nayTG5UMcVxy9mFFNjuZDUb7uVTnmwFYiJ4Ajr1TP3bg .. note:: The fingerprint consists of two hashes divided by a colon (:). 
The first part is the identity of your ``IPFS`` node, the second part is the fingerprint of a keypair that was generated by ``brig`` during init and will be used to authenticate other peers. When we want to synchronize with another repository, we need to exchange fingerprints and each other as remote. There are three typical scenarios here: 1. Both repositories are controlled by you. In this case you can simple execute ``brig whoami`` on both repositories and add them with ``brig remote add`` as described in the following. 2. You want to sync with somebody you know. In this case you should both execute ``brig whoami`` and send its output over a trusted side channel. Personally, I use a `secure messenger like Signal `_, but you can also use any channel you like, including encrypted mail or meeting up with the person in question. 3. You don't know each other: Get to know each other and the proceed like in the second point. There is no way to know if somebody is the person he is pretending to be, so validate that over a separate channel - that's sadly something where ``brig`` can't help you yet. If you need to get a hint of what users use a certain domain, you can use ``brig net locate`` to get a list of those: .. code-block:: bash # This command might take some time to yield results: $ brig net locate -m domain woods.org NAME TYPE FINGERPRINT ali@woods.org domain QmTTJbk[...]:W1UDvKzjRPb4rbbk[...] Please note again: Do not blindly add the fingerprint you see here. Always make sure the person you're syncing with is the one you think they are. .. todo:: This seems currently broken as it does not yield any results. Once you have exchanged the fingerprints, you add each other as **remotes**. Let's call the other side *bob*: [#]_ .. code-block:: bash $ brig remote add bob \ QmUDSXt27LbCCG7NfNXfnwUkqwCig8RzV1wzB9ekdXaag7: W1e3rNGGCuuQnzyoiBKLdoN41yQ4NfNy9nRD3MwXk6h8Vy .. 
[#] The name you choose as remote can be anything you like and does not need to match the name the other person chose for themselves. It's not a bad idea though. *Bob* has do the same on his side. Otherwise the connection won't be established, because the other side won't be authenticated. By adding somebody as remote we **authenticate** them: .. code-block:: bash $ brig remote add ali \ QmTTJbkfG267gidFKfDTV4j1c843z4tkUG93Hw8r6kZ17a: W1nayTG5UMcVxy9mFFNjuZDUb7uVTnmwFYiJ4Ajr1TP3bg Thanks to the fingerprint, ``brig`` now knows how to reach the other repository over the network. This is done in the background via IPFS and might take a few moments until a valid route to the host was found. The remote list can tell us if a remote is online: .. code-block:: bash $ brig remote list NAME FINGERPRINT ROUNDTRIP ONLINE AUTHENTICATED LASTSEEN AUTO-UPDATE bob QmUDSXt27 0s ✔ ✔ Apr 16 17:31:01 no $ brig remote ping bob ping to bob: ✔ (0.00250s) Nice. Now we know that bob is online (✔) and also that he authenticated us (✔). Otherwise ``brig remote ping bob`` would have failed. .. note:: About open ports: While ``ipfs`` tries to do it's best to avoid having the user to open ports in his firewall/router. This mechanism might not be perfect though and maybe never is. If any of the following network operations might not work it might be necessary to open the port 4001 and/or enable UPnP. For security reasons we recommend to only open the required ports explicitly and not to use UPnP unless necessary. Syncing ------- Now that we added a remote, a whole new set of features are available to us. Before we move on to do our first synchronization, let's do a quick recap of what we have done so far: - Create a repository (``brig init ``) - This needs to be done only once. - Create optional mount points (``brig fstab add ``) - This needs to be done only once. - Find & add remotes (``brig remote add``) - This needs to be done once for each peer. 
- Add some files (``brig stage ``) - Do as often as you like. As you can see, there is a bit of initial setup work, but the actual syncing is pretty effortless now. Before we attempt to sync with anybody, it's always a good idea to see what changes they have. We can check this with ``brig diff ``: .. code-block:: bash # The "--missing" switch also tells us what files the remote does not possess: $ brig diff bob --missing • ├── _ hello.world ├── + videos/ └── README.md ⇄ README.md This output resembles the one we saw from ``brig tree`` earlier. Each node in this tree tells us about something that would happen when we merge. The prefix of each file and the color in the terminal indicate what would happen with this file. Refer to the table below to see what prefix relates to what action: ====== ==================================================================== Symbol Description ====== ==================================================================== ``+`` This file is only present on the remote side. ``-`` This file was removed on the remote side. ``→`` This file was moved to a new location. ``*`` This file was ignored because we chose to, due to our settings. ``⇄`` Both sides have changes, but they are compatible and can be merged. ``⚡`` Both sides have changes, but they are incompatible and result in conflict files. ``_`` This file is missing on the remote side (needs to be enabled with ``--missing``) ====== ==================================================================== .. note:: Remember that ``brig`` does not do any actual diffs between files, i.e. it will not show you what line changed. It does not care a lot about the content. It only records how the file metadata changes and what content hash the file has at a certain point. If you prefer a more traditional view, similar to ``git``, you can use ``--list`` on ``brig diff``. So in the above output we can tell that *Bob* added the directory ``/videos``, but does not possess the ``/hello.world`` file. 
He also apparently modified ``README.md``, but since we did not, it's safe for us to take over his changes. If we sync now we will get this directory from him: .. code-block:: bash $ brig sync bob $ brig ls SIZE MODTIME OWNER PATH PIN 443 B Dec 27 14:44:44 ali /README.md 🖈 443 B Dec 27 14:44:44 bob /README.md.conflict.0 12 B Dec 27 15:14:16 ali /hello.world 🖈 32 GB Dec 27 15:14:16 bob /videos You might notice that the ``sync`` step took only around one second, even though ``/videos`` is 32 GB in size. This is because ``sync`` **does not transfer actual data**. It only transferred the metadata, while the actual data will only be loaded when required. This might sound a little inconvenient at first. When I want to watch the video, I'd prefer to have it cached locally before viewing it to avoid stuttering playback. If you plan to use the files immediately, you should be using pinning (see :ref:`pinning-section`) Data retrieval ~~~~~~~~~~~~~~ If the data is not on your local machine, where is it then? Thanks to IPFS it can be transferred from any other peer that caches this particular content. Content is usually cached when the peer either really stores this file or if this peer recently used this content. In the latter case it will still be available in its cache. This property is particularly useful when having a small device for viewing data (e.g. a smartphone, granted ``brig`` would run there) and a big machine that acts as storage server (e.g. a desktop). How are the files secure then if they essentially could be everywhere? Every file is encrypted by ``brig`` before giving it to IPFS. The encryption key is part of the metadata and is only available to the peers that you chose to synchronize with. Think of each brig repository only as a cache for the whole network it is in. Partial synchronisation ~~~~~~~~~~~~~~~~~~~~~~~ Sometimes you only want to share certain things with certain people. 
You probably want to share all your ``/photos`` directory with your significant other, but not with your fellow students. On the other hand you maybe want to share the ``/lectures`` folder with them. In ``brig`` you can define what folder you want to share with what remote. If you do not limit this, **all folders will be open to a remote by default.** Also note, that if a remote already got some content of a folder you did not want to share, he will still be able to access it. If you're unsure, you should better be restrictive than too permissive. To add a folder for a specific remote, you can use the ``folders`` subcommand of ``brig remote``: .. code-block:: bash # Starting with next sync, bob will only see the /videos folder: $ brig remote folder add bob /videos $ brig remote folder ls bob /videos If you're tired of typing all of this, be reminded that there are very short aliases for most subcommands: .. code-block:: bash $ brig rmt f a bob /videos In some cases you might not trust your peers with some folders or don't want to have modifications in that specific folder. For this case, ``brig`` supports adding a folder as ``--read-only``. Other remotes still will have access to the folder, but whenever we sync with them the changes they made are ignored. You can add a read-only folder by adding the ``--read-only`` switch to the command above: .. code-block:: bash $ brig rmt f a bob /videos --read-only .. note:: If you want to overwrite an existing folder with new settings, you can use the ``set`` subcommand: .. code-block:: bash $ brig remote folder set bob /videos -c embrace --read-only See below for explanation on those additional options. Conflicts ~~~~~~~~~ Whenever two repositories have a file at the same path, ``brig`` needs to do some conflict resolving. If those files are equal or if they share common history and did not diverge there is nothing to fear. But what if both sides have different versions of a file without common history? 
In this case ``brig`` offers you to handle conflict by one of the three strategies: * ``ignore``: Ignore the change from the remote side. * ``embrace``: Ignore our state and take over the remote's change. * ``marker``: Create a conflict file with the same name but a ``.conflict`` ending. Leave it to the user to resolve the conflict. This is the **default.** You can configure this behavior by using ``brig cfg``: .. code-block:: bash $ brig cfg set fs.sync.conflict_strategy marker In some cases this might not be enough though. Sometimes you might want to say »I trust this remote, always accept their changes«. You can do this by setting the conflict strategy per remote. If no specific conflict strategy is set, ``fs.sync.conflict_strategy`` is used. You can set the strategy by using a subcommand of the ``brig remote`` family: .. code-block:: bash # Always take the versions of bob on conflicts: $ brig remote conflict-strategy embrace bob Still not enough? You can also set the conflict strategy per folder. This will trump the per-remote folder strategy: .. code-block:: bash # Use the default in all folders but use "embrace" in this one: $ brig remote folder add bob /collab -c embrace Automatic Updating ~~~~~~~~~~~~~~~~~~ .. warning:: This feature is experimental and builds upon the also experimental pubsub experiment of the IPFS project. Use with care. If you do not want to hit ``brig sync`` every time somebody in the network changed something, you can enable the automatic updating for any remote you like. Let's suppose we are ``ali`` and want to receive updates on every change of ``bob``, we should simply add the following: .. code-block:: bash $ brig remote auto-update enable bob # (You can also abbreviate most of that:) # brig rmt au e bob Alternatively, we could have used the ``-a`` switch when adding ``bob`` as remote: .. 
code-block:: bash $ brig remote add bob -a In any case, an initial sync is performed with this remote and a sync on every change that ``bob`` published. Keep in mind that ``bob`` will not receive your updates by default, he needs to decide to use auto updating for himself. You can watch the times when your repository was updated automatically by looking at ``brig log``: .. code-block:: bash $ brig log - Sun Dec 16 18:24:27 CET 2018 • (curr) W1kGKKviWCBY Sun Dec 16 18:24:27 CET 2018 sync due to notification from »bob« (head) ... Pushing changes ~~~~~~~~~~~~~~~ As you saw above, doing a ``brig sync`` won't do a bidirectional synchronisation. It will only fetch metadata from the remote and modify our local state with it. In some cases you might want to push data to a remote - especially when it is on one of your machines and you use for example as archival repository. By default pushing to a remote is rejected. You can enable it on a per-remote basis with this command out of the ``brig remote`` family of commands: .. code-block:: bash # Allow bob and charlie to auto push to us. $ brig remote auto-push enable bob charlie Now either ``bob`` or ``charlie`` can do this from their machines: .. code-block:: bash # bob's machine: $ brig push ali This will simply ask ``ali`` to do a sync with ``bob``. ================================================ FILE: docs/tutorial/vcs.rst ================================================ Version control --------------- One key feature of ``brig`` over other synchronisation tools is the built-in and quite capable version control. If you already know ``git`` that's a plus for this chapter since a lot of stuff will feel similar. This isn't a big surprise, since ``brig`` implements something like ``git`` internally. Don't worry, knowing ``git`` is however not needed at all for this chapter. 
Key concepts ~~~~~~~~~~~~ I'd like you to keep the following mantra in your head when thinking about versioning (repeating before you go to sleep may or may not help): **Metadata and actual data are separated.** This means that a repository may contain metadata about many files, including older versions of them. However, it is not guaranteed that a repository caches all actual data for each file or version. This is solely controlled by pinning described in the :ref:`pinning-section` section. If you check out earlier versions of a file, you're always able to see the metadata of it, but being able to view the actual data depends on having a peer that is being able to deliver the data in your network (which might be yourself). So in short: ``brig`` **only versions metadata and links to the respective data for each version**. This is a somewhat novel approach to versioning, so feel free to re-read the last paragraph, since we've found that it does not quite fit what most people are used to. Together with pinning this offers a high degree of freedom on how you can decide what repositories store what data. The price is that this fine-tuned control can get a little annoying. Future versions of ``brig`` will try to solve that. For some more background, you can invoke ``brig info`` to see what metadata is being saved per file version: .. code-block:: bash $ brig show README.md Path /README.md User ali Type file Size 832 bytes Inode 4 Pinned yes Explicit no ModTime 2018-10-14T22:46:00+02:00 Tree Hash W1gX8NMQ9m8SBnjHRGtamRAjJewbnSgi6C1P7YEunfgTA3 Content Hash W1pzHcGbVpXaePa1XpehW4HGPatDUJs8zZzSRbpNCGbN2u Backend Hash QmPvNjR1h56EFK1Sfb7vr7tFJ57A4JDJS9zwn7PeNbHCsK Most of it should be no big surprise. It might be a small surprise that three hashes are stored per file. The ``Backend Hash`` is really the link to the actual data. If you'd type ``ipfs cat QmPvNjR1h56EFK1Sfb7vr7tFJ57A4JDJS9zwn7PeNbHCsK`` you will get the encrypted version of your file dumped to your terminal. 
The ``Content Hash`` is being calculated before the encryption and is the same for two files with the same content. The ``Tree Hash`` is a hash that uniquely identifies this specific node for internal purposes. The ``Inode`` is a number that stays unique over the lifetime of a file (including moves and removes). It is used mostly in the FUSE filesystem. Commits ~~~~~~~ Now that we know that only metadata is versioned, we have to ask »what is the smallest unit of modification that can be saved?«. This smallest unit is a commit. A commit can be seen as a snapshot of the whole repository. The command ``brig log`` shows you a list of commits that were made already: .. code-block:: bash - Sun Oct 14 22:46:00 CEST 2018 • (curr) W1kAySD3aKLt Sun Oct 14 22:46:00 CEST 2018 user: Added ali-file (head) W1ocyBsS28SD Sun Oct 14 22:46:00 CEST 2018 user: Added initial README.md W1D9KsLNnAv4 Sun Oct 14 22:46:00 CEST 2018 initial commit (init) Each commit is identified by a hash (e.g. ``W1kAySD3aKLt``) and records the time when it was created. Apart from that, there is a message that describes the commit in some way. In contrast to ``git``, **commits are rarely done by the user themselve**. More often they are done by ``brig`` when synchronizing. All commits form a long chain (**no branches**, just a linear chain) with the very first empty commit called ``init`` and the still unfinished commit called ``curr``. Directly below ``curr`` there is the last finished commit called ``head``. .. note:: ``curr`` is what ``git`` users would call the staging area. While the staging area in ``git`` is "special", the ``curr`` commit can be used like any other one, with the sole difference that it does not have a proper hash yet. Sometimes you might want to do a snapshot or »savepoint« yourself. In this case you can do a commit yourself: .. 
code-block:: bash $ brig touch A_NEW_FILE $ brig commit -m 'better leave some breadcrumbs' $ brig log | head -n 2 - Mon Oct 15 00:27:37 CEST 2018 • (curr) W1hZoY7TrxyK Sun Oct 14 22:46:00 CEST 2018 user: better leave some bread crumbs (head) This snapshot can be useful later if you decide to revert to a certain version. The hash of the commit is of course hard to remember, so if you need it very often, you can give it a tag yourself. Tags are similar to the names, ``curr``, ``head`` and ``init`` but won't be changed by ``brig`` and won't move therefore: .. code-block:: bash # instead of "W1hZoY7TrxyK" you also could use "head" here: $ brig tag W1hZoY7TrxyK breadcrumbs $ brig log | grep breadcrumbs $ W1hZoY7TrxyK Sun Oct 14 22:46:00 CEST 2018 user: better leave some bread crumbs (breadcrumbs, head) File history ~~~~~~~~~~~~ Each file and directory in ``brig`` maintains its own history. Each entry of this history relates to exactly one distinct commit. In the life of a file or directory there are four things that can happen to it: - *added:* The file was added in this commit. - *moved:* The file was moved in this commit. - *removed:* The file was removed in this commit. - *modified:* The file's content (i.e. hash changed) was altered in this commit. You can check an individual file or directorie's history by using the ``brig history`` command: .. code-block:: bash # or "hst" for short: $ brig hst README.md CHANGE FROM TO WHEN added INIT W1ocyBsS28SD Oct 14 22:46:00 $ brig mv README.md README_LATER.md $ brig hst README_LATER.md CHANGE FROM TO HOW WHEN moved HEAD CURR /README.md → /README_LATER.md Oct 15 00:27:37 added INIT W1ocyBsS28SD Oct 14 22:46:0 As you can see, you will be shown one line per history entry. Each entry denotes which commit the change was in. Some commits were nothing was changed will be jumped over except if you pass ``--empty``. 
Viewing differences ~~~~~~~~~~~~~~~~~~~ If you're interested what changed in a range of your own commits, you can use the ``brig diff`` command as shown previously. The ``-s`` (``--self``) switch says that we want to compare only two of our own commits (as opposed to comparing with the commits of a remote). .. code-block:: bash # Let's compare the commit hashes from above: $ brig diff -s W1hZoY7TrxyK W1kAySD3aKLt • └── + A_NEW_FILE Often, those hashes are quite hard to remember and annoying to look up. That's why you can the special syntax ``^`` to denote that you want to go »one commit up«: .. code-block:: bash brig diff -s head head^ • └── + A_NEW_FILE # You can also use this several times: brig diff -s head^^^ head^^^^^ • └── + README.md If you just want to see what you changed since ``head``, you can simply type ``brig diff``. This is the same as ``brig diff -s curr head``: .. code-block:: bash $ brig diff • └── README.md → README_LATER.md $ brig diff -s curr head • └── README.md → README_LATER.md Reverting to previous state ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Until now we were only looking at the version history and didn't modify it. The most versatile command to do that is ``brig reset``. It is able to revert changes previously made: .. code-block:: bash # Reset to the "init" commit (the very first and empty commit) $ brig reset init $ brig ls # nothing, it's empty. The key here is that you did not loose any history: .. code-block:: bash $ brig log | head -2 - Mon Oct 15 00:51:12 CEST 2018 • (curr) W1hZoY7TrxyK Sun Oct 14 22:46:00 CEST 2018 user: better leave some bread crumbs (breadcrumbs) As you can see, we still have the previous commits. ``brig revert`` did one thing more than restoring the state of ``init`` and put that result in ``curr``. This also means that you can't really *modify* history. But you can revert it. Let's revert your complete wipe-out: .. 
code-block:: bash # Reset to the state we had in »breadcrumbs« $ brig reset breadcrumbs ``brig reset`` cannot only restore old commits, but individual files and directories: .. code-block:: bash $ brig reset head^^ README.md .. note:: It is a good idea to do a ``brig commit`` before a ``brig reset``. Since it modifies ``curr`` you might loose uncommitted changes. It will warn you about that, but you can overwrite that warning with ``--force``. If you did a ``brig commit`` you can simply use ``brig reset head`` to go back to the last good state. ================================================ FILE: events/backend/backend.go ================================================ package backend import ( "context" "io" ) // Message is returned by Subscribe. // It encapsulates a single event message coming // from another remote. type Message interface { // Data is the data that is sent alongside the message. Data() []byte // Source is the addr of the remote. Source() string } // Subscription is an iterator like interface for accessing and listening // for messages from other remotes. type Subscription interface { io.Closer // Next blocks until receiving a new message or fails with // context.Canceled if the cancel func was called. Next(ctx context.Context) (Message, error) } // Backend is the backend that backends of the event subsystem must fulfill. type Backend interface { // Subscribe returns a new Subscription iterator for `topic`. Subscribe(ctx context.Context, topic string) (Subscription, error) // PublishEvent sends `data` to all listening remotes on `topic`. 
PublishEvent(topic string, data []byte) error } ================================================ FILE: events/capnp/events_api.capnp ================================================ using Go = import "/go.capnp"; @0xfc8938b535319bfe; $Go.package("capnp"); $Go.import("github.com/sahib/brig/events/capnp"); struct Event $Go.doc("") { type @0 :Text; } ================================================ FILE: events/capnp/events_api.capnp.go ================================================ // Code generated by capnpc-go. DO NOT EDIT. package capnp import ( capnp "zombiezen.com/go/capnproto2" text "zombiezen.com/go/capnproto2/encoding/text" schemas "zombiezen.com/go/capnproto2/schemas" ) type Event struct{ capnp.Struct } // Event_TypeID is the unique identifier for the type Event. const Event_TypeID = 0x9c032508b61d1d09 func NewEvent(s *capnp.Segment) (Event, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) return Event{st}, err } func NewRootEvent(s *capnp.Segment) (Event, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) return Event{st}, err } func ReadRootEvent(msg *capnp.Message) (Event, error) { root, err := msg.RootPtr() return Event{root.Struct()}, err } func (s Event) String() string { str, _ := text.Marshal(0x9c032508b61d1d09, s.Struct) return str } func (s Event) Type() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s Event) HasType() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s Event) TypeBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s Event) SetType(v string) error { return s.Struct.SetText(0, v) } // Event_List is a list of Event. type Event_List struct{ capnp.List } // NewEvent creates a new list of Event. 
func NewEvent_List(s *capnp.Segment, sz int32) (Event_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) return Event_List{l}, err } func (s Event_List) At(i int) Event { return Event{s.List.Struct(i)} } func (s Event_List) Set(i int, v Event) error { return s.List.SetStruct(i, v.Struct) } func (s Event_List) String() string { str, _ := text.MarshalList(0x9c032508b61d1d09, s.List) return str } // Event_Promise is a wrapper for a Event promised by a client call. type Event_Promise struct{ *capnp.Pipeline } func (p Event_Promise) Struct() (Event, error) { s, err := p.Pipeline.Struct() return Event{s}, err } const schema_fc8938b535319bfe = "x\xda\x12\xd0s`\x12d\x8dg`\x08dae\xfb" + "\xcf)+\xbb\x8dC\x95y\x0e\x83\xa0\x1c\xe3\xff\x7f\xb3" + "\x0dM\xb7Zt\xfea`edg`0\x14Ub" + "\x14Te\x17T\x95\x17t\xb5g`\xfc\x9fZ\x96\x9a" + "WR\xac\x9f\xcc\x9cX\x90W\xa0\x0f\xe1\xc5'\x16d" + "\xea%\x83\x04\xac\\\xcb\xd8S\xf3J\x02\x18\x19\x03Y" + "\x18\x99\xfe\xc7M\x9e\x1f\xb8\xf7Z\xd7Q\x86@\x16&" + "FG\x01FF\x1e\x06\x06AF.\x06\xc6@\x16f" + "\x16\x06\x06\x16F\x06\x06A^-\x06\x86@\x0ef\xc6" + "@\x11&F\xfe\x92\xca\x82TF\x1e\x06&F\x1e\x06" + "F@\x00\x00\x00\xff\xff\x11'$\xac" func init() { schemas.Register(schema_fc8938b535319bfe, 0x9c032508b61d1d09) } ================================================ FILE: events/docs.go ================================================ // Package events is the event notification subsystem of brig. // It uses the backend's capabilities (in case of IPFS we use pubsub) // to publish and subscribe to a topic of events. If an event was received // it is forwarded to the caller side in order to react on it. package events ================================================ FILE: events/event.go ================================================ package events import ( "fmt" capnp_model "github.com/sahib/brig/events/capnp" capnp "zombiezen.com/go/capnproto2" ) const ( // UnknownEvent should not happen in practice. 
UnknownEvent = EventType(1 << iota) // FsEvent tells other remotes that our filesystem changed. FsEvent // NetEvent indicates to other peers that our network status changed. NetEvent ) // EventType is the type of a type EventType int // String returns a human readable representation of the event type func (ev EventType) String() string { switch ev { case FsEvent: return "fs" case NetEvent: return "net" default: return "unknown" } } // EventFromString tries to parse `ev` as event type. // If it fails, an error will be returned. func EventFromString(ev string) (EventType, error) { switch ev { case "fs": return FsEvent, nil case "net": return NetEvent, nil default: return UnknownEvent, fmt.Errorf("unknown EventType type: %s", ev) } } // Event is a event that can be published or received by the event subsystem. type Event struct { Type EventType Source string } func (msg *Event) encode() ([]byte, error) { capMsg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) if err != nil { return nil, err } capEv, err := capnp_model.NewRootEvent(seg) if err != nil { return nil, err } if err := capEv.SetType(msg.Type.String()); err != nil { return nil, err } return capMsg.Marshal() } func decodeMessage(data []byte) (*Event, error) { if data == nil { return nil, fmt.Errorf("received empty event") } capMsg, err := capnp.Unmarshal(data) if err != nil { return nil, err } capEv, err := capnp_model.ReadRootEvent(capMsg) if err != nil { return nil, err } capEvType, err := capEv.Type() if err != nil { return nil, err } ev, err := EventFromString(capEvType) if err != nil { return nil, err } return &Event{Type: ev}, nil } func dedupeEvents(evs []Event) []Event { seen := make(map[EventType]map[string]bool) dedupEvs := []Event{} for _, ev := range evs { seenSources, ok := seen[ev.Type] if ok { if seenSources[ev.Source] { continue } } else { seenSources = make(map[string]bool) seen[ev.Type] = seenSources } dedupEvs = append(dedupEvs, ev) seen[ev.Type][ev.Source] = true } return dedupEvs } 
================================================ FILE: events/listener.go ================================================ package events import ( "context" "fmt" "io" "sync" "time" "github.com/sahib/brig/events/backend" "github.com/sahib/config" log "github.com/sirupsen/logrus" "golang.org/x/time/rate" ) const ( brigEventTopicPrefix = "brig/events/" maxBurstSize = 100 ) // Listener listens to incoming events from other remotes. // For every event, a registered callback can be executed. // It does not implement net.Listener and is only similar from a concept POV. type Listener struct { mu sync.Mutex bk backend.Backend cfg *config.Config callbacks map[EventType][]callback cancels map[string]context.CancelFunc evSendCh chan Event evRecvCh chan Event ownAddr string isClosed bool } type callback struct { fn func(*Event) notifyOnOwn bool } // NewListener constructs a new listener. // `cfg` is used to read the event subsystem config. // `bk` is a events.Backend. // `ownAddr` is the addr of our own node. func NewListener(cfg *config.Config, bk backend.Backend, ownAddr string) *Listener { lst := &Listener{ bk: bk, cfg: cfg, ownAddr: ownAddr, callbacks: make(map[EventType][]callback), cancels: make(map[string]context.CancelFunc), evSendCh: make(chan Event, maxBurstSize), evRecvCh: make(chan Event, maxBurstSize), } go lst.eventSendLoop() go lst.eventRecvLoop() return lst } // Close will close all open listeners and clean up internal resources. func (lst *Listener) Close() error { lst.mu.Lock() defer lst.mu.Unlock() if lst.isClosed { return nil } close(lst.evSendCh) close(lst.evRecvCh) for _, cancel := range lst.cancels { cancel() } lst.isClosed = true return nil } // RegisterEventHandler remembers that `hdl` should be called whenever a event // of type `ev` is being received. If `notifyOnOwn` is true, the handler // will only be called for changes that came from our own node. 
If it is `false` // it will only be called for func (lst *Listener) RegisterEventHandler(ev EventType, notifyOnOwn bool, hdl func(ev *Event)) { lst.mu.Lock() defer lst.mu.Unlock() if lst.isClosed { return } lst.callbacks[ev] = append(lst.callbacks[ev], callback{ fn: hdl, notifyOnOwn: notifyOnOwn, }) } func eventLoop(evCh chan Event, interval time.Duration, rps float64, fn func(ev Event)) { tckr := time.NewTicker(interval) defer tckr.Stop() // Use a time window approach to dedupe incoming events // and to process them in a batch (in order to avoid work) // We still rate limit while processing too many at the same time. events := []Event{} lim := rate.NewLimiter(rate.Limit(rps), maxBurstSize) for { select { case <-tckr.C: // Flush phase. Deduple all events and send them out to the handler // in a possibly time throttled manner. events = dedupeEvents(events) if len(events) == 0 { continue } // Apply the rate limiting only after r := lim.ReserveN(time.Now(), len(events)) if !r.OK() { // would only happen if the burst size is too big. // drop all events in this special case. 
events = []Event{} continue } delay := r.Delay() for _, ev := range events { fn(ev) // spread the work over the processing of all events: time.Sleep(delay / time.Duration(len(events))) } events = []Event{} case ev, ok := <-evCh: if !ok { return } if len(events) > maxBurstSize { // drop events if the list gets too big: continue } events = append(events, ev) } } } func (lst *Listener) eventRecvLoop() { recvInterval := lst.cfg.Duration("recv_interval") recvMaxEvRPS := lst.cfg.Float("recv_max_events_per_second") eventLoop(lst.evRecvCh, recvInterval, recvMaxEvRPS, func(ev Event) { lst.mu.Lock() if cbs, ok := lst.callbacks[ev.Type]; ok { for _, cb := range cbs { if !cb.notifyOnOwn { go cb.fn(&ev) } } } lst.mu.Unlock() }) } func (lst *Listener) eventSendLoop() { ownTopic := brigEventTopicPrefix + lst.ownAddr sendInterval := lst.cfg.Duration("send_interval") sendMaxEvRPS := lst.cfg.Float("send_max_events_per_second") eventLoop(lst.evSendCh, sendInterval, sendMaxEvRPS, func(ev Event) { data, err := ev.encode() if err != nil { log.Errorf("event: failed to encode: %v", err) return } if err := lst.bk.PublishEvent(ownTopic, data); err != nil { log.Errorf("event: failed to publish: %v", err) return } }) } func (lst *Listener) publishToSelf(ev Event) { if cbs, ok := lst.callbacks[ev.Type]; ok { for _, cb := range cbs { if cb.notifyOnOwn { go cb.fn(&ev) } } } } // PublishEvent notifies other peers that something on our // side changed. The "something" is defined by `ev`. // PublishEvent does not block. func (lst *Listener) PublishEvent(ev Event) error { lst.mu.Lock() defer lst.mu.Unlock() if lst.isClosed { return nil } if !lst.cfg.Bool("enabled") { return nil } // Some submodules (like the gateway) also want to be notified // when other parts of the same server (fuse, cmdline) changed something. lst.publishToSelf(ev) // Only send the event if we are not clogged up yet. // We prioritze the well-being of other systems more by // not allowing PublishEvent to block. 
select { case lst.evSendCh <- ev: return nil default: return fmt.Errorf("lost event: %v", ev) } } // SetupListeners sets up the listener to receive events from any of `addrs`. // If `ctx` is being canceled, all listeners will stop. // SetupListeners can be called several times, each time overwriting and stopping // previous listeners. func (lst *Listener) SetupListeners(ctx context.Context, addrs []string) error { if lst.isClosed { return nil } seen := make(map[string]bool) for _, addr := range addrs { seen[addr] = true cancel, ok := lst.cancels[addr] if ok { // We already have a listener for this. continue } ctx, cancel := context.WithCancel(ctx) lst.cancels[addr] = cancel go lst.listenSingle(ctx, brigEventTopicPrefix+addr) } // cancel all listeners that are not needed anymore. for addr, cancel := range lst.cancels { if !seen[addr] { cancel() } } return nil } func (lst *Listener) listenSingle(ctx context.Context, topic string) error { sub, err := lst.bk.Subscribe(ctx, topic) if err != nil { return err } defer sub.Close() log.Debugf("listening for events on %s", topic) defer log.Debugf("event listener on %s closing", topic) for { if !lst.cfg.Bool("enabled") { // Do not grind the cpu if it is not enabled. time.Sleep(2 * time.Second) continue } msg, err := sub.Next(ctx) if msg == nil { // Sometimes we might have a case where a ipfs daemon // returns an empty message very often - just sleep a bit // to save the cpu. 
time.Sleep(500 * time.Millisecond) continue } if err == io.EOF || err == context.Canceled { return nil } else if err != nil { return err } if msg.Source() == lst.ownAddr { continue } ev, err := decodeMessage(msg.Data()) if err != nil { log.Warningf("received bad message: %v", err) continue } ev.Source = msg.Source() if lst.isClosed { break } select { case lst.evRecvCh <- *ev: default: log.Warningf("dropped incoming event: %v", ev) } } return nil } ================================================ FILE: events/listener_test.go ================================================ package events import ( "context" "testing" "time" "github.com/sahib/brig/defaults" "github.com/sahib/brig/events/mock" "github.com/sahib/config" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" ) func withEventListener(t *testing.T, ownAddr string, fn func(lst *Listener)) { cfg, err := config.Open(nil, defaults.Defaults, config.StrictnessPanic) require.Nil(t, err) cfg.SetDuration("events.recv_interval", time.Millisecond*1) cfg.SetDuration("events.send_interval", time.Millisecond*1) cfg.SetFloat("events.recv_max_events_per_second", 0.1) cfg.SetFloat("events.send_max_events_per_second", 0.1) evb := mock.NewEventsBackend(ownAddr) lst := NewListener(cfg.Section("events"), evb, ownAddr) fn(lst) require.Nil(t, lst.Close()) } func withEventListenerPair(t *testing.T, addrA, addrB string, fn func(lstA, lstB *Listener)) { withEventListener(t, addrA, func(lstA *Listener) { withEventListener(t, addrB, func(lstB *Listener) { fn(lstA, lstB) }) }) } func TestBasicRun(t *testing.T) { log.SetLevel(log.DebugLevel) withEventListenerPair(t, "a", "b", func(lstA, lstB *Listener) { eventReceived := false lstB.RegisterEventHandler(FsEvent, false, func(ev *Event) { require.Equal(t, "a", ev.Source) require.Equal(t, FsEvent, ev.Type) eventReceived = true }) require.Nil(t, lstB.SetupListeners(context.Background(), []string{"a"})) for i := 0; i < 100; i++ { require.Nil(t, lstA.PublishEvent(Event{Type: 
FsEvent})) } time.Sleep(500 * time.Millisecond) require.True(t, eventReceived) // Do a double close: require.Nil(t, lstA.Close()) require.Nil(t, lstA.PublishEvent(Event{Type: NetEvent})) time.Sleep(200 * time.Millisecond) }) } ================================================ FILE: events/mock/mock.go ================================================ package mock import ( "context" "sync" eventsBackend "github.com/sahib/brig/events/backend" ) var subs map[string][]*mockSubscription var subsLock sync.Mutex func init() { subs = make(map[string][]*mockSubscription) } // EventsBackend fakes the event backend by setting up a very basic // message broker in memory and tunneling all messages over it. type EventsBackend struct { ownAddr string } // NewEventsBackend returns a new EventsBackend func NewEventsBackend(ownAddr string) *EventsBackend { return &EventsBackend{ ownAddr: ownAddr, } } type mockMessage struct { data []byte source string } func (mm mockMessage) Data() []byte { return mm.data } func (mm mockMessage) Source() string { return mm.source } type mockSubscription struct { msgs chan mockMessage } func (ms *mockSubscription) Next(ctx context.Context) (eventsBackend.Message, error) { select { case <-ctx.Done(): return nil, ctx.Err() case msg := <-ms.msgs: return msg, nil } } func (ms *mockSubscription) Close() error { return nil } // Subscribe is a mock implementation meant for testing. func (mb *EventsBackend) Subscribe(ctx context.Context, topic string) (eventsBackend.Subscription, error) { subsLock.Lock() defer subsLock.Unlock() newSub := &mockSubscription{ msgs: make(chan mockMessage, 100), } subs[topic] = append(subs[topic], newSub) return newSub, nil } // PublishEvent is a mock implementation meant for testing. 
func (mb *EventsBackend) PublishEvent(topic string, data []byte) error { subsLock.Lock() defer subsLock.Unlock() subs, ok := subs[topic] if !ok { return nil } for _, sub := range subs { dataCopy := make([]byte, len(data)) copy(dataCopy, data) sub.msgs <- mockMessage{ data: dataCopy, source: mb.ownAddr, } } return nil } ================================================ FILE: fuse/directory.go ================================================ // +build !windows package fuse import ( "os" "path" "time" "context" "bazil.org/fuse" "bazil.org/fuse/fs" log "github.com/sirupsen/logrus" ) // Directory represents a directory node. type Directory struct { path string m *Mount } // Attr is called to retrieve stat-metadata about the directory. func (dir *Directory) Attr(ctx context.Context, attr *fuse.Attr) error { defer logPanic("dir: attr") debugLog("Exec dir attr: %v", dir.path) info, err := dir.m.fs.Stat(dir.path) if err != nil { return errorize("dir-attr", err) } // Act like the file is owned by the user of the brig process. attr.Uid = uint32(os.Getuid()) attr.Gid = uint32(os.Getgid()) attr.Mode = os.ModeDir | 0755 attr.Size = info.Size attr.Mtime = info.ModTime attr.Inode = info.Inode return nil } // Lookup is called to lookup a direct child of the directory. func (dir *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) { defer logPanic("dir: lookup") debugLog("Exec lookup: %v", name) if name == "." { return dir, nil } if name == ".." && dir.path != "/" { return &Directory{path: path.Dir(dir.path), m: dir.m}, nil } var result fs.Node childPath := path.Join(dir.path, name) info, err := dir.m.fs.Stat(childPath) if err != nil { return nil, errorize("dir-lookup", err) } if info.IsDir { result = &Directory{path: childPath, m: dir.m} } else { result = &File{path: childPath, m: dir.m} } return result, nil } // Mkdir is called to create a new directory node inside the receiver. 
func (dir *Directory) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { defer logPanic("dir: mkdir") debugLog("fuse-mkdir: %v", req.Name) childPath := path.Join(dir.path, req.Name) if err := dir.m.fs.Mkdir(childPath, false); err != nil { log.WithFields(log.Fields{ "path": childPath, "error": err, }).Warning("fuse-mkdir failed") return nil, fuse.EIO } notifyChange(dir.m, 100*time.Millisecond) return &Directory{path: childPath, m: dir.m}, nil } // Create is called to create an opened file or directory as child of the receiver. func (dir *Directory) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { defer logPanic("dir: create") var err error debugLog("fuse-create: %v", req.Name) childPath := path.Join(dir.path, req.Name) switch { case req.Mode&os.ModeDir != 0: err = dir.m.fs.Mkdir(childPath, false) default: err = dir.m.fs.Touch(childPath) } if err != nil { log.WithFields(log.Fields{ "path": childPath, "error": err, }).Warning("fuse-create failed") return nil, nil, fuse.EIO } fd, err := dir.m.fs.Open(childPath) if err != nil { return nil, nil, errorize("fuse-dir-create", err) } notifyChange(dir.m, 100*time.Millisecond) file := &File{path: childPath, m: dir.m} return file, &Handle{fd: fd, m: dir.m}, nil } // Remove is called when a direct child in the directory needs to be removed. func (dir *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error { defer logPanic("dir: remove") path := path.Join(dir.path, req.Name) if err := dir.m.fs.Remove(path); err != nil { log.Errorf("fuse: dir-remove: `%s` failed: %v", path, err) return fuse.ENOENT } notifyChange(dir.m, 100*time.Millisecond) return nil } // ReadDirAll is called to get a directory listing of the receiver. 
func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { defer logPanic("dir: readdirall") debugLog("Exec read dir all") selfInfo, err := dir.m.fs.Stat(dir.path) if err != nil { log.Debugf("Failed to stat: %v", dir.path) return nil, errorize("fuse-dir-ls-stat", err) } parentDir := path.Dir(dir.path) parInfo, err := dir.m.fs.Stat(parentDir) if err != nil { log.Debugf("Failed to stat parent: %v", parentDir) return nil, errorize("fuse-dir-ls-stat-par", err) } fuseEnts := []fuse.Dirent{ { Inode: selfInfo.Inode, Type: fuse.DT_Dir, Name: ".", }, { Inode: parInfo.Inode, Type: fuse.DT_Dir, Name: "..", }, } entries, err := dir.m.fs.List(dir.path, 1) if err != nil { log.Warningf("Failed to list entries: %v", dir.path) return nil, errorize("fuse-dir-readall", err) } for _, entry := range entries { childType := fuse.DT_File if entry.IsDir { childType = fuse.DT_Dir } // If we return the same path (or just "/") to fuse // it will return a EIO to userland. Weird. if entry.Path == "/" || entry.Path == dir.path { continue } fuseEnts = append(fuseEnts, fuse.Dirent{ Inode: entry.Inode, Type: childType, Name: path.Base(entry.Path), }) } return fuseEnts, nil } // Rename or move files or directories // TODO: fix info availability, // somehow the info about moved item is not visible for a little while after move // It usually available after a second or two. // How to reproduce // mv file1 file2 // ls -l file2 // You will see that username, permission, size, date, and so on all in question marks // For what I can see. 
ls cannot access this particular file, even though // It will appear as an entry in the call to ReadDirAll done by `ls on_dir` // Seems to be cache related issue func (dir *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { defer logPanic("dir: rename") debugLog("exec dir rename") newParent, ok := newDir.(*Directory) if !ok { return fuse.EIO } oldPath := path.Join(dir.path, req.OldName) newPath := path.Join(newParent.path, req.NewName) if err := dir.m.fs.Move(oldPath, newPath); err != nil { log.Warningf("fuse: dir: mv: %v", err) return err } notifyChange(dir.m, 100*time.Millisecond) return nil } // Getxattr is called to get a single xattr (extended attribute) of a directory. func (dir *Directory) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { defer logPanic("dir: getxattr") // Do not worry about req.Size // fuse will cut it to allowed size and report to the caller that buffer need to be larger xattrs, err := getXattr(dir.m.fs, req.Name, dir.path) if err != nil { return err } resp.Xattr = xattrs return nil } // Setxattr is called by the setxattr syscall. func (dir *Directory) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { defer logPanic("dir: setxattr") return setXattr(dir.m.fs, req.Name, dir.path, req.Xattr) } // Listxattr is called to list all xattrs of this directory. func (dir *Directory) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { defer logPanic("dir: listxattr") // Do not worry about req.Size // fuse will cut it to allowed size and report to the caller that buffer need to be larger resp.Xattr = listXattr() return nil } var _ = fs.NodeGetxattrer(&Directory{}) var _ = fs.NodeListxattrer(&Directory{}) ================================================ FILE: fuse/doc.go ================================================ // Package fuse implements a FUSE layer for brig. 
// Using it, a repository may be represented as a "normal" directory. // There are three different structs in the FUSE API: // // - fuse.Node : A file or a directory (depending on it's type) // - fuse.FS : The filesystem. Used to find out the root node. // - fuse.Handle: An open file. // // This implementation offers File (a fuse.Node and fuse.Handle), // Dir (fuse.Node) and FS (fuse.FS). // // Fuse will call the respective handlers if it needs information about your // nodes. Each request handlers will usually get a `ctx` used to cancel // operations, a request structure `req` with detailed query infos and // a response structure `resp` where results are written. Usually the request // handlers might return an error or a new node/handle/fs. // // Every request handle that may run for a long time should be // made interruptible. Especially read and write operations should // check the ctx.Done() channel passed to each request handler. package fuse ================================================ FILE: fuse/file.go ================================================ // +build !windows package fuse import ( "context" "errors" "fmt" "os" "sync" log "github.com/sirupsen/logrus" "bazil.org/fuse" "bazil.org/fuse/fs" ) var ( // ErrNotCached is returned in offline mode when we don't have a file ErrNotCached = errors.New("content is not cached and need to be downloaded") // ErrTooManyWriters is returned when writers counter is about to be overfilled ErrTooManyWriters = errors.New("too many writers for the file") ) // File is a file inside a directory. type File struct { mu sync.Mutex // used during handle creation path string m *Mount hd *Handle } // Attr is called to get the stat(2) attributes of a file. 
func (fi *File) Attr(ctx context.Context, attr *fuse.Attr) error { defer logPanic("file: attr") log.Debugf("fuse-file-attr: %v", fi.path) info, err := fi.m.fs.Stat(fi.path) if err != nil { return err } debugLog("exec file attr: %v", fi.path) var filePerm os.FileMode = 0640 attr.Mode = filePerm if fi.m.options.Offline { isCached, err := fi.m.fs.IsCached(fi.path) if err != nil || !isCached { if err != nil { log.Errorf("IsCached failed for %s with error : %v", fi.path, err) } // Uncached file will be shown as symlink // We cannot read them in Offline mode, // but we can delete such link and overwrite its content attr.Mode = os.ModeSymlink | filePerm } } attr.Size = info.Size attr.Mtime = info.ModTime attr.Inode = info.Inode // Act like the file is owned by the user of the brig process. attr.Uid = uint32(os.Getuid()) attr.Gid = uint32(os.Getgid()) // tools like `du` rely on this for size calculation // (assuming every fs block takes actual storage, but we only emulate this // here for compatibility; see man 2 stat for the why for "512") attr.BlockSize = 4096 attr.Blocks = info.Size / 512 if info.Size%uint64(512) > 0 { attr.Blocks++ } return nil } // Open is called to get an opened handle of a file, suitable for reading and writing. func (fi *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { defer logPanic("file: open") debugLog("fuse-open: %s", fi.path) log.Debugf("fuse-file-open: %v with request %v", fi.path, req) // Check if the file is actually available locally. 
if fi.m.options.Offline { isCached, err := fi.m.fs.IsCached(fi.path) if err != nil { return nil, errorize("file-is-cached", err) } if !isCached { return nil, errorize("file-not-cached", ErrNotCached) } } fd, err := fi.m.fs.Open(fi.path) if err != nil { return nil, errorize("file-open", err) } fi.mu.Lock() if fi.hd == nil { hd := Handle{fd: fd, m: fi.m, wasModified: false} fi.hd = &hd } fi.hd.fd = fd fi.mu.Unlock() resp.Flags |= fuse.OpenKeepCache return fi.hd, nil } // Setattr is called once an attribute of a file changes. // Most importantly, size changes are reported here, e.g. after truncating a // file, the size change is noticed here before Open() is called. func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { defer logPanic("file: setattr") log.Debugf("fuse-file-setattr: request %v", req) // This is called when any attribute of the file changes, // most importantly the file size. For example it is called when truncating // the file to zero bytes with a size change of `0`. switch { case req.Valid&fuse.SetattrSize != 0: if err := fi.hd.truncate(req.Size); err != nil { return errorize("file-setattr-size", err) } case req.Valid&fuse.SetattrMtime != 0: if err := fi.m.fs.Touch(fi.path); err != nil { return errorize("file-setattr-mtime", err) } } return nil } // Fsync is called when any open buffers need to be written to disk. // Currently, fsync is completely ignored. func (fi *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { defer logPanic("file: fsync") log.Debugf("fuse-file-fsync: %v", fi.path) return nil } // Getxattr is called to get a single xattr (extended attribute) of a file. 
func (fi *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { defer logPanic("file: getxattr") // Do not worry about req.Size // fuse will cut it to allowed size and report to the caller that buffer need to be larger xattrs, err := getXattr(fi.m.fs, req.Name, fi.path) if err != nil { return err } resp.Xattr = xattrs return nil } // Setxattr is called by the setxattr syscall. func (fi *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { defer logPanic("file: setxattr") return setXattr(fi.m.fs, req.Name, fi.path, req.Xattr) } // Listxattr is called to list all xattrs of this file. func (fi *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { defer logPanic("file: listxattr") // Do not worry about req.Size // fuse will cut it to allowed size and report to the caller that buffer need to be larger resp.Xattr = listXattr() return nil } // Readlink reads a symbolic link. // This call is triggered when OS tries to see where symlink points func (fi *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { log.Debugf("fuse-file-readlink: %v", fi.path) info, err := fi.m.fs.Stat(fi.path) if err != nil { return "/brig/backend/ipfs/", err } return fmt.Sprintf("/brig/backend/ipfs/%s", info.BackendHash), nil } // Compile time checks to see which interfaces we implement: // Please update this list when modifying code here. 
var _ = fs.Node(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeGetxattrer(&File{}) var _ = fs.NodeListxattrer(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeSetattrer(&File{}) var _ = fs.NodeReadlinker(&File{}) var _ = fs.NodeSetxattrer(&File{}) // Other interfaces are available, but currently not needed or make sense: // var _ = fs.NodeRenamer(&File{}) // var _ = fs.NodeReadlinker(&File{}) // var _ = fs.NodeRemover(&File{}) // var _ = fs.NodeRemovexattrer(&File{}) // var _ = fs.NodeRequestLookuper(&File{}) // var _ = fs.NodeAccesser(&File{}) // var _ = fs.NodeForgetter(&File{}) // var _ = fs.NodeGetattrer(&File{}) // var _ = fs.NodeLinker(&File{}) // var _ = fs.NodeMkdirer(&File{}) // var _ = fs.NodeMknoder(&File{}) // var _ = fs.NodeStringLookuper(&File{}) // var _ = fs.NodeSymlinker(&File{}) ================================================ FILE: fuse/fs.go ================================================ // +build !windows package fuse import ( "bazil.org/fuse/fs" log "github.com/sirupsen/logrus" ) const ( enableDebugLogs = false ) func debugLog(format string, args ...interface{}) { if enableDebugLogs { log.Debugf(format, args...) } } // Filesystem is the entry point to the fuse filesystem type Filesystem struct { root string m *Mount } // Root returns the topmost directory node. // This depends on what the user choose to select, // but usually it's "/". func (fs *Filesystem) Root() (fs.Node, error) { return &Directory{path: fs.root, m: fs.m}, nil } ================================================ FILE: fuse/fstab.go ================================================ // +build !windows package fuse import ( "fmt" "os" "sort" "strings" "github.com/sahib/brig/util" "github.com/sahib/config" log "github.com/sirupsen/logrus" ) // FsTabAdd adds the mount at `path` with `name` and `opts` to `cfg`. // It does not yet do the mounting. 
func FsTabAdd(cfg *config.Config, name, path string, opts MountOptions) error { for _, key := range cfg.Keys() { if strings.HasSuffix(key, ".path") { if cfg.String(key) == path { return fmt.Errorf("mount `%s` at path `%s` already exists", name, path) } } } if cfg.String(name+".path") != "" { return fmt.Errorf("mount `%s` already exists", name) } if err := cfg.SetString(name+".path", path); err != nil { return err } if err := cfg.SetBool(name+".read_only", opts.ReadOnly); err != nil { return err } if err := cfg.SetBool(name+".offline", opts.Offline); err != nil { return err } if opts.Root == "" { opts.Root = "/" } return cfg.SetString(name+".root", opts.Root) } // FsTabRemove removes a mount. It does not directly unmount it, // call FsTabApply for this. func FsTabRemove(cfg *config.Config, name string) error { if !cfg.IsValidKey(name) { return fmt.Errorf("no such mount: %v", name) } return cfg.Reset(name) } // FsTabUnmountAll will unmount all currently mounted mounts. func FsTabUnmountAll(cfg *config.Config, mounts *MountTable) error { mounts.mu.Lock() defer mounts.mu.Unlock() errors := util.Errors{} for _, key := range cfg.Keys() { if strings.HasSuffix(key, ".path") { mountPath := cfg.String(key) log.Debugf("Unmount key %s %s", key, mountPath) if mountPath == "" { continue } if err := mounts.unmount(mountPath); err != nil { errors = append(errors, err) } } } return errors.ToErr() } // FsTabApply takes all configured mounts and makes sure that all of them are opened. 
func FsTabApply(cfg *config.Config, mounts *MountTable) error { mounts.mu.Lock() defer mounts.mu.Unlock() mountPaths := make(map[string]*MountOptions) for _, key := range cfg.Keys() { if strings.HasSuffix(key, ".path") { mountPath := cfg.String(key) entry := &MountOptions{} mountPaths[mountPath] = entry readOnlyKey := key[:len(key)-len(".path")] + ".read_only" entry.ReadOnly = cfg.Bool(readOnlyKey) offlineKey := key[:len(key)-len(".path")] + ".offline" entry.Offline = cfg.Bool(offlineKey) rootPathKey := key[:len(key)-len(".path")] + ".root" entry.Root = cfg.String(rootPathKey) if entry.Root == "" { entry.Root = "/" } } } errors := util.Errors{} for path, mount := range mounts.m { // Do not do anything when the path / options did not change. opts, isConfigured := mountPaths[path] if isConfigured && mount.EqualOptions(*opts) { delete(mountPaths, path) continue } if err := mounts.unmount(path); err != nil { errors = append(errors, err) } } for mountPath, options := range mountPaths { if err := os.MkdirAll(mountPath, 0700); err != nil { return err } if _, err := mounts.addMount(mountPath, *options); err != nil { errors = append(errors, err) } } return errors.ToErr() } // FsTabEntry is a representation of one entry in the filesystem tab. type FsTabEntry struct { Name string Path string Root string Active bool ReadOnly bool Offline bool } // FsTabList lists all entries in the filesystem tab in a nice way. 
func FsTabList(cfg *config.Config, mounts *MountTable) ([]FsTabEntry, error) { mounts.mu.Lock() defer mounts.mu.Unlock() mountMap := make(map[string]*FsTabEntry) for _, key := range cfg.Keys() { split := strings.Split(key, ".") if len(split) < 3 || split[0] != "mounts" { continue } mountName := split[1] if _, ok := mountMap[mountName]; !ok { mountMap[mountName] = &FsTabEntry{} } switch split[2] { case "path": path := cfg.String(key) mountMap[mountName].Path = path _, isActive := mounts.m[path] mountMap[mountName].Active = isActive case "read_only": mountMap[mountName].ReadOnly = cfg.Bool(key) case "offline": mountMap[mountName].Offline = cfg.Bool(key) case "root": mountMap[mountName].Root = cfg.String(key) } } sortedMounts := []FsTabEntry{} for name, entry := range mountMap { entry.Name = name sortedMounts = append(sortedMounts, *entry) } sort.Slice(sortedMounts, func(i, j int) bool { return sortedMounts[i].Name < sortedMounts[j].Name }) return sortedMounts, nil } ================================================ FILE: fuse/fuse_test.go ================================================ // +build !windows package fuse import ( "bytes" "context" "encoding/binary" "flag" "fmt" "io/ioutil" "net/http" "os" "path/filepath" "syscall" "testing" "github.com/sahib/brig/catfs" "github.com/sahib/brig/catfs/mio/pagecache/mdcache" "github.com/sahib/brig/defaults" "github.com/sahib/brig/util/testutil" "github.com/sahib/config" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "bazil.org/fuse/fs/fstestutil/spawntest" "bazil.org/fuse/fs/fstestutil/spawntest/httpjson" ) // The routines which manage fuse layer // and OS dependent code (os.Open, and similar) MUST BE SEPARATE OS PROCESSES. // Note: not different go routines but processes! // See https://github.com/bazil/fuse/issues/264#issuecomment-727269770 // This separation happens automatically during normal brig operations, but // TESTING FUSE LAYER IN GO IS VERY TRICKY. 
// See brig relevant discussion at // https://github.com/sahib/brig/pull/77#issuecomment-754831080 // However this issue is general for any go program from version 1.9, // as can be seen in references to the issue. // // bazil/fuse offers "bazil.org/fuse/fs/fstestutil/spawntest" // infrastructure which helps run tests in different communicating via socket processes. func init() { log.SetLevel(log.ErrorLevel) } func TestMain(m *testing.M) { helpers.AddFlag(flag.CommandLine) flag.Parse() helpers.RunIfNeeded() os.Exit(m.Run()) } type fuseCatFSHelp struct{} // These helpers will be requested from test and executed on the server // which is managing catfs-fuse connection (started within test) func (fch *fuseCatFSHelp) ServeHTTP(w http.ResponseWriter, req *http.Request) { switch req.URL.Path { case "/mount": httpjson.ServePOST(fch.makeCatfsAndFuseMount).ServeHTTP(w, req) case "/unmount": httpjson.ServePOST(fch.unmountFuseAndCloseDummyCatFS).ServeHTTP(w, req) case "/fuseReMount": httpjson.ServePOST(fch.makeFuseReMount).ServeHTTP(w, req) case "/catfsStage": httpjson.ServePOST(fch.catfsStage).ServeHTTP(w, req) case "/catfsGetData": httpjson.ServePOST(fch.catfsGetData).ServeHTTP(w, req) default: http.NotFound(w, req) } } func makeDummyCatFS(dbPath string) (catfsFuseInfo, error) { backend := catfs.NewMemFsBackend() owner := "alice" cfg, err := config.Open(nil, defaults.Defaults, config.StrictnessPanic) if err != nil { log.Fatalf("Could not get default catFS config: %v", err) return catfsFuseInfo{}, err } mdc, err := mdcache.New(mdcache.Options{ MaxMemoryUsage: 1024 * 1024 * 1024, }) if err != nil { log.Fatalf("unable to instance : %v", err) return catfsFuseInfo{}, err } cfs, err := catfs.NewFilesystem( backend, dbPath, owner, false, cfg.Section("fs"), nil, mdc, ) if err != nil { log.Fatalf("Failed to create catfs filesystem: %v", err) return catfsFuseInfo{}, err } cfInfo := catfsFuseInfo{} cfInfo.cfs = cfs cfInfo.dbPath = dbPath return cfInfo, err } type nothing struct{} // 
use it to send empty request or responses to server type catfsFuseInfo struct { cfs *catfs.FS dbPath string fuseMount *Mount } // cfInfo will be in the global space for the server // which manage fuse mount connection to the catFS var cfInfo catfsFuseInfo type mountingRequest struct { DbPath string MntPath string Opts MountOptions } func (fch *fuseCatFSHelp) makeCatfsAndFuseMount(ctx context.Context, req mountingRequest) (*nothing, error) { var err error cfInfo, err = makeDummyCatFS(req.DbPath) if err != nil { log.Errorf("cannot make catFS in %v", cfInfo.dbPath) return ¬hing{}, err } fuseMount, err := makeFuseMount(cfInfo.cfs, req.MntPath, req.Opts) if err != nil { log.Errorf("cannot mount catfs fuse file system to %v", req.MntPath) return ¬hing{}, err } cfInfo.fuseMount = fuseMount return ¬hing{}, err } func (fch *fuseCatFSHelp) makeFuseReMount(ctx context.Context, req mountingRequest) (*nothing, error) { fuseMount, err := makeFuseMount(cfInfo.cfs, req.MntPath, req.Opts) if err != nil { log.Errorf("cannot mount catfs fuse file system to %v", req.MntPath) return ¬hing{}, err } cfInfo.fuseMount = fuseMount return ¬hing{}, err } func (fch *fuseCatFSHelp) unmountFuseAndCloseDummyCatFS(ctx context.Context, req nothing) (*nothing, error) { defer os.RemoveAll(cfInfo.fuseMount.Dir) defer os.RemoveAll(cfInfo.dbPath) // first unmount fuse directory if err := lazyUnmount(cfInfo.fuseMount.Dir); err != nil { skipableErr := "exit status 1: fusermount: entry for " + cfInfo.fuseMount.Dir + " not found in /etc/mtab" log.Debug(skipableErr) if err.Error() != skipableErr { return ¬hing{}, err } } // now close catFS err := cfInfo.cfs.Close() if err != nil { log.Fatalf("Could not close catfs filesystem: %v", err) } return ¬hing{}, err } func makeFuseMount(cfs *catfs.FS, mntPath string, opts MountOptions) (*Mount, error) { // Make sure to unmount any mounts that are there. // Possibly there are some leftovers from previous failed runs. 
if err := lazyUnmount(mntPath); err != nil { skipableErr := "exit status 1: fusermount: entry for " + mntPath + " not found in /etc/mtab" log.Debug(skipableErr) if err.Error() != skipableErr { return nil, err } } if err := os.MkdirAll(mntPath, 0777); err != nil { log.Fatalf("Unable to create empty mount dir: %v", err) return nil, err } mount, err := NewMount(cfs, mntPath, nil, opts) if err != nil { log.Fatalf("Cannot create mount: %v", err) return nil, err } return mount, err } type catfsPayload struct { Path string Data []byte } func (fch *fuseCatFSHelp) catfsStage(ctx context.Context, req catfsPayload) (*nothing, error) { err := cfInfo.cfs.Stage(req.Path, bytes.NewReader(req.Data)) return ¬hing{}, err } // Get data from a file stored by catFS func (fch *fuseCatFSHelp) catfsGetData(ctx context.Context, req catfsPayload) (*catfsPayload, error) { out := catfsPayload{} out.Path = req.Path stream, err := cfInfo.cfs.Cat(req.Path) if err != nil { log.Fatalf("Could not get stream for a catfs file: %v", err) return &out, err } result := bytes.NewBuffer(nil) _, err = stream.WriteTo(result) if err != nil { log.Fatalf("Streaming to a buffer failed: %v", err) return &out, err } out.Data = result.Bytes() return &out, err } var helpers spawntest.Registry var fuseCatFSHelper = helpers.Register("fuseCatFSHelp", &fuseCatFSHelp{}) type mountInfo struct { // fuse related info available to OS layer Dir string Opts MountOptions } // Call helper for unmount and cleanup func callUnMount(ctx context.Context, t testing.TB, control *spawntest.Control) { if err := control.JSON("/unmount").Call(ctx, nothing{}, ¬hing{}); err != nil { t.Fatalf("calling helper: %v", err) } } // Spawns helper, prepare catFS, connects it to fuse layer, and execute function f func withMount(t testing.TB, opts MountOptions, f func(ctx context.Context, control *spawntest.Control, mount *mountInfo)) { // set up mounts ctx, cancel := context.WithCancel(context.Background()) defer cancel() control := 
fuseCatFSHelper.Spawn(ctx, t) defer control.Close() dbPath, err := ioutil.TempDir("", "catfs-db-test") if err != nil { t.Fatalf("Failed to create temp dir for catFS: %v", err) } req := mountingRequest{ DbPath: dbPath, MntPath: filepath.Join(os.TempDir(), "catfs-fuse-mountdir"), Opts: opts, } if err := control.JSON("/mount").Call(ctx, req, ¬hing{}); err != nil { t.Fatalf("calling helper: %v", err) } defer callUnMount(ctx, t, control) // function which required mounts f(ctx, control, &mountInfo{ Dir: req.MntPath, Opts: req.Opts, }) } func checkFuseFileMatchToCatFS(ctx context.Context, t *testing.T, control *spawntest.Control, fusePath string, catfsPath string) { // checks if OS file content matches catFS file content fuseData, err := ioutil.ReadFile(fusePath) require.NoError(t, err) // is catFS seeing the same data checkCatfsFileContent(ctx, t, control, catfsPath, fuseData) } func checkCatfsFileContent(ctx context.Context, t *testing.T, control *spawntest.Control, catfsPath string, expected []byte) { req := catfsPayload{Path: catfsPath} out := catfsPayload{} require.NoError(t, control.JSON("/catfsGetData").Call(ctx, req, &out)) require.Equal(t, len(out.Data), len(expected)) if out.Data == nil { // this is special for the 0 length data out.Data = []byte{} } require.Equal(t, out.Data, expected) } // Finally we ready to do tests // Tests for spawntest infrastructure related tests // Just checks that our catfsStage interface to catFS does not error out func TestCatfsStage(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { dataIn := []byte{1, 2, 3, 4} filePath := "StagingTest.bin" req := catfsPayload{Path: filePath, Data: dataIn} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) }) } func TestCatfsGetData(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { dataIn := []byte{1, 2, 3, 4} filePath := 
"StageAndReadTest.bin" req := catfsPayload{Path: filePath, Data: dataIn} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) req.Data = []byte{} out := catfsPayload{} require.NoError(t, control.JSON("/catfsGetData").Call(ctx, req, &out)) require.Equal(t, out.Data, dataIn) }) } // Main fuse layer tests var ( DataSizes = []int64{ 0, 1, 2, 4, 8, 16, 32, 64, 1024, 2048, 4095, 4096, 4097, 147611, 2*1024*1024 + 123, // in case if we have buffer size interference } ) func TestRead(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { for _, size := range DataSizes { t.Run(fmt.Sprintf("%d", size), func(t *testing.T) { helloData := testutil.CreateDummyBuf(size) // Add a simple file: catfsFilePath := fmt.Sprintf("/hello_from_catfs_%d", size) req := catfsPayload{Path: catfsFilePath, Data: helloData} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, catfsFilePath, helloData) fuseFilePath := filepath.Join(mount.Dir, catfsFilePath) checkFuseFileMatchToCatFS(ctx, t, control, fuseFilePath, catfsFilePath) }) } }) } func TestFileXattr(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { size := int64(4) helloData := testutil.CreateDummyBuf(size) // Add a simple file: catfsFilePath := fmt.Sprintf("/hello_from_catfs_%d", size) req := catfsPayload{Path: catfsFilePath, Data: helloData} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, catfsFilePath, helloData) fuseFilePath := filepath.Join(mount.Dir, catfsFilePath) // no let's see all the extended attributes list response := make([]byte, 1024*4) // large buffer to fit everything sz, err := syscall.Listxattr(fuseFilePath, response) require.NoError(t, err) response = response[:sz] receivedAttrs := bytes.Split(response, []byte{0}) // every response should belong to valid 
attributes for _, attr := range receivedAttrs { if len(attr) == 0 { // protecting against empty chunk after split delimiter continue } _, ok := xattrMap[string(attr)] require.Truef(t, ok, "Invalid extended attribute '%s'", attr) } // every valid attribute should be in received Attrs list for attr := range xattrMap { require.Containsf(t, receivedAttrs, []uint8(attr), "Received attributes are missing '%s'", attr) } // now let's check some attributes values // Note hashes are hard without direct access to catfs // which is accessed in different process response = make([]byte, 64) // large buffer to fit everything sz, err = syscall.Getxattr(fuseFilePath, "user.brig.pinned", response) require.NoError(t, err) response = response[:sz] require.Equal(t, "yes", string(response)) response = make([]byte, 64) // large buffer to fit everything sz, err = syscall.Getxattr(fuseFilePath, "user.brig.explicitly_pinned", response) require.NoError(t, err) response = response[:sz] require.Equal(t, "no", string(response)) }) } func TestWrite(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { for _, size := range DataSizes { t.Run(fmt.Sprintf("%d", size), func(t *testing.T) { helloData := testutil.CreateDummyBuf(size) catfsFilePath := fmt.Sprintf("/hello_from_fuse%d", size) fuseFilePath := filepath.Join(mount.Dir, catfsFilePath) // Write a simple file via the fuse layer: err := ioutil.WriteFile(fuseFilePath, helloData, 0644) if err != nil { t.Fatalf("Could not write simple file via fuse layer: %v", err) } checkCatfsFileContent(ctx, t, control, catfsFilePath, helloData) }) } }) } // Regression test for copying larger file to the mount. 
func TestTouchWrite(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { for _, size := range DataSizes { t.Run(fmt.Sprintf("%d", size), func(t *testing.T) { catfsFilePath := fmt.Sprintf("/empty_at_creation_by_catfs_%d", size) req := catfsPayload{Path: catfsFilePath, Data: []byte{}} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, catfsFilePath, req.Data) fuseFilePath := filepath.Join(mount.Dir, catfsFilePath) // Write a simple file via the fuse layer: helloData := testutil.CreateDummyBuf(size) err := ioutil.WriteFile(fuseFilePath, helloData, 0644) if err != nil { t.Fatalf("Could not write simple file via fuse layer: %v", err) } checkCatfsFileContent(ctx, t, control, catfsFilePath, helloData) }) } }) } // Regression test for copying a file to a subdirectory. func TestTouchWriteSubdir(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { file := "donald.png" subDirPath := "sub" catfsFilePath := filepath.Join(subDirPath, file) fuseSubDirPath := filepath.Join(mount.Dir, subDirPath) fuseFilePath := filepath.Join(fuseSubDirPath, file) require.NoError(t, os.Mkdir(fuseSubDirPath, 0644)) expected := []byte{1, 2, 3} require.NoError(t, ioutil.WriteFile(fuseFilePath, expected, 0644)) checkCatfsFileContent(ctx, t, control, catfsFilePath, expected) }) } func TestReadOnlyFs(t *testing.T) { opts := MountOptions{ ReadOnly: true, } withMount(t, opts, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { xData := []byte{1, 2, 3} req := catfsPayload{Path: "/x.png", Data: xData} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, "x.png", xData) // Do some allowed io to check if the fs is actually working. // The test does not check on the kind of errors otherwise. 
xPath := filepath.Join(mount.Dir, "x.png") checkFuseFileMatchToCatFS(ctx, t, control, xPath, "x.png") // Try creating a new file: yPath := filepath.Join(mount.Dir, "y.png") require.NotNil(t, ioutil.WriteFile(yPath, []byte{4, 5, 6}, 0600)) // Try modifying an existing file: require.NotNil(t, ioutil.WriteFile(xPath, []byte{4, 5, 6}, 0600)) dirPath := filepath.Join(mount.Dir, "sub") require.NotNil(t, os.Mkdir(dirPath, 0644)) }) } func TestWithRoot(t *testing.T) { withMount(t, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { data := []byte{1, 2, 3} // Populate catFS with some files in different directories req := catfsPayload{Path: "/u.png", Data: data} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, req.Path, data) checkFuseFileMatchToCatFS(ctx, t, control, filepath.Join(mount.Dir, req.Path), req.Path) data = []byte{2, 3, 4} req = catfsPayload{Path: "/a/x.png", Data: data} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, req.Path, data) checkFuseFileMatchToCatFS(ctx, t, control, filepath.Join(mount.Dir, req.Path), req.Path) data = []byte{3, 4, 5} req = catfsPayload{Path: "/a/b/y.png", Data: data} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, req.Path, data) checkFuseFileMatchToCatFS(ctx, t, control, filepath.Join(mount.Dir, req.Path), req.Path) data = []byte{4, 5, 6} req = catfsPayload{Path: "/a/b/c/z.png", Data: data} require.NoError(t, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) checkCatfsFileContent(ctx, t, control, req.Path, data) checkFuseFileMatchToCatFS(ctx, t, control, filepath.Join(mount.Dir, req.Path), req.Path) // Now we need to remount fuse with different root directory remntReq := mountingRequest{ MntPath: mount.Dir, Opts: MountOptions{Root: "/a/b"}, } require.NoError(t, control.JSON("/fuseReMount").Call(ctx, remntReq, 
¬hing{})) mount.Opts = remntReq.Opts // update with new mount options // See if fuse indeed provides different root // Read already existing file yPath := filepath.Join(mount.Dir, "y.png") checkFuseFileMatchToCatFS(ctx, t, control, yPath, "/a/b/y.png") // Write to a new file data = []byte{5, 6, 7} newPath := filepath.Join(mount.Dir, "new.png") require.NoError(t, ioutil.WriteFile(newPath, data, 0644)) checkCatfsFileContent(ctx, t, control, "/a/b/new.png", data) // Attempt to read file above mounted root inAccessiblePath := filepath.Join(mount.Dir, "u.png") _, err := ioutil.ReadFile(inAccessiblePath) require.NotNil(t, err) }) } // Benchmarks var ( BenchmarkDataSizes = []int64{ 0, 1024, 2 * 1024, 16 * 1024, 64 * 1024, 128 * 1024, 1 * 1024 * 1024, 16 * 1024 * 1024, } ) func stageAndRead(ctx context.Context, b *testing.B, control *spawntest.Control, mount *mountInfo, label string, data []byte) { size := len(data) // stage data to catFS catfsFilePath := fmt.Sprintf("%s_file_%d", label, size) req := catfsPayload{Path: catfsFilePath, Data: data} require.NoError(b, control.JSON("/catfsStage").Call(ctx, req, ¬hing{})) fuseFilePath := filepath.Join(mount.Dir, catfsFilePath) // Read it back via fuse b.Run(fmt.Sprintf("%s_Size_%d", label, size), func(b *testing.B) { for n := 0; n < b.N; n++ { ioutil.ReadFile(fuseFilePath) } }) } func BenchmarkRead(b *testing.B) { withMount(b, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { for _, size := range BenchmarkDataSizes { // Check how fast is readout of a file with compressible content data := testutil.CreateDummyBuf(size) stageAndRead(ctx, b, control, mount, "CompressibleContent", data) // Check how fast is readout of a file with random/uncompressible content data = testutil.CreateRandomDummyBuf(size, 1) stageAndRead(ctx, b, control, mount, "RandomContent", data) } }) } func writeDataNtimes(b *testing.B, data []byte, ntimes int) { // Writing could be very space demanding even for a small size, 
// Since benchmark runs many-many times, it will consume a lot of space. // We have to remount everything every time to start with clean catFS DB. // Consequently, this test takes long time, since mounting is long operation. require.True(b, ntimes > 0, "ntimes must be positive") // note ntimes =0 is bad too, // since execution time between StartTimer/StopTimer is too short/jittery // and benchmarks run forever label := "dummy" size := len(data) for n := 0; n < b.N; n++ { b.StopTimer() withMount(b, MountOptions{}, func(ctx context.Context, control *spawntest.Control, mount *mountInfo) { // Check how fast is write to a file with compressible content catfsFilePath := fmt.Sprintf("%s_file_%d", label, size) fuseFilePath := filepath.Join(mount.Dir, catfsFilePath) b.StartTimer() for i := 0; i < ntimes; i++ { if len(data) > 0 { // modification of one byte is enough // to generate new encrypted content for the backend binary.LittleEndian.PutUint64(data[0:8], uint64(i)) } require.NoError(b, ioutil.WriteFile(fuseFilePath, data, 0644)) } b.StopTimer() }) } } var ( // keep this low or you might run out of space NumberOfOverWrites = []int{ 1, 2, 5, } ) func BenchmarkWrite(b *testing.B) { size := int64(10 * 1024 * 1024) for _, Ntimes := range NumberOfOverWrites { // Check how fast is write to a file with compressible content data := testutil.CreateDummyBuf(size) prefix := fmt.Sprintf("Owerwrite_%d", Ntimes) label := fmt.Sprintf("%s/CompressibleContent_Size_%d", prefix, size) b.Run(label, func(b *testing.B) { writeDataNtimes(b, data, Ntimes) }) // Check how fast is write to a file with random/uncompressible content data = testutil.CreateRandomDummyBuf(size, 1) label = fmt.Sprintf("%s/RandomContent_Size_%d", prefix, size) b.Run(label, func(b *testing.B) { writeDataNtimes(b, data, Ntimes) }) } } ================================================ FILE: fuse/fusetest/client.go ================================================ package fusetest import ( "context" "net" "net/http" 
"github.com/sahib/brig/util" ) // Client allows controlling the type Client struct { httpClient *http.Client } // Dial returns a client suitable for controlling a fusetest server. func Dial(url string) (*Client, error) { scheme, addr, err := util.URLToSchemeAndAddr(url) if err != nil { return nil, err } httpClient := &http.Client{ Transport: &http.Transport{ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial(scheme, addr) }, }, } return &Client{ httpClient: httpClient, }, nil } // QuitServer sends a command that tells the server to quit. // The request will block until the quit was carried out. func (ctl *Client) QuitServer() error { req, err := http.NewRequest("GET", "/quit", nil) if err != nil { return err } _, err = ctl.httpClient.Do(req) return err } ================================================ FILE: fuse/fusetest/doc.go ================================================ // Package fusetest offers an easy way to test our fuse code. // // What this does is start another process with an HTTP server in it. // Beside the HTTP server the fuse mount is mounted at a specified path, // with the specified options. A client can connect to the server and control // it and/or a client program can access files in the fuse mount. // // The reason why this is another process is an issue with Go: // When serving and accessing the FUSE mount in the same we might enter // an unrecoverable deadlock where file I/O related syscalls get stuck // because the go routine that serve this syscall live in the same process // but do not get called because only N parallel go routines can be run. // // Reference: // // * https://github.com/bazil/fuse/issues/264#issuecomment-727269770 // * https://github.com/sahib/brig/pull/77#issuecomment-754831080 // // bazil/fuse offers an spawntest utility that does something very similar, // but we also this package in benchmarks and spawntest expects to be called // from tests and thus requires the testing package. 
package fusetest


================================================
FILE: fuse/fusetest/helper.go
================================================
package fusetest

import (
	"os"
	"os/exec"
)

// LaunchAsProcess will start the fusemock server in another process.
// This will serve a fuse mount on the specified directory and will listen
// to commands on a http socket.
//
// NOTE: This will only work if you call this from a part of the main brig
//       executable. This relies on the executable to start the server
//       when being called as »$0 debug fusemock«. Therefore this will not
//       work in tests, but it should be easy to adapt.
//
// The returned process can be used to terminate the program.
// You should use the provided Dial() / Quit method to cleanup though.
func LaunchAsProcess(opts Options) (*os.Process, error) {
	myself, err := os.Executable()
	if err != nil {
		return nil, err
	}

	args := []string{
		"debug", "fusemock",
		"--mount-path", opts.MountPath,
		"--catfs-path", opts.CatfsPath,
		"--ipfs-path-or-multiaddr", opts.IpfsPathOrMultiaddr,
		"--url", opts.URL,
	}

	if opts.MountReadOnly {
		args = append(args, "--mount-ro")
	}

	if opts.MountOffline {
		args = append(args, "--mount-offline")
	}

	cmd := exec.Command(myself, args...)
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	return cmd.Process, nil
}


================================================
FILE: fuse/fusetest/server.go
================================================
package fusetest

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/gorilla/mux"
	"github.com/sahib/brig/backend/httpipfs"
	"github.com/sahib/brig/catfs"
	"github.com/sahib/brig/catfs/mio/pagecache/mdcache"
	"github.com/sahib/brig/defaults"
	"github.com/sahib/brig/fuse"
	"github.com/sahib/brig/repo/hints"
	"github.com/sahib/brig/util"
	"github.com/sahib/config"
)

// makeFS builds a catfs filesystem over `backend`, storing metadata below
// `dbPath`. It uses the stock default config and a 1GB in-memory page cache.
func makeFS(dbPath string, backend catfs.FsBackend) (*catfs.FS, error) {
	// open a dummy default config:
	cfg, err := config.Open(nil, defaults.Defaults, config.StrictnessPanic)
	if err != nil {
		return nil, err
	}

	hintMgr, err := hints.NewManager(nil)
	if err != nil {
		return nil, err
	}

	pageCache, err := mdcache.New(mdcache.Options{
		MaxMemoryUsage:    1024 * 1024 * 1024,
		L1CacheMissRefill: true,
	})
	if err != nil {
		return nil, err
	}

	cfs, err := catfs.NewFilesystem(
		backend,
		dbPath,
		"alice",
		false,
		cfg.Section("fs"),
		hintMgr,
		pageCache,
	)
	if err != nil {
		// BUG FIX: this used to call log.Fatalf() here, which exits the
		// whole process and made the `return nil, err` below dead code.
		// Log and propagate the error instead so the caller can clean up.
		log.WithError(err).Error("failed to create catfs filesystem")
		return nil, err
	}

	return cfs, nil
}

// mount creates `mountPath` (if missing) and serves `cfs` on it via fuse.
func mount(cfs *catfs.FS, mountPath string, opts Options) (*fuse.Mount, error) {
	if err := os.MkdirAll(mountPath, 0700); err != nil {
		return nil, err
	}

	return fuse.NewMount(cfs, mountPath, nil, fuse.MountOptions{
		ReadOnly: opts.MountReadOnly,
		Offline:  opts.MountOffline,
		Root:     "/",
	})
}

// serveHTTPServer listens on the address encoded in opts.URL and serves the
// control API (currently just /quit) until the server is shut down.
func serveHTTPServer(opts Options) error {
	scheme, addr, err := util.URLToSchemeAndAddr(opts.URL)
	if err != nil {
		return err
	}

	lst, err := net.Listen(scheme, addr)
	if err != nil {
		return err
	}

	// Needed for /quit.
	srv := &http.Server{}

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

	// Properly exit when Ctrl-C is pressed.
	// (including unmounting!)
	go func() {
		<-sigCh
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		srv.Shutdown(ctx)
	}()

	// TODO: routes for stage / cat (although not really necessary...)
	// TODO: root for unmount.
	router := mux.NewRouter()
	router.HandleFunc("/quit", func(w http.ResponseWriter, r *http.Request) {
		go func() {
			// Close the server in a few ms, just not in th request itself.
			// Otherwise the client will block forever.
			time.Sleep(100 * time.Millisecond)
			if err := srv.Shutdown(r.Context()); err != nil {
				log.WithError(err).Warnf("failed to shutdown server")
			}
		}()
	}).Methods("GET")

	srv.Handler = router

	fmt.Println("serving...")
	defer fmt.Println("serving done...")
	return srv.Serve(lst)
}

// Options can be specified to control the behavior of the fusetest server.
type Options struct {
	// MountPath is where the fuse mount will be available.
	MountPath string

	// CatfsPath is where the metdata is stored.
	CatfsPath string

	// IpfsPath tells us which IPFS repo to use.
	// If empty, use the mock backend.
	IpfsPathOrMultiaddr string

	// URL defines where the server can be reached.
	URL string

	// MountReadOnly = true means to not allow modifications.
	MountReadOnly bool

	// MountOffline= true means to not allow online queries.
	MountOffline bool
}

// Launch will launch a fuse test server.
func Launch(opts Options) error {
	tmpDir, err := ioutil.TempDir("", "brig-debug-fuse-*")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)

	for _, path := range []string{opts.MountPath, opts.CatfsPath} {
		if err := os.MkdirAll(path, 0700); err != nil {
			return err
		}
	}

	var backend catfs.FsBackend
	if opts.IpfsPathOrMultiaddr != "" {
		backend, err = httpipfs.NewNode(opts.IpfsPathOrMultiaddr, "")
	} else {
		backend = catfs.NewMemFsBackend()
	}

	if err != nil {
		return err
	}

	cfs, err := makeFS(opts.CatfsPath, backend)
	if err != nil {
		return err
	}

	m, err := mount(cfs, opts.MountPath, opts)
	if err != nil {
		return err
	}

	// make sure it gets closed, even when no unmount is happening.
defer func() { fmt.Println("Closing mount") if err := m.Close(); err != nil { log.WithError(err).Error("fuse mount close failed") } }() return serveHTTPServer(opts) } ================================================ FILE: fuse/handle.go ================================================ // +build !windows package fuse import ( "io" "sync" "syscall" "time" "context" "bazil.org/fuse" "bazil.org/fuse/fs" "github.com/sahib/brig/catfs" log "github.com/sirupsen/logrus" ) // Handle is an open Entry. type Handle struct { mu sync.Mutex fd *catfs.Handle m *Mount wasModified bool } // Read is called to read a block of data at a certain offset. func (hd *Handle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { hd.mu.Lock() defer hd.mu.Unlock() defer logPanic("handle: read") log.Debugf( "fuse-Read: %s (off: %d size: %d)", hd.fd.Path(), req.Offset, req.Size, ) n, err := hd.fd.ReadAt(resp.Data[:req.Size], req.Offset) if err != nil && err != io.EOF { return errorize("handle-read-io", err) } resp.Data = resp.Data[:n] return nil } // Write is called to write a block of data at a certain offset. // Note: do not assume that Write requests come in `fifo` order from the OS level!!! // I.e. during `cp largeFile /brig-fuse-mount/newFile` // the kernel might occasionally send write requests with blocks out of order!!! // In other words stream-like optimizations are not possible . func (hd *Handle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { start := time.Now() hd.mu.Lock() defer hd.mu.Unlock() defer logPanic("handle: write") log.Debugf( "fuse-Write: %s (off: %d size: %d)", hd.fd.Path(), req.Offset, len(req.Data), ) // Offset seems to be always provided from the start (i.e. 
0) n, err := hd.writeAt(req.Data, req.Offset) resp.Size = n if err != nil { return errorize("handle-write-io", err) } if n != len(req.Data) { log.Panicf("written amount %d is not equal to requested %d", n, len(req.Data)) return err } log.Infof("fuse: Write time %v for %d bytes", time.Since(start), n) hd.wasModified = true return nil } // Writes data from `buf` at offset `off` counted from the start (0 offset). // Mimics `WriteAt` from `io` package https://golang.org/pkg/io/#WriterAt // Main idea is not bother with Seek pointer, since underlying `overlay` works // with intervals in memory and we do not need to `Seek` the backend which is very time expensive. func (hd *Handle) writeAt(buf []byte, off int64) (n int, err error) { n, err = hd.fd.WriteAt(buf, off) if n != len(buf) || err != nil { log.Errorf("fuse: were not able to save %d bytes at offset %d", len(buf), off) } return n, err } // Flush is called to make sure all written contents get synced to disk. func (hd *Handle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return hd.flush() } // flush does the actual adding to brig. func (hd *Handle) flush() error { hd.mu.Lock() defer hd.mu.Unlock() log.Debugf("fuse-flush: %v", hd.fd.Path()) defer logPanic("handle: flush") if !hd.wasModified { return nil } start := time.Now() if err := hd.fd.Flush(); err != nil { return errorize("handle-flush", err) } log.Infof("fuse: Flashed `%s` in %v", hd.fd.Path(), time.Since(start)) hd.wasModified = false notifyChange(hd.m, 500*time.Millisecond) return nil } // Release is called to close this handle. 
// Release is called to close this handle.
func (hd *Handle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	defer logPanic("handle: release")
	log.Debugf("fuse-release: %v", hd.fd.Path())

	if req.Flags.IsReadOnly() {
		// we don't need to track read-only handles
		return nil
	}

	// Flush any pending modifications before the handle goes away.
	err := hd.flush()
	if err != nil {
		return errorize("handle-release", err)
	}

	return nil
}

// Truncates (or extends) data to the desired size
func (hd *Handle) truncate(size uint64) error {
	log.Debugf("fuse-truncate: %v to size %d", hd.fd.Path(), size)
	defer logPanic("handle: truncate")

	err := hd.fd.Truncate(size)
	return err
}

// Poll checks that the handle is ready for I/O or not
func (hd *Handle) Poll(ctx context.Context, req *fuse.PollRequest, resp *fuse.PollResponse) error {
	// Comment taken verbatim from fs/serve.go of bazil.org/fuse:
	// Poll checks whether the handle is currently ready for I/O, and
	// may request a wakeup when it is.
	//
	// Poll should always return quickly. Clients waiting for
	// readiness can be woken up by passing the return value of
	// PollRequest.Wakeup to fs.Server.NotifyPollWakeup or
	// fuse.Conn.NotifyPollWakeup.
	//
	// To allow supporting poll for only some of your Nodes/Handles,
	// the default behavior is to report immediate readiness. If your
	// FS does not support polling and you want to minimize needless
	// requests and log noise, implement NodePoller and return
	// syscall.ENOSYS.
	//
	// The Go runtime uses epoll-based I/O whenever possible, even for
	// regular files.

	// Here we implement a dummy response which reports "I am ready".
	// The access separation is handled by mutex, so goroutines
	// will have to be blocked but it's ok. We do not expect many
	// processes working with the same file

	// default always ready mask
	resp.REvents = fuse.DefaultPollMask
	// We also return ENOSYS error, which sort of invalidate our response,
	// the ENOSYS indicates that this call is not supported
	return syscall.ENOSYS
}

// Compiler checks to see if we got all the interfaces right:
var _ = fs.HandleFlusher(&Handle{})
var _ = fs.HandleReader(&Handle{})
var _ = fs.HandleReleaser(&Handle{})
var _ = fs.HandleWriter(&Handle{})
var _ = fs.HandlePoller(&Handle{})


================================================
FILE: fuse/mount.go
================================================
// +build !windows

package fuse

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os/exec"
	"path"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	e "github.com/pkg/errors"
	"github.com/sahib/brig/catfs"
	"github.com/sahib/brig/util"
	log "github.com/sirupsen/logrus"
)

// Notifier implementors can take notifications
// from any events happening in the fuse mount.
type Notifier interface {
	// PublishEvent is called whenever a modification happens.
	PublishEvent()
}

// MountOptions defines all possible knobs you can turn for a mount.
// The zero value are the default options.
type MountOptions struct {
	// ReadOnly makes the mount not modifyable
	ReadOnly bool
	// Root determines what the root directory is.
	Root string
	// Offline tells the mount to error out on files that would need
	// to be fetched from far.
	Offline bool
}

// This is very similar (and indeed mostly copied) code from:
// https://github.com/bazil/fuse/blob/master/fs/fstestutil/mounted.go
// Since that's "only" test module, api might change, so better have this
// code here (also we might do a few things differently).

// Mount represents a fuse endpoint on the filesystem.
// It is used as top-level API to control a brigfs fuse mount.
type Mount struct { Dir string filesys *Filesystem closed bool done chan util.Empty errors chan error conn *fuse.Conn server *fs.Server options MountOptions notifier Notifier fs *catfs.FS } // NewMount mounts a fuse endpoint at `mountpoint` retrieving data from `store`. func NewMount(cfs *catfs.FS, mountpoint string, notifier Notifier, opts MountOptions) (*Mount, error) { mountOptions := []fuse.MountOption{ fuse.FSName("brigfs"), fuse.Subtype("brig"), fuse.AllowNonEmptyMount(), // enabling MaxReadahead double or even triple Read throughput 12MB/s -> 25 or 33 MB/s fuse.MaxReadahead(128 * 1024), // kernel uses at max 128kB = 131072B // enabling WritebackCache doubles write speed to buffer 12MB/s -> 24MB/s fuse.WritebackCache(), // writes will happen in mach large blocks 128kB instead of 8kB } if opts.ReadOnly { mountOptions = append(mountOptions, fuse.ReadOnly()) } conn, err := fuse.Mount(mountpoint, mountOptions...) if err != nil { return nil, e.Wrapf(err, "fuse-mount") } if opts.Root == "" { opts.Root = "/" } info, err := cfs.Stat(opts.Root) if err != nil { return nil, e.Wrapf(err, "failed to lookup root node of mount: %v", mountpoint) } if !info.IsDir { return nil, e.Wrapf(err, "%s is not a directory", opts.Root) } mnt := &Mount{ conn: conn, server: fs.New(conn, nil), Dir: mountpoint, done: make(chan util.Empty), errors: make(chan error), options: opts, notifier: notifier, fs: cfs, } filesys := &Filesystem{m: mnt, root: opts.Root} mnt.filesys = filesys go func() { defer close(mnt.done) log.Debugf("serving fuse mount at %v", mountpoint) mnt.errors <- mnt.server.Serve(filesys) mnt.done <- util.Empty{} log.Debugf("stopped serving fuse at %v", mountpoint) }() select { case <-mnt.conn.Ready: if err := mnt.conn.MountError; err != nil { return nil, err } case err = <-mnt.errors: // Serve quit early if err != nil { return nil, err } return nil, errors.New("Serve exited early") } return mnt, nil } func lazyUnmount(dir string) error { cmd := exec.Command("fusermount", "-u", 
"-z", dir) // #nosec output, err := cmd.CombinedOutput() if err != nil { if len(output) > 0 { output = bytes.TrimRight(output, "\n") msg := err.Error() + ": " + string(output) err = errors.New(msg) } return err } return nil } // EqualOptions returns true when the options in `opts` have the same // option as currently set in the mount. If so, no re-mount is required. func (m *Mount) EqualOptions(opts MountOptions) bool { if m.options.ReadOnly != opts.ReadOnly { return false } return path.Clean(m.options.Root) == path.Clean(opts.Root) } // Close will wait until all I/O operations are done and unmount the fuse // mount again. func (m *Mount) Close() error { if m.closed { return nil } m.closed = true log.Infof("unmounting fuse mount at %v (this might take a bit)", m.Dir) couldUnmount := false waitTimeout := 1 * time.Second // Attempt unmounting several times: for tries := 0; tries < 10; tries++ { if err := fuse.Unmount(m.Dir); err != nil { log.Debugf("failed to graceful unmount: %v", err) time.Sleep(250 * time.Millisecond) continue } couldUnmount = true waitTimeout = 5 * time.Second break } if !couldUnmount { log.Warn("cant properly unmount; are there still processes using the mount?") log.Warn("attempting lazy umount (you might leak resources!)") if err := lazyUnmount(m.Dir); err != nil { log.Debugf("lazy unmount failed: %v", err) } } // Be sure to drain the error channel: select { case err := <-m.errors: if err != nil { log.Warningf("fuse returned an error: %v", err) } case <-time.NewTimer(waitTimeout).C: // blocking due to fuse freeze. } // Be sure to pull the item from the channel: select { case <-m.done: log.Debugf("gracefully shutting down") case <-time.NewTimer(waitTimeout).C: // success or blocking due to fuse freeze. } // If we could not unmount, schedule closing in the background. // This might be leaky, since Close might not ever return. // But usually we unmount on program exit anyways... 
if couldUnmount { if err := m.conn.Close(); err != nil { return err } } else { go m.conn.Close() } return nil } // MountTable is a mapping from the mountpoint to the respective // `Mount` struct. It's given as convenient way to maintain several mounts. // All operations on the table are safe to call from several goroutines. type MountTable struct { mu sync.Mutex m map[string]*Mount fs *catfs.FS notifier Notifier } // NewMountTable returns an empty mount table. func NewMountTable(fs *catfs.FS, notifier Notifier) *MountTable { return &MountTable{ m: make(map[string]*Mount), fs: fs, notifier: notifier, } } // AddMount calls NewMount and adds it to the table at `path`. func (t *MountTable) AddMount(path string, opts MountOptions) (*Mount, error) { t.mu.Lock() defer t.mu.Unlock() return t.addMount(path, opts) } func checkMountPath(path string) error { files, err := ioutil.ReadDir(path) if err != nil { // This will also fail if `path` is not a directory: return err } if len(files) > 0 { return fmt.Errorf("Refusing to mount over non-empty dir `%s`", path) } return nil } func (t *MountTable) addMount(path string, opts MountOptions) (*Mount, error) { if err := checkMountPath(path); err != nil { return nil, e.Wrapf(err, "dir check") } m, ok := t.m[path] if ok { return m, nil } m, err := NewMount(t.fs, path, t.notifier, opts) if err == nil { t.m[path] = m } return m, e.Wrapf(err, "new-mount") } // Unmount closes the mount at `path` and deletes it from the table. func (t *MountTable) Unmount(path string) error { t.mu.Lock() defer t.mu.Unlock() return t.unmount(path) } func (t *MountTable) unmount(path string) error { m, ok := t.m[path] if !ok { return fmt.Errorf("no mount at `%v`", path) } delete(t.m, path) return m.Close() } // Close unmounts all leftover mounts and clears the table. 
func (t *MountTable) Close() error { t.mu.Lock() defer t.mu.Unlock() var err error for _, mount := range t.m { if closeErr := mount.Close(); closeErr != nil { err = closeErr } } t.m = make(map[string]*Mount) return err } ================================================ FILE: fuse/stub.go ================================================ // Package fuse implements a stub. // +build windows // The following comment is only here to make sure golint ignores this file: // Code generated by me DO NOT EDIT. // This package is intentend for platforms that do not offer fuse. // It rebuilds the same API as the rest of the package with stubs. package fuse import ( "errors" "github.com/sahib/brig/catfs" "github.com/sahib/config" ) type Notifier interface { PublishEvent() } var ErrCompiledWithoutFuse = errors.New("brig was compiled without fuse support") type MountOptions struct { ReadOnly bool Root string } type Mount struct { Dir string } func NewMount(cfs *catfs.FS, mountpoint string, opts MountOptions) (*Mount, error) { return nil, ErrCompiledWithoutFuse } func (m *Mount) EqualOptions(opts MountOptions) bool { return false } func (m *Mount) Close() error { return ErrCompiledWithoutFuse } type MountTable struct{} func NewMountTable(fs *catfs.FS, notifier Notifier) *MountTable { return nil } func (t *MountTable) AddMount(path string, opts MountOptions) (*Mount, error) { return nil, ErrCompiledWithoutFuse } func (t *MountTable) Unmount(path string) error { return ErrCompiledWithoutFuse } func (t *MountTable) Close() error { return ErrCompiledWithoutFuse } type FsTabEntry struct { Name string Path string Root string Active bool ReadOnly bool } func FsTabAdd(cfg *config.Config, name, path string, opts MountOptions) error { return ErrCompiledWithoutFuse } func FsTabRemove(cfg *config.Config, name string) error { return ErrCompiledWithoutFuse } func FsTabUnmountAll(cfg *config.Config, mounts *MountTable) error { return ErrCompiledWithoutFuse } func FsTabApply(cfg *config.Config, 
mounts *MountTable) error { return ErrCompiledWithoutFuse } func FsTabList(cfg *config.Config, mounts *MountTable) ([]FsTabEntry, error) { return nil, ErrCompiledWithoutFuse } ================================================ FILE: fuse/util.go ================================================ // +build !windows package fuse import ( "fmt" "time" "bazil.org/fuse" "github.com/sahib/brig/catfs" ie "github.com/sahib/brig/catfs/errors" "github.com/sahib/brig/repo/hints" log "github.com/sirupsen/logrus" ) func errorize(name string, err error) error { if ie.IsNoSuchFileError(err) { log.Infof("errorize: %s: No such file: %v", name, err) return fuse.ENOENT } if err != nil { log.Warningf("fuse: %s: %v", name, err) return fuse.EIO } return nil } // logPanic logs any panics by being called in a defer. // A rather inconvenient behaviour of fuse is to not report panics. func logPanic(name string) { if err := recover(); err != nil { log.Errorf("bug: %s panicked: %v", name, err) } } type xattrHandler struct { get func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) set func(cfs *catfs.FS, path string, value []byte) error } var xattrMap = map[string]xattrHandler{ "user.brig.hash.content": { get: func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) { return []byte(info.ContentHash.B58String()), nil }, }, "user.brig.hash.tree": { get: func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) { return []byte(info.TreeHash.B58String()), nil }, }, "user.brig.hash.backend": { get: func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) { return []byte(info.BackendHash.B58String()), nil }, }, "user.brig.pinned": { get: func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) { if info.IsPinned { return []byte("yes"), nil } return []byte("no"), nil }, }, "user.brig.explicitly_pinned": { get: func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) { if info.IsExplicit { return []byte("yes"), nil } return []byte("no"), nil }, }, "user.brig.hints.encryption": { get: func(cfs 
*catfs.FS, info *catfs.StatInfo) ([]byte, error) { return []byte(cfs.Hints().Lookup(info.Path).EncryptionAlgo), nil }, set: func(cfs *catfs.FS, path string, val []byte) error { hint := cfs.Hints().Lookup(path) hint.EncryptionAlgo = hints.EncryptionHint(val) if !hint.IsValid() { return fmt.Errorf("bad encryption algorithm: %s", string(val)) } return cfs.Hints().Set(path, hint) }, }, "user.brig.hints.compression": { get: func(cfs *catfs.FS, info *catfs.StatInfo) ([]byte, error) { return []byte(cfs.Hints().Lookup(info.Path).CompressionAlgo), nil }, set: func(cfs *catfs.FS, path string, val []byte) error { hint := cfs.Hints().Lookup(path) hint.CompressionAlgo = hints.CompressionHint(val) if !hint.IsValid() { return fmt.Errorf("bad compression algorithm: %s", string(val)) } return cfs.Hints().Set(path, hint) }, }, } func listXattr() []byte { resp := []byte{} for k := range xattrMap { resp = append(resp, k...) resp = append(resp, '\x00') } return resp } func getXattr(cfs *catfs.FS, name, path string) ([]byte, error) { handler, ok := xattrMap[name] if !ok || handler.get == nil { return nil, fuse.ErrNoXattr } info, err := cfs.Stat(path) if err != nil { return nil, errorize("getxattr", err) } return handler.get(cfs, info) } func setXattr(cfs *catfs.FS, name, path string, val []byte) error { handler, ok := xattrMap[name] if !ok || handler.set == nil { return fuse.ErrNoXattr } if err := handler.set(cfs, path, val); err != nil { return fuse.EIO } return nil } func notifyChange(m *Mount, d time.Duration) { if m.notifier == nil { // this can happen in tests. 
return } time.AfterFunc(d, m.notifier.PublishEvent) } ================================================ FILE: gateway/db/capnp/user.capnp ================================================ using Go = import "/go.capnp"; @0xa0b1c18bd0f965c4; $Go.package("capnp"); $Go.import("github.com/sahib/brig/gateway/db/capnp"); struct User { name @0 :Text; passwordHash @1 :Text; salt @2 :Text; folders @3 :List(Text); rights @4 :List(Text); } ================================================ FILE: gateway/db/capnp/user.capnp.go ================================================ // Code generated by capnpc-go. DO NOT EDIT. package capnp import ( capnp "zombiezen.com/go/capnproto2" text "zombiezen.com/go/capnproto2/encoding/text" schemas "zombiezen.com/go/capnproto2/schemas" ) type User struct{ capnp.Struct } // User_TypeID is the unique identifier for the type User. const User_TypeID = 0x861de4463c5a4a22 func NewUser(s *capnp.Segment) (User, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 5}) return User{st}, err } func NewRootUser(s *capnp.Segment) (User, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 5}) return User{st}, err } func ReadRootUser(msg *capnp.Message) (User, error) { root, err := msg.RootPtr() return User{root.Struct()}, err } func (s User) String() string { str, _ := text.Marshal(0x861de4463c5a4a22, s.Struct) return str } func (s User) Name() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } func (s User) HasName() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } func (s User) NameBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } func (s User) SetName(v string) error { return s.Struct.SetText(0, v) } func (s User) PasswordHash() (string, error) { p, err := s.Struct.Ptr(1) return p.Text(), err } func (s User) HasPasswordHash() bool { p, err := s.Struct.Ptr(1) return p.IsValid() || err != nil } func (s User) PasswordHashBytes() 
([]byte, error) { p, err := s.Struct.Ptr(1) return p.TextBytes(), err } func (s User) SetPasswordHash(v string) error { return s.Struct.SetText(1, v) } func (s User) Salt() (string, error) { p, err := s.Struct.Ptr(2) return p.Text(), err } func (s User) HasSalt() bool { p, err := s.Struct.Ptr(2) return p.IsValid() || err != nil } func (s User) SaltBytes() ([]byte, error) { p, err := s.Struct.Ptr(2) return p.TextBytes(), err } func (s User) SetSalt(v string) error { return s.Struct.SetText(2, v) } func (s User) Folders() (capnp.TextList, error) { p, err := s.Struct.Ptr(3) return capnp.TextList{List: p.List()}, err } func (s User) HasFolders() bool { p, err := s.Struct.Ptr(3) return p.IsValid() || err != nil } func (s User) SetFolders(v capnp.TextList) error { return s.Struct.SetPtr(3, v.List.ToPtr()) } // NewFolders sets the folders field to a newly // allocated capnp.TextList, preferring placement in s's segment. func (s User) NewFolders(n int32) (capnp.TextList, error) { l, err := capnp.NewTextList(s.Struct.Segment(), n) if err != nil { return capnp.TextList{}, err } err = s.Struct.SetPtr(3, l.List.ToPtr()) return l, err } func (s User) Rights() (capnp.TextList, error) { p, err := s.Struct.Ptr(4) return capnp.TextList{List: p.List()}, err } func (s User) HasRights() bool { p, err := s.Struct.Ptr(4) return p.IsValid() || err != nil } func (s User) SetRights(v capnp.TextList) error { return s.Struct.SetPtr(4, v.List.ToPtr()) } // NewRights sets the rights field to a newly // allocated capnp.TextList, preferring placement in s's segment. func (s User) NewRights(n int32) (capnp.TextList, error) { l, err := capnp.NewTextList(s.Struct.Segment(), n) if err != nil { return capnp.TextList{}, err } err = s.Struct.SetPtr(4, l.List.ToPtr()) return l, err } // User_List is a list of User. type User_List struct{ capnp.List } // NewUser creates a new list of User. 
func NewUser_List(s *capnp.Segment, sz int32) (User_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 5}, sz) return User_List{l}, err } func (s User_List) At(i int) User { return User{s.List.Struct(i)} } func (s User_List) Set(i int, v User) error { return s.List.SetStruct(i, v.Struct) } func (s User_List) String() string { str, _ := text.MarshalList(0x861de4463c5a4a22, s.List) return str } // User_Promise is a wrapper for a User promised by a client call. type User_Promise struct{ *capnp.Pipeline } func (p User_Promise) Struct() (User, error) { s, err := p.Pipeline.Struct() return User{s}, err } const schema_a0b1c18bd0f965c4 = "x\xda\\\xca1J\x03A\x18\xc5\xf1\xf7ff\x15$" + "$\x0e\xac\x9d\"\x0a6\x16\x06\xdb (\x16\"V\xf9" + "\x0a\x1b\x1b\x19\xdd11\xc4d\xd9Y\x89\x16\xa2B\x10" + "E\x05\x0f`\xa1\xe0\x05\xecl\x05\x0f\xe0!\xbc\x84\xd5" + "\xca\x06\xd2\xd8\xfd\xdf\x8f7\xdd\xd9P6\xda\x07\xc4D" + "\x13\xc5\xe2\xce\xde\xda\xd6\xcf\xdc\x0d\xec,\x8b/\xff\xfb" + "}\xff\xf9\xfe\x82(\x9a\x04Vg\xa6h\x97\xcaX\x98" + "'X\xb4\\\xee\x07\xee\xbc\xae\x93\x83\xfa\xa1K{i" + "\xfd4\xf8le\x94\x8d\xdd\xe03\xa0IJ\xac\x0d`" + "\x08\xd8\x8be@\xce4e\xa8h\xc9\x98%^w\x00" + "\xb9\xd2\x94\x07E\xabTL\x05\xd8\xbb\xf29\xd4\x94'" + "E\xabuL\x0d\xd8\xc7M@n5\xe5M\xd1\x1a\x13" + "\xd3\x00\xf6\xb5\x01\xc8\xb3\xa6|(\xd6z\xee\xc4\xb3\x02" + "\xc5\x0aX\xa4.\x84A?KP\xdbv\xa1=\xe6Z" + "p\xdd|<.\x8f\xfa\xdd\xc4g\x81U\xb0\xa99\xe2" + "*\xb8\x9e\x1d\xb7\xda\xf9\x7f\xfd\x0b\x00\x00\xff\xff\x84\x94" + ":\x18" func init() { schemas.Register(schema_a0b1c18bd0f965c4, 0x861de4463c5a4a22) } ================================================ FILE: gateway/db/db.go ================================================ package db import ( "crypto/rand" "crypto/subtle" "encoding/base64" "fmt" "sync" "sync/atomic" "time" badger "github.com/dgraph-io/badger/v3" capnp "github.com/sahib/brig/gateway/db/capnp" "github.com/sahib/brig/util" log "github.com/sirupsen/logrus" capnp_lib 
"zombiezen.com/go/capnproto2" ) const ( // RightDownload refers to the right to download/view a file. RightDownload = "fs.download" // RightFsView refers to the right to view everything related to // the filesystem and history. RightFsView = "fs.view" // RightFsEdit refers to the right to edit the filesystem. // This includes pinning. RightFsEdit = "fs.edit" // RightRemotesView is the right to view the remote list. RightRemotesView = "remotes.view" // RightRemotesEdit is the right to edit the remote list. RightRemotesEdit = "remotes.edit" ) var ( // DefaultRights is a list of rights that users will get // if no other explicit rights are given. They are identical // to the admin role currently. DefaultRights = []string{ RightDownload, RightFsView, RightFsEdit, RightRemotesView, RightRemotesEdit, } // AllRights is a map that can be quickly used to check // if a right is valid or not. AllRights = map[string]bool{ RightDownload: true, RightFsView: true, RightFsEdit: true, RightRemotesView: true, RightRemotesEdit: true, } ) // UserDatabase is a badger db that stores user information, // using the user name as unique key. type UserDatabase struct { isStopped int64 mu sync.Mutex db *badger.DB gcTicker *time.Ticker } // NewUserDatabase creates a new UserDatabase at `path` or loads // an existing one. func NewUserDatabase(path string) (*UserDatabase, error) { opts := badger.DefaultOptions(path). WithValueLogFileSize(10 * 1024 * 1024). //default is 2GB we should not need 2GB WithMemTableSize(10 * 1024 * 1024). //default is 64MB WithSyncWrites(false). 
WithLogger(nil) db, err := badger.Open(opts) if err != nil { return nil, err } gcTicker := time.NewTicker(5 * time.Minute) udb := &UserDatabase{db: db, gcTicker: gcTicker} go func() { for range gcTicker.C { if atomic.LoadInt64(&udb.isStopped) > 0 { return } if err := db.RunValueLogGC(0.5); err != nil { if err != badger.ErrNoRewrite { log.WithError(err).Warnf("badger gc failed") } } } }() return udb, nil } // Close cleans up all the resources used by a badger db. func (ub *UserDatabase) Close() error { ub.mu.Lock() defer ub.mu.Unlock() ub.gcTicker.Stop() atomic.StoreInt64(&ub.isStopped, 1) if err := ub.db.Close(); err != nil { return err } ub.db = nil return nil } func unmarshalUser(data []byte) (*User, error) { msg, err := capnp_lib.Unmarshal(data) if err != nil { return nil, err } capUser, err := capnp.ReadRootUser(msg) if err != nil { return nil, err } return UserFromCapnp(capUser) } // UserFromCapnp takes a capnp.user and returns a regular User from it. func UserFromCapnp(capUser capnp.User) (*User, error) { capFolders, err := capUser.Folders() if err != nil { return nil, err } folders := []string{} for idx := 0; idx < capFolders.Len(); idx++ { folder, err := capFolders.At(idx) if err != nil { return nil, err } folders = append(folders, folder) } capRights, err := capUser.Rights() if err != nil { return nil, err } rights := []string{} for idx := 0; idx < capRights.Len(); idx++ { right, err := capRights.At(idx) if err != nil { return nil, err } rights = append(rights, right) } name, err := capUser.Name() if err != nil { return nil, err } passwordHash, err := capUser.PasswordHash() if err != nil { return nil, err } salt, err := capUser.Salt() if err != nil { return nil, err } return &User{ Name: name, PasswordHash: passwordHash, Salt: salt, Folders: folders, Rights: rights, }, nil } func marshalUser(user *User) ([]byte, error) { msg, seg, err := capnp_lib.NewMessage(capnp_lib.SingleSegment(nil)) if err != nil { return nil, err } if _, err := UserToCapnp(user, 
seg); err != nil {
		return nil, err
	}

	return msg.Marshal()
}

// UserToCapnp converts a User to a capnp.User.
func UserToCapnp(user *User, seg *capnp_lib.Segment) (*capnp.User, error) {
	capUser, err := capnp.NewRootUser(seg)
	if err != nil {
		return nil, err
	}

	capFolders, err := capnp_lib.NewTextList(seg, int32(len(user.Folders)))
	if err != nil {
		return nil, err
	}

	for i, folder := range user.Folders {
		if err := capFolders.Set(i, folder); err != nil {
			return nil, err
		}
	}

	if err := capUser.SetFolders(capFolders); err != nil {
		return nil, err
	}

	capRights, err := capnp_lib.NewTextList(seg, int32(len(user.Rights)))
	if err != nil {
		return nil, err
	}

	for i, right := range user.Rights {
		if err := capRights.Set(i, right); err != nil {
			return nil, err
		}
	}

	if err := capUser.SetRights(capRights); err != nil {
		return nil, err
	}

	if err := capUser.SetName(user.Name); err != nil {
		return nil, err
	}

	if err := capUser.SetPasswordHash(user.PasswordHash); err != nil {
		return nil, err
	}

	if err := capUser.SetSalt(user.Salt); err != nil {
		return nil, err
	}

	return &capUser, nil
}

// User is one user that is stored in the database.
// The passwords are stored as scrypt hash with added salt.
type User struct {
	Name         string
	PasswordHash string
	Salt         string
	Folders      []string
	Rights       []string
}

// CheckPassword checks if `password` matches the stored one.
// The comparison is done in constant time to avoid timing side channels.
func (u User) CheckPassword(password string) (bool, error) {
	salt, err := base64.StdEncoding.DecodeString(u.Salt)
	if err != nil {
		return false, err
	}

	storedHash, err := base64.StdEncoding.DecodeString(u.PasswordHash)
	if err != nil {
		return false, err
	}

	computedHash := util.DeriveKey([]byte(password), salt, 32)
	return subtle.ConstantTimeCompare(storedHash, computedHash) == 1, nil
}

// HashPassword creates a new hash and salt from a password.
// Both return values are base64 encoded.
func HashPassword(password string) (string, string, error) {
	// Read a new salt from a random source.
	// 8 bytes are considered enough by the scrypt documentation.
	salt := make([]byte, 8)
	n, err := rand.Read(salt)
	if err != nil {
		return "", "", err
	}

	if n != 8 {
		return "", "", fmt.Errorf("did not read enough random bytes")
	}

	// Derive the actual hash and encode it to base64.
	hash := util.DeriveKey([]byte(password), salt, 32)
	encode := base64.StdEncoding.EncodeToString
	return encode(hash), encode(salt), nil
}

// Add adds a new user to the database.
// If the user exists already, it is overwritten.
func (ub *UserDatabase) Add(name, password string, folders []string, rights []string) error {
	ub.mu.Lock()
	defer ub.mu.Unlock()

	// Fall back to sensible defaults when nothing explicit was given:
	if len(folders) == 0 {
		folders = []string{"/"}
	}

	if len(rights) == 0 {
		rights = DefaultRights
	}

	for _, r := range rights {
		if !AllRights[r] {
			return fmt.Errorf("invalid right: %s", r)
		}
	}

	pwHash, pwSalt, err := HashPassword(password)
	if err != nil {
		return err
	}

	data, err := marshalUser(&User{
		Name:         name,
		PasswordHash: pwHash,
		Salt:         pwSalt,
		Folders:      folders,
		Rights:       rights,
	})
	if err != nil {
		return err
	}

	return ub.db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte(name), data)
	})
}

// Get returns a User, if it exists. If it does not exist,
// an error will be returned.
func (ub *UserDatabase) Get(name string) (User, error) {
	ub.mu.Lock()
	defer ub.mu.Unlock()

	user := User{}
	return user, ub.db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte(name))
		if err != nil {
			return err
		}

		return item.Value(func(data []byte) error {
			decUser, err := unmarshalUser(data)
			if err != nil {
				return err
			}

			user = *decUser
			return nil
		})
	})
}

// Remove removes an existing user.
func (ub *UserDatabase) Remove(name string) error {
	ub.mu.Lock()
	defer ub.mu.Unlock()

	return ub.db.Update(func(txn *badger.Txn) error {
		// Make sure to error out if the key did not exist:
		if _, err := txn.Get([]byte(name)); err != nil {
			return err
		}

		return txn.Delete([]byte(name))
	})
}

// List returns all users currently in the database.
func (ub *UserDatabase) List() ([]User, error) { ub.mu.Lock() defer ub.mu.Unlock() users := []User{} return users, ub.db.View(func(txn *badger.Txn) error { iter := txn.NewIterator(badger.IteratorOptions{}) defer iter.Close() for iter.Rewind(); iter.Valid(); iter.Next() { err := iter.Item().Value(func(data []byte) error { user, err := unmarshalUser(data) if err != nil { return err } users = append(users, *user) return nil }) if err != nil { return err } } return nil }) } ================================================ FILE: gateway/db/db_test.go ================================================ package db import ( "io/ioutil" "os" "testing" "github.com/stretchr/testify/require" ) func withDummyDb(t *testing.T, fn func(db *UserDatabase)) { tmpPath, err := ioutil.TempDir("", "brig-gw-userdb") require.Nil(t, err) defer os.RemoveAll(tmpPath) userDb, err := NewUserDatabase(tmpPath) require.Nil(t, err) fn(userDb) require.Nil(t, userDb.Close()) } func TestAddGet(t *testing.T) { withDummyDb(t, func(db *UserDatabase) { require.Nil(t, db.Add("hello", "world", []string{"/"}, []string{"fs.view"})) user, err := db.Get("hello") require.Nil(t, err) require.Equal(t, "hello", user.Name) require.NotEmpty(t, user.PasswordHash) require.NotEmpty(t, user.Salt) require.Equal(t, []string{"/"}, user.Folders) require.Equal(t, []string{"fs.view"}, user.Rights) }) } ================================================ FILE: gateway/elm/.gitignore ================================================ elm-stuff ================================================ FILE: gateway/elm/Makefile ================================================ SOURCES=$(shell find . 
-iname '*.elm') all: build minify release: release-build minify build: @elm make ${SOURCES} --output ../static/js/app.js release-build: @elm make ${SOURCES} --output ../static/js/app.js --optimize minify: uglifyjs ../static/js/app.js --compress 'pure_funcs="F2,F3,F4,F5,F6,F7,F8,F9,A2,A3,A4,A5,A6,A7,A8,A9",pure_getters,keep_fargs=false,unsafe_comps,unsafe' | uglifyjs --mangle --output=../static/js/app.min.js mv ../static/js/app.min.js ../static/js/app.js ================================================ FILE: gateway/elm/elm.json ================================================ { "type": "application", "source-directories": [ "src" ], "elm-version": "0.19.0", "dependencies": { "direct": { "NoRedInk/elm-json-decode-pipeline": "1.0.0", "andrewMacmurray/elm-delay": "3.0.0", "basti1302/elm-human-readable-filesize": "1.2.0", "elm/browser": "1.0.0", "elm/core": "1.0.2", "elm/file": "1.0.1", "elm/html": "1.0.0", "elm/http": "2.0.0", "elm/json": "1.1.0", "elm/time": "1.0.0", "elm/url": "1.0.0", "elm-community/list-extra": "8.1.0", "jweir/elm-iso8601": "5.0.2", "rundis/elm-bootstrap": "5.0.0" }, "indirect": { "avh4/elm-color": "1.0.0", "elm/bytes": "1.0.7", "elm/regex": "1.0.0", "elm/virtual-dom": "1.0.0", "myrho/elm-round": "1.0.4" } }, "test-dependencies": { "direct": {}, "indirect": {} } } ================================================ FILE: gateway/elm/src/Clipboard.elm ================================================ port module Clipboard exposing (copyToClipboard) import Json.Encode as E port copyToClipboard : E.Value -> Cmd msg ================================================ FILE: gateway/elm/src/Commands.elm ================================================ module Commands exposing ( Commit , Diff , DiffPair , Entry , Folder , HistoryEntry , ListResponse , Log , LoginResponse , Remote , SelfResponse , WhoamiResponse , diffChangeCount , doCopy , doDeletedFiles , doHistory , doListAllDirs , doListQuery , doLog , doLogin , doLogout , doMkdir , doMove , doPin , 
doRemoteAdd , doRemoteDiff , doRemoteList , doRemoteModify , doRemoteRemove , doRemoteSync , doRemove , doReset , doSelfQuery , doUndelete , doUnpin , doUpload , doWhoami , emptyRemote , emptySelf ) import Bootstrap.Dropdown as Dropdown import File import Http import ISO8601 import Json.Decode as D import Json.Decode.Pipeline as DP import Json.Encode as E import Time import Url import Util -- REMOVE type alias RemoveQuery = { paths : List String } encodeRemoveQuery : RemoveQuery -> E.Value encodeRemoveQuery q = E.object [ ( "paths", E.list E.string q.paths ) ] decodeRemoveResponse : D.Decoder String decodeRemoveResponse = D.field "message" D.string doRemove : (Result Http.Error String -> msg) -> List String -> Cmd msg doRemove toMsg paths = Http.post { url = "/api/v0/remove" , body = Http.jsonBody <| encodeRemoveQuery <| RemoveQuery paths , expect = Http.expectJson toMsg decodeRemoveResponse } -- HISTORY timestampToPosix : D.Decoder Time.Posix timestampToPosix = D.int |> D.andThen (\ms -> D.succeed <| Time.millisToPosix ms) iso8601ToPosix : D.Decoder Time.Posix iso8601ToPosix = D.string |> D.andThen (\stamp -> case ISO8601.fromString stamp of Ok time -> D.succeed <| ISO8601.toPosix time Err msg -> D.fail msg ) type alias HistoryQuery = { path : String } type alias Commit = { date : Time.Posix , msg : String , tags : List String , hash : String , index : Int } type alias HistoryEntry = { head : Commit , path : String , change : String , isPinned : Bool , isExplicit : Bool } encodeHistoryQuery : HistoryQuery -> E.Value encodeHistoryQuery q = E.object [ ( "path", E.string q.path ) ] decodeCommit : D.Decoder Commit decodeCommit = D.succeed Commit |> DP.required "date" timestampToPosix |> DP.required "msg" D.string |> DP.required "tags" (D.list D.string) |> DP.required "hash" D.string |> DP.required "index" D.int decodeHistoryEntry : D.Decoder HistoryEntry decodeHistoryEntry = D.succeed HistoryEntry |> DP.required "head" decodeCommit |> DP.required "path" D.string |> 
DP.required "change" D.string |> DP.required "is_pinned" D.bool |> DP.required "is_explicit" D.bool decodeHistory : D.Decoder (List HistoryEntry) decodeHistory = D.field "entries" (D.list decodeHistoryEntry) doHistory : (Result Http.Error (List HistoryEntry) -> msg) -> String -> Cmd msg doHistory toMsg path = Http.post { url = "/api/v0/history" , body = Http.jsonBody <| encodeHistoryQuery <| HistoryQuery path , expect = Http.expectJson toMsg decodeHistory } -- RESET type alias ResetQuery = { path : String , revision : String , force : Bool } encodeResetQuery : ResetQuery -> E.Value encodeResetQuery q = E.object [ ( "path", E.string q.path ) , ( "revision", E.string q.revision ) ] decodeResetQuery : D.Decoder String decodeResetQuery = D.field "message" D.string doReset : (Result Http.Error String -> msg) -> String -> String -> Cmd msg doReset toMsg path revision = Http.post { url = "/api/v0/reset" , body = Http.jsonBody <| encodeResetQuery <| ResetQuery path revision True , expect = Http.expectJson toMsg decodeResetQuery } -- MOVE type alias MoveQuery = { sourcePath : String , destinationPath : String } encodeMoveQuery : MoveQuery -> E.Value encodeMoveQuery q = E.object [ ( "source", E.string <| Util.prefixSlash q.sourcePath ) , ( "destination", E.string <| Util.prefixSlash q.destinationPath ) ] decodeMoveResponse : D.Decoder String decodeMoveResponse = D.field "message" D.string doMove : (Result Http.Error String -> msg) -> String -> String -> Cmd msg doMove toMsg src dst = Http.post { url = "/api/v0/move" , body = Http.jsonBody <| encodeMoveQuery <| MoveQuery src dst , expect = Http.expectJson toMsg decodeMoveResponse } -- COPY type alias CopyQuery = { sourcePath : String , destinationPath : String } encodeCopyQuery : CopyQuery -> E.Value encodeCopyQuery q = E.object [ ( "source", E.string <| Util.prefixSlash q.sourcePath ) , ( "destination", E.string <| Util.prefixSlash q.destinationPath ) ] decodeCopyResponse : D.Decoder String decodeCopyResponse = D.field 
"message" D.string doCopy : (Result Http.Error String -> msg) -> String -> String -> Cmd msg doCopy toMsg src dst = Http.post { url = "/api/v0/copy" , body = Http.jsonBody <| encodeCopyQuery <| CopyQuery src dst , expect = Http.expectJson toMsg decodeCopyResponse } -- ALL DIRS decodeAllDirsResponse : D.Decoder (List String) decodeAllDirsResponse = D.field "paths" (D.list D.string) doListAllDirs : (Result Http.Error (List String) -> msg) -> Cmd msg doListAllDirs toMsg = Http.post { url = "/api/v0/all-dirs" , body = Http.emptyBody , expect = Http.expectJson toMsg decodeAllDirsResponse } -- LIST type alias ListQuery = { root : String , filter : String } type alias Entry = { dropdown : Dropdown.State , path : String , user : String , size : Int , inode : Int , depth : Int , lastModified : Time.Posix , isDir : Bool , isPinned : Bool , isExplicit : Bool } type alias ListResponse = { self : Entry , isFiltered : Bool , entries : List Entry } encodeListResponse : ListQuery -> E.Value encodeListResponse q = E.object [ ( "root", E.string q.root ) , ( "filter", E.string q.filter ) ] decodeListResponse : D.Decoder ListResponse decodeListResponse = D.map3 ListResponse (D.field "self" decodeEntry) (D.field "is_filtered" D.bool) (D.field "files" (D.list decodeEntry)) decodeEntry : D.Decoder Entry decodeEntry = D.succeed (Entry Dropdown.initialState) |> DP.required "path" D.string |> DP.required "user" D.string |> DP.required "size" D.int |> DP.required "inode" D.int |> DP.required "depth" D.int |> DP.required "last_modified_ms" timestampToPosix |> DP.required "is_dir" D.bool |> DP.required "is_pinned" D.bool |> DP.required "is_explicit" D.bool doListQuery : (Result Http.Error ListResponse -> msg) -> String -> String -> Cmd msg doListQuery toMsg path filter = Http.post { url = "/api/v0/ls" , body = Http.jsonBody <| encodeListResponse <| ListQuery path filter , expect = Http.expectJson toMsg decodeListResponse } -- UPLOAD doUpload : (String -> Result Http.Error () -> msg) -> String 
-> File.File -> Cmd msg doUpload toMsg destPath file = Http.request { method = "POST" , url = "/api/v0/upload?root=" ++ Url.percentEncode destPath , headers = [] , body = Http.multipartBody [ Http.filePart "files[]" file ] , expect = Http.expectWhatever (toMsg (File.name file)) , timeout = Nothing , tracker = Just ("upload-" ++ File.name file) } -- MKDIR type alias MkdirQuery = { path : String } encodeMkdirQuery : MkdirQuery -> E.Value encodeMkdirQuery q = E.object [ ( "path", E.string q.path ) ] decodeMkdirResponse : D.Decoder String decodeMkdirResponse = D.field "message" D.string doMkdir : (Result Http.Error String -> msg) -> String -> Cmd msg doMkdir toMsg path = Http.post { url = "/api/v0/mkdir" , body = Http.jsonBody <| encodeMkdirQuery <| MkdirQuery path , expect = Http.expectJson toMsg decodeMkdirResponse } -- LOGIN type alias LoginQuery = { username : String , password : String } type alias LoginResponse = { username : String , rights : List String , isAnon : Bool , anonIsAllowed : Bool } encodeLoginQuery : LoginQuery -> E.Value encodeLoginQuery q = E.object [ ( "username", E.string q.username ) , ( "password", E.string q.password ) ] decodeLoginResponse : D.Decoder LoginResponse decodeLoginResponse = D.map4 LoginResponse (D.field "username" D.string) (D.field "rights" (D.list D.string)) (D.field "is_anon" D.bool) (D.field "anon_is_allowed" D.bool) doLogin : (Result Http.Error LoginResponse -> msg) -> String -> String -> Cmd msg doLogin toMsg user pass = Http.post { url = "/api/v0/login" , body = Http.jsonBody <| encodeLoginQuery <| LoginQuery user pass , expect = Http.expectJson toMsg decodeLoginResponse } -- LOGOUT QUERY doLogout : (Result Http.Error Bool -> msg) -> Cmd msg doLogout msg = Http.post { url = "/api/v0/logout" , body = Http.emptyBody , expect = Http.expectJson msg (D.field "success" D.bool) } -- WHOAMI QUERY type alias WhoamiResponse = { username : String , isLoggedIn : Bool , isAnon : Bool , anonIsAllowed : Bool , rights : List String } 
decodeWhoami : D.Decoder WhoamiResponse decodeWhoami = D.map5 WhoamiResponse (D.field "user" D.string) (D.field "is_logged_in" D.bool) (D.field "is_anon" D.bool) (D.field "anon_is_allowed" D.bool) (D.field "rights" (D.list D.string)) doWhoami : (Result Http.Error WhoamiResponse -> msg) -> Cmd msg doWhoami msg = Http.post { url = "/api/v0/whoami" , body = Http.emptyBody , expect = Http.expectJson msg decodeWhoami } -- LOG type alias LogQuery = { offset : Int , limit : Int , filter : String } type alias Log = { haveStagedChanges : Bool , commits : List Commit } encodeLog : LogQuery -> E.Value encodeLog q = E.object [ ( "offset", E.int q.offset ) , ( "limit", E.int q.limit ) , ( "filter", E.string q.filter ) ] decodeLog : D.Decoder Log decodeLog = D.map2 Log (D.field "have_staged_changes" D.bool) (D.field "commits" (D.list decodeCommit)) doLog : (Result Http.Error Log -> msg) -> Int -> Int -> String -> Cmd msg doLog msg offset limit filter = Http.post { url = "/api/v0/log" , body = Http.jsonBody <| encodeLog <| LogQuery offset limit filter , expect = Http.expectJson msg decodeLog } -- DELETED FILES type alias DeletedFilesQuery = { offset : Int , limit : Int , filter : String } encodeDeletedFiles : DeletedFilesQuery -> E.Value encodeDeletedFiles q = E.object [ ( "offset", E.int q.offset ) , ( "limit", E.int q.limit ) , ( "filter", E.string q.filter ) ] decodeDeletedFiles : D.Decoder (List Entry) decodeDeletedFiles = D.field "entries" (D.list decodeEntry) doDeletedFiles : (Result Http.Error (List Entry) -> msg) -> Int -> Int -> String -> Cmd msg doDeletedFiles msg offset limit filter = Http.post { url = "/api/v0/deleted" , body = Http.jsonBody <| encodeDeletedFiles <| DeletedFilesQuery offset limit filter , expect = Http.expectJson msg decodeDeletedFiles } -- UNDELETE type alias UndeleteQuery = { path : String } encodeUndeleteQuery : UndeleteQuery -> E.Value encodeUndeleteQuery q = E.object [ ( "path", E.string q.path ) ] decodeUndeleteResponse : D.Decoder String 
decodeUndeleteResponse = D.field "message" D.string doUndelete : (Result Http.Error String -> msg) -> String -> Cmd msg doUndelete toMsg path = Http.post { url = "/api/v0/undelete" , body = Http.jsonBody <| encodeUndeleteQuery <| UndeleteQuery path , expect = Http.expectJson toMsg decodeUndeleteResponse } -- REMOTE LIST type alias Folder = { folder : String , readOnly : Bool , conflictStrategy : String } type alias Remote = { name : String , folders : List Folder , fingerprint : String , acceptAutoUpdates : Bool , isOnline : Bool , isAuthenticated : Bool , lastSeen : Time.Posix , acceptPush : Bool , conflictStrategy : String } emptyRemote : Remote emptyRemote = { name = "" , folders = [] , fingerprint = "" , acceptAutoUpdates = False , isOnline = False , isAuthenticated = False , lastSeen = Time.millisToPosix 0 , conflictStrategy = "" , acceptPush = False } decodeRemoteListResponse : D.Decoder (List Remote) decodeRemoteListResponse = D.field "remotes" (D.list decodeRemote) decodeRemote : D.Decoder Remote decodeRemote = D.succeed Remote |> DP.required "name" D.string |> DP.required "folders" (D.oneOf [ D.list decodeFolder, D.null [] ]) |> DP.required "fingerprint" D.string |> DP.required "accept_auto_updates" D.bool |> DP.required "is_online" D.bool |> DP.required "is_authenticated" D.bool |> DP.required "last_seen" iso8601ToPosix |> DP.required "accept_push" D.bool |> DP.required "conflict_strategy" D.string decodeFolder : D.Decoder Folder decodeFolder = D.succeed Folder |> DP.required "folder" D.string |> DP.required "read_only" D.bool |> DP.required "conflict_strategy" D.string doRemoteList : (Result Http.Error (List Remote) -> msg) -> Cmd msg doRemoteList toMsg = Http.post { url = "/api/v0/remotes/list" , body = Http.emptyBody , expect = Http.expectJson toMsg decodeRemoteListResponse } -- REMOTE REMOVE type alias RemoteRemoveQuery = { name : String } encodeRemoteRemoveQuery : RemoteRemoveQuery -> E.Value encodeRemoteRemoveQuery q = E.object [ ( "name", E.string 
q.name ) ] decodeRemoteRemoveQuery : D.Decoder String decodeRemoteRemoveQuery = D.field "message" D.string doRemoteRemove : (Result Http.Error String -> msg) -> String -> Cmd msg doRemoteRemove toMsg name = Http.post { url = "/api/v0/remotes/remove" , body = Http.jsonBody <| encodeRemoteRemoveQuery <| RemoteRemoveQuery name , expect = Http.expectJson toMsg decodeRemoteRemoveQuery } -- REMOTE SYNC type alias RemoteSyncQuery = { name : String } encodeRemoteSyncQuery : RemoteSyncQuery -> E.Value encodeRemoteSyncQuery q = E.object [ ( "name", E.string q.name ) ] decodeRemoteSyncQuery : D.Decoder String decodeRemoteSyncQuery = D.field "message" D.string doRemoteSync : (Result Http.Error String -> msg) -> String -> Cmd msg doRemoteSync toMsg name = Http.post { url = "/api/v0/remotes/sync" , body = Http.jsonBody <| encodeRemoteSyncQuery <| RemoteSyncQuery name , expect = Http.expectJson toMsg decodeRemoteSyncQuery } -- REMOTE ADD type alias RemoteAddQuery = { name : String , fingerprint : String , folders : List Folder , doAutoUpdate : Bool , acceptPush : Bool , conflictStrategy : String } encodeFolder : Folder -> E.Value encodeFolder f = E.object [ ( "folder", E.string f.folder ) , ( "read_only", E.bool f.readOnly ) , ( "conflict_strategy", E.string f.conflictStrategy ) ] encodeRemoteAddQuery : RemoteAddQuery -> E.Value encodeRemoteAddQuery q = E.object [ ( "name", E.string q.name ) , ( "fingerprint", E.string q.fingerprint ) , ( "accept_auto_updates", E.bool q.doAutoUpdate ) , ( "folders", E.list encodeFolder q.folders ) , ( "accept_push", E.bool q.acceptPush ) , ( "conflict_strategy", E.string q.conflictStrategy ) ] decodeRemoteAddQuery : D.Decoder String decodeRemoteAddQuery = D.field "message" D.string doRemoteAdd : (Result Http.Error String -> msg) -> String -> String -> Bool -> Bool -> String -> List Folder -> Cmd msg doRemoteAdd toMsg name fingerprint doAutoUpdate acceptPush conflictStrategy folders = Http.post { url = "/api/v0/remotes/add" , body = Http.jsonBody 
<| encodeRemoteAddQuery <| { name = name , fingerprint = fingerprint , doAutoUpdate = doAutoUpdate , folders = folders , acceptPush = acceptPush , conflictStrategy = conflictStrategy } , expect = Http.expectJson toMsg decodeRemoteAddQuery } doRemoteModify : (Result Http.Error String -> msg) -> Remote -> Cmd msg doRemoteModify toMsg remote = Http.post { url = "/api/v0/remotes/modify" , body = Http.jsonBody <| encodeRemoteAddQuery <| { name = remote.name , fingerprint = remote.fingerprint , doAutoUpdate = remote.acceptAutoUpdates , folders = remote.folders , acceptPush = remote.acceptPush , conflictStrategy = remote.conflictStrategy } , expect = Http.expectJson toMsg decodeRemoteAddQuery } -- REMOTE SELF type alias SelfResponse = { self : Identity , defaultConflictStrategy : String } type alias Identity = { name : String , fingerprint : String } emptySelf : SelfResponse emptySelf = SelfResponse (Identity "" "") "marker" decodeSelfResponse : D.Decoder SelfResponse decodeSelfResponse = D.succeed SelfResponse |> DP.required "self" decodeIdentity |> DP.required "default_conflict_strategy" D.string decodeIdentity : D.Decoder Identity decodeIdentity = D.succeed Identity |> DP.required "name" D.string |> DP.required "fingerprint" D.string doSelfQuery : (Result Http.Error SelfResponse -> msg) -> Cmd msg doSelfQuery toMsg = Http.post { url = "/api/v0/remotes/self" , body = Http.emptyBody , expect = Http.expectJson toMsg decodeSelfResponse } -- REMOTE DIFF type alias DiffPair = { src : Entry , dst : Entry } type alias Diff = { added : List Entry , removed : List Entry , ignored : List Entry , missing : List Entry , moved : List DiffPair , merged : List DiffPair , conflict : List DiffPair } diffChangeCount : Diff -> Int diffChangeCount diff = List.length diff.added + List.length diff.removed + List.length diff.ignored + List.length diff.missing + List.length diff.moved + List.length diff.merged + List.length diff.conflict type alias RemoteDiffQuery = { name : String } 
-- Encode the name of the remote we want to diff against.
encodeRemoteDiffQuery : RemoteDiffQuery -> E.Value
encodeRemoteDiffQuery q =
    E.object
        [ ( "name", E.string q.name ) ]


decodeDiffPair : D.Decoder DiffPair
decodeDiffPair =
    D.map2 DiffPair
        (D.field "src" decodeEntry)
        (D.field "dst" decodeEntry)


decodeDiffResponse : D.Decoder Diff
decodeDiffResponse =
    D.field "diff" decodeDiff


decodeDiff : D.Decoder Diff
decodeDiff =
    D.succeed Diff
        |> DP.required "added" (D.list decodeEntry)
        |> DP.required "removed" (D.list decodeEntry)
        |> DP.required "ignored" (D.list decodeEntry)
        |> DP.required "missing" (D.list decodeEntry)
        |> DP.required "moved" (D.list decodeDiffPair)
        |> DP.required "merged" (D.list decodeDiffPair)
        |> DP.required "conflict" (D.list decodeDiffPair)


-- Ask the server what would change if we synced with remote `name`.
doRemoteDiff : (Result Http.Error Diff -> msg) -> String -> Cmd msg
doRemoteDiff toMsg name =
    Http.post
        { url = "/api/v0/remotes/diff"
        , body = Http.jsonBody <| encodeRemoteDiffQuery <| RemoteDiffQuery name
        , expect = Http.expectJson toMsg decodeDiffResponse
        }



-- PIN


type alias PinQuery =
    { path : String
    , revision : String
    }


encodePinQuery : PinQuery -> E.Value
encodePinQuery q =
    E.object
        [ ( "path", E.string q.path )
        , ( "revision", E.string q.revision )
        ]


decodePinResponse : D.Decoder String
decodePinResponse =
    D.field "message" D.string


-- Shared implementation of doPin/doUnpin; the two only differ in the
-- endpoint they POST to, so keep the request shape in one place.
doPinOp : String -> (Result Http.Error String -> msg) -> String -> String -> Cmd msg
doPinOp url toMsg path revision =
    Http.post
        { url = url
        , body = Http.jsonBody <| encodePinQuery <| PinQuery path revision
        , expect = Http.expectJson toMsg decodePinResponse
        }


-- Pin `path` at `revision` so the backend keeps its data around.
doPin : (Result Http.Error String -> msg) -> String -> String -> Cmd msg
doPin =
    doPinOp "/api/v0/pin"


-- Undo a previous pin of `path` at `revision`.
doUnpin : (Result Http.Error String -> msg) -> String -> String -> Cmd msg
doUnpin =
    doPinOp "/api/v0/unpin"


================================================
FILE: gateway/elm/src/Main.elm
================================================
module Main exposing (init, main, subscriptions, update, view)

import Bootstrap.Alert as Alert
import
Bootstrap.Button as Button import Bootstrap.Form as Form import Bootstrap.Form.Input as Input import Bootstrap.Grid as Grid import Bootstrap.Grid.Col as Col import Bootstrap.Text as Text import Browser import Browser.Navigation as Nav import Commands import Html exposing (..) import Html.Attributes exposing (..) import Html.Events exposing (..) import Html.Lazy as Lazy import Http import Json.Decode as D import List import Pinger import Routes.Commits as Commits import Routes.DeletedFiles as DeletedFiles import Routes.Diff as Diff import Routes.Ls as Ls import Routes.Remotes as Remotes import Task import Time import Url import Util exposing (..) import Websocket -- MAIN main : Program () Model Msg main = Browser.application { init = init , update = update , subscriptions = subscriptions , view = view , onUrlChange = UrlChanged , onUrlRequest = LinkClicked } -- MESSAGES type Msg = GotLoginResp (Result Http.Error Commands.LoginResponse) | GotWhoamiResp (Result Http.Error Commands.WhoamiResponse) | GotLogoutResp Bool (Result Http.Error Bool) | AdjustTimeZone Time.Zone | LinkClicked Browser.UrlRequest | UrlChanged Url.Url | UsernameInput String | PasswordInput String | LoginSubmit | LogoutSubmit Bool | GotoLogin | PingerIn String | WebsocketIn String -- View parent messages: | ListMsg Ls.Msg | CommitsMsg Commits.Msg | DeletedFilesMsg DeletedFiles.Msg | RemotesMsg Remotes.Msg | DiffMsg Diff.Msg -- MODEL type View = ViewList | ViewCommits | ViewRemotes | ViewDeletedFiles | ViewDiff | ViewNotFound type alias ViewState = { listState : Ls.Model , commitsState : Commits.Model , remoteState : Remotes.Model , deletedFilesState : DeletedFiles.Model , diffState : Diff.Model , loginName : String , currentView : View , rights : List String , isAnon : Bool , anonIsAllowed : Bool } type LoginState = LoginLimbo -- weird state where we do not know if we're logged in yet. 
| LoginReady String String | LoginLoading String String | LoginFailure String String String | LoginSuccess ViewState type alias Model = { zone : Time.Zone , key : Nav.Key , url : Url.Url , loginState : LoginState , serverIsOnline : Bool } init : () -> Url.Url -> Nav.Key -> ( Model, Cmd Msg ) init _ url key = ( { zone = Time.utc , key = key , url = url , loginState = LoginLimbo , serverIsOnline = True } , Cmd.batch [ Task.perform AdjustTimeZone Time.here , Commands.doWhoami GotWhoamiResp ] ) -- UPDATE withSubUpdate : subMsg -> (ViewState -> subModel) -> Model -> (subMsg -> Msg) -> (subMsg -> subModel -> ( subModel, Cmd subMsg )) -> (ViewState -> subModel -> ViewState) -> ( Model, Cmd Msg ) withSubUpdate subMsg subModel model msg subUpdate viewStateUpdate = case model.loginState of LoginSuccess viewState -> let ( newSubModel, newSubCmd ) = subUpdate subMsg (subModel viewState) in ( { model | loginState = LoginSuccess (viewStateUpdate viewState newSubModel) }, Cmd.map msg newSubCmd ) _ -> ( model, Cmd.none ) doInitAfterLogin : Model -> String -> List String -> Bool -> Bool -> ( Model, Cmd Msg ) doInitAfterLogin model loginName rights isAnon anonIsAllowed = let newViewState = { listState = Ls.newModel model.key model.url rights , commitsState = Commits.newModel model.url model.key model.zone rights , deletedFilesState = DeletedFiles.newModel model.url model.key model.zone rights , remoteState = Remotes.newModel model.key model.zone rights , diffState = Diff.newModel model.key model.url model.zone , loginName = loginName , currentView = viewFromUrl rights model.url , rights = rights , isAnon = isAnon , anonIsAllowed = anonIsAllowed } in ( { model | loginState = LoginSuccess newViewState } , Cmd.batch [ Cmd.map ListMsg <| Ls.doListQueryFromUrl model.url , Websocket.open () , Cmd.map DeletedFilesMsg <| DeletedFiles.reload newViewState.deletedFilesState , Cmd.map CommitsMsg <| Commits.reload newViewState.commitsState , Cmd.map RemotesMsg <| Remotes.reload , Cmd.map DiffMsg 
<| Diff.reload newViewState.diffState model.url ] ) viewFromUrl : List String -> Url.Url -> View viewFromUrl rights url = case List.head <| List.drop 1 <| String.split "/" url.path of Nothing -> ViewNotFound Just first -> case first of "view" -> ViewList "log" -> ViewCommits "remotes" -> ViewRemotes "deleted" -> ViewDeletedFiles "diff" -> ViewDiff "" -> if List.member "fs.view" rights then ViewList else ViewRemotes _ -> ViewNotFound viewToString : View -> String viewToString v = case v of ViewList -> "/view" ViewCommits -> "/log" ViewRemotes -> "/remotes" ViewDeletedFiles -> "/deleted" ViewDiff -> "/Diff" ViewNotFound -> "/nothing" eventType : String -> String eventType data = let result = D.decodeString (D.field "data" D.string) data in case result of Ok typ -> typ Err _ -> "failed" pingerMsgToBool : String -> Bool pingerMsgToBool data = let result = D.decodeString (D.field "isOnline" D.bool) data in case result of Ok typ -> typ Err _ -> False update : Msg -> Model -> ( Model, Cmd Msg ) update msg model = case msg of AdjustTimeZone newZone -> case model.loginState of LoginSuccess viewState -> ( { model | zone = newZone , loginState = LoginSuccess { viewState | listState = Ls.changeTimeZone newZone viewState.listState } } , Cmd.none ) _ -> ( { model | zone = newZone }, Cmd.none ) GotWhoamiResp result -> case result of Ok whoami -> -- Immediately hit off a list query, which will in turn populate -- the list view. Take the path from the current URL. case whoami.isLoggedIn of True -> doInitAfterLogin model whoami.username whoami.rights whoami.isAnon whoami.anonIsAllowed False -> ( { model | loginState = LoginReady "" "" }, Cmd.none ) Err _ -> ( { model | loginState = LoginReady "" "" }, Cmd.none ) GotLoginResp result -> case result of Ok response -> -- Immediately hit off a list query, which will in turn populate -- the list view. Take the path from the current URL. 
doInitAfterLogin model response.username response.rights response.isAnon response.anonIsAllowed Err err -> ( { model | loginState = LoginFailure "" "" (Util.httpErrorToString err) }, Cmd.none ) GotLogoutResp mayReloginAsAnon _ -> case model.loginState of LoginSuccess viewState -> if mayReloginAsAnon && viewState.anonIsAllowed then ( model, Commands.doWhoami GotWhoamiResp ) else ( { model | loginState = LoginReady "" "" }, Cmd.none ) _ -> ( { model | loginState = LoginReady "" "" }, Cmd.none ) LinkClicked urlRequest -> case urlRequest of Browser.Internal url -> -- Special case: /get/ requests should be routed as if we clicked an -- external link. case String.startsWith "/get" url.path of True -> ( model, Nav.load (Url.toString url) ) False -> ( model, Nav.pushUrl model.key (Url.toString { url | query = Nothing }) ) Browser.External href -> let currUrl = Url.toString model.url in case href of "" -> ( model, Cmd.none ) "#" -> ( model, Cmd.none ) _ -> ( model , if href == currUrl then Cmd.none else Nav.load href ) UrlChanged url -> case model.loginState of LoginSuccess viewState -> case viewFromUrl viewState.rights url of ViewList -> ( { model | url = url , loginState = LoginSuccess { viewState | currentView = ViewList , listState = Ls.changeUrl url viewState.listState , commitsState = Commits.updateUrl viewState.commitsState url , deletedFilesState = DeletedFiles.updateUrl viewState.deletedFilesState url , diffState = Diff.updateUrl viewState.diffState url } } , Cmd.map ListMsg <| Ls.doListQueryFromUrl url ) ViewDiff -> ( { model | url = url , loginState = LoginSuccess { viewState | currentView = ViewDiff , commitsState = Commits.updateUrl viewState.commitsState url , deletedFilesState = DeletedFiles.updateUrl viewState.deletedFilesState url , diffState = Diff.updateUrl viewState.diffState url } } , Cmd.map DiffMsg <| Diff.reload viewState.diffState url ) ViewCommits -> ( { model | url = url , loginState = LoginSuccess { viewState | currentView = ViewCommits , 
commitsState = Commits.updateUrl viewState.commitsState url , deletedFilesState = DeletedFiles.updateUrl viewState.deletedFilesState url , diffState = Diff.updateUrl viewState.diffState url } } , Cmd.map CommitsMsg <| Commits.reloadIfNeeded viewState.commitsState ) ViewDeletedFiles -> ( { model | url = url , loginState = LoginSuccess { viewState | currentView = ViewDeletedFiles , deletedFilesState = DeletedFiles.updateUrl viewState.deletedFilesState url , commitsState = Commits.updateUrl viewState.commitsState url , diffState = Diff.updateUrl viewState.diffState url } } , Cmd.map DeletedFilesMsg <| DeletedFiles.reloadIfNeeded viewState.deletedFilesState ) other -> ( { model | url = url , loginState = LoginSuccess { viewState | currentView = other , commitsState = Commits.updateUrl viewState.commitsState url , deletedFilesState = DeletedFiles.updateUrl viewState.deletedFilesState url , diffState = Diff.updateUrl viewState.diffState url } } , Cmd.none ) _ -> ( { model | url = url }, Cmd.none ) UsernameInput username -> case model.loginState of LoginReady _ password -> ( { model | loginState = LoginReady username password }, Cmd.none ) LoginFailure _ password _ -> ( { model | loginState = LoginFailure username password "" }, Cmd.none ) _ -> ( model, Cmd.none ) PasswordInput password -> case model.loginState of LoginReady username _ -> ( { model | loginState = LoginReady username password }, Cmd.none ) LoginFailure username _ _ -> ( { model | loginState = LoginFailure username password "" }, Cmd.none ) _ -> ( model, Cmd.none ) LoginSubmit -> case model.loginState of LoginReady username password -> ( { model | loginState = LoginLoading username password } , Commands.doLogin GotLoginResp username password ) LoginFailure username password _ -> ( { model | loginState = LoginLoading username password } , Commands.doLogin GotLoginResp username password ) _ -> ( model, Cmd.none ) LogoutSubmit mayReloginAsAnon -> ( model, Commands.doLogout (GotLogoutResp mayReloginAsAnon) ) 
        GotoLogin ->
            ( { model | loginState = LoginReady "" "" }, Cmd.none )

        PingerIn pingMsg ->
            -- Periodic health ping from the pinger port; keeps the
            -- online/offline marker in the sidebar up to date.
            ( { model | serverIsOnline = pingerMsgToBool pingMsg }, Cmd.none )

        WebsocketIn event ->
            -- The backend lets us know that some of the data changed.
            -- Depending on the event type these are currently either
            -- filesystem entries or remotes.
            case eventType event of
                "pin" ->
                    -- Pin state changed: refresh the file list only.
                    case model.loginState of
                        LoginSuccess viewState ->
                            ( model, Cmd.map ListMsg <| Ls.doListQueryFromUrl model.url )

                        _ ->
                            ( model, Cmd.none )

                "fs" ->
                    -- Filesystem changed: refresh list, trashbin and changelog.
                    case model.loginState of
                        LoginSuccess viewState ->
                            ( model
                            , Cmd.batch
                                [ Cmd.map ListMsg <| Ls.doListQueryFromUrl model.url
                                , Cmd.map DeletedFilesMsg <| DeletedFiles.reload viewState.deletedFilesState
                                , Cmd.map CommitsMsg <| Commits.reload viewState.commitsState
                                ]
                            )

                        _ ->
                            ( model, Cmd.none )

                "remotes" ->
                    ( model, Cmd.map RemotesMsg Remotes.reload )

                _ ->
                    -- Unknown (or undecodable) event type: ignore.
                    ( model, Cmd.none )

        -- The remaining messages are forwarded to the sub-view that owns
        -- them; withSubUpdate only acts when we are in LoginSuccess.
        ListMsg subMsg ->
            withSubUpdate subMsg .listState model ListMsg Ls.update (\viewState newSubModel -> { viewState | listState = newSubModel })

        CommitsMsg subMsg ->
            withSubUpdate subMsg .commitsState model CommitsMsg Commits.update (\viewState newSubModel -> { viewState | commitsState = newSubModel })

        DeletedFilesMsg subMsg ->
            withSubUpdate subMsg .deletedFilesState model DeletedFilesMsg DeletedFiles.update (\viewState newSubModel -> { viewState | deletedFilesState = newSubModel })

        RemotesMsg subMsg ->
            withSubUpdate subMsg .remoteState model RemotesMsg Remotes.update (\viewState newSubModel -> { viewState | remoteState = newSubModel })

        DiffMsg subMsg ->
            withSubUpdate subMsg .diffState model DiffMsg Diff.update (\viewState newSubModel -> { viewState | diffState = newSubModel })



-- VIEW


-- Top-level document: the login form until authenticated, then the app.
view : Model -> Browser.Document Msg
view model =
    { title = "Gateway"
    , body =
        case model.loginState of
            LoginLimbo ->
                [ text "Waiting for login data" ]

            LoginReady _ _ ->
                [ Lazy.lazy viewLoginForm model ]

            LoginFailure _ _ _ ->
                [ Lazy.lazy viewLoginForm model ]

            LoginLoading _ _ ->
                [ Lazy.lazy viewLoginForm model ]
LoginSuccess viewState -> viewMainContent model viewState } viewAppIcon : Model -> Html Msg viewAppIcon model = a [ class "nav-link active", href "/view" ] (if model.serverIsOnline then [ span [ class "fas fa-2x fa-fw logo fa-torii-gate" ] [] , span [ class "badge badge-success text-center" ] [ text "beta" ] ] else [ span [ class "fas fa-2x fa-fw logo logo-failure fa-torii-gate" ] [] , span [ class "badge badge-danger text-center" ] [ text "offline" ] ] ) viewOfflineMarker : Html Msg viewOfflineMarker = div [ class "row h-100" ] [ div [ class "col-12 my-auto text-center w-100 text-muted" ] [ span [ class "fas fa-4x fa-fw logo-failure fa-plug" ] [] , br [] [] , br [] [] , text "It seems that we have lost connection to the server." , br [] [] , text "This application will go into a working state again when we have a connection again." ] ] viewMainContent : Model -> ViewState -> List (Html Msg) viewMainContent model viewState = [ div [ class "container-fluid" ] [ div [ class "row wrapper" ] [ aside [ class "col-12 col-md-2 p-0 bg-light tabbar" ] [ nav [ class "navbar navbar-expand-md navbar-light bg-align-items-start flex-md-column flex-row" ] [ viewAppIcon model , a [ class "navbar-toggler", attribute "data-toggle" "collapse", attribute "data-target" ".sidebar" ] [ span [ class "navbar-toggler-icon" ] [] ] , div [ class "collapse navbar-collapse sidebar" ] [ viewSidebarItems model viewState ] ] , viewSidebarBottom model ] , main_ [ class "col" ] (if model.serverIsOnline then [ viewCurrentRoute model viewState , Html.map ListMsg (Ls.buildModals viewState.listState) , Html.map RemotesMsg (Remotes.buildModals viewState.remoteState) ] else [ viewOfflineMarker ] ) ] ] ] viewCurrentRoute : Model -> ViewState -> Html Msg viewCurrentRoute model viewState = case viewState.currentView of ViewList -> Html.map ListMsg <| Ls.view viewState.listState ViewCommits -> Html.map CommitsMsg <| Commits.view viewState.commitsState ViewDeletedFiles -> Html.map DeletedFilesMsg <| 
DeletedFiles.view viewState.deletedFilesState ViewRemotes -> Html.map RemotesMsg <| Remotes.view viewState.remoteState ViewDiff -> Html.map DiffMsg <| Diff.view viewState.diffState ViewNotFound -> text "You seem to have hit a route that does not exist..." viewLoginInputs : String -> String -> List (Html Msg) viewLoginInputs username password = [ h2 [ class "login-header" ] [ text "Login" ] , Input.text [ Input.id "username-input" , Input.attrs [ class "login-input" ] , Input.large , Input.placeholder "Username" , Input.value username , Input.onInput UsernameInput ] , Input.password [ Input.id "password-input" , Input.attrs [ class "login-input" ] , Input.large , Input.placeholder "Password" , Input.value password , Input.onInput PasswordInput ] ] viewLoginButton : String -> String -> Bool -> Html Msg viewLoginButton username password isLoading = let loadingClass = if isLoading then "fa fa-sync fa-sync-animate" else "" in Button.button [ Button.primary , Button.attrs [ onClick <| LoginSubmit , class "login-btn" , type_ "submit" , disabled (String.length (String.trim username) == 0 || String.length (String.trim password) == 0 || isLoading ) ] ] [ span [ class loadingClass ] [], text " Log in" ] viewLoginForm : Model -> Html Msg viewLoginForm model = Grid.containerFluid [ class "login-background" ] [ Grid.row [] [ Grid.col [ Col.lg8 , Col.textAlign Text.alignXsCenter , Col.attrs [ class "login-form" ] ] [ Form.form [ onSubmit LoginSubmit ] [ Form.group [] (case model.loginState of LoginReady username password -> viewLoginInputs username password ++ [ viewLoginButton username password False ] LoginLoading username password -> viewLoginInputs username password ++ [ viewLoginButton username password True ] LoginFailure username password _ -> viewLoginInputs username password ++ [ Alert.simpleDanger [] [ text "Login failed, please try again." ] , viewLoginButton username password False ] _ -> -- This should not happen. 
[] ) ] ] ] ] hasRight : ViewState -> String -> List (Html Msg) -> List (Html Msg) hasRight viewState right elements = if List.member right viewState.rights then elements else [] viewSidebarItems : Model -> ViewState -> Html Msg viewSidebarItems model viewState = let isActiveClass = \v -> if v == viewState.currentView then class "nav-link active" else class "nav-link" in ul [ class "flex-column navbar-nav w-100 text-left" ] (hasRight viewState "fs.view" [ li [ class "nav-item" ] [ a [ isActiveClass ViewList, href (viewToString ViewList) ] [ span [] [ text "Files" ] ] ] ] ++ hasRight viewState "fs.view" [ li [ class "nav-item" ] [ a [ isActiveClass ViewCommits, href (viewToString ViewCommits) ] [ span [] [ text "Changelog" ] ] ] ] ++ hasRight viewState "fs.view" [ li [ class "nav-item" ] [ a [ isActiveClass ViewDeletedFiles, href (viewToString ViewDeletedFiles) ] [ span [] [ text "Trashbin" ] ] ] ] ++ hasRight viewState "remotes.view" [ li [ class "nav-item" ] [ a [ isActiveClass ViewRemotes, href (viewToString ViewRemotes) ] [ span [] [ text "Remotes" ] ] ] ] ++ (if viewState.isAnon then [ li [ class "nav-item" ] [ a [ class "nav-link pl-0", href "#", onClick (LogoutSubmit False) ] [ span [] [ text "Login page" ] ] ] ] else [ li [ class "nav-item" ] [ a [ class "nav-link pl-0", href "#", onClick (LogoutSubmit True) ] [ span [] [ text ("Logout »" ++ viewState.loginName ++ "«") ] ] ] ] ) ) viewSidebarBottom : Model -> Html Msg viewSidebarBottom model = -- Make sure to not display that on small devices: div [ id "sidebar-bottom", class "d-none d-lg-block" ] [ hr [] [] , p [ id "sidebar-bottom-text", class "text-muted" ] [ span [] [ text "Powered by " , a [ href "https://github.com/sahib/brig" ] [ text "brig" ] ] ] ] -- SUBSCRIPTIONS subscriptions : Model -> Sub Msg subscriptions model = case model.loginState of LoginSuccess viewState -> Sub.batch [ Sub.map ListMsg (Ls.subscriptions viewState.listState) , Sub.map CommitsMsg (Commits.subscriptions viewState.commitsState) 
, Sub.map RemotesMsg (Remotes.subscriptions viewState.remoteState) , Sub.map DeletedFilesMsg (DeletedFiles.subscriptions viewState.deletedFilesState) , Websocket.incoming WebsocketIn , Pinger.pinger PingerIn ] _ -> Sub.none ================================================ FILE: gateway/elm/src/Modals/History.elm ================================================ module Modals.History exposing (Model, Msg, newModel, show, subscriptions, update, view) import Bootstrap.Alert as Alert import Bootstrap.Button as Button import Bootstrap.ButtonGroup as ButtonGroup import Bootstrap.Grid as Grid import Bootstrap.Grid.Col as Col import Bootstrap.Grid.Row as Row import Bootstrap.ListGroup as ListGroup import Bootstrap.Modal as Modal import Browser.Events as Events import Commands import Html exposing (..) import Html.Attributes exposing (..) import Html.Events exposing (..) import Http import Json.Decode as D import List import Time import Util type alias Model = { modal : Modal.Visibility , alert : Alert.Visibility , history : Maybe (Result Http.Error (List Commands.HistoryEntry)) , rights : List String , lastPath : String } type Msg = ModalShow | GotHistoryResponse String (Result Http.Error (List Commands.HistoryEntry)) | GotResetResponse (Result Http.Error String) | GotPinResponse (Result Http.Error String) | ResetClicked String String | AnimateModal Modal.Visibility | AlertMsg Alert.Visibility | ModalClose | KeyPress String | PinClicked String String Bool -- INIT newModel : List String -> Model newModel rights = { modal = Modal.hidden , alert = Alert.shown , history = Nothing , rights = rights , lastPath = "" } -- UPDATE update : Msg -> Model -> ( Model, Cmd Msg ) update msg model = case msg of GotHistoryResponse path result -> ( { model | modal = Modal.shown , history = Just result , lastPath = path } , Cmd.none ) ResetClicked path revision -> ( model, Commands.doReset GotResetResponse path revision ) GotResetResponse result -> case result of Ok _ -> ( { model | modal = 
Modal.hidden, history = Nothing }, Cmd.none ) Err err -> ( { model | history = Just (Err err) }, Cmd.none ) GotPinResponse result -> case result of Ok _ -> -- Update the whole history to get the latest pin state. -- This is kinda wasteful and might be optimized later if we need to. ( model, Commands.doHistory (GotHistoryResponse model.lastPath) model.lastPath ) Err err -> ( { model | history = Just (Err err) }, Cmd.none ) AnimateModal visibility -> ( { model | modal = visibility }, Cmd.none ) ModalShow -> ( { model | modal = Modal.shown }, Cmd.none ) ModalClose -> ( { model | modal = Modal.hidden, history = Nothing }, Cmd.none ) AlertMsg vis -> ( { model | alert = vis }, Cmd.none ) PinClicked path revision shouldBePinned -> ( model , if shouldBePinned then Commands.doPin GotPinResponse path revision else Commands.doUnpin GotPinResponse path revision ) KeyPress key -> if model.modal == Modal.hidden then ( model, Cmd.none ) else case key of "Enter" -> ( { model | modal = Modal.hidden, history = Nothing }, Cmd.none ) _ -> ( model, Cmd.none ) -- VIEW viewChangeColor : String -> Html Msg viewChangeColor change = case change of "added" -> span [ class "text-success" ] [ text change ] "modified" -> span [ class "text-warning" ] [ text change ] "removed" -> span [ class "text-danger" ] [ text change ] "moved" -> span [ class "text-info" ] [ text change ] _ -> span [ class "text-muted" ] [ text change ] joinChanges : List (Html Msg) -> List (Html Msg) joinChanges changes = List.intersperse (text ", ") changes viewChangeSet : String -> Html Msg viewChangeSet change = let changes = List.map viewChangeColor (String.split "|" change) in span [] (joinChanges changes) viewPinIcon : Bool -> Bool -> Html msg viewPinIcon isPinned isExplicit = case ( isPinned, isExplicit ) of ( True, True ) -> span [ class "fa fa-map-marker", class "text-success" ] [] ( True, False ) -> span [ class "fa fa-map-marker-alt", class "text-warning" ] [] _ -> span [ class "fa fa-times", class "text-danger" 
            ] []


-- Stand-alone pin/unpin toggle for a history entry; disabled unless the
-- user holds the "fs.edit" right.
viewPinButton : Model -> Commands.HistoryEntry -> Html Msg
viewPinButton model entry =
    Button.button
        [ Button.outlinePrimary
        , Button.attrs
            [ disabled (not (List.member "fs.edit" model.rights))
            , onClick (PinClicked entry.path entry.head.hash (not entry.isPinned))
            ]
        ]
        [ viewPinIcon entry.isPinned entry.isExplicit ]


-- One row of the history list: path, change set, timestamp and commit
-- message on the left; a "Revert" button and a pin toggle on the right.
-- `isFirst` marks the newest entry, whose Revert button is disabled.
viewHistoryEntry : Model -> Bool -> Commands.HistoryEntry -> Html Msg
viewHistoryEntry model isFirst entry =
    Grid.row []
        [ Grid.col [ Col.xs9 ]
            [ p []
                [ text entry.path
                , br [] []
                , viewChangeSet entry.change
                , span [ class "text-muted" ] [ text " at " ]

                -- NOTE(review): formats with Time.utc rather than the user's
                -- zone — confirm this is intended.
                , text <| Util.formatLastModified Time.utc entry.head.date
                , text ": "
                , span [ class "text-muted" ] [ text entry.head.msg ]
                ]
            ]
        , Grid.col [ Col.xs3 ]
            [ ButtonGroup.buttonGroup []
                [ ButtonGroup.button
                    [ Button.outlinePrimary
                    , Button.attrs
                        [ onClick <| ResetClicked entry.path entry.head.hash
                        , disabled isFirst
                        ]
                    ]
                    [ text "Revert" ]
                , ButtonGroup.button
                    [ Button.outlinePrimary
                    , Button.attrs
                        [ disabled (not (List.member "fs.edit" model.rights))
                        , onClick (PinClicked entry.path entry.head.hash (not entry.isPinned))
                        ]
                    ]
                    [ viewPinIcon entry.isPinned entry.isExplicit ]
                ]
            ]
        ]


-- Render all history entries as a list group; index 0 is the newest entry.
viewHistoryEntries : Model -> List Commands.HistoryEntry -> Html Msg
viewHistoryEntries model entries =
    Grid.row []
        [ Grid.col []
            [ ListGroup.ul
                (List.indexedMap (\idx e -> ListGroup.li [] [ viewHistoryEntry model (idx == 0) e ]) entries)
            ]
        ]


-- Modal body: nothing while loading, the entry list on success, or an
-- error alert when the history request failed.
viewHistory : Model -> List (Grid.Column Msg)
viewHistory model =
    [ Grid.col [ Col.xs12 ]
        [ case model.history of
            Nothing ->
                text ""

            Just result ->
                case result of
                    Ok entries ->
                        viewHistoryEntries model entries

                    Err err ->
                        Util.buildAlert model.alert AlertMsg Alert.danger "Oh no!"
("Could not read history: " ++ Util.httpErrorToString err) ] ] view : Model -> Html Msg view model = Modal.config ModalClose |> Modal.large |> Modal.withAnimation AnimateModal |> Modal.header [ class "modal-title modal-header-success" ] [ h4 [] [ text "History" ] ] |> Modal.body [] [ Grid.containerFluid [] [ Grid.row [ Row.attrs [ class "scrollable-modal-row" ] ] (viewHistory model) ] ] |> Modal.footer [] [ Button.button [ Button.outlinePrimary , Button.attrs [ onClick <| AnimateModal Modal.hiddenAnimated ] ] [ text "Close" ] ] |> Modal.view model.modal show : String -> Cmd Msg show path = Commands.doHistory (GotHistoryResponse path) path -- SUBSCRIPTIONS subscriptions : Model -> Sub Msg subscriptions model = Sub.batch [ Modal.subscriptions model.modal AnimateModal , Alert.subscriptions model.alert AlertMsg , Events.onKeyPress (D.map KeyPress <| D.field "key" D.string) ] ================================================ FILE: gateway/elm/src/Modals/Mkdir.elm ================================================ module Modals.Mkdir exposing (Model, Msg, newModel, show, subscriptions, update, view) import Bootstrap.Alert as Alert import Bootstrap.Button as Button import Bootstrap.Form.Input as Input import Bootstrap.Grid as Grid import Bootstrap.Grid.Col as Col import Bootstrap.Modal as Modal import Browser.Events as Events import Commands import Html exposing (..) import Html.Attributes exposing (..) import Html.Events exposing (..) 
import Http
import Json.Decode as D
import Url
import Util


{-| State of the last mkdir request: either nothing went wrong yet,
or the server answered with an error message.
-}
type State
    = Ready
    | Fail String


type alias Model =
    { state : State
    , inputName : String
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    }


type Msg
    = CreateDir String
    | InputChanged String
    | ModalShow
    | GotResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | KeyPress String String



-- INIT


newModel : Model
newModel =
    { state = Ready
    , modal = Modal.hidden
    , inputName = ""
    , alert = Alert.shown
    }



-- UPDATE


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        CreateDir path ->
            ( model, Commands.doMkdir GotResponse path )

        InputChanged inputName ->
            ( { model | inputName = inputName }, Cmd.none )

        GotResponse result ->
            case result of
                Ok _ ->
                    -- New list model means also new checked entries.
                    ( { model | state = Ready, modal = Modal.hidden }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow ->
            ( { model | modal = Modal.shown, inputName = "" }, Cmd.none )

        ModalClose ->
            ( { model | modal = Modal.hidden, state = Ready }, Cmd.none )

        AlertMsg vis ->
            ( { model | alert = vis }, Cmd.none )

        KeyPress path key ->
            if model.modal == Modal.hidden then
                ( model, Cmd.none )

            else
                case key of
                    "Enter" ->
                        -- Mirror the submit button's validation: do not
                        -- create a directory with an empty name just
                        -- because Enter was pressed.
                        if String.length model.inputName == 0 then
                            ( model, Cmd.none )

                        else
                            ( model, Commands.doMkdir GotResponse path )

                    _ ->
                        ( model, Cmd.none )



-- VIEW


{-| Warn the user when the entered name collides with an existing entry. -}
showPathCollision : Model -> Bool -> Html Msg
showPathCollision model doesExist =
    if doesExist then
        span [ class "text-left" ]
            [ span [ class "fas fa-md fa-exclamation-triangle text-warning" ] []
            , span [ class "text-muted" ]
                [ text (" »" ++ model.inputName ++ "« exists already. Please choose another name.\u{00A0}\u{00A0}\u{00A0}") ]
            ]

    else
        span [] []


viewMkdirContent : Model -> List (Grid.Column Msg)
viewMkdirContent model =
    [ Grid.col [ Col.xs12 ]
        [ Input.text
            [ Input.id "mkdir-input"
            , Input.large
            , Input.placeholder "Directory name"
            , Input.onInput InputChanged
            , Input.attrs [ autofocus True ]
            ]
        , br [] []
        , case model.state of
            Ready ->
                text ""

            Fail message ->
                Util.buildAlert
                    model.alert
                    AlertMsg
                    Alert.danger
                    "Oh no!"
                    ("Could not create directory: " ++ message)
        ]
    ]


{-| Join the directory shown by the current URL with the entered name. -}
pathFromUrl : Url.Url -> Model -> String
pathFromUrl url model =
    Util.joinPath [ Util.urlToPath url, model.inputName ]


view : Model -> Url.Url -> (String -> Bool) -> Html Msg
view model url existChecker =
    let
        path =
            Util.urlToPath url

        hasPathCollision =
            existChecker model.inputName
    in
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-primary" ]
            [ h4 []
                [ text "Create a new directory in "
                , span []
                    [ text "»"
                    , text
                        (if path == "/" then
                            "Home"

                         else
                            path
                        )
                    , text "«"
                    ]
                ]
            ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row [] (viewMkdirContent model) ]
            ]
        |> Modal.footer []
            [ showPathCollision model hasPathCollision
            , Button.button
                [ Button.primary
                , Button.attrs
                    [ onClick (CreateDir (pathFromUrl url model))
                    , type_ "submit"
                    , disabled
                        (String.length model.inputName
                            == 0
                            || (case model.state of
                                    Fail _ ->
                                        True

                                    _ ->
                                        False
                               )
                            || hasPathCollision
                        )
                    ]
                ]
                [ text "Create" ]
            , Button.button
                [ Button.outlinePrimary
                , Button.attrs [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Cancel" ]
            ]
        |> Modal.view model.modal


show : Msg
show =
    ModalShow



-- SUBSCRIPTIONS


subscriptions : Url.Url -> Model -> Sub Msg
subscriptions url model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal
        , Alert.subscriptions model.alert AlertMsg
        , Events.onKeyPress (D.map (KeyPress <| pathFromUrl url model) <| D.field "key" D.string)
        ]



================================================
FILE:
gateway/elm/src/Modals/MoveCopy.elm
================================================

module Modals.MoveCopy exposing
    ( Model
    , Msg
    , newCopyModel
    , newMoveModel
    , show
    , subscriptions
    , update
    , view
    , viewDirList
    , viewSearchBox
    )

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Form.Input as Input
import Bootstrap.Form.InputGroup as InputGroup
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Grid.Row as Row
import Bootstrap.Modal as Modal
import Bootstrap.Table as Table
import Browser.Events as Events
import Commands
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Json.Decode as D
import List
import Util


type State
    = Ready (List String)
    | Loading
    | Fail String


type alias Model =
    { state : State
    , action : Type
    , destPath : String
    , sourcePath : String
    , filter : String
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    }


type Msg
    = DoAction
    | DirChosen String
    | SearchInput String
    | ModalShow String
    | GotAllDirsResponse (Result Http.Error (List String))
    | GotActionResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | KeyPress String



-- INIT


{-| Whether this modal instance moves or copies its source path. -}
type Type
    = Move
    | Copy


typeToString : Type -> String
typeToString typ =
    case typ of
        Move ->
            "Move"

        Copy ->
            "Copy"


newMoveModel : Model
newMoveModel =
    { state = Loading
    , modal = Modal.hidden
    , action = Move
    , destPath = ""
    , sourcePath = ""
    , filter = ""
    , alert = Alert.shown
    }


newCopyModel : Model
newCopyModel =
    { state = Loading
    , modal = Modal.hidden
    , action = Copy
    , destPath = ""
    , sourcePath = ""
    , filter = ""
    , alert = Alert.shown
    }



-- UPDATE


{-| Display "/" as "Home" and normalize all other paths. -}
fixPath : String -> String
fixPath path =
    if path == "/" then
        "Home"

    else
        String.join "/" (Util.splitPath path)


{-| A directory is not a valid move/copy target when it is the source's
own parent (a no-op) or when it lies inside the source itself.
-}
filterInvalidTargets : String -> String -> Bool
filterInvalidTargets sourcePath path =
    (path /= Util.dirname sourcePath)
        && not (String.startsWith path sourcePath)


fixAllDirResponse : Model -> List String -> List String
fixAllDirResponse model paths =
    List.filter (filterInvalidTargets model.sourcePath) paths
        |> List.map fixPath


{-| Case-insensitive substring filter over the directory list.

Both the filter and each directory name are lowercased; previously only
the filter was lowercased, which made the comparison effectively
case-sensitive for any directory containing uppercase letters.
-}
filterAllDirs : String -> List String -> List String
filterAllDirs filter dirs =
    let
        lowerFilter =
            String.toLower filter
    in
    List.filter (\dir -> String.contains lowerFilter (String.toLower dir)) dirs


{-| Fire the move or copy command for the currently chosen destination. -}
doAction : Model -> Cmd Msg
doAction model =
    case model.action of
        Move ->
            Commands.doMove GotActionResponse model.sourcePath model.destPath

        Copy ->
            Commands.doCopy GotActionResponse model.sourcePath model.destPath


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        DoAction ->
            ( model, doAction model )

        DirChosen path ->
            ( { model | destPath = path }, Cmd.none )

        SearchInput filter ->
            ( { model | filter = filter }, Cmd.none )

        GotAllDirsResponse result ->
            case result of
                Ok dirs ->
                    ( { model | state = Ready (fixAllDirResponse model dirs) }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        GotActionResponse result ->
            case result of
                Ok _ ->
                    ( { model | modal = Modal.hidden }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow sourcePath ->
            ( { model
                | modal = Modal.shown
                , sourcePath = sourcePath
                , destPath = ""
                , state = Loading
              }
            , Commands.doListAllDirs GotAllDirsResponse
            )

        ModalClose ->
            ( { model | modal = Modal.hidden }, Cmd.none )

        AlertMsg vis ->
            ( { model | alert = vis }, Cmd.none )

        KeyPress key ->
            ( model
            , if model.modal == Modal.hidden || model.destPath == "" then
                Cmd.none

              else
                case key of
                    "Enter" ->
                        doAction model

                    _ ->
                        Cmd.none
            )



-- VIEW


viewDirEntry : (String -> msg) -> String -> Table.Row msg
viewDirEntry clickMsg path =
    Table.tr []
        [ Table.td
            [ Table.cellAttr <| onClick (clickMsg path) ]
            [ span [ class "fas fa-lg fa-folder text-xs-right file-list-icon" ] [] ]
        , Table.td
            [ Table.cellAttr <| onClick (clickMsg path) ]
            [ text path ]
        ]


{-| A clickable, filterable table of directories. Shared with other
modals (e.g. RemoteFolders) via the module's exposing list.
-}
viewDirList : (String -> msg) -> String -> List String -> Html msg
viewDirList clickMsg filter dirs =
    Table.table
        { options = [ Table.hover ]
        , thead =
            Table.thead [ Table.headAttr (style "display" "none") ]
                [ Table.tr []
                    [ Table.th [ Table.cellAttr (style "width" "10%") ] []
                    , Table.th [ Table.cellAttr (style "width" "90%") ] []
                    ]
                ]
        , tbody =
            Table.tbody []
                (List.map (viewDirEntry clickMsg) (filterAllDirs filter dirs))
        }


{-| Search input used to narrow down the directory list. -}
viewSearchBox : (String -> msg) -> String -> Html msg
viewSearchBox searchMsg filter =
    InputGroup.config
        (InputGroup.text
            [ Input.placeholder "Filter directory list"
            , Input.attrs
                [ onInput searchMsg
                , value filter
                ]
            ]
        )
        |> InputGroup.successors
            [ InputGroup.span [ class "input-group-addon" ]
                [ button [] [ span [ class "fas fa-search fa-xs input-group-addon" ] [] ] ]
            ]
        |> InputGroup.attrs [ class "stylish-input-group input-group" ]
        |> InputGroup.view


viewContent : Model -> List (Grid.Column Msg)
viewContent model =
    [ Grid.col [ Col.xs12 ]
        [ case model.state of
            Ready dirs ->
                div []
                    [ viewSearchBox SearchInput model.filter
                    , viewDirList DirChosen model.filter dirs
                    ]

            Loading ->
                text "Loading."

            Fail message ->
                Util.buildAlert
                    model.alert
                    AlertMsg
                    Alert.danger
                    "Oh no!"
                    ("Could not move or copy path: " ++ message)
        ]
    ]


view : Model -> Html Msg
view model =
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-primary" ]
            [ h4 []
                [ text (typeToString model.action ++ " ")
                , span []
                    [ text "»"
                    , text (Util.basename model.sourcePath)
                    , text "«"
                    ]
                , if String.length model.destPath > 0 then
                    span []
                        [ text " into »"
                        , text model.destPath
                        , text "«"
                        ]

                  else
                    text " into ..."
                ]
            ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row
                    [ Row.attrs [ class "scrollable-modal-row" ] ]
                    (viewContent model)
                ]
            ]
        |> Modal.footer []
            [ Button.button
                [ Button.primary
                , Button.attrs
                    [ onClick DoAction
                    , type_ "submit"
                    , disabled
                        (String.length model.destPath
                            == 0
                            || (case model.state of
                                    Fail _ ->
                                        True

                                    _ ->
                                        False
                               )
                        )
                    ]
                ]
                [ text (typeToString model.action) ]
            , Button.button
                [ Button.outlinePrimary
                , Button.attrs [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Cancel" ]
            ]
        |> Modal.view model.modal


show : String -> Msg
show sourcePath =
    ModalShow sourcePath



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal
        , Alert.subscriptions model.alert AlertMsg
        , Events.onKeyPress (D.map KeyPress <| D.field "key" D.string)
        ]



================================================
FILE: gateway/elm/src/Modals/RemoteAdd.elm
================================================

module Modals.RemoteAdd exposing (Model, Msg, newModel, show, subscriptions, update, view)

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Dropdown as Dropdown
import Bootstrap.Form.Input as Input
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Modal as Modal
import Browser.Events as Events
import Commands
import Dict
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Json.Decode as D
import Util


type State
    = Ready
    | Fail String


type alias Model =
    { state : State
    , name : String
    , fingerprint : String
    , doAutoUdate : Bool -- NOTE: field name keeps its historic spelling; it is part of the exposed Model alias.
    , acceptPush : Bool
    , conflictStrategy : String
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    , conflictDropdown : Dropdown.State
    }


type Msg
    = RemoteAdd
    | NameInputChanged String
    | FingerprintInputChanged String
    | AutoUpdateChanged Bool
    | AcceptPushChanged Bool
    | ConflictStrategyChanged String
    | ConflictDropdownMsg Dropdown.State
    | ModalShow
    | GotResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | KeyPress String



-- INIT


newModel : Model
newModel =
    newModelWithState Modal.hidden


newModelWithState : Modal.Visibility -> Model
newModelWithState state =
    { state = Ready
    , modal = state
    , name = ""
    , fingerprint = ""
    , doAutoUdate = False
    , acceptPush = False
    , alert = Alert.shown
    , conflictDropdown = Dropdown.initialState
    , conflictStrategy = ""
    }



-- UPDATE


{-| Send the filled-out remote to the server. Folders start out empty. -}
submit : Model -> Cmd Msg
submit model =
    Commands.doRemoteAdd
        GotResponse
        model.name
        model.fingerprint
        model.doAutoUdate
        model.acceptPush
        model.conflictStrategy
        []


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        RemoteAdd ->
            ( model, submit model )

        NameInputChanged name ->
            ( { model | name = name }, Cmd.none )

        FingerprintInputChanged fingerprint ->
            ( { model | fingerprint = fingerprint }, Cmd.none )

        AutoUpdateChanged doAutoUdate ->
            ( { model | doAutoUdate = doAutoUdate }, Cmd.none )

        AcceptPushChanged acceptPush ->
            ( { model | acceptPush = acceptPush }, Cmd.none )

        GotResponse result ->
            case result of
                Ok _ ->
                    -- New list model means also new checked entries.
                    ( { model | state = Ready, modal = Modal.hidden }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow ->
            ( newModelWithState Modal.shown, Cmd.none )

        ModalClose ->
            ( { model | modal = Modal.hidden }, Cmd.none )

        AlertMsg vis ->
            ( { model | alert = vis }, Cmd.none )

        ConflictDropdownMsg state ->
            ( { model | conflictDropdown = state }, Cmd.none )

        ConflictStrategyChanged state ->
            ( { model | conflictStrategy = state }, Cmd.none )

        KeyPress key ->
            if model.modal == Modal.hidden then
                ( model, Cmd.none )

            else
                case key of
                    "Enter" ->
                        -- Mirror the submit button's validation: do not
                        -- submit a remote without a name and fingerprint.
                        if String.isEmpty model.name || String.isEmpty model.fingerprint then
                            ( model, Cmd.none )

                        else
                            ( model, submit model )

                    _ ->
                        ( model, Cmd.none )



-- VIEW


viewRemoteAddContent : Model -> List (Grid.Column Msg)
viewRemoteAddContent model =
    [ Grid.col [ Col.xs12 ]
        [ Input.text
            [ Input.id "remote-name-input"
            , Input.large
            , Input.placeholder "Remote name"
            , Input.onInput NameInputChanged
            , Input.attrs [ autofocus True ]
            ]
        , br [] []
        , Input.text
            [ Input.id "remote-fingerprint-input"
            , Input.large
            , Input.placeholder "Remote fingerprint"
            , Input.onInput FingerprintInputChanged
            ]
        , br [] []
        , span [] [ Util.viewToggleSwitch AutoUpdateChanged "Accept automatic updates?" model.doAutoUdate False ]
        , br [] []
        , span [] [ Util.viewToggleSwitch AcceptPushChanged "Accept other remotes pushing data to us?" model.acceptPush False ]
        , br [] []
        , span []
            [ span [ class "text-muted" ] [ text "The current conflict strategy is" ]
            , viewConflictDropdown model
            , span [ class "text-muted" ] [ text "." ]
            ]
        , case model.state of
            Ready ->
                text ""

            Fail message ->
                Util.buildAlert
                    model.alert
                    AlertMsg
                    Alert.danger
                    "Oh no!"
                    ("Could not add remote: " ++ message)
        ]
    ]


{-| Label + icon for the currently selected conflict strategy.
The empty string means "default", which renders like "marker".
-}
showCurrentConflictStrategy : Model -> Html Msg
showCurrentConflictStrategy model =
    case model.conflictStrategy of
        "" ->
            span [] [ text "Marker ", span [ class "fas fa-marker" ] [] ]

        "ignore" ->
            span [] [ text "Ignore ", span [ class "fas fa-eject" ] [] ]

        "marker" ->
            span [] [ text "Marker ", span [ class "fas fa-marker" ] [] ]

        "embrace" ->
            span [] [ text "Embrace ", span [ class "fas fa-handshake" ] [] ]

        _ ->
            span [] [ text "Unknown ", span [ class "fas fa-question" ] [] ]


viewConflictDropdown : Model -> Html Msg
viewConflictDropdown model =
    Dropdown.dropdown
        model.conflictDropdown
        { options =
            [ Dropdown.alignMenuRight
            , Dropdown.attrs [ id "remote-add-conflict-dropdown" ]
            ]
        , toggleMsg = ConflictDropdownMsg
        , toggleButton =
            Dropdown.toggle [ Button.roleLink ] [ showCurrentConflictStrategy model ]
        , items =
            [ Dropdown.buttonItem
                [ onClick (ConflictStrategyChanged "ignore") ]
                [ span [ class "fas fa-md fa-eject" ] [], text " Ignore" ]
            , Dropdown.buttonItem
                [ onClick (ConflictStrategyChanged "marker") ]
                [ span [ class "fas fa-md fa-marker" ] [], text " Marker" ]
            , Dropdown.buttonItem
                [ onClick (ConflictStrategyChanged "embrace") ]
                [ span [ class "fas fa-md fa-handshake" ] [], text " Embrace" ]
            , Dropdown.buttonItem
                [ onClick (ConflictStrategyChanged "") ]
                [ span [ class "fas fa-md fa-eraser" ] [], text " Default" ]
            ]
        }


view : Model -> Html Msg
view model =
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-primary" ]
            [ h4 [] [ text "Add a new remote" ] ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row [] (viewRemoteAddContent model) ]
            ]
        |> Modal.footer []
            [ Button.button
                [ Button.primary
                , Button.attrs
                    [ onClick RemoteAdd
                    , type_ "submit"
                    , disabled
                        (String.length model.name
                            == 0
                            || String.length model.fingerprint
                            == 0
                            || (case model.state of
                                    Fail _ ->
                                        True

                                    _ ->
                                        False
                               )
                        )
                    ]
                ]
                [ text "Create" ]
            , Button.button
                [ Button.outlinePrimary
                , Button.attrs
                    [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Cancel" ]
            ]
        |> Modal.view model.modal


show : Msg
show =
    ModalShow



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal
        , Alert.subscriptions model.alert AlertMsg
        , Events.onKeyPress (D.map KeyPress <| D.field "key" D.string)
        , Dropdown.subscriptions model.conflictDropdown ConflictDropdownMsg
        ]



================================================
FILE: gateway/elm/src/Modals/RemoteFolders.elm
================================================

module Modals.RemoteFolders exposing
    ( Model
    , Msg
    , newModel
    , show
    , subscriptions
    , update
    , view
    )

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Dropdown as Dropdown
import Bootstrap.Form.Input as Input
import Bootstrap.Form.InputGroup as InputGroup
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Grid.Row as Row
import Bootstrap.ListGroup as ListGroup
import Bootstrap.Modal as Modal
import Bootstrap.Table as Table
import Bootstrap.Text as Text
import Browser.Events as Events
import Commands
import Dict
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Json.Decode as D
import List.Extra as LE
import Modals.MoveCopy as MoveCopy
import Set
import Util


type State
    = Ready
    | Fail String


type alias Model =
    { state : State
    , allDirs : List String
    , filter : String
    , remote : Commands.Remote
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    , conflictDropdowns : Dict.Dict String Dropdown.State
    }


type Msg
    = ModalShow Commands.Remote
    | FolderRemove String
    | ReadOnlyChanged String Bool
    | GotResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | GotAllDirsResponse (Result Http.Error (List String))
    | DirChosen String
    | SearchInput String
    | ConflictStrategyToggled String String
    | ConflictDropdownMsg String Dropdown.State



-- INIT


newModel : Model
newModel =
    newModelWithState Modal.hidden Commands.emptyRemote


newModelWithState : Modal.Visibility -> Commands.Remote -> Model
newModelWithState state remote =
    { state = Ready
    , modal = state
    , allDirs = []
    , filter = ""
    , remote = remote
    , alert = Alert.shown
    , conflictDropdowns = Dict.empty
    }



-- UPDATE


{-| Push the (possibly modified) remote back to the server. -}
submit : Commands.Remote -> Cmd Msg
submit remote =
    Commands.doRemoteModify GotResponse remote


fixFolder : String -> String
fixFolder path =
    Util.prefixSlash path


{-| Add `folder` to the remote's folder list (sorted, not read-only,
default conflict strategy) and submit the change immediately.
-}
addFolder : Model -> String -> ( Model, Cmd Msg )
addFolder model folder =
    let
        oldRemote =
            model.remote

        cleanFolder =
            Commands.Folder (fixFolder folder) False ""

        newRemote =
            { oldRemote | folders = List.sortBy .folder <| cleanFolder :: oldRemote.folders }

        upModel =
            { model | remote = newRemote }
    in
    ( upModel, submit upModel.remote )


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        GotResponse result ->
            case result of
                Ok _ ->
                    -- New list model means also new checked entries.
                    ( { model | state = Ready }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        FolderRemove folder ->
            let
                oldRemote =
                    model.remote

                newRemote =
                    { oldRemote | folders = List.filter (\f -> f.folder /= folder) oldRemote.folders }

                upModel =
                    { model | remote = newRemote }
            in
            ( upModel, submit upModel.remote )

        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow remote ->
            ( newModelWithState Modal.shown remote
            , Commands.doListAllDirs GotAllDirsResponse
            )

        GotAllDirsResponse result ->
            case result of
                Ok allDirs ->
                    ( { model | allDirs = allDirs }, Cmd.none )

                Err _ ->
                    ( model, Cmd.none )

        DirChosen choice ->
            addFolder model choice

        SearchInput filter ->
            ( { model | filter = filter }, Cmd.none )

        ModalClose ->
            ( { model | modal = Modal.hidden, filter = "" }, Cmd.none )

        AlertMsg vis ->
            ( { model | alert = vis }, Cmd.none )

        ReadOnlyChanged path state ->
            let
                oldRemote =
                    model.remote

                newRemote =
                    { oldRemote
                        | folders =
                            List.map
                                (\f ->
                                    if f.folder == path then
                                        { f | readOnly = state }

                                    else
                                        f
                                )
                                model.remote.folders
                    }
            in
            ( { model | remote = newRemote }, submit newRemote )

        ConflictDropdownMsg folder state ->
            ( { model | conflictDropdowns = Dict.insert folder state model.conflictDropdowns }
            , Cmd.none
            )

        ConflictStrategyToggled folder strategy ->
            let
                oldRemote =
                    model.remote

                newFolders =
                    List.map
                        (\f ->
                            if f.folder == folder then
                                { f | conflictStrategy = strategy }

                            else
                                f
                        )
                        model.remote.folders

                newRemote =
                    { oldRemote | folders = newFolders }

                upModel =
                    { model | remote = newRemote }
            in
            ( upModel, submit upModel.remote )



-- VIEW


viewRow : Html Msg -> Html Msg -> Html Msg -> Html Msg
viewRow a b c =
    Grid.row []
        [ Grid.col [ Col.xs1, Col.textAlign Text.alignXsRight ] [ a ]
        , Grid.col [ Col.xs8, Col.textAlign Text.alignXsLeft ] [ b ]
        , Grid.col [ Col.xs3, Col.textAlign Text.alignXsLeft ] [ c ]
        ]


{-| Map a conflict strategy name to a font-awesome icon class.
The empty string is the default strategy and renders like "marker".
-}
conflictStrategyToIconName : String -> String
conflictStrategyToIconName strategy =
    case strategy of
        "" ->
            "fa-marker text-muted"

        "ignore" ->
            "fa-eject"

        "marker" ->
            "fa-marker"

        "embrace" ->
            "fa-handshake"

        _ ->
            "fa-question"


viewConflictDropdown : Model -> Commands.Folder -> Html Msg
viewConflictDropdown model folder =
    Dropdown.dropdown
        (Maybe.withDefault Dropdown.initialState (Dict.get folder.folder model.conflictDropdowns))
        { options = [ Dropdown.alignMenuRight ]
        , toggleMsg = ConflictDropdownMsg folder.folder
        , toggleButton =
            Dropdown.toggle [ Button.roleLink ]
                [ span
                    [ class "fas", class <| conflictStrategyToIconName folder.conflictStrategy ]
                    []
                ]
        , items =
            [ Dropdown.buttonItem
                [ onClick (ConflictStrategyToggled folder.folder "ignore") ]
                [ span [ class "fas fa-md fa-eject" ] [], text " Ignore" ]
            , Dropdown.buttonItem
                [ onClick (ConflictStrategyToggled folder.folder "marker") ]
                [ span [ class "fas fa-md fa-marker" ] [], text " Marker" ]
            , Dropdown.buttonItem
                [ onClick (ConflictStrategyToggled folder.folder "embrace") ]
                [ span [ class "fas fa-md fa-handshake" ] [], text " Embrace" ]
            , Dropdown.buttonItem
                [ onClick (ConflictStrategyToggled folder.folder "") ]
                [ span [ class "fas fa-md fa-eraser" ] [], text " Default" ]
            ]
        }


viewFolder : Model -> Commands.Folder -> Table.Row Msg
viewFolder model folder =
    Table.tr []
        [ Table.td []
            [ span [ class "fas fa-md fa-folder text-muted" ] [] ]
        , Table.td []
            [ text folder.folder ]
        , Table.td []
            [ viewConflictDropdown model folder ]
        , Table.td []
            [ Util.viewToggleSwitch (ReadOnlyChanged folder.folder) "" folder.readOnly False ]
        , Table.td []
            [ Button.button
                [ Button.attrs [ class "close", onClick <| FolderRemove folder.folder ] ]
                [ span [ class "fas fa-xs fa-times text-muted" ] [] ]
            ]
        ]


viewFolders : Model -> Commands.Remote -> Html Msg
viewFolders model remote =
    Table.table
        { options =
            [ Table.hover
            , Table.attr (class "borderless-table")
            ]
        , thead =
            Table.thead []
                [ Table.tr []
                    [ Table.th
                        [ Table.cellAttr (style "width" "5%") ]
                        [ text "" ]
                    , Table.th
                        [ Table.cellAttr (style "width" "55%") ]
                        [ span [ class "text-muted small" ] [ text "Name" ] ]
                    , Table.th
                        [ Table.cellAttr (style "width" "20%") ]
                        [ span [ class "text-muted small" ] [ text "Conflict Strategy" ] ]
                    , Table.th
                        [ Table.cellAttr (style "width" "15%") ]
                        [ span [ class "text-muted small" ] [ text "Read Only?" ] ]
                    , Table.th
                        [ Table.cellAttr (style "width" "5%") ]
                        []
                    ]
                ]
        , tbody =
            Table.tbody []
                (List.map (\f -> viewFolder model f) remote.folders)
        }


viewMaybeFolders : Model -> Commands.Remote -> Html Msg
viewMaybeFolders model remote =
    let
        folders =
            LE.uniqueBy .folder remote.folders
    in
    if List.length folders <= 0 then
        span [ class "text-muted text-center" ]
            [ text "No folders. This means this user can see everything."
            , br [] []
            , text "Add a new folder below to limit what this remote can see."
            , br [] []
            , br [] []
            ]

    else
        div []
            [ viewFolders model remote
            , br [] []
            , hr [] []
            ]


viewRemoteFoldersContent : Model -> List (Grid.Column Msg)
viewRemoteFoldersContent model =
    [ Grid.col [ Col.xs12 ]
        [ h4 [] [ span [ class "text-muted text-center" ] [ text "Visible folders" ] ]
        , viewMaybeFolders model model.remote
        , br [] []
        , br [] []
        , h4 [] [ span [ class "text-muted text-center" ] [ text "All folders" ] ]
        , MoveCopy.viewSearchBox SearchInput model.filter
        , MoveCopy.viewDirList DirChosen model.filter model.allDirs
        , case model.state of
            Ready ->
                text ""

            Fail message ->
                -- This modal modifies an existing remote's folders; the
                -- previous message ("Could not add remote") was a
                -- copy-paste leftover from the RemoteAdd modal.
                Util.buildAlert
                    model.alert
                    AlertMsg
                    Alert.danger
                    "Oh no!"
                    ("Could not modify remote: " ++ message)
        ]
    ]


view : Model -> Html Msg
view model =
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-primary" ]
            [ h4 [] [ text "Edit folders of »", text model.remote.name, text "«" ] ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row
                    [ Row.attrs [ style "min-width" "60vh", class "scrollable-modal-row" ] ]
                    (viewRemoteFoldersContent model)
                ]
            ]
        |> Modal.footer []
            [ Button.button
                [ Button.outlinePrimary
                , Button.attrs
                    [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Close" ]
            ]
        |> Modal.view model.modal


show : Commands.Remote -> Msg
show remote =
    ModalShow remote



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal
        , Alert.subscriptions model.alert AlertMsg
        , Sub.batch
            (List.map
                (\( name, state ) -> Dropdown.subscriptions state (ConflictDropdownMsg name))
                (Dict.toList model.conflictDropdowns)
            )
        ]



================================================
FILE: gateway/elm/src/Modals/RemoteRemove.elm
================================================

module Modals.RemoteRemove exposing (Model, Msg, newModel, show, subscriptions, update, view)

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Modal as Modal
import Browser.Events as Events
import Commands
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Json.Decode as D
import Util


type State
    = Ready
    | Fail String


type alias Model =
    { state : State
    , name : String
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    }


type Msg
    = DoRemove
    | ModalShow String
    | GotResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | KeyPress String



-- INIT


newModel : Model
newModel =
    newModelWithState "" Modal.hidden


newModelWithState : String -> Modal.Visibility -> Model
newModelWithState name state =
    { state = Ready
    , modal = state
    , name = name
    , alert = Alert.shown
    }



-- UPDATE


{-| Ask the server to remove the remote held in the model. -}
submit : Model -> Cmd Msg
submit model =
    Commands.doRemoteRemove GotResponse model.name


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        DoRemove ->
            ( model, submit model )

        GotResponse result ->
            case result of
                Ok _ ->
                    ( { model | modal = Modal.hidden }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow path ->
            ( newModelWithState path Modal.shown, Cmd.none )

        ModalClose ->
            ( { model | modal = Modal.hidden }, Cmd.none )

        AlertMsg vis ->
            ( { model | alert = vis }, Cmd.none )

        KeyPress key ->
            if model.modal == Modal.hidden then
                ( model, Cmd.none )

            else
                case key of
                    "Enter" ->
                        ( model, submit model )

                    _ ->
                        ( model, Cmd.none )



-- VIEW


viewRemoteAddContent : Model -> List (Grid.Column Msg)
viewRemoteAddContent model =
    [ Grid.col [ Col.xs12 ]
        [ text
            ("Removing »"
                ++ model.name
                ++ "« cannot be reverted. If you are the last one caching the data of this remote,"
                ++ " the data might vanish forever and cannot be restored."
            )
        ]
    ]


view : Model -> Html Msg
view model =
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-danger" ]
            [ h4 []
                [ text "Really remove?"
                ]
            ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row [] (viewRemoteAddContent model) ]
            ]
        |> Modal.footer []
            [ Button.button
                [ Button.danger
                , Button.attrs
                    [ onClick DoRemove
                    , type_ "submit"
                    ]
                ]
                [ text "Remove" ]
            , Button.button
                [ Button.outlinePrimary
                , Button.attrs
                    [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Cancel" ]
            ]
        |> Modal.view model.modal


show : String -> Msg
show name =
    ModalShow name



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal
        , Alert.subscriptions model.alert AlertMsg
        , Events.onKeyPress (D.map KeyPress <| D.field "key" D.string)
        ]



================================================
FILE: gateway/elm/src/Modals/Remove.elm
================================================

module Modals.Remove exposing (Model, Msg, newModel, show, subscriptions, update, view)

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Modal as Modal
import Browser.Events as Events
import Commands
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Json.Decode as D
import Util


type State
    = Ready
    | Fail String


type alias Model =
    { state : State
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    , selected : List String
    }


type Msg
    = RemoveAll (List String)
    | ModalShow (List String)
    | GotResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | KeyPress String



-- INIT


newModel : Model
newModel =
    { state = Ready
    , modal = Modal.hidden
    , alert = Alert.shown
    , selected = []
    }



-- UPDATE


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        RemoveAll paths ->
            ( model, Commands.doRemove GotResponse paths )

        GotResponse result ->
            case result of
                Ok _ ->
                    -- New list model means also new checked entries.
( { model | state = Ready, modal = Modal.hidden }, Cmd.none ) Err err -> ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none ) AnimateModal visibility -> ( { model | modal = visibility }, Cmd.none ) ModalShow paths -> ( { model | modal = Modal.shown, selected = paths }, Cmd.none ) ModalClose -> ( { model | modal = Modal.hidden, state = Ready }, Cmd.none ) AlertMsg vis -> ( { model | alert = vis }, Cmd.none ) KeyPress key -> ( model , if model.modal == Modal.hidden then Cmd.none else case key of "Enter" -> Commands.doRemove GotResponse model.selected _ -> Cmd.none ) -- VIEW pluralizeItems : Int -> String pluralizeItems count = if count == 1 then "item" else "items" viewRemoveContent : Model -> Int -> List (Grid.Column Msg) viewRemoveContent model nSelected = [ Grid.col [ Col.xs12 ] [ case model.state of Ready -> text ("This would remove the " ++ String.fromInt nSelected ++ " selected " ++ pluralizeItems nSelected ++ ".") Fail message -> Util.buildAlert model.alert AlertMsg Alert.danger "Oh no!" ("Could not remove directory: " ++ message) ] ] view : Model -> List String -> Html Msg view model selectedPaths = Modal.config ModalClose |> Modal.large |> Modal.withAnimation AnimateModal |> Modal.header [ class "modal-title modal-header-warning" ] [ h4 [] [ text "Really remove?" 
] ] |> Modal.body [] [ Grid.containerFluid [] [ Grid.row [] (viewRemoveContent model (List.length selectedPaths)) ] ] |> Modal.footer [] [ Button.button [ Button.warning , Button.attrs [ onClick <| RemoveAll selectedPaths , disabled (case model.state of Fail _ -> True _ -> False ) ] ] [ text "Remove" ] , Button.button [ Button.outlinePrimary , Button.attrs [ onClick <| AnimateModal Modal.hiddenAnimated ] ] [ text "Cancel" ] ] |> Modal.view model.modal show : List String -> Msg show paths = ModalShow paths -- SUBSCRIPTIONS subscriptions : Model -> Sub Msg subscriptions model = Sub.batch [ Modal.subscriptions model.modal AnimateModal , Alert.subscriptions model.alert AlertMsg , Events.onKeyPress (D.map KeyPress <| D.field "key" D.string) ] ================================================ FILE: gateway/elm/src/Modals/Rename.elm ================================================ module Modals.Rename exposing (Model, Msg, newModel, show, subscriptions, update, view) import Bootstrap.Alert as Alert import Bootstrap.Button as Button import Bootstrap.Form.Input as Input import Bootstrap.Grid as Grid import Bootstrap.Grid.Col as Col import Bootstrap.Modal as Modal import Browser.Events as Events import Commands import Html exposing (..) import Html.Attributes exposing (..) import Html.Events exposing (..) 
import Http
import Json.Decode as D
import Util


{-| State of the rename request: idle/ok, or failed with a message.
-}
type State
    = Ready
    | Fail String


type alias Model =
    { state : State
    , currPath : String -- Path of the entry being renamed.
    , inputName : String -- Name currently typed by the user.
    , modal : Modal.Visibility
    , alert : Alert.Visibility
    }


type Msg
    = DoRename
    | InputChanged String
    | ModalShow String
    | GotResponse (Result Http.Error String)
    | AnimateModal Modal.Visibility
    | AlertMsg Alert.Visibility
    | ModalClose
    | KeyPress String



-- INIT


{-| Fresh model with the modal hidden and no input yet.
-}
newModel : Model
newModel =
    { state = Ready
    , modal = Modal.hidden
    , inputName = ""
    , currPath = ""
    , alert = Alert.shown
    }



-- UPDATE


{-| Move `sourcePath` to the same directory under `newName`.
`basename` guards against the user typing a path instead of a plain name.
-}
triggerRename : String -> String -> Cmd Msg
triggerRename sourcePath newName =
    Commands.doMove GotResponse
        sourcePath
        (Util.joinPath [ Util.dirname sourcePath, Util.basename newName ])


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        DoRename ->
            ( model, triggerRename model.currPath model.inputName )

        InputChanged inputName ->
            ( { model | inputName = inputName }, Cmd.none )

        GotResponse result ->
            case result of
                Ok _ ->
                    -- New list model means also new checked entries.
                    ( { model | state = Ready, modal = Modal.hidden }, Cmd.none )

                Err err ->
                    ( { model | state = Fail <| Util.httpErrorToString err }, Cmd.none )

        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow currPath ->
            ( { model | modal = Modal.shown, inputName = "", currPath = currPath }, Cmd.none )

        ModalClose ->
            ( { model | modal = Modal.hidden, state = Ready }, Cmd.none )

        AlertMsg vis ->
            ( { model | alert = vis }, Cmd.none )

        KeyPress key ->
            -- Enter triggers the rename, but only while the modal is open.
            if model.modal == Modal.hidden then
                ( model, Cmd.none )

            else
                case key of
                    "Enter" ->
                        ( model, triggerRename model.currPath model.inputName )

                    _ ->
                        ( model, Cmd.none )



-- VIEW


{-| Body of the rename modal: the name input plus a possible error alert.
-}
viewRenameContent : Model -> List (Grid.Column Msg)
viewRenameContent model =
    [ Grid.col [ Col.xs12 ]
        [ Input.text
            [ Input.id "rename-input"
            , Input.large
            , Input.placeholder "New name"
            , Input.onInput InputChanged
            , Input.attrs [ autofocus True ]
            ]
        , br [] []
        , case model.state of
            Ready ->
                text ""

            Fail message ->
                Util.buildAlert
                    model.alert
                    AlertMsg
                    Alert.danger
                    "Oh no!"
                    ("Could not rename path: " ++ message)
        ]
    ]


{-| Render the rename modal with a live preview of the new name in the title.
-}
view : Model -> Html Msg
view model =
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-primary" ]
            [ h4 []
                [ text "Rename "
                , span []
                    [ text "»"
                    , text (Util.basename model.currPath)
                    , text "«"
                    ]
                , if String.length model.inputName > 0 then
                    span []
                        [ text " to "
                        , span []
                            [ text "»"
                            , text model.inputName
                            , text "«"
                            ]
                        ]

                  else
                    text ""
                ]
            ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row [] (viewRenameContent model) ]
            ]
        |> Modal.footer []
            [ Button.button
                [ Button.primary
                , Button.attrs
                    [ onClick DoRename
                    , type_ "submit"

                    -- Renaming needs a non-empty name and no previous failure.
                    , disabled
                        (String.length model.inputName
                            == 0
                            || (case model.state of
                                    Fail _ ->
                                        True

                                    _ ->
                                        False
                               )
                        )
                    ]
                ]
                [ text "Rename" ]
            , Button.button
                [ Button.outlinePrimary
                , Button.attrs [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Cancel" ]
            ]
        |> Modal.view model.modal


{-| Message that opens this modal for `currPath`.
-}
show : String -> Msg
show currPath =
    ModalShow currPath



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal
        , Alert.subscriptions model.alert AlertMsg
        , Events.onKeyPress (D.map KeyPress <| D.field "key" D.string)
        ]



================================================
FILE: gateway/elm/src/Modals/Share.elm
================================================
module Modals.Share exposing (Model, Msg, newModel, show, subscriptions, update, view)

import Bootstrap.Button as Button
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Grid.Row as Row
import Bootstrap.Modal as Modal
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Url
import Util


type alias Model =
    { paths : List String -- Paths whose download links are shown.
    , modal : Modal.Visibility
    }


type Msg
    = ModalShow (List String)
    | AnimateModal Modal.Visibility
    | ModalClose



-- INIT


newModel : Model
newModel =
    { paths = []
    , modal = Modal.hidden
    }



-- UPDATE


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        AnimateModal visibility ->
            ( { model | modal = visibility }, Cmd.none )

        ModalShow paths ->
            ( { model | modal = Modal.shown, paths = paths }, Cmd.none )

        ModalClose ->
            ( { model | modal = Modal.hidden, paths = [] }, Cmd.none )



-- VIEW


{-| One list entry with a clickable /get download link for `path`.
-}
formatEntry : Url.Url -> String -> Html msg
formatEntry url path =
    let
        link =
            Util.urlPrefixToString url ++ "get" ++ Util.urlEncodePath path
    in
    li [] [ a [ href link ] [ text link ] ]


{-| Modal body: a short explanation plus the list of share links.
-}
viewShare : Model -> Url.Url -> List (Grid.Column Msg)
viewShare model url =
    [ Grid.col [ Col.xs12 ]
        [ p [] [ text "Use those links to share the selected files with people that do not use brig." ]
        , p []
            [ b [] [ text "Note:" ]
            , text " Remember, they still need to authenticate themselves."
            ]
        , ul [ id "share-list" ] (List.map (formatEntry url) model.paths)
        ]
    ]


view : Model -> Url.Url -> Html Msg
view model url =
    Modal.config ModalClose
        |> Modal.large
        |> Modal.withAnimation AnimateModal
        |> Modal.header [ class "modal-title modal-header-primary" ]
            [ h4 [] [ text "Share hyperlinks" ] ]
        |> Modal.body []
            [ Grid.containerFluid []
                [ Grid.row [ Row.attrs [ class "scrollable-modal-row" ] ] (viewShare model url) ]
            ]
        |> Modal.footer []
            [ Button.button
                [ Button.outlinePrimary
                , Button.attrs [ onClick <| AnimateModal Modal.hiddenAnimated ]
                ]
                [ text "Close" ]
            ]
        |> Modal.view model.modal


{-| Message that opens this modal for the given paths.
-}
show : List String -> Msg
show paths =
    ModalShow paths



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Modal.subscriptions model.modal AnimateModal ]



================================================
FILE: gateway/elm/src/Modals/Upload.elm
================================================
module Modals.Upload exposing
    ( Model
    , Msg
    , buildButton
    , newModel
    , subscriptions
    , update
    , viewUploadState
    )

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Progress as Progress
import Bootstrap.Text as Text
import Commands
import Delay
import Dict
import File
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Json.Decode as D
import List
import Tuple


{-| A path together with the visibility of its success/failure alert.
-}
type alias Alertable =
    { alert : Alert.Visibility
    , path : String
    }


type alias Model =
    { uploads : Dict.Dict String Float -- Upload progress (0..1) keyed by file name.
    , failed : List Alertable
    , success : List Alertable
    }


type Msg
    = UploadSelectedFiles String (List File.File)
    | UploadProgress String Http.Progress
    | Uploaded String (Result Http.Error ())
    | UploadCancel String
    | AlertMsg String Alert.Visibility



-- INIT


newModel : Model
newModel =
    { uploads = Dict.empty
    , failed = []
    , success = []
    }


{-| Set the alert visibility on the Alertable matching `path`; leave others untouched.
-}
alertMapper : String -> Alert.Visibility -> Alertable -> Alertable
alertMapper path vis a =
    case a.path == path of
        True ->
            { a | alert = vis }

        False ->
            a



-- UPDATE


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        UploadSelectedFiles root files ->
            -- Register all files at 0% progress and fire one upload command each.
            let
                newUploads =
                    Dict.union model.uploads <|
                        Dict.fromList (List.map (\f -> ( File.name f, 0 )) files)
            in
            ( { model | uploads = newUploads }
            , Cmd.batch (List.map (Commands.doUpload Uploaded root) files)
            )

        UploadProgress path progress ->
            case progress of
                Http.Sending p ->
                    ( { model | uploads = Dict.insert path (Http.fractionSent p) model.uploads }, Cmd.none )

                Http.Receiving _ ->
                    ( model, Cmd.none )

        Uploaded path result ->
            -- Upload finished: move it from `uploads` to `success` or `failed`
            -- and auto-close the notification after a delay.
            let
                newUploads =
                    Dict.remove path model.uploads
            in
            case result of
                Ok _ ->
                    ( { model
                        | uploads = newUploads
                        , success = Alertable Alert.shown path :: model.success
                      }
                    , Delay.after 5 Delay.Second (AlertMsg path Alert.closed)
                    )

                Err _ ->
                    ( { model
                        | uploads = newUploads
                        , failed = Alertable Alert.shown path :: model.failed
                      }
                    , Delay.after 30 Delay.Second (AlertMsg path Alert.closed)
                    )

        UploadCancel path ->
            -- Cancel via the tracker name that was used to start the upload.
            ( { model | uploads = Dict.remove path model.uploads }
            , Http.cancel ("upload-" ++ path)
            )

        AlertMsg path vis ->
            ( { model
                | success = List.map (alertMapper path vis) model.success
                , failed = List.map (alertMapper path vis) model.failed
              }
            , Cmd.none
            )



-- VIEW


filesDecoder : D.Decoder (List File.File)
filesDecoder =
    D.at [ "target", "files" ] (D.list File.decoder)


{-| The »Upload« button with a hidden multi-file input; disabled when the
currently shown entry is a file.
-}
buildButton : Model -> Bool -> String -> (Msg -> msg) -> Html msg
buildButton model currIsFile currRoot toMsg =
    label
        [ class "btn btn-file btn-link btn-default text-left"
        , id "action-btn"
        , if currIsFile then
            class "disabled"

          else
            class "btn-default"
        ]
        [ span [ class "fas fa-plus" ] []
        , span [ class "d-lg-inline d-none" ] [ text "\u{00A0}\u{00A0}Upload" ]
        , input
            [ type_ "file"
            , multiple True
            , on "change"
                (D.map toMsg
                    (D.map
                        (UploadSelectedFiles currRoot)
                        filesDecoder
                    )
                )
            , style "display" "none"
            , disabled currIsFile
            ]
            []
        ]


{-| Shorten `text` to at most `length` characters, appending an ellipsis.
-}
clampText : String -> Int -> String
clampText text length =
    if String.length text <= length then
        text

    else
        String.slice 0 length text ++ "…"


{-| A dismissable success/failure notification for one uploaded path.
-}
viewAlert : Alert.Visibility -> String -> Bool -> Html Msg
viewAlert alert path isSuccess =
    Alert.config
        |> Alert.dismissableWithAnimation (AlertMsg path)
        |> (if isSuccess then
                Alert.success

            else
                Alert.danger
           )
        |> Alert.children
            [ Grid.row []
                [ Grid.col [ Col.xs10 ]
                    [ span
                        [ if isSuccess then
                            class "fas fa-xs fa-check"

                          else
                            class "fas fa-xs fa-exclamation-circle"
                        ]
                        []
                    , text (" " ++ clampText path 15)
                    ]
                , Grid.col [ Col.xs2, Col.textAlign Text.alignXsRight ]
                    [ Button.button
                        [ Button.roleLink
                        , Button.attrs
                            [ class "notification-close-btn"
                            , onClick (AlertMsg path Alert.closed)
                            ]
                        ]
                        [ span [ class "fas fa-xs fa-times" ] [] ]
                    ]
                ]
            ]
        |> Alert.view alert


{-| A progress bar with a cancel button for one running upload.
-}
viewProgressIndicator : String -> Float -> Html Msg
viewProgressIndicator path fraction =
    Grid.row []
        [ Grid.col [ Col.md10 ]
            [ Progress.progress
                [ Progress.value (100 * fraction)
                , Progress.customLabel [ text (clampText path 25) ]
                , Progress.attrs [ style "height" "25px" ]
                , Progress.wrapperAttrs [ style "height" "25px" ]
                ]
            ]
        , Grid.col [ Col.md2 ]
            [ Button.button
                [ Button.roleLink
                , Button.attrs [ class "progress-cancel", onClick (UploadCancel path) ]
                ]
                [ span [ class "fas fa-xs fa-times" ] [] ]
            ]
        ]


{-| Notification area: success alerts, failure alerts and progress bars.
-}
viewUploadState : Model -> Html Msg
viewUploadState model =
    div []
        [ br [] []
        , br [] []
        , ul [ class "notification-list list-group" ]
            (List.map (\a -> viewAlert a.alert a.path True) model.success
                ++ List.map (\a -> viewAlert a.alert a.path False) model.failed
                ++ List.map
                    (\p -> viewProgressIndicator (Tuple.first p) (Tuple.second p))
                    (Dict.toList model.uploads)
            )
        ]



-- SUBSCRIPTIONS


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Sub.batch
            (List.map
                (\p -> Http.track ("upload-" ++ p) (UploadProgress p))
                (Dict.keys model.uploads)
            )
        , Sub.batch
            (List.map (\a -> Alert.subscriptions a.alert (AlertMsg a.path)) model.success)
        , Sub.batch
            (List.map (\a -> Alert.subscriptions a.alert (AlertMsg a.path)) model.failed)
        ]



================================================
FILE: gateway/elm/src/Pinger.elm
================================================
port module Pinger exposing (pinger)

-- Incoming port: the JS side pushes periodic ping messages through it.


port pinger : (String -> msg) -> Sub msg



================================================
FILE: gateway/elm/src/Routes/Commits.elm
================================================
module Routes.Commits exposing
    ( Model
    , Msg
    , newModel
    , reload
    , reloadIfNeeded
    , subscriptions
    , update
    , updateUrl
    , view
    )

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Form.Input as Input
import Bootstrap.Form.InputGroup as InputGroup
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Grid.Row as Row
import Bootstrap.ListGroup as ListGroup
import Bootstrap.Table as Table
import Bootstrap.Text as Text
import Browser.Navigation as Nav
import Commands
import Delay
import Dict
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Html.Lazy as Lazy
import Http
import Scroll
import Time
import Url
import Util



-- MODEL:


{-| How many commits are fetched per page while scrolling.
-}
loadLimit : Int
loadLimit =
    20


type State
    = Loading
    | Failure String
    | Success (List Commands.Commit)


type alias Model =
    { key : Nav.Key
    , state : State
    , zone : Time.Zone
    , filter : String -- Current search filter over the commit log.
    , offset : Int -- Offset of the last loaded page (infinite scrolling).
    , alert : Util.AlertState
    , url : Url.Url
    , haveStagedChanges : Bool
    , rights : List String -- Rights of the logged-in user (e.g. "fs.edit").
    }


newModel : Url.Url -> Nav.Key -> Time.Zone -> List String -> Model
newModel url key zone rights =
    { key = key
    , state = Loading
    , zone = zone
    , filter = ""
    , offset = 0
    , alert = Util.defaultAlertState
    , url = url
    , haveStagedChanges = False
    , rights = rights
    }


updateUrl : Model -> Url.Url -> Model
updateUrl model url =
    { model | url = url }



-- MESSAGES:


type Msg
    = GotLogResponse Bool (Result Http.Error Commands.Log)
    | GotResetResponse (Result Http.Error String)
    | CheckoutClicked String
    | SearchInput String
    | OnScroll Scroll.ScreenData
    | AlertMsg Alert.Visibility



-- UPDATE:


{-| Re-fetch everything loaded so far (flushes the current view).
-}
reload : Model -> Cmd Msg
reload model =
    Commands.doLog (GotLogResponse True) 0 (model.offset + loadLimit) model.filter


{-| Reload only when nothing was loaded yet.
-}
reloadIfNeeded : Model -> Cmd Msg
reloadIfNeeded model =
    case model.state of
        Success commits ->
            if List.length commits == 0 then
                reload model

            else
                Cmd.none

        _ ->
            Cmd.none


{-| Fetch the next page without discarding already loaded commits.
-}
reloadWithoutFlush : Model -> Int -> Cmd Msg
reloadWithoutFlush model newOffset =
    Commands.doLog (GotLogResponse False) newOffset loadLimit model.filter


toMap : List Commands.Commit -> Dict.Dict Int Commands.Commit
toMap commits =
    Dict.fromList (List.map (\c -> ( c.index, c )) commits)


{-| Merge two commit pages, de-duplicating by commit index,
highest index first.
-}
mergeCommits : List Commands.Commit -> List Commands.Commit -> List Commands.Commit
mergeCommits old new =
    Dict.union (toMap new) (toMap old)
        |> Dict.toList
        |> List.map (\( _, v ) -> v)
        |> List.reverse


{-| Show an alert of `modalTyp` and auto-close it after `duration` seconds.
-}
showAlert : Model -> Float -> Util.AlertType -> String -> ( Model, Cmd Msg )
showAlert model duration modalTyp message =
    let
        newAlert =
            Util.AlertState message modalTyp Alert.shown
    in
    ( { model | alert = newAlert }
    , Cmd.batch
        [ Delay.after duration Delay.Second (AlertMsg Alert.closed) ]
    )


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        GotLogResponse doFlush result ->
            case result of
                Ok log ->
                    -- Got a new load of data. Merge it with the previous dataset,
                    -- unless we want to flush the current view.
                    let
                        ( prevCommits, newOffset ) =
                            if doFlush then
                                ( [], 0 )

                            else
                                case model.state of
                                    Success oldCommits ->
                                        ( oldCommits, model.offset + loadLimit )

                                    _ ->
                                        ( [], model.offset )
                    in
                    ( { model
                        | state = Success (mergeCommits prevCommits log.commits)
                        , offset = newOffset
                        , haveStagedChanges = log.haveStagedChanges
                      }
                    , Cmd.none
                    )

                Err err ->
                    ( { model | state = Failure (Util.httpErrorToString err) }, Cmd.none )

        GotResetResponse result ->
            case result of
                Ok _ ->
                    -- FIX: corrected typo in the user visible message ("Succesfully").
                    showAlert model 5 Util.Success "Successfully reset state."

                Err err ->
                    showAlert model 15 Util.Danger ("Failed to reset: " ++ Util.httpErrorToString err)

        CheckoutClicked hash ->
            ( model, Commands.doReset GotResetResponse "/" hash )

        SearchInput filter ->
            -- Every keystroke triggers a fresh, filtered reload.
            let
                upModel =
                    { model | filter = filter }
            in
            ( upModel, reload upModel )

        OnScroll data ->
            if String.startsWith "/log" model.url.path then
                if Scroll.hasHitBottom data then
                    ( model, reloadWithoutFlush model (model.offset + loadLimit) )

                else
                    -- We don't need to reload yet.
                    ( model, Cmd.none )

            else
                -- We're currently not visible. Forget updating.
                ( model, Cmd.none )

        AlertMsg vis ->
            let
                newAlert =
                    Util.AlertState model.alert.message model.alert.typ vis
            in
            ( { model | alert = newAlert }, Cmd.none )



-- VIEW:


viewSearchBox : Model -> Html Msg
viewSearchBox model =
    InputGroup.config
        (InputGroup.text
            [ Input.placeholder "Search"
            , Input.attrs
                [ onInput SearchInput
                , value model.filter
                ]
            ]
        )
        |> InputGroup.successors
            [ InputGroup.span [ class "input-group-addon" ]
                [ button [] [ span [ class "fas fa-search fa-xs input-group-addon" ] [] ] ]
            ]
        |> InputGroup.attrs [ class "stylish-input-group input-group" ]
        |> InputGroup.view


viewCommit : Model -> Commands.Commit -> ListGroup.Item Msg
viewCommit model commit =
    ListGroup.li []
        [ Grid.row []
            [ Grid.col
                [ Col.xs1
                , Col.textAlign Text.alignXsLeft
                ]
                [ span [ class "fas fa-lg fa-save text-xs-right" ] [] ]
            , Grid.col [ Col.xs8, Col.textAlign Text.alignXsLeft ]
                [ text commit.msg ]
            , Grid.col
                [ Col.xs3
                , Col.textAlign Text.alignXsRight
                ]
                [ Button.button
                    [ Button.outlineDanger
                    , Button.attrs
                        [ onClick <| CheckoutClicked commit.hash

                        -- Checking out HEAD without staged changes is a no-op;
                        -- checking out anything needs the "fs.edit" right.
                        , disabled
                            ((not model.haveStagedChanges && List.member "head" commit.tags)
                                || not (List.member "fs.edit" model.rights)
                            )
                        ]
                    ]
                    [ text "Checkout" ]
                ]
            ]
        ]


{-| Render all commits that carry a non-empty message.
-}
viewCommitList : Model -> List Commands.Commit -> Html Msg
viewCommitList model commits =
    ListGroup.ul
        (List.map (viewCommit model) (List.filter (\c -> String.length c.msg > 0) commits))


viewCommitListContainer : Model -> List Commands.Commit -> Html Msg
viewCommitListContainer model commits =
    Grid.row []
        [ Grid.col [ Col.lg2, Col.attrs [ class "d-none d-lg-block" ] ] []
        , Grid.col [ Col.lg8, Col.md12 ]
            [ h4 [ class "text-muted text-center" ] [ text "Commits" ]
            , Util.viewAlert AlertMsg model.alert
            , br [] []
            , viewCommitList model commits
            , br [] []
            ]
        , Grid.col [ Col.lg2, Col.attrs [ class "d-none d-lg-block" ] ] []
        ]


view : Model -> Html Msg
view model =
    case model.state of
        Loading ->
            text "Still loading"

        Failure err ->
            text ("Failed to load log: " ++ err)

        Success commits ->
            Grid.row []
                [ Grid.col
                    [ Col.lg12 ]
                    [ Grid.row [ Row.attrs [ id "main-header-row" ] ]
                        [ Grid.col [ Col.xl9 ] [ text "" ]
                        , Grid.col [ Col.xl3 ] [ Lazy.lazy viewSearchBox model ]
                        ]
                    , Grid.row [ Row.attrs [ id "main-content-row" ] ]
                        [ Grid.col [ Col.xl10 ] [ viewCommitListContainer model commits ] ]
                    ]
                ]



-- SUBSCRIPTIONS:


subscriptions : Model -> Sub Msg
subscriptions model =
    Scroll.scrollOrResize OnScroll



================================================
FILE: gateway/elm/src/Routes/DeletedFiles.elm
================================================
module Routes.DeletedFiles exposing
    ( Model
    , Msg
    , newModel
    , reload
    , reloadIfNeeded
    , subscriptions
    , update
    , updateUrl
    , view
    )

import Bootstrap.Alert as Alert
import Bootstrap.Button as Button
import Bootstrap.Form.Input as Input
import Bootstrap.Form.InputGroup as InputGroup
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Grid.Row as Row
import Bootstrap.Table as Table
import Bootstrap.Text as Text
import Browser.Navigation as Nav
import Commands
import Delay
import Dict
import Filesize
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Html.Lazy as Lazy
import Http
import Scroll
import Time
import Url
import Util



-- MODEL:


{-| How many deleted entries are fetched per page while scrolling.
-}
loadLimit : Int
loadLimit =
    25


type State
    = Loading
    | Failure String
    | Success (List Commands.Entry)


type alias Model =
    { key : Nav.Key
    , state : State
    , zone : Time.Zone
    , filter : String -- Current search filter.
    , offset : Int -- Offset of the last loaded page (infinite scrolling).
    , alert : Util.AlertState
    , url : Url.Url
    , rights : List String
    }


newModel : Url.Url -> Nav.Key -> Time.Zone -> List String -> Model
newModel url key zone rights =
    { key = key
    , state = Loading
    , zone = zone
    , filter = ""
    , offset = 0
    , alert = Util.defaultAlertState
    , url = url
    , rights = rights
    }


updateUrl : Model -> Url.Url -> Model
updateUrl model url =
    { model | url = url }



-- MESSAGES:


type Msg
    = GotDeletedPathsResponse Bool (Result Http.Error (List Commands.Entry))
    | GotUndeleteResponse (Result Http.Error String)
    | UndeleteClicked String
    | SearchInput String
    | AlertMsg Alert.Visibility
    | OnScroll Scroll.ScreenData



-- UPDATE:


{-| Re-fetch everything loaded so far (flushes the current view).
-}
reload : Model -> Cmd Msg
reload model =
    Commands.doDeletedFiles (GotDeletedPathsResponse True) 0 (model.offset + loadLimit) model.filter


{-| Reload only when nothing was loaded yet.
-}
reloadIfNeeded : Model -> Cmd Msg
reloadIfNeeded model =
    case model.state of
        Success commits ->
            if List.length commits == 0 then
                reload model

            else
                Cmd.none

        _ ->
            Cmd.none


{-| Fetch the next page without discarding already loaded entries.
-}
reloadWithoutFlush : Model -> Int -> Cmd Msg
reloadWithoutFlush model newOffset =
    Commands.doDeletedFiles (GotDeletedPathsResponse False) newOffset loadLimit model.filter


toMap : List Commands.Entry -> Dict.Dict String Commands.Entry
toMap entries =
    Dict.fromList (List.map (\e -> ( e.path, e )) entries)


{-| Directories first, then alphabetically by path.
-}
sortEntries : Commands.Entry -> Commands.Entry -> Order
sortEntries a b =
    let
        inv =
            \v ->
                if v then
                    0

                else
                    1
    in
    case compare (inv a.isDir) (inv b.isDir) of
        EQ ->
            compare a.path b.path

        other ->
            other


{-| Merge two pages of entries, de-duplicating by path.
-}
mergeEntries : List Commands.Entry -> List Commands.Entry -> List Commands.Entry
mergeEntries old new =
    Dict.union (toMap new) (toMap old)
        |> Dict.toList
        |> List.map (\( _, v ) -> v)
        |> List.sortWith sortEntries


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        GotDeletedPathsResponse doFlush result ->
            case result of
                Ok entries ->
                    let
                        ( prevEntries, newOffset ) =
                            if doFlush then
                                ( [], 0 )

                            else
                                case model.state of
                                    Success oldEntries ->
                                        ( oldEntries, model.offset + loadLimit )

                                    _ ->
                                        ( [], model.offset )
                    in
                    ( { model
                        | state = Success (mergeEntries prevEntries entries)
                        , offset = newOffset
                      }
                    , Cmd.none
                    )

                Err err ->
                    ( { model | state = Failure (Util.httpErrorToString err) }, Cmd.none )

        UndeleteClicked path ->
            ( model, Commands.doUndelete GotUndeleteResponse path )

        SearchInput filter ->
            let
                upModel =
                    { model | filter = filter }
            in
            ( upModel, reload upModel )

        GotUndeleteResponse result ->
            case result of
                Ok _ ->
                    let
                        newAlert =
                            -- FIX: corrected typo ("Succcesfully").
                            Util.AlertState "Successfully undeleted one item." Util.Success Alert.shown
                    in
                    ( { model | alert = newAlert }
                    , Cmd.batch
                        [ reload model
                        , Delay.after 5 Delay.Second (AlertMsg Alert.closed)
                        ]
                    )

                Err err ->
                    let
                        newAlert =
                            Util.AlertState
                                ("Failed to undelete: " ++ Util.httpErrorToString err)
                                Util.Danger
                                Alert.shown
                    in
                    -- BUGFIX: the failure alert was constructed but never stored
                    -- in the model (the branch returned the old `model`), so
                    -- undelete errors were silently dropped. Store it like the
                    -- Ok branch above does.
                    ( { model | alert = newAlert }
                    , Cmd.batch
                        [ reload model
                        , Delay.after 15 Delay.Second (AlertMsg Alert.closed)
                        ]
                    )

        AlertMsg vis ->
            let
                newAlert =
                    Util.AlertState model.alert.message model.alert.typ vis
            in
            ( { model | alert = newAlert }, Cmd.none )

        OnScroll data ->
            if String.startsWith "/deleted" model.url.path then
                if Scroll.hasHitBottom data then
                    ( model, reloadWithoutFlush model (model.offset + loadLimit) )

                else
                    ( model, Cmd.none )

            else
                -- We're currently not visible. Forget updating.
                ( model, Cmd.none )



-- VIEW:


viewSearchBox : Model -> Html Msg
viewSearchBox model =
    InputGroup.config
        (InputGroup.text
            [ Input.placeholder "Search"
            , Input.attrs
                [ onInput SearchInput
                , value model.filter
                ]
            ]
        )
        |> InputGroup.successors
            [ InputGroup.span [ class "input-group-addon" ]
                [ button [] [ span [ class "fas fa-search fa-xs input-group-addon" ] [] ] ]
            ]
        |> InputGroup.attrs [ class "stylish-input-group input-group" ]
        |> InputGroup.view


filterEntries : String -> List Commands.Entry -> List Commands.Entry
filterEntries filter entries =
    case filter of
        "" ->
            entries

        _ ->
            List.filter (\e -> String.contains filter e.path) entries


viewEntryIcon : Commands.Entry -> Html Msg
viewEntryIcon entry =
    case entry.isDir of
        True ->
            span [ class "fas fa-lg fa-folder text-xs-right file-list-icon" ] []

        False ->
            span [ class "far fa-lg fa-file text-xs-right file-list-icon" ] []


viewDeletedEntry : Model -> Commands.Entry -> Table.Row Msg
viewDeletedEntry model entry =
    Table.tr []
        [ Table.td [] [ viewEntryIcon entry ]
        , Table.td [] [ text entry.path ]
        , Table.td [] [ text <| Util.formatLastModified model.zone entry.lastModified ]
        , Table.td [] [ text <| Filesize.format entry.size ]
        , Table.td []
            [ Button.button
                [ Button.outlineSuccess
                , Button.attrs
                    [ onClick <| UndeleteClicked entry.path
                    , disabled (not (List.member "fs.edit" model.rights))
                    ]
                ]
                [ text "Undelete" ]
            ]
        ]


viewDeletedList : Model -> List Commands.Entry -> Html Msg
viewDeletedList model entries =
    let
        filteredEntries =
            filterEntries model.filter entries
    in
    Table.table
        { options =
            [ Table.hover
            , Table.attr (class "borderless-table")
            ]
        , thead =
            Table.thead []
                [ Table.tr []
                    [ Table.th [ Table.cellAttr (style "width" "5%") ] []
                    , Table.th [ Table.cellAttr (style "width" "55%") ] [ text "Name" ]
                    , Table.th [ Table.cellAttr (style "width" "20%") ] [ text "Deleted at" ]
                    , Table.th [ Table.cellAttr (style "width" "15%") ] [ text "Size" ]
                    , Table.th [ Table.cellAttr (style "width" "5%") ] []
                    ]
                ]
        , tbody =
            Table.tbody []
                (List.map (viewDeletedEntry model) filteredEntries)
        }


{-| Show the table, or a friendly placeholder when nothing is deleted
or the search found nothing.
-}
maybeViewDeletedList : Model -> List Commands.Entry -> Html Msg
maybeViewDeletedList model entries =
    if List.length entries > 0 then
        viewDeletedList model entries

    else
        Grid.row []
            [ Grid.col [ Col.xs12, Col.textAlign Text.alignXsCenter ]
                [ span [ class "text-muted" ]
                    (if String.length model.filter == 0 then
                        [ text " The "
                        , span [ class "fas fa-md fa-trash-alt" ] []
                        , text " is empty. If you delete something, it will appear here."
                        ]

                     else
                        [ text " Search did not find anything. Remove the query to go back. " ]
                    )
                ]
            ]


viewDeletedContainer : Model -> List Commands.Entry -> Html Msg
viewDeletedContainer model entries =
    Grid.row []
        [ Grid.col [ Col.lg1, Col.attrs [ class "d-none d-lg-block" ] ] []
        , Grid.col [ Col.lg10, Col.md12 ]
            [ h4 [ class "text-muted text-center" ] [ text "Deleted files" ]
            , br [] []
            , Util.viewAlert AlertMsg model.alert
            , maybeViewDeletedList model entries
            , br [] []
            ]
        , Grid.col [ Col.lg1, Col.attrs [ class "d-none d-lg-block" ] ] []
        ]


view : Model -> Html Msg
view model =
    case model.state of
        Loading ->
            text "Still loading"

        Failure err ->
            text ("Failed to load log: " ++ err)

        Success entries ->
            Grid.row []
                [ Grid.col [ Col.lg12 ]
                    [ Grid.row [ Row.attrs [ id "main-header-row" ] ]
                        [ Grid.col [ Col.xl9 ] []
                        , Grid.col [ Col.xl3 ] [ Lazy.lazy viewSearchBox model ]
                        ]
                    , Grid.row [ Row.attrs [ id "main-content-row" ] ]
                        [ Grid.col [ Col.xl10 ] [ viewDeletedContainer model entries ] ]
                    ]
                ]



-- SUBSCRIPTIONS:


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.batch
        [ Scroll.scrollOrResize OnScroll
        , Alert.subscriptions model.alert.vis AlertMsg
        ]



================================================
FILE: gateway/elm/src/Routes/Diff.elm
================================================
module Routes.Diff exposing
    ( Model
    , Msg
    , newModel
    , reload
    , subscriptions
    , update
    , updateUrl
    , view
    )

import Bootstrap.Button as Button
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import
Bootstrap.Grid.Row as Row
import Browser.Navigation as Nav
import Commands
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Http
import Time
import Url

-- FIX: the `</>` path combinator was lost during extraction (it looked like an
-- HTML tag); `exposing (())` / `s "diff" string` is not valid Elm. Restored.
import Url.Parser exposing ((</>), parse, s, string)
import Util



-- MODEL:


type State
    = Loading
    | Finished (Result String Commands.Diff)


type alias Model =
    { key : Nav.Key
    , url : Url.Url
    , zone : Time.Zone
    , state : State
    }


newModel : Nav.Key -> Url.Url -> Time.Zone -> Model
newModel key url zone =
    { key = key
    , url = url
    , zone = zone
    , state = Loading
    }


updateUrl : Model -> Url.Url -> Model
updateUrl model url =
    { model | url = url }


{-| Extract the remote name from a /diff/<name> URL; "" when absent.
-}
nameFromUrl : Url.Url -> String
nameFromUrl url =
    Maybe.withDefault "" (parse (s "diff" </> string) url)


{-| Query the diff to the remote named in `url`; no-op without a remote name.
-}
reload : Model -> Url.Url -> Cmd Msg
reload model url =
    let
        remoteName =
            nameFromUrl url
    in
    if String.length remoteName > 0 then
        Commands.doRemoteDiff GotResponse remoteName

    else
        Cmd.none



-- MESSAGES:


type Msg
    = GotResponse (Result Http.Error Commands.Diff)
    | BackClicked



-- UPDATE:


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        GotResponse result ->
            case result of
                Ok diff ->
                    ( { model | state = Finished (Ok diff) }, Cmd.none )

                Err err ->
                    ( { model | state = Finished (Err (Util.httpErrorToString err)) }, Cmd.none )

        BackClicked ->
            ( model, Nav.back model.key 1 )



-- VIEW:


viewLine : String -> Html Msg
viewLine line =
    span [] [ text line, br [] [] ]


{-| Render a headed list of single entries; nothing when the list is empty.
-}
viewSingle : List Commands.Entry -> Html Msg -> Html Msg
viewSingle entries header =
    if List.length entries > 0 then
        span []
            [ header
            , span [] (List.map (\e -> viewLine <| " " ++ e.path) entries)
            , br [] []
            ]

    else
        text ""


{-| Render a headed list of src/dst pairs; nothing when the list is empty.
-}
viewPairs : List Commands.DiffPair -> Html Msg -> Html Msg
viewPairs entries header =
    if List.length entries > 0 then
        span []
            [ header
            , span []
                (List.map
                    (\p -> viewLine (" " ++ p.src.path ++ " ↔ " ++ p.dst.path))
                    entries
                )
            , br [] []
            ]

    else
        text ""


viewHeading : String -> String -> Html Msg
viewHeading className message =
    h5 [ class className ] [ text message ]


viewDiff : Model -> Commands.Diff -> Html Msg
viewDiff model diff =
    let
        nChanges =
            Commands.diffChangeCount diff
    in
    case nChanges of
        0 ->
            text "There are no differences!"

        n ->
            div []
                [ viewSingle diff.added (viewHeading "text-success" "Added")
                , viewSingle diff.removed (viewHeading "text-warning" "Removed")
                , viewSingle diff.ignored (viewHeading "text-muted" "Ignored")

                -- , viewSingle diff.missing (viewHeading "text-secondary" "Missing")
                , viewPairs diff.moved (viewHeading "text-primary" "Moved")
                , viewPairs diff.merged (viewHeading "text-info" "Merged")
                , viewPairs diff.conflict (viewHeading "text-danger" "Conflicts")
                , br [] []
                , br [] []
                , text (String.fromInt n ++ " changes in total")
                ]


viewDiffContainer : Model -> Result String Commands.Diff -> Html Msg
viewDiffContainer model result =
    Grid.row []
        [ Grid.col [ Col.lg2, Col.attrs [ class "d-none d-lg-block" ] ] []
        , Grid.col [ Col.lg8, Col.md12 ]
            [ h4 [ class "text-center" ]
                [ span [ class "text-muted" ] [ text "Difference to »" ]
                , text (nameFromUrl model.url)
                , span [ class "text-muted" ] [ text "«" ]
                , Button.button
                    [ Button.roleLink
                    , Button.attrs [ onClick BackClicked ]
                    ]
                    [ span [ class "font-weight-light" ] [ text "(go back)" ] ]
                ]
            , br [] []
            , case result of
                Ok diff ->
                    viewDiff model diff

                Err err ->
                    text err
            , br [] []
            ]
        , Grid.col [ Col.lg2, Col.attrs [ class "d-none d-lg-block" ] ] []
        ]


view : Model -> Html Msg
view model =
    case model.state of
        Loading ->
            text "Still loading"

        Finished result ->
            Grid.row []
                [ Grid.col [ Col.lg12 ]
                    [ Grid.row [ Row.attrs [ id "main-content-row" ] ]
                        [ Grid.col [ Col.xl10 ] [ viewDiffContainer model result ] ]
                    ]
                ]



-- SUBSCRIPTIONS:


subscriptions : Model -> Sub Msg
subscriptions model =
    Sub.none



================================================
FILE: gateway/elm/src/Routes/Ls.elm
================================================
module Routes.Ls exposing
    ( Model
    , Msg
    , buildModals
    , changeTimeZone
    , changeUrl
    , doListQueryFromUrl
    , newModel
    , subscriptions
    , update
    , view
    )

import Bootstrap.Alert as Alert
import Bootstrap.Breadcrumb as Breadcrumb
import Bootstrap.Button as Button
import Bootstrap.ButtonGroup as ButtonGroup
import Bootstrap.Dropdown as Dropdown
import Bootstrap.Form.Input as Input
import Bootstrap.Form.InputGroup as InputGroup
import Bootstrap.Grid as Grid
import Bootstrap.Grid.Col as Col
import Bootstrap.Grid.Row as Row
import Bootstrap.ListGroup as ListGroup
import Bootstrap.Table as Table
import Bootstrap.Text as Text
import Browser.Navigation as Nav
import Commands
import Filesize
import Html exposing (..)
import Html.Attributes exposing (..)
import Html.Events exposing (..)
import Html.Lazy as Lazy
import Http
import Modals.History as History
import Modals.Mkdir as Mkdir
import Modals.MoveCopy as MoveCopy
import Modals.Remove as Remove
import Modals.Rename as Rename
import Modals.Share as Share
import Modals.Upload as Upload
import Set
import Time
import Url
import Url.Builder as UrlBuilder
import Url.Parser as UrlParser
import Url.Parser.Query as Query
import Util



-- MODEL


type alias ActualModel =
    { entries : List Commands.Entry
    , checked : Set.Set String -- Paths currently ticked in the listing.
    , isFiltered : Bool
    , self : Commands.Entry -- Entry of the directory/file currently shown.
    , sortState : ( SortDirection, SortKey )
    }


type State
    = Failure
    | Loading
    | Success ActualModel


type alias Model =
    { key : Nav.Key
    , url : Url.Url
    , zone : Time.Zone
    , state : State
    , alert : Alert.Visibility
    , currError : String
    , rights : List String

    -- Sub models (for modals and dialogs):
    , historyState : History.Model
    , renameState : Rename.Model
    , moveState : MoveCopy.Model
    , copyState : MoveCopy.Model
    , uploadState : Upload.Model
    , mkdirState : Mkdir.Model
    , removeState : Remove.Model
    , shareState : Share.Model
    }


newModel : Nav.Key -> Url.Url -> List String -> Model
newModel key url rights =
    { key = key
    , url = url
    , zone = Time.utc
    , state = Loading
    , alert = Alert.closed
    , rights = rights
    , currError = ""
    , historyState = History.newModel rights
    , renameState = Rename.newModel
    , moveState = MoveCopy.newMoveModel
    , copyState = MoveCopy.newCopyModel
    , uploadState = Upload.newModel
    , mkdirState = Mkdir.newModel
    , removeState = Remove.newModel
    , shareState = Share.newModel
    }


changeUrl : Url.Url -> Model -> Model
changeUrl url model =
    { model | url = url }


changeTimeZone : Time.Zone -> Model -> Model
changeTimeZone zone model =
    { model | zone = zone }


{-| Number of really selected entries ("" is the internal "all" marker).
-}
nSelectedItems : Model -> Int
nSelectedItems model =
    case model.state of
        Success actualModel ->
            Set.filter (\e -> String.isEmpty e |> not) actualModel.checked |> Set.size

        _ ->
            0


{-| All selected paths, without the internal "all" marker.
-}
selectedPaths : Model -> List String
selectedPaths model =
    case model.state of
        Success actualModel ->
            Set.filter (\e -> String.isEmpty e |> not) actualModel.checked |> Set.toList

        _ ->
            []


currIsFile : Model -> Bool
currIsFile model =
    case model.state of
        Success actualModel ->
            not actualModel.self.isDir

        _ ->
            False


currRoot : Model -> Maybe String
currRoot model =
    case model.state of
        Success actualModel ->
            Just actualModel.self.path

        _ ->
            Nothing


currTotalSize : Model -> Int
currTotalSize model =
    case model.state of
        Success actualModel ->
            actualModel.self.size

        _ ->
            0


{-| Summed size of all currently selected entries.
-}
currSelectedSize : Model -> Int
currSelectedSize model =
    case model.state of
        Success actualModel ->
            let
                entryToSizeIfSelected =
                    \e ->
                        if Set.member e.path actualModel.checked then
                            e.size

                        else
                            0
            in
            List.foldl (+) 0 (List.map entryToSizeIfSelected actualModel.entries)

        _ ->
            0


{-| Does `name` exist in the currently listed directory?
Always False while a filter is active, since the listing is partial then.
-}
existsInCurr : Model -> String -> Bool
existsInCurr model name =
    case model.state of
        Success actualModel ->
            case actualModel.isFiltered of
                True ->
                    False

                False ->
                    List.any (\e -> name == Util.basename e.path) actualModel.entries

        _ ->
            False



-- MESSAGES


type SortKey
    = None
    | Name
    | ModTime
    | Pin
    | Size


type SortDirection
    = Ascending
    | Descending


type Msg
    = GotResponse (Result Http.Error Commands.ListResponse)
    | GotPinResponse (Result Http.Error String)
    | CheckboxTick String Bool
    | CheckboxTickAll Bool
    | ActionDropdownMsg Commands.Entry Dropdown.State
    | RowClicked Commands.Entry
    | RemoveClicked Commands.Entry
    | HistoryClicked Commands.Entry
    | RemoveResponse (Result Http.Error String)
    | SortBy SortDirection SortKey
    | AlertMsg Alert.Visibility
    | SearchInput String
    | PinClicked String Bool
      -- Sub messages:
    | HistoryMsg History.Msg
    | RenameMsg Rename.Msg
    | MoveMsg MoveCopy.Msg
    | CopyMsg MoveCopy.Msg
      -- Modal sub messages:
    | UploadMsg Upload.Msg
    | MkdirMsg Mkdir.Msg
    | RemoveMsg Remove.Msg
    | ShareMsg Share.Msg



-- UPDATE


{-| Set the dropdown state on the entry that matches `refEntry`'s path.
-}
fixDropdownState : Commands.Entry -> Dropdown.State -> Commands.Entry -> Commands.Entry
fixDropdownState refEntry state entry =
    if entry.path == refEntry.path then
        { entry | dropdown = state }

    else
        entry


sortBy : ActualModel -> SortDirection -> SortKey -> ActualModel
sortBy model direction key =
    case direction of
        Ascending ->
            { model
                | entries = sortByAscending model key
                , sortState = ( Ascending, key )
            }

        Descending ->
            { model
                | entries = List.reverse (sortByAscending model key)
                , sortState = ( Descending, key )
            }


{-| Sort rank of an entry's pin state: explicit pin > implicit pin > unpinned.
-}
entryPinToSortKey : Commands.Entry -> Int
entryPinToSortKey entry =
    case ( entry.isPinned, entry.isExplicit ) of
        ( True, True ) ->
            2

        ( True, False ) ->
            1

        _ ->
            0


sortByAscending : ActualModel -> SortKey -> List Commands.Entry
sortByAscending model key =
    case key of
        Name ->
            List.sortBy (\e -> String.toLower (Util.basename e.path)) model.entries

        ModTime ->
            List.sortBy (\e -> Time.posixToMillis e.lastModified) model.entries

        Pin ->
            List.sortBy (\e -> entryPinToSortKey e) model.entries

        Size ->
            List.sortBy .size model.entries

        None ->
            model.entries


{-| Tick/untick one path; ticking the last missing entry also sets
the "" marker that stands for "all selected".
-}
updateCheckboxTickActual : String -> Bool -> ActualModel -> ActualModel
updateCheckboxTickActual path isChecked model =
    case isChecked of
        True ->
            let
                updatedSet =
                    Set.insert path model.checked
            in
            { model
                | checked =
                    if Set.size updatedSet == List.length model.entries then
                        Set.insert "" updatedSet

                    else
                        updatedSet
            }

        False ->
            { model
                | checked =
                    Set.remove "" <| Set.remove path model.checked
            }


updateCheckboxTick : String -> Bool -> Model -> Model
updateCheckboxTick path isChecked model =
    case model.state of
        Success actualModel ->
            { model
                | state =
                    Success (updateCheckboxTickActual path isChecked actualModel)
            }

        _ ->
            model


updateCheckboxTickAllActual : Bool -> ActualModel -> ActualModel
updateCheckboxTickAllActual isChecked model =
    case isChecked of
        True ->
            { model
                | checked =
                    Set.fromList (List.map (\e -> e.path) model.entries ++ [ "" ])
            }

        False ->
            { model | checked = Set.empty }


updateCheckboxTickAll : Bool -> Model -> Model
updateCheckboxTickAll isChecked model =
    case model.state of
        Success actualModel ->
            { model
                | state =
                    Success (updateCheckboxTickAllActual isChecked actualModel)
            }

        _ ->
            model


setDropdownState : Model -> Commands.Entry -> Dropdown.State -> Model
setDropdownState model entry state =
    case model.state of
        Success actualModel ->
            { model
                | state =
                    Success
                        { actualModel
                            | entries =
                                List.map (fixDropdownState entry state) actualModel.entries
                        }
            }

        _ ->
            model


update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        ActionDropdownMsg entry state ->
            ( setDropdownState model entry state, Cmd.none )

        RowClicked entry ->
            ( model, Nav.pushUrl model.key ("/view" ++ Util.urlEncodePath entry.path) )

        RemoveClicked entry ->
            ( setDropdownState model entry Dropdown.initialState
            , Commands.doRemove RemoveResponse [ entry.path ]
            )

        SearchInput query ->
            ( model
              -- Save the filter query in the URL itself.
              -- This way the query can be shared amongst users via link.
, Nav.pushUrl model.key <| model.url.path ++ (if String.length query == 0 then "" else UrlBuilder.toQuery [ UrlBuilder.string "filter" query ] ) ) HistoryClicked entry -> ( setDropdownState model entry Dropdown.initialState , Cmd.map HistoryMsg (History.show entry.path) ) SortBy direction key -> case model.state of Success actualModel -> ( { model | state = Success (sortBy actualModel direction key) }, Cmd.none ) _ -> ( model, Cmd.none ) RemoveResponse result -> case result of Ok _ -> ( model, Cmd.none ) Err err -> ( { model | currError = Util.httpErrorToString err , alert = Alert.shown } , Cmd.none ) GotResponse result -> case result of Ok response -> -- New list model means also new checked entries. ( { model | state = Success <| { entries = response.entries , isFiltered = response.isFiltered , checked = if response.self.isDir then Set.empty else Set.singleton response.self.path , self = response.self , sortState = ( Ascending, None ) } } , Cmd.none ) Err _ -> ( { model | state = Failure }, Cmd.none ) GotPinResponse result -> case result of Ok _ -> ( model, Cmd.none ) -- TODO: Error handling? 
Err _ -> ( model, Cmd.none ) CheckboxTick path isChecked -> ( updateCheckboxTick path isChecked model, Cmd.none ) CheckboxTickAll isChecked -> ( updateCheckboxTickAll isChecked model, Cmd.none ) PinClicked path shouldBePinned -> if shouldBePinned then ( model, Commands.doPin GotPinResponse path "curr" ) else ( model, Commands.doUnpin GotPinResponse path "curr" ) AlertMsg state -> ( { model | alert = state }, Cmd.none ) HistoryMsg subMsg -> let ( newSubModel, newSubCmd ) = History.update subMsg model.historyState in ( { model | historyState = newSubModel }, Cmd.map HistoryMsg newSubCmd ) RenameMsg subMsg -> let ( newSubModel, newSubCmd ) = Rename.update subMsg model.renameState in ( { model | renameState = newSubModel }, Cmd.map RenameMsg newSubCmd ) MoveMsg subMsg -> let ( newSubModel, newSubCmd ) = MoveCopy.update subMsg model.moveState in ( { model | moveState = newSubModel }, Cmd.map MoveMsg newSubCmd ) CopyMsg subMsg -> let ( newSubModel, newSubCmd ) = MoveCopy.update subMsg model.copyState in ( { model | copyState = newSubModel }, Cmd.map CopyMsg newSubCmd ) UploadMsg subMsg -> let ( newSubModel, newSubCmd ) = Upload.update subMsg model.uploadState in ( { model | uploadState = newSubModel }, Cmd.map UploadMsg newSubCmd ) MkdirMsg subMsg -> let ( newSubModel, newSubCmd ) = Mkdir.update subMsg model.mkdirState in ( { model | mkdirState = newSubModel }, Cmd.map MkdirMsg newSubCmd ) RemoveMsg subMsg -> let ( newSubModel, newSubCmd ) = Remove.update subMsg model.removeState in ( { model | removeState = newSubModel }, Cmd.map RemoveMsg newSubCmd ) ShareMsg subMsg -> let ( newSubModel, newSubCmd ) = Share.update subMsg model.shareState in ( { model | shareState = newSubModel }, Cmd.map ShareMsg newSubCmd ) -- VIEW showAlert : Model -> Html Msg showAlert model = Alert.config |> Alert.dismissable AlertMsg |> Alert.danger |> Alert.children [ Alert.h4 [] [ text "Oh, something went wrong! 
:(" ] , text ("The exact error was: " ++ model.currError) ] |> Alert.view model.alert viewMetaRow : String -> Html msg -> Html msg viewMetaRow key value = Grid.row [] [ Grid.col [ Col.xs4, Col.textAlign Text.alignXsLeft ] [ span [ class "text-muted" ] [ text key ] ] , Grid.col [ Col.xs8, Col.textAlign Text.alignXsRight ] [ value ] ] viewDownloadButton : Model -> ActualModel -> Url.Url -> Html msg viewDownloadButton model actModel url = Button.linkButton [ Button.outlinePrimary , Button.attrs (if mayDownload model then [ href (Util.urlPrefixToString url ++ "get" ++ Util.urlEncodePath actModel.self.path ++ "?direct=yes" ) ] else [ class "text-muted", style "opacity" "0.1" ] ) ] [ span [ class "fas fa-download" ] [], text " Download" ] viewViewButton : Model -> ActualModel -> Url.Url -> Html msg viewViewButton model actModel url = Button.linkButton [ Button.outlinePrimary , Button.attrs (if mayDownload model then [ href (Util.urlPrefixToString url ++ "get" ++ Util.urlEncodePath actModel.self.path ) ] else [ class "text-muted", style "opacity" "0.1" ] ) ] [ span [ class "fas fa-eye" ] [], text " View" ] viewPinIcon : Bool -> Bool -> Html msg viewPinIcon isPinned isExplicit = case ( isPinned, isExplicit ) of ( True, True ) -> span [ class "fa fa-map-marker", class "text-success" ] [] ( True, False ) -> span [ class "fa fa-map-marker-alt", class "text-warning" ] [] _ -> span [ class "fa fa-times", class "text-danger" ] [] viewPinButton : Model -> Commands.Entry -> Html Msg viewPinButton model entry = Button.button [ Button.roleLink , Button.attrs [ disabled (not (List.member "fs.edit" model.rights)) , onClick (PinClicked entry.path (not entry.isPinned)) ] ] [ viewPinIcon entry.isPinned entry.isExplicit ] viewSingleEntry : Model -> ActualModel -> Time.Zone -> Html Msg viewSingleEntry model actualModel zone = Grid.row [] [ Grid.col [ Col.xs2 ] [] , Grid.col [ Col.xs8, Col.textAlign Text.alignXsCenter ] [ ListGroup.ul [ ListGroup.li [] [ viewMetaRow "Path" (text <| 
actualModel.self.path) ] , ListGroup.li [] [ viewMetaRow "Size" (text <| Filesize.format actualModel.self.size) ] , ListGroup.li [] [ viewMetaRow "Owner" (text <| actualModel.self.user) ] , ListGroup.li [] [ viewMetaRow "Last Modified" (text <| Util.formatLastModified zone actualModel.self.lastModified) ] , ListGroup.li [] [ viewMetaRow "Pinned" (viewPinButton model actualModel.self) ] , ListGroup.li [ ListGroup.light ] [ viewDownloadButton model actualModel model.url , text " " , viewViewButton model actualModel model.url ] ] ] , Grid.col [ Col.xs2 ] [] ] viewList : Model -> Time.Zone -> Html Msg viewList model zone = case model.state of Failure -> div [] [ text "Sorry, something did not work out as expected." ] Loading -> text "Loading..." Success actualModel -> case actualModel.self.isDir of True -> div [] [ showAlert model , Lazy.lazy3 entriesToHtml model zone actualModel ] False -> div [] [ showAlert model , Lazy.lazy3 viewSingleEntry model actualModel zone ] buildBreadcrumbs : List String -> List String -> List (Breadcrumb.Item msg) buildBreadcrumbs names previous = let displayName = \n -> if String.length n <= 0 then "Home" else n in case names of [] -> -- Recursion stop. [] [ name ] -> -- Final element in the breadcrumbs. -- Already selected therefore. [ Breadcrumb.item [] [ text (displayName name) ] ] name :: rest -> -- Some intermediate element. 
Breadcrumb.item [] [ a [ href ("/view/" ++ String.join "/" (name :: previous)) ] [ text (displayName name) ] ] :: buildBreadcrumbs rest (previous ++ [ name ]) viewBreadcrumbs : Model -> Html msg viewBreadcrumbs model = div [ id "breadcrumbs-box" ] [ Breadcrumb.container (buildBreadcrumbs ("" :: (Util.urlToPath model.url |> Util.splitPath)) [] ) ] viewEntryIcon : Commands.Entry -> Html Msg viewEntryIcon entry = case entry.isDir of True -> span [ class "fas fa-lg fa-folder text-xs-right file-list-icon" ] [] False -> span [ class "far fa-lg fa-file text-xs-right file-list-icon" ] [] makeCheckbox : Bool -> (Bool -> Msg) -> Html Msg makeCheckbox isChecked msg = div [ class "checkbox" ] [ label [] [ input [ type_ "checkbox", onCheck msg, checked isChecked ] [] , span [ class "cr" ] [ i [ class "cr-icon fas fa-lg fa-check" ] [] ] ] ] readCheckedState : ActualModel -> String -> Bool readCheckedState model path = Set.member path model.checked formatPath : ActualModel -> Commands.Entry -> String formatPath model entry = case model.isFiltered of True -> String.join "/" (Util.splitPath entry.path) False -> Util.basename entry.path mayDownload : Model -> Bool mayDownload model = List.member "fs.download" model.rights mayEdit : Model -> Bool mayEdit model = List.member "fs.edit" model.rights buildActionDropdown : Model -> ActualModel -> Commands.Entry -> Html Msg buildActionDropdown model actModel entry = Dropdown.dropdown entry.dropdown { options = [] , toggleMsg = ActionDropdownMsg entry , toggleButton = Dropdown.toggle [ Button.roleLink ] [ span [ class "fas fa-ellipsis-h" ] [] ] , items = [ Dropdown.buttonItem [ onClick (HistoryClicked entry) ] [ span [ class "fa fa-md fa-history" ] [] , text " History" ] , Dropdown.divider , Dropdown.anchorItem [ href ("/get" ++ Util.urlEncodePath (Util.joinPath [ actModel.self.path, Util.basename entry.path ]) ++ "?direct=yes" ) , onClick (ActionDropdownMsg entry Dropdown.initialState) , disabled (not (mayDownload model)) ] [ span [ class 
"fa fa-md fa-file-download" ] [] , text " Download" ] , Dropdown.anchorItem [ href ("/get" ++ Util.urlEncodePath (Util.joinPath [ actModel.self.path, Util.basename entry.path ]) ) , onClick (ActionDropdownMsg entry Dropdown.initialState) , disabled (not (mayDownload model)) ] [ span [ class "fa fa-md fa-eye" ] [] , text " View" ] , Dropdown.anchorItem [ onClick (ShareMsg <| Share.show [ entry.path ]) ] [ span [ class "fa fa-md fa-share-alt" ] [] , text " Share" ] , Dropdown.divider , Dropdown.buttonItem [ onClick (RemoveClicked entry) , disabled (not (mayEdit model)) ] [ span [ class "fa fa-md fa-trash" ] [] , text " Delete" ] , Dropdown.divider , Dropdown.buttonItem [ onClick (RenameMsg (Rename.show entry.path)) , disabled (not (mayEdit model)) ] [ span [ class "fa fa-md fa-file-signature" ] [] , text " Rename" ] , Dropdown.buttonItem [ onClick (MoveMsg (MoveCopy.show entry.path)) , disabled (not (mayEdit model)) ] [ span [ class "fa fa-md fa-arrow-right" ] [] , text " Move" ] , Dropdown.buttonItem [ onClick (CopyMsg (MoveCopy.show entry.path)) , disabled (not (mayEdit model)) ] [ span [ class "fa fa-md fa-copy" ] [] , text " Copy" ] ] } entryToHtml : Model -> ActualModel -> Time.Zone -> Commands.Entry -> Table.Row Msg entryToHtml model actModel zone e = Table.tr [] [ Table.td [] [ makeCheckbox (readCheckedState actModel e.path) (CheckboxTick e.path) ] , Table.td [ Table.cellAttr (class "icon-column"), Table.cellAttr (onClick (RowClicked e)) ] [ viewEntryIcon e ] , Table.td [ Table.cellAttr (onClick (RowClicked e)) ] [ a [ "/view" ++ e.path |> href ] [ text (formatPath actModel e) ] ] , Table.td [ Table.cellAttr (onClick (RowClicked e)) ] [ Util.formatLastModifiedOwner zone e.lastModified e.user ] , Table.td [ Table.cellAttr (onClick (RowClicked e)) ] [ text (Filesize.format e.size) ] , Table.td [] [ viewPinButton model e ] , Table.td [] [ buildActionDropdown model actModel e ] ] buildSortControl : String -> ActualModel -> SortKey -> Html Msg buildSortControl name 
model key = let ascClass = if ( Ascending, key ) == model.sortState then "sort-button-selected" else "" descClass = if ( Descending, key ) == model.sortState then "sort-button-selected" else "" in span [ class "sort-button-container text-muted" ] [ span [] [ text (name ++ " ") ] , span [ class "sort-button" ] [ Button.linkButton [ Button.small , Button.attrs [ onClick (SortBy Ascending key), class "sort-button" ] ] [ span [ class "fas fa-xs fa-arrow-up", class ascClass ] [] ] , Button.linkButton [ Button.small , Button.attrs [ onClick (SortBy Descending key), class "sort-button" ] ] [ span [ class "fas fa-xs fa-arrow-down", class descClass ] [] ] ] ] entriesToHtml : Model -> Time.Zone -> ActualModel -> Html Msg entriesToHtml model zone actModel = Table.table { options = [ Table.hover ] , thead = Table.simpleThead [ Table.th [ Table.cellAttr (style "width" "5%") ] [ makeCheckbox (readCheckedState actModel "") CheckboxTickAll ] , Table.th [ Table.cellAttr (style "width" "5%") ] [ text "" ] , Table.th [ Table.cellAttr (style "width" "37.5%") ] [ buildSortControl "Name" actModel Name ] , Table.th [ Table.cellAttr (style "width" "27.5%") ] [ buildSortControl "Modified" actModel ModTime ] , Table.th [ Table.cellAttr (style "width" "7.5%") ] [ buildSortControl "Size" actModel Size ] , Table.th [ Table.cellAttr (style "width" "10%") ] [ buildSortControl "Pin" actModel Pin ] , Table.th [ Table.cellAttr (style "width" "5%") ] [ text "" ] ] , tbody = Table.tbody [] (List.map (entryToHtml model actModel zone) actModel.entries) } buildModals : Model -> Html Msg buildModals model = let paths = selectedPaths model in span [] [ Html.map HistoryMsg (History.view model.historyState) , Html.map RenameMsg (Rename.view model.renameState) , Html.map MoveMsg (MoveCopy.view model.moveState) , Html.map CopyMsg (MoveCopy.view model.copyState) , Html.map MkdirMsg (Mkdir.view model.mkdirState model.url (existsInCurr model)) , Html.map RemoveMsg (Remove.view model.removeState paths) , Html.map 
ShareMsg (Share.view model.shareState model.url) ] searchQueryFromUrl : Url.Url -> String searchQueryFromUrl url = Maybe.withDefault "" (UrlParser.parse (UrlParser.query (Query.map (Maybe.withDefault "") (Query.string "filter")) ) { url | path = "" } ) doListQueryFromUrl : Url.Url -> Cmd Msg doListQueryFromUrl url = let path = Util.urlToPath url filter = searchQueryFromUrl url in Commands.doListQuery GotResponse path filter viewSearchBox : Model -> Html Msg viewSearchBox model = InputGroup.config (InputGroup.text [ Input.placeholder "Search" , Input.attrs [ onInput SearchInput , value (searchQueryFromUrl model.url) ] ] ) |> InputGroup.successors [ InputGroup.span [ class "input-group-addon" ] [ button [] [ span [ class "fas fa-search fa-xs input-group-addon" ] [] ] ] ] |> InputGroup.attrs [ class "stylish-input-group input-group" ] |> InputGroup.view buildActionButton : Msg -> String -> String -> Bool -> Html Msg buildActionButton msg iconName labelText isDisabled = Button.button [ Button.block , Button.small , Button.roleLink , Button.attrs [ class "text-left", disabled isDisabled, onClick msg ] ] [ span [ class "fas fa-lg", class iconName ] [] , span [ class "d-lg-inline d-none" ] [ text (" " ++ labelText) ] ] labelSelectedItems : Model -> Int -> Html Msg labelSelectedItems model num = if currIsFile model then text "" else case num of 0 -> p [] [ text " Nothing selected" , br [] [] , text (Filesize.format (currTotalSize model) ++ " in total") ] 1 -> p [] [ text " 1 item" , br [] [] , text (Filesize.format (currSelectedSize model)) ] n -> p [] [ text (" " ++ String.fromInt n ++ " items") , br [] [] , text (Filesize.format (currSelectedSize model)) ] buildDownloadUrl : Model -> String buildDownloadUrl model = UrlBuilder.absolute ("get" :: (Util.splitPath <| Util.urlToPath model.url)) (UrlBuilder.string "direct" "yes" :: (if nSelectedItems model > 0 then List.map (UrlBuilder.string "include") (selectedPaths model) else [] ) ) viewSidebarDownloadButton : Model -> 
Html Msg viewSidebarDownloadButton model = let nSelected = nSelectedItems model disabledClass = if currIsFile model || not (List.member "fs.download" model.rights) then class "disabled" else class "btn-default" in Button.linkButton [ Button.block , Button.attrs [ class "text-left btn-link download-btn" , disabledClass , href (buildDownloadUrl model) ] ] [ span [ class "fas fa-lg fa-file-download" ] [] , span [ id "action-btn", class "d-none d-lg-inline" ] [ if nSelected > 0 then text " Download selected " else text " Download all" ] ] needsRight : Model -> String -> something -> something -> something needsRight model right entry default = if List.member right model.rights then entry else default viewActionList : Model -> Html Msg viewActionList model = let nSelected = nSelectedItems model root = Maybe.withDefault "/" (currRoot model) in div [] [ div [ class "d-flex flex-lg-column flex-row" ] [ p [ class "text-muted", id "select-label" ] [ labelSelectedItems model nSelected ] , div [ class "d-flex flex-column" ] [ Upload.buildButton model.uploadState (currIsFile model || not (List.member "fs.download" model.rights)) root UploadMsg , viewSidebarDownloadButton model ] , div [ class "d-flex flex-column" ] [ buildActionButton (ShareMsg <| Share.show (selectedPaths model)) "fa-share-alt" "Share" (nSelected == 0) , buildActionButton (MkdirMsg <| Mkdir.show) "fa-edit" "New Folder" (currIsFile model || not (List.member "fs.edit" model.rights)) ] , div [ class "d-flex flex-column" ] [ buildActionButton (RemoveMsg <| Remove.show (selectedPaths model)) "fa-trash" "Delete" (currIsFile model || nSelected == 0 || not (List.member "fs.edit" model.rights)) ] ] , div [] [ Html.map UploadMsg (Upload.viewUploadState model.uploadState) ] ] view : Model -> Html Msg view model = Grid.row [] [ Grid.col [ Col.lg12 ] [ Grid.row [ Row.attrs [ id "main-header-row" ] ] [ Grid.col [ Col.xl9 ] [ viewBreadcrumbs model ] , Grid.col [ Col.xl3 ] [ Lazy.lazy viewSearchBox model ] ] , Grid.row [ 
Row.attrs [ id "main-content-row" ] ] [ Grid.col [ Col.xl10 ] [ viewList model model.zone ] , Grid.col [ Col.xl2 ] [ Lazy.lazy viewActionList model ] ] ] ] -- SUBSCRIPTIONS subscriptions : Model -> Sub Msg subscriptions model = case model.state of Success actualModel -> Sub.batch [ Alert.subscriptions model.alert AlertMsg , Sub.map HistoryMsg (History.subscriptions model.historyState) , Sub.map RenameMsg (Rename.subscriptions model.renameState) , Sub.map MoveMsg (MoveCopy.subscriptions model.moveState) , Sub.map CopyMsg (MoveCopy.subscriptions model.copyState) , Sub.map UploadMsg (Upload.subscriptions model.uploadState) , Sub.map MkdirMsg (Mkdir.subscriptions model.url model.mkdirState) , Sub.map RemoveMsg (Remove.subscriptions model.removeState) , Sub.map ShareMsg (Share.subscriptions model.shareState) , Sub.batch (List.map (\e -> Dropdown.subscriptions e.dropdown (ActionDropdownMsg e)) actualModel.entries ) ] _ -> Sub.none ================================================ FILE: gateway/elm/src/Routes/Remotes.elm ================================================ module Routes.Remotes exposing ( Model , Msg , buildModals , newModel , reload , subscriptions , update , view ) import Bootstrap.Alert as Alert import Bootstrap.Button as Button import Bootstrap.Dropdown as Dropdown import Bootstrap.Grid as Grid import Bootstrap.Grid.Col as Col import Bootstrap.Grid.Row as Row import Bootstrap.ListGroup as ListGroup import Bootstrap.Table as Table import Bootstrap.Text as Text import Browser.Navigation as Nav import Clipboard import Commands import Delay import Dict import Html exposing (..) import Html.Attributes exposing (..) import Html.Events exposing (..) 
import Http import Json.Encode as E import Modals.RemoteAdd as RemoteAdd import Modals.RemoteFolders as RemoteFolders import Modals.RemoteRemove as RemoteRemove import Time import Tuple import Url import Util -- MODEL: type State = Loading | Failure String | Success (List Commands.Remote) type alias Model = { key : Nav.Key , zone : Time.Zone , state : State , self : Commands.SelfResponse , alert : Util.AlertState , remoteAddState : RemoteAdd.Model , remoteRemoveState : RemoteRemove.Model , remoteFoldersState : RemoteFolders.Model , actionDropdowns : Dict.Dict String Dropdown.State , conflictDropdowns : Dict.Dict String Dropdown.State , rights : List String } newModel : Nav.Key -> Time.Zone -> List String -> Model newModel key zone rights = { key = key , zone = zone , state = Loading , rights = rights , self = Commands.emptySelf , remoteAddState = RemoteAdd.newModel , remoteRemoveState = RemoteRemove.newModel , remoteFoldersState = RemoteFolders.newModel , actionDropdowns = Dict.empty , conflictDropdowns = Dict.empty , alert = Util.defaultAlertState } -- MESSAGES: type Msg = GotRemoteListResponse (Result Http.Error (List Commands.Remote)) | GotSyncResponse (Result Http.Error String) | GotSelfResponse (Result Http.Error Commands.SelfResponse) | GotRemoteModifyResponse (Result Http.Error String) | SyncClicked String | AutoUpdateToggled Commands.Remote Bool | AcceptPushToggled Commands.Remote Bool | ConflictStrategyToggled Commands.Remote String | ClipboardCopyClicked String -- Sub messages: | RemoteAddMsg RemoteAdd.Msg | RemoteRemoveMsg RemoteRemove.Msg | RemoteFolderMsg RemoteFolders.Msg | ActionDropdownMsg String Dropdown.State | ConflictDropdownMsg String Dropdown.State | AlertMsg Alert.Visibility -- UPDATE: reload : Cmd Msg reload = Cmd.batch [ Commands.doRemoteList GotRemoteListResponse , Commands.doSelfQuery GotSelfResponse ] showAlert : Model -> Float -> Util.AlertType -> String -> ( Model, Cmd Msg ) showAlert model duration modalTyp message = let newAlert = 
Util.AlertState message modalTyp Alert.shown
    in
    ( { model | alert = newAlert }
    , Cmd.batch [ Delay.after duration Delay.Second (AlertMsg Alert.closed) ]
    )


{-| Route a message to the matching model change and/or server command.

Server responses either update the state directly or surface a timed
alert via `showAlert`; modal sub-messages are delegated to their
respective sub-modules.

-}
update : Msg -> Model -> ( Model, Cmd Msg )
update msg model =
    case msg of
        GotRemoteListResponse result ->
            case result of
                Ok remotes ->
                    ( { model | state = Success remotes }, Cmd.none )

                Err err ->
                    ( { model | state = Failure (Util.httpErrorToString err) }, Cmd.none )

        GotSyncResponse result ->
            case result of
                Ok _ ->
                    -- NOTE: fixed typo in the user visible message ("Succesfully").
                    showAlert model 5 Util.Success "Successfully synchronized!"

                Err err ->
                    showAlert model 20 Util.Danger ("Failed to sync: " ++ Util.httpErrorToString err)

        GotRemoteModifyResponse result ->
            case result of
                Ok _ ->
                    ( model, Cmd.none )

                Err err ->
                    showAlert model 20 Util.Danger ("Failed to set auto update: " ++ Util.httpErrorToString err)

        GotSelfResponse result ->
            case result of
                Ok self ->
                    ( { model | self = self }, Cmd.none )

                Err err ->
                    showAlert model 20 Util.Danger ("Failed to get information about ourselves: " ++ Util.httpErrorToString err)

        ActionDropdownMsg name state ->
            ( { model | actionDropdowns = Dict.insert name state model.actionDropdowns }, Cmd.none )

        ConflictDropdownMsg name state ->
            ( { model | conflictDropdowns = Dict.insert name state model.conflictDropdowns }, Cmd.none )

        SyncClicked name ->
            ( model, Commands.doRemoteSync GotSyncResponse name )

        AutoUpdateToggled remote state ->
            ( model, Commands.doRemoteModify GotRemoteModifyResponse { remote | acceptAutoUpdates = state } )

        AcceptPushToggled remote state ->
            ( model, Commands.doRemoteModify GotRemoteModifyResponse { remote | acceptPush = state } )

        ConflictStrategyToggled remote state ->
            ( model, Commands.doRemoteModify GotRemoteModifyResponse { remote | conflictStrategy = state } )

        RemoteAddMsg subMsg ->
            let
                ( upModel, upCmd ) =
                    RemoteAdd.update subMsg model.remoteAddState
            in
            ( { model | remoteAddState = upModel }, Cmd.map RemoteAddMsg upCmd )

        RemoteRemoveMsg subMsg ->
            let
                ( upModel, upCmd ) =
                    RemoteRemove.update subMsg model.remoteRemoveState
in ( { model | remoteRemoveState = upModel }, Cmd.map RemoteRemoveMsg upCmd ) RemoteFolderMsg subMsg -> let ( upModel, upCmd ) = RemoteFolders.update subMsg model.remoteFoldersState in ( { model | remoteFoldersState = upModel }, Cmd.map RemoteFolderMsg upCmd ) AlertMsg vis -> let newAlert = Util.AlertState model.alert.message model.alert.typ vis in ( { model | alert = newAlert }, Cmd.none ) ClipboardCopyClicked fingerprint -> let newModelCmd = showAlert model 2 Util.Info "Copied full fingerprint to clipboard." in ( Tuple.first newModelCmd , Cmd.batch [ Clipboard.copyToClipboard <| E.string fingerprint , Tuple.second newModelCmd ] ) -- VIEW: viewAutoUpdatesIcon : Bool -> Commands.Remote -> Bool -> Html Msg viewAutoUpdatesIcon state remote isDisabled = Util.viewToggleSwitch (AutoUpdateToggled remote) "" state isDisabled viewAcceptPushToggle : Bool -> Commands.Remote -> Bool -> Html Msg viewAcceptPushToggle state remote isDisabled = Util.viewToggleSwitch (AcceptPushToggled remote) "" state isDisabled viewRemoteState : Model -> Commands.Remote -> Html Msg viewRemoteState model remote = if remote.isAuthenticated then if remote.isOnline then span [ class "fas fa-md fa-circle text-success" ] [] else span [ class "text-warning" ] [ text <| Util.formatLastModified model.zone remote.lastSeen ] else span [ class "text-danger" ] [ text "not authenticated" ] viewFingerprintButton : String -> Html Msg viewFingerprintButton fingerprint = Button.button [ Button.roleLink , Button.attrs [ onClick <| ClipboardCopyClicked fingerprint ] ] [ viewFingerprint fingerprint ] viewFingerprint : String -> Html Msg viewFingerprint fingerprint = String.split ":" fingerprint |> List.map (\t -> span [ class "text-muted" ] [ text (String.slice 0 10 t) ]) |> List.intersperse (span [] [ text ":" ]) |> span [ class "fingerprint" ] viewActionDropdown : Model -> Commands.Remote -> Html Msg viewActionDropdown model remote = Dropdown.dropdown (Maybe.withDefault Dropdown.initialState (Dict.get remote.name 
model.actionDropdowns)) { options = [ Dropdown.alignMenuRight ] , toggleMsg = ActionDropdownMsg remote.name , toggleButton = Dropdown.toggle [ Button.roleLink ] [ span [ class "fas fa-ellipsis-h" ] [] ] , items = [ Dropdown.buttonItem [ onClick (SyncClicked remote.name) , disabled (not remote.isAuthenticated || not (List.member "fs.edit" model.rights) ) ] [ span [ class "fas fa-md fa-sync-alt" ] [], text " Sync" ] , Dropdown.anchorItem [ disabled (not remote.isAuthenticated || not (List.member "fs.view" model.rights) ) , if remote.isAuthenticated then href ("/diff/" ++ Url.percentEncode remote.name) else class "text-muted" ] [ span [ class "fas fa-md fa-search-minus" ] [], text " Diff" ] , Dropdown.divider , Dropdown.buttonItem [ onClick (RemoteRemoveMsg <| RemoteRemove.show remote.name) , disabled (not (List.member "remotes.edit" model.rights)) ] [ span [ class "text-danger" ] [ span [ class "fas fa-md fa-times" ] [] , text " Remove" ] ] ] } conflictStrategyToIconName : Model -> String -> String conflictStrategyToIconName model strategy = case strategy of "" -> if model.self.defaultConflictStrategy == "" then "fa-question text-muted" else conflictStrategyToIconName model model.self.defaultConflictStrategy "ignore" -> "fa-eject" "marker" -> "fa-marker" "embrace" -> "fa-handshake" _ -> "fa-question" viewConflictDropdown : Model -> Commands.Remote -> Bool -> Html Msg viewConflictDropdown model remote isDisabled = Dropdown.dropdown (Maybe.withDefault Dropdown.initialState (Dict.get remote.name model.conflictDropdowns)) { options = [ Dropdown.alignMenuRight , Dropdown.attrs [ disabled isDisabled ] ] , toggleMsg = ConflictDropdownMsg remote.name , toggleButton = Dropdown.toggle [ Button.roleLink, Button.attrs [ disabled isDisabled ] ] [ span [ class "fas", class <| conflictStrategyToIconName model remote.conflictStrategy ] [] ] , items = [ Dropdown.buttonItem [ onClick (ConflictStrategyToggled remote "ignore") , disabled isDisabled ] [ span [ class "fas fa-md fa-eject" 
] [], text " Ignore" ] , Dropdown.buttonItem [ onClick (ConflictStrategyToggled remote "marker") , disabled isDisabled ] [ span [ class "fas fa-md fa-marker" ] [], text " Marker" ] , Dropdown.buttonItem [ onClick (ConflictStrategyToggled remote "embrace") , disabled isDisabled ] [ span [ class "fas fa-md fa-handshake" ] [], text " Embrace" ] , Dropdown.buttonItem [ onClick (ConflictStrategyToggled remote "") , disabled isDisabled ] [ span [ class "fas fa-md fa-eraser" ] [], text " Default" ] ] } viewRemote : Model -> Commands.Remote -> Table.Row Msg viewRemote model remote = let isDisabled = not (List.member "remotes.edit" model.rights) in Table.tr [] [ Table.td [] [ span [ class "fas fa-lg fa-user-circle text-xs-right" ] [] ] , Table.td [] [ text <| " " ++ remote.name ] , Table.td [] [ viewRemoteState model remote ] , Table.td [] [ span [ class "text-muted" ] [ viewFingerprintButton remote.fingerprint ] ] , Table.td [] [ viewAutoUpdatesIcon remote.acceptAutoUpdates remote isDisabled ] , Table.td [] [ viewAcceptPushToggle remote.acceptPush remote isDisabled ] , Table.td [] [ viewConflictDropdown model remote isDisabled ] , Table.td [] [ Button.button [ Button.roleLink , Button.attrs [ onClick <| RemoteFolderMsg (RemoteFolders.show remote) , disabled isDisabled ] ] [ span [] [ case List.length remote.folders of 0 -> span [ class "fas fa-xs fa-asterisk" ] [] n -> text <| String.fromInt n ] ] ] , Table.td [ Table.cellAttr (class "text-right") ] [ viewActionDropdown model remote ] ] viewRemoteList : Model -> List Commands.Remote -> Html Msg viewRemoteList model remotes = Table.table { options = [ Table.hover , Table.attr (class "borderless-table") ] , thead = Table.thead [] [ Table.tr [] [ Table.th [ Table.cellAttr (style "width" "5%") ] [ text "" ] , Table.th [ Table.cellAttr (style "width" "20%") ] [ span [ class "text-muted remote-heading" ] [ text "Name" ] ] , Table.th [ Table.cellAttr (style "width" "20%") ] [ span [ class "text-muted remote-heading" ] [ text 
"Online" ] ] , Table.th [ Table.cellAttr (style "width" "30%") ] [ span [ class "text-muted remote-heading" ] [ text "Fingerprint" ] ] , Table.th [ Table.cellAttr (style "width" "10%") ] [ span [ class "text-muted remote-heading" ] [ text "Auto Update" ] ] , Table.th [ Table.cellAttr (style "width" "10%") ] [ span [ class "text-muted remote-heading" ] [ text "May Push" ] ] , Table.th [ Table.cellAttr (style "width" "10%") ] [ span [ class "text-muted remote-heading" ] [ text "Conflicts" ] ] , Table.th [ Table.cellAttr (style "width" "10%") ] [ span [ class "text-muted remote-heading" ] [ text "Folders" ] ] , Table.th [ Table.cellAttr (style "width" "5%") ] [] ] ] , tbody = Table.tbody [] (List.map (viewRemote model) remotes ) } viewMetaRow : String -> Html msg -> Html msg viewMetaRow key value = Grid.row [] [ Grid.col [ Col.xs4, Col.textAlign Text.alignXsLeft ] [ span [ class "text-muted" ] [ text key ] ] , Grid.col [ Col.xs8, Col.textAlign Text.alignXsRight ] [ value ] ] viewSelf : Model -> Html Msg viewSelf model = Grid.row [] [ Grid.col [ Col.lg2, Col.attrs [ class "d-none d-lg-block" ] ] [] , Grid.col [ Col.xs12, Col.lg8, Col.textAlign Text.alignXsCenter ] [ ListGroup.ul [ ListGroup.li [] [ viewMetaRow "Name" (text model.self.self.name) ] , ListGroup.li [] [ viewMetaRow "Fingerprint" (viewFingerprintButton model.self.self.fingerprint) ] ] ] , Grid.col [ Col.lg2, Col.attrs [ class "d-none d-lg-block" ] ] [] ] viewRemoteListContainer : Model -> List Commands.Remote -> Html Msg viewRemoteListContainer model remotes = Grid.row [] [ Grid.col [ Col.lg1, Col.attrs [ class "d-none d-lg-block" ] ] [] , Grid.col [ Col.xs12, Col.lg10 ] [ Util.viewAlert AlertMsg model.alert , viewRemoteList model remotes , div [ class "text-left" ] [ Button.button [ Button.roleLink , Button.attrs [ onClick <| RemoteAddMsg RemoteAdd.show , disabled (not (List.member "remotes.edit" model.rights)) ] ] [ span [ class "fas fa-lg fa-plus" ] [] , text " Add new" ] ] ] , Grid.col [ Col.lg1, 
Col.attrs [ class "d-none d-lg-block" ] ] [] ] view : Model -> Html Msg view model = case model.state of Loading -> text "Still loading" Failure err -> text ("Failed to load remote list: " ++ err) Success remotes -> Grid.row [] [ Grid.col [ Col.lg12 ] [ Grid.row [ Row.attrs [ id "main-header-row" ] ] [] , Grid.row [ Row.attrs [ id "main-content-row" ] ] [ Grid.col [ Col.xl10 ] [ h4 [ class "text-center text-muted" ] [ text "Own data" ] , br [] [] , viewSelf model , br [] [] , br [] [] , br [] [] , br [] [] , h4 [ class "text-center text-muted" ] [ text "Other remotes" ] , br [] [] , viewRemoteListContainer model remotes ] ] ] ] buildModals : Model -> Html Msg buildModals model = span [] [ Html.map RemoteAddMsg (RemoteAdd.view model.remoteAddState) , Html.map RemoteRemoveMsg (RemoteRemove.view model.remoteRemoveState) , Html.map RemoteFolderMsg (RemoteFolders.view model.remoteFoldersState) ] -- SUBSCRIPTIONS: subscriptions : Model -> Sub Msg subscriptions model = Sub.batch [ Alert.subscriptions model.alert.vis AlertMsg , Sub.map RemoteAddMsg <| RemoteAdd.subscriptions model.remoteAddState , Sub.map RemoteRemoveMsg <| RemoteRemove.subscriptions model.remoteRemoveState , Sub.map RemoteFolderMsg <| RemoteFolders.subscriptions model.remoteFoldersState , Sub.batch (List.map (\( name, state ) -> Dropdown.subscriptions state (ActionDropdownMsg name)) (Dict.toList model.actionDropdowns) ) , Sub.batch (List.map (\( name, state ) -> Dropdown.subscriptions state (ConflictDropdownMsg name)) (Dict.toList model.conflictDropdowns) ) ] ================================================ FILE: gateway/elm/src/Scroll.elm ================================================ port module Scroll exposing (ScreenData, hasHitBottom, scrollOrResize) type alias ScreenData = { scrollTop : Int , pageHeight : Int , viewportHeight : Int , viewportWidth : Int } port scrollOrResize : (ScreenData -> msg) -> Sub msg percFloat : ScreenData -> Float percFloat data = toFloat (data.scrollTop * 100) / toFloat 
(data.pageHeight - data.viewportHeight) hasHitBottom : ScreenData -> Bool hasHitBottom data = percFloat data >= 95 ================================================ FILE: gateway/elm/src/Util.elm ================================================ module Util exposing ( AlertState , AlertType(..) , basename , buildAlert , defaultAlertState , dirname , formatLastModified , formatLastModifiedOwner , httpErrorToString , joinPath , monthToInt , prefixSlash , splitPath , urlEncodePath , urlPrefixToString , urlToPath , viewAlert , viewToggleSwitch ) import Bootstrap.Alert as Alert import Bootstrap.Button as Button import Bootstrap.Grid as Grid import Bootstrap.Grid.Col as Col import Bootstrap.Text as Text import Html exposing (..) import Html.Attributes exposing (..) import Html.Events exposing (..) import Http import Time import Url monthToInt : Time.Month -> Int monthToInt month = -- This feels stupid. case month of Time.Jan -> 1 Time.Feb -> 2 Time.Mar -> 3 Time.Apr -> 4 Time.May -> 5 Time.Jun -> 6 Time.Jul -> 7 Time.Aug -> 8 Time.Sep -> 9 Time.Oct -> 10 Time.Nov -> 11 Time.Dec -> 12 formatLastModifiedOwner : Time.Zone -> Time.Posix -> String -> Html.Html msg formatLastModifiedOwner z t owner = p [] [ text (formatLastModified z t), span [ class "text-muted" ] [ text " by " ], text owner ] formatLastModified : Time.Zone -> Time.Posix -> String formatLastModified z t = String.join " " -- Day portion: [ String.join "/" [ Time.toDay z t |> String.fromInt , Time.toMonth z t |> monthToInt |> String.fromInt , Time.toYear z t |> String.fromInt ] -- Time portion: , String.join ":" [ Time.toHour z t |> String.fromInt |> String.padLeft 2 '0' , Time.toMinute z t |> String.fromInt |> String.padLeft 2 '0' , Time.toSecond z t |> String.fromInt |> String.padLeft 2 '0' ] ] splitPath : String -> List String splitPath path = List.filter (\s -> String.length s > 0) (String.split "/" path) joinPath : List String -> String joinPath paths = "/" ++ String.join "/" (List.foldr (++) [] (List.map 
splitPath paths)) urlToPath : Url.Url -> String urlToPath url = let decodeUrlPart = \e -> case Url.percentDecode e of Just val -> val Nothing -> "" in case splitPath url.path of [] -> "/" _ :: xs -> "/" ++ String.join "/" (List.map decodeUrlPart xs) basename : String -> String basename path = let split = List.reverse (splitPath path) in case split of [] -> "/" x :: _ -> x prefixSlash : String -> String prefixSlash path = if String.startsWith "/" path then path else "/" ++ path dirname : String -> String dirname path = let split = splitPath path in case split of [] -> "/" _ -> joinPath <| List.take (List.length split - 1) split buildAlert : Alert.Visibility -> (Alert.Visibility -> msg) -> (Alert.Config msg -> Alert.Config msg) -> String -> String -> Html msg buildAlert visibility msg severity title message = Alert.config |> Alert.dismissableWithAnimation msg |> severity |> Alert.children [ if String.length title > 0 then Alert.h4 [] [ text title ] else text "" , text message ] |> Alert.view visibility httpErrorToString : Http.Error -> String httpErrorToString err = case err of Http.BadUrl msg -> "Bad url: " ++ msg Http.Timeout -> "Timeout" Http.NetworkError -> "Network error" Http.BadStatus status -> "Bad status: " ++ String.fromInt status Http.BadBody msg -> "Could not decode body: " ++ msg urlPrefixToString : Url.Url -> String urlPrefixToString url = (case url.protocol of Url.Https -> "https://" Url.Http -> "http://" ) ++ url.host ++ (case url.port_ of Just port_ -> ":" ++ String.fromInt port_ Nothing -> "" ) ++ "/" urlEncodePath : String -> String urlEncodePath path = joinPath (List.map Url.percentEncode (splitPath path)) viewToggleSwitch : (Bool -> msg) -> String -> Bool -> Bool -> Html msg viewToggleSwitch toMsg message isChecked isDisabled = span [] [ span [] [ label [ class "toggle-switch" , disabled isDisabled , if isDisabled then class "toggle-switch-disabled" else class "" ] [ input [ type_ "checkbox", onCheck toMsg, checked isChecked, disabled isDisabled 
] [] , span [ class "toggle-slider toggle-round" ] [] ] ] , span [ class "text-muted" ] [ text (" " ++ message) ] ] -- ALERT UTILS type AlertType = Danger | Success | Info type alias AlertState = { message : String , typ : AlertType , vis : Alert.Visibility } defaultAlertState : AlertState defaultAlertState = { message = "" , typ = Info , vis = Alert.closed } iconFromAlertType : AlertType -> Html msg iconFromAlertType typ = case typ of Danger -> span [ class "fas fa-xs fa-exclamation-circle" ] [] Success -> span [ class "fas fa-xs fa-check" ] [] _ -> text "" visualFromAlertType : AlertType -> (Alert.Config msg -> Alert.Config msg) visualFromAlertType typ = case typ of Danger -> Alert.danger Success -> Alert.success _ -> Alert.info viewAlert : (Alert.Visibility -> msg) -> AlertState -> Html msg viewAlert toMsg alert = Alert.config |> Alert.dismissableWithAnimation toMsg |> visualFromAlertType alert.typ |> Alert.children [ Grid.row [] [ Grid.col [ Col.xs10 ] [ iconFromAlertType alert.typ , text (" " ++ alert.message) ] , Grid.col [ Col.xs2, Col.textAlign Text.alignXsRight ] [ Button.button [ Button.roleLink , Button.attrs [ class "notification-close-btn" , onClick (toMsg Alert.closed) ] ] [ span [ class "fas fa-xs fa-times" ] [] ] ] ] ] |> Alert.view alert.vis ================================================ FILE: gateway/elm/src/Websocket.elm ================================================ port module Websocket exposing (incoming, open) port incoming : (String -> msg) -> Sub msg port open : () -> Cmd msg ================================================ FILE: gateway/endpoints/all_dirs.go ================================================ package endpoints import ( "net/http" "sort" "strings" "github.com/sahib/brig/gateway/db" ) // AllDirsHandler implements http.Handler. // This endpoint returns all directories that the client may see. // It is used in the client to offer the user a list of directories // to move or copy files to. 
type AllDirsHandler struct {
	*State
}

// NewAllDirsHandler returns a new AllDirsHandler.
func NewAllDirsHandler(s *State) *AllDirsHandler {
	return &AllDirsHandler{State: s}
}

// AllDirsResponse is the response sent to the client.
type AllDirsResponse struct {
	Success bool     `json:"success"`
	Paths   []string `json:"paths"`
}

// ServeHTTP answers with the list of all directories the requesting user
// may see. Requires a logged-in user and the fs.view right.
func (ah *AllDirsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if user := getUserName(ah.store, w, r); user == "" {
		jsonifyErrf(w, http.StatusForbidden, "bad user")
		return
	}

	if !checkRights(w, r, db.RightFsView) {
		return
	}

	// Depth of -1 lists the whole tree below "/".
	nodes, err := ah.fs.List("/", -1)
	if err != nil {
		jsonifyErrf(w, http.StatusInternalServerError, "failed to list")
		return
	}

	paths := []string{}
	for _, node := range nodes {
		// Skip files and any path outside the user's allowed folders.
		if !node.IsDir || !ah.validatePath(node.Path, w, r) {
			continue
		}

		paths = append(paths, node.Path)
	}

	// Sort paths alphabetically, ignoring case. (Only directories end up
	// in `paths`, so no dir-before-file ordering is needed here.)
	sort.Slice(paths, func(i, j int) bool {
		return strings.ToLower(paths[i]) < strings.ToLower(paths[j])
	})

	jsonify(w, http.StatusOK, &AllDirsResponse{
		Success: true,
		Paths:   paths,
	})
}

================================================
FILE: gateway/endpoints/all_dirs_test.go
================================================
package endpoints

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAllDirsSuccess checks that only directories below the allowed
// folder ("/a") are returned, in sorted order.
func TestAllDirsSuccess(t *testing.T) {
	withState(t, func(s *testState) {
		s.mustChangeFolders(t, "/a")
		require.Nil(t, s.fs.Mkdir("/a/b/c", true))
		require.Nil(t, s.fs.Mkdir("/d/e/f", true))

		resp := s.mustRun(
			t,
			NewAllDirsHandler(s.State),
			"POST",
			"http://localhost:5000/api/v0/all_dirs",
			nil,
		)

		require.Equal(t, http.StatusOK, resp.StatusCode)

		allDirsResp := &AllDirsResponse{}
		mustDecodeBody(t, resp.Body, allDirsResp)
		require.Equal(t, true, allDirsResp.Success)
		require.Equal(t, []string{"/a", "/a/b", "/a/b/c"}, allDirsResp.Paths)
	})
}

================================================
FILE: gateway/endpoints/copy.go
================================================
package endpoints

import (
"encoding/json" "fmt" "net/http" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // CopyHandler implements http.Handler. type CopyHandler struct { *State } // NewCopyHandler creates a new copy handler. func NewCopyHandler(s *State) *CopyHandler { return &CopyHandler{State: s} } // CopyRequest is the request that can be send to this endpoint. type CopyRequest struct { // Source is the path to the old node. Source string `json:"source"` // Destination is the path of the new node. Destination string `json:"destination"` } func (ch *CopyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } copyReq := CopyRequest{} if err := json.NewDecoder(r.Body).Decode(©Req); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } src := prefixRoot(copyReq.Source) dst := prefixRoot(copyReq.Destination) if !ch.validatePath(src, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "source path forbidden") return } if !ch.validatePath(dst, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "destination path forbidden") return } if err := ch.fs.Copy(src, dst); err != nil { log.Debugf("failed to copy %s -> %s: %v", src, dst, err) jsonifyErrf(w, http.StatusInternalServerError, "failed to copy") return } msg := fmt.Sprintf("copied »%s« to »%s«", src, dst) if !ch.commitChange(msg, w, r) { return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/copy_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type copyResponse struct { Success bool `json:"success"` } func TestCopySuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Mkdir("/hinz", true)) resp := s.mustRun( t, NewCopyHandler(s.State), "POST", "http://localhost:5000/api/v0/copy", &CopyRequest{ Source: "/hinz", Destination: "/kunz", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) 
copyResp := ©Response{} mustDecodeBody(t, resp.Body, copyResp) require.Equal(t, true, copyResp.Success) hinzInfo, err := s.fs.Stat("/hinz") require.Nil(t, err) require.Equal(t, "/hinz", hinzInfo.Path) kunzInfo, err := s.fs.Stat("/kunz") require.Nil(t, err) require.Equal(t, "/kunz", kunzInfo.Path) }) } func TestCopyDisallowedSource(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/kunz") require.Nil(t, s.fs.Mkdir("/hinz", true)) resp := s.mustRun( t, NewCopyHandler(s.State), "POST", "http://localhost:5000/api/v0/copy", &CopyRequest{ Source: "/hinz", Destination: "/kunz", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } func TestCopyDisallowedDest(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/hinz") require.Nil(t, s.fs.Mkdir("/hinz", true)) resp := s.mustRun( t, NewCopyHandler(s.State), "POST", "http://localhost:5000/api/v0/copy", &CopyRequest{ Source: "/hinz", Destination: "/kunz", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/deleted.go ================================================ package endpoints import ( "encoding/json" "net/http" "sort" "strings" "github.com/sahib/brig/catfs" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/util" ) // DeletedPathsHandler implements http.Handler. // This endpoint returns all directories that the client may see. // It is used in the client to offer the user a list of directories // to move or copy files to. type DeletedPathsHandler struct { *State } // NewDeletedPathsHandler returns a new DeletedPathsHandler. func NewDeletedPathsHandler(s *State) *DeletedPathsHandler { return &DeletedPathsHandler{State: s} } // DeletedPathsResponse is the response sent to the client. type DeletedPathsResponse struct { Success bool `json:"success"` Entries []*StatInfo `json:"entries"` } // DeletedRequest is the data sent to this endpoint. 
type DeletedRequest struct {
	Offset int64  `json:"offset"`
	Limit  int64  `json:"limit"`
	Filter string `json:"filter"`
}

// matchEntry reports whether `info.Path` contains `filter`.
// `filter` is expected to be lowercased already by the caller;
// an empty filter matches every entry.
func matchEntry(info *catfs.StatInfo, filter string) bool {
	if filter == "" {
		return true
	}

	return strings.Contains(strings.ToLower(info.Path), filter)
}

// ServeHTTP returns a filtered, paginated slice of deleted nodes.
// Filtering happens before offset/limit are applied, so pagination
// stays stable for a fixed filter.
func (dh *DeletedPathsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !checkRights(w, r, db.RightFsView) {
		return
	}

	delReq := DeletedRequest{}
	if err := json.NewDecoder(r.Body).Decode(&delReq); err != nil {
		jsonifyErrf(w, http.StatusBadRequest, "bad json")
		return
	}

	if delReq.Offset < 0 {
		jsonifyErrf(w, http.StatusBadRequest, "negative offset")
		return
	}

	nodes, err := dh.fs.DeletedNodes("/")
	if err != nil {
		jsonifyErrf(w, http.StatusInternalServerError, "failed to list")
		return
	}

	// Narrow down to entries matching the (lowercased) filter.
	filter := strings.ToLower(delReq.Filter)
	filteredNodes := []*catfs.StatInfo{}
	for _, node := range nodes {
		if !matchEntry(node, filter) {
			continue
		}

		filteredNodes = append(filteredNodes, node)
	}

	entries := []*StatInfo{}
	if delReq.Offset >= int64(len(filteredNodes)) {
		// Offset beyond the end: answer with an empty, successful result.
		jsonify(w, http.StatusOK, &DeletedPathsResponse{
			Success: true,
			Entries: entries,
		})
		return
	}

	// Apply pagination window; a negative Limit means "no limit".
	filteredNodes = filteredNodes[delReq.Offset:]
	if delReq.Limit >= 0 {
		filteredNodes = filteredNodes[:util.Min64(int64(len(filteredNodes)), delReq.Limit)]
	}

	for _, node := range filteredNodes {
		// NOTE(review): entries failing validatePath are silently skipped,
		// which can shrink a page below Limit — confirm this is intended.
		if !dh.validatePath(node.Path, w, r) {
			continue
		}

		entries = append(entries, toExternalStatInfo(node))
	}

	// Sort dirs before files and sort each part alphabetically
	sort.Slice(entries, func(i, j int) bool {
		if entries[i].IsDir != entries[j].IsDir {
			return entries[i].IsDir
		}

		return strings.ToLower(entries[i].Path) < strings.ToLower(entries[j].Path)
	})

	jsonify(w, http.StatusOK, &DeletedPathsResponse{
		Success: true,
		Entries: entries,
	})
}

================================================
FILE: gateway/endpoints/deleted_test.go
================================================
package endpoints

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
)

func
TestDeletedPathSuccess(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/a") require.Nil(t, s.fs.Touch("/a/b/c1")) require.Nil(t, s.fs.Touch("/a/b/c2")) require.Nil(t, s.fs.Touch("/d/e/f1")) require.Nil(t, s.fs.Touch("/d/e/f2")) require.Nil(t, s.fs.MakeCommit("add")) require.Nil(t, s.fs.Remove("/a/b/c1")) require.Nil(t, s.fs.Remove("/a/b/c2")) require.Nil(t, s.fs.Remove("/d/e/f1")) require.Nil(t, s.fs.Remove("/d/e/f2")) require.Nil(t, s.fs.MakeCommit("rm")) resp := s.mustRun( t, NewDeletedPathsHandler(s.State), "POST", "http://localhost:5000/api/v0/deleted", &DeletedRequest{ Offset: 0, Limit: -1, Filter: "", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) deletedResp := &DeletedPathsResponse{} mustDecodeBody(t, resp.Body, deletedResp) require.Equal(t, true, deletedResp.Success) require.Equal(t, 2, len(deletedResp.Entries)) paths := []string{} for _, entry := range deletedResp.Entries { paths = append(paths, entry.Path) } require.Equal( t, []string{"/a/b/c1", "/a/b/c2"}, paths, ) }) } ================================================ FILE: gateway/endpoints/events.go ================================================ package endpoints import ( "context" "net/http" "sync" "time" "github.com/gorilla/websocket" "github.com/sahib/brig/events" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/gateway/remotesapi" log "github.com/sirupsen/logrus" ) var upgrader = websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, } // EventsHandler implements http.Handler type EventsHandler struct { mu sync.Mutex id int chs map[int]chan string rapi remotesapi.RemotesAPI evListener *events.Listener changeOnce sync.Once // only true while unit tests. // circumvents the right check, // that can't be mocked away easily. 
	testing bool
}

// NewEventsHandler returns a new EventsHandler
func NewEventsHandler(rapi remotesapi.RemotesAPI, ev *events.Listener) *EventsHandler {
	hdl := &EventsHandler{
		chs:  make(map[int]chan string),
		rapi: rapi,
	}

	if ev != nil {
		// Incoming events from our own node:
		ev.RegisterEventHandler(events.FsEvent, true, func(ev *events.Event) {
			// Bounded wait: give slow websocket clients at most one second.
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
			defer cancel()

			hdl.notify(ctx, "fs", true, false)
		})

		// Incoming events from other nodes:
		ev.RegisterEventHandler(events.FsEvent, false, func(ev *events.Event) {
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
			defer cancel()

			hdl.notify(ctx, "fs", false, false)
		})

		hdl.evListener = ev
	}

	return hdl
}

// Notify sends `msg` to all connected clients, but stops in case `ctx`
// was canceled before sending it all.
func (eh *EventsHandler) Notify(ctx context.Context, msg string) error {
	return eh.notify(ctx, msg, true, true)
}

// notify fans `msg` out to every connected websocket client and,
// depending on the flags, republishes the event on the listener.
// NOTE(review): PublishEvent only fires for !isOwnEvent && triggerPublish,
// but no visible caller passes that combination — confirm intended.
func (eh *EventsHandler) notify(ctx context.Context, msg string, isOwnEvent, triggerPublish bool) error {
	// Snapshot the channels under the lock so we do not hold the mutex
	// while potentially blocking on the channel sends below.
	eh.mu.Lock()
	chs := []chan string{}
	for _, ch := range eh.chs {
		chs = append(chs, ch)
	}
	eh.mu.Unlock()

	for _, ch := range chs {
		select {
		case <-ctx.Done():
			// Stop early; remaining clients miss this message.
			return ctx.Err()
		case ch <- msg:
			continue
		}
	}

	// We can only trigger fs events in the gateway:
	event := events.Event{
		Type: events.FsEvent,
	}

	if !isOwnEvent && triggerPublish && eh.evListener != nil {
		return eh.evListener.PublishEvent(event)
	}

	return nil
}

// Shutdown closes all open websockets.
func (eh *EventsHandler) Shutdown() { eh.mu.Lock() defer eh.mu.Unlock() for _, ch := range eh.chs { close(ch) } } func (eh *EventsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !eh.testing { if !checkRights(w, r, db.RightFsView) { return } } conn, err := upgrader.Upgrade(w, r, nil) if err != nil { log.Warningf("failed to upgrade to websocket: %v", err) return } // We setup the on change handler only here, // since calling OnChange in init might deadlock // since the real implementation might call Repo() eh.changeOnce.Do(func() { eh.rapi.OnChange(func() { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() eh.Notify(ctx, "remotes") }) }) eh.mu.Lock() id := eh.id eh.id++ ch := make(chan string, 20) eh.chs[id] = ch eh.mu.Unlock() defer func() { eh.mu.Lock() delete(eh.chs, id) eh.mu.Unlock() }() defer conn.Close() for { select { case msg, ok := <-ch: if !ok { return } if err := conn.WriteMessage(websocket.TextMessage, []byte(msg)); err != nil { log.Debugf("failed to write to websocket, closing: %v", err) return } } } } ================================================ FILE: gateway/endpoints/events_test.go ================================================ package endpoints import ( "context" "net/http" "runtime" "testing" "time" "github.com/gorilla/websocket" "github.com/posener/wstest" "github.com/stretchr/testify/require" ) func TestEvents(t *testing.T) { withState(t, func(s *testState) { // This is stupid. I couldn't get DialContext() // to pass the user value to the actual handler. // Pretty sure it was a problem on my side though... s.evHdl.testing = true // This call evHdl.ServeHTTP when sending something on conn. 
dialer := wstest.NewDialer(s.evHdl) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() conn, resp, err := dialer.DialContext(ctx, "ws://whatever/ws", nil) require.Nil(t, err) if got, want := resp.StatusCode, http.StatusSwitchingProtocols; got != want { t.Fatalf("resp.StatusCode = %q, want %q", got, want) } go func() { // give it a little time so ServeHTTP() of the events handler // can reach the "please notify me now" stage. time.Sleep(100 * time.Millisecond) // trigger an event: resp := s.mustRun( t, NewMkdirHandler(s.State), "POST", "http://localhost:5000/api/v0/events", &MkdirRequest{ Path: "/test", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) }() done := make(chan bool) go func() { typ, data, err := conn.ReadMessage() require.Nil(t, err) require.Equal(t, websocket.TextMessage, typ) require.Equal(t, []byte("fs"), data) done <- true }() select { case <-done: case <-time.After(10 * time.Second): buf := make([]byte, 1<<20) stacklen := runtime.Stack(buf, true) t.Logf(string(buf[:stacklen])) t.Fatalf("test took too long") } }) } ================================================ FILE: gateway/endpoints/get.go ================================================ package endpoints import ( "fmt" "io" "net/http" "net/url" "path" "strconv" "strings" "github.com/sahib/brig/catfs" ie "github.com/sahib/brig/catfs/errors" "github.com/sahib/brig/catfs/mio" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/util" log "github.com/sirupsen/logrus" ) // GetHandler implements http.Handler type GetHandler struct { *State } // NewGetHandler returns a new GetHandler func NewGetHandler(s *State) *GetHandler { return &GetHandler{State: s} } func mimeTypeFromStream(stream mio.Stream) (io.ReadSeeker, string) { hdr, newStream, err := util.PeekHeader(stream, 512) if err != nil { return stream, "application/octet-stream" } return newStream, http.DetectContentType(hdr) } // setContentDisposition sets the Content-Disposition header, based on // 
the content we are serving. It tells a browser if it should open // a save dialog or display it inline (and how) func setContentDisposition(info *catfs.StatInfo, hdr http.Header, dispoType string) { basename := path.Base(info.Path) if info.IsDir { if basename == "/" { basename = "root" } basename += ".tar" } hdr.Set( "Content-Disposition", fmt.Sprintf( "%s; filename*=UTF-8''%s", dispoType, url.QueryEscape(basename), ), ) } func (gh *GetHandler) checkBasicAuth(nodePath string, w http.ResponseWriter, r *http.Request) bool { name, pass, ok := r.BasicAuth() // No basic auth sent. If a browser send the request: ask him to // show a user/password form that gives a chance to change that. if !ok { w.Header().Set("WWW-Authenticate", "Basic realm=\"brig gateway\"") return false } // Check is the basic auth credentials are valid. user, err := gh.userDb.Get(name) if err != nil { return false } hasRight := false for _, right := range user.Rights { if right == db.RightDownload { hasRight = true break } } if !hasRight { return false } isValid, err := user.CheckPassword(pass) if !isValid { if err != nil { log.Warningf("get: failed to check password: %v", err) } return false } // Check again if this user has access to the path: if !gh.validatePathForUser(nodePath, user, w, r) { return false } return true } func (gh *GetHandler) checkDownloadRight(w http.ResponseWriter, r *http.Request) bool { name := getUserName(gh.store, w, r) if name == "" { return false } return gh.checkDownloadRightByName(name, w, r) } func (gh *GetHandler) checkDownloadRightByName(name string, w http.ResponseWriter, r *http.Request) bool { user, err := gh.userDb.Get(name) if err != nil { return false } for _, right := range user.Rights { if right == db.RightDownload { return true } } return false } func (gh *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // get the file nodePath including the leading slash: fullURL := r.URL.EscapedPath() nodePath, err := url.PathUnescape(fullURL[4:]) if 
nodePath == "" { nodePath = "/" } if err != nil { log.Debugf("received malformed url: %s", fullURL) http.Error(w, "malformed url", http.StatusBadRequest) return } if gh.cfg.Bool("auth.anon_allowed") { // validatePath will check if the user is actually logged in // and may access the path in question. The login could come // from a previous login to the UI (the /get endpoint could be used separately) if !gh.validatePath(nodePath, w, r) { // If the user was not previously logged into the UI, // we also accept basic auth for this endpoint. // This way hyperlinks can be shared without having to login. // Using HTTPS here is strongly recommended. if !gh.checkBasicAuth(nodePath, w, r) { http.Error(w, "not authorized", http.StatusUnauthorized) return } } else { // Check if the user allowed the anon user to download files. anonName := gh.cfg.String("auth.anon_user") if !gh.checkDownloadRightByName(anonName, w, r) { http.Error(w, "insufficient rights", http.StatusUnauthorized) return } } // All good. Proceed with the content. 
} else { if !gh.validatePath(nodePath, w, r) { http.Error(w, "insufficient rights", http.StatusUnauthorized) return } if !gh.checkDownloadRight(w, r) { http.Error(w, "insufficient rights for anon", http.StatusUnauthorized) return } } info, err := gh.fs.Stat(nodePath) if err != nil { // Handle a bad nodePath more explicit: if ie.IsNoSuchFileError(err) { http.Error(w, "not found", http.StatusNotFound) return } log.Errorf("gateway: failed to stat %s: %v", nodePath, err) http.Error(w, "failed to stat file", http.StatusInternalServerError) return } hdr := w.Header() hdr.Set("ETag", info.ContentHash.B58String()) hdr.Set("Last-Modified", info.ModTime.Format(http.TimeFormat)) if info.IsDir { params := r.URL.Query() includes := params["include"] filter := func(info *catfs.StatInfo) bool { if len(includes) == 0 { return true } for _, include := range includes { if strings.HasPrefix(info.Path, include) { return true } } return false } setContentDisposition(info, hdr, "attachment") if err := gh.fs.Tar(nodePath, w, filter); err != nil { log.Errorf("gateway: failed to stream %s: %v", nodePath, err) http.Error(w, "failed to stream", http.StatusInternalServerError) return } } else { stream, err := gh.fs.Cat(nodePath) if err != nil { log.Errorf("gateway: failed to stream %s: %v", nodePath, err) http.Error(w, "failed to stream", http.StatusInternalServerError) return } prefixStream, mimeType := mimeTypeFromStream(stream) hdr.Set("Content-Type", mimeType) hdr.Set("Content-Length", strconv.FormatUint(info.Size, 10)) isDirectDownload := r.URL.Query().Get("direct") == "yes" // Set the content disposition to inline if it looks like something viewable. 
if mimeType == "application/octet-stream" || isDirectDownload { setContentDisposition(info, hdr, "attachment") } else { setContentDisposition(info, hdr, "inline") } http.ServeContent(w, r, path.Base(info.Path), info.ModTime, prefixStream) } } ================================================ FILE: gateway/endpoints/get_test.go ================================================ package endpoints import ( "bytes" "io/ioutil" "net/http" "testing" "github.com/stretchr/testify/require" ) func TestGetEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { fileData := []byte("HelloWorld") require.Nil(t, s.fs.Stage("/file", bytes.NewReader(fileData))) resp := s.mustRun( t, NewGetHandler(s.State), "GET", "http://localhost:5000/get/file", nil, ) require.Equal(t, http.StatusOK, resp.StatusCode) data, err := ioutil.ReadAll(resp.Body) require.Nil(t, err) require.Equal(t, fileData, data) }) } func TestGetEndpointDisallowed(t *testing.T) { withState(t, func(s *testState) { fileData := []byte("HelloWorld") require.Nil(t, s.fs.Stage("/file", bytes.NewReader(fileData))) s.mustChangeFolders(t, "/public") resp := s.mustRun( t, NewGetHandler(s.State), "GET", "http://localhost:5000/get/file", nil, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/history.go ================================================ package endpoints import ( "encoding/json" "net/http" "github.com/sahib/brig/catfs" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // HistoryHandler implements http.Handler type HistoryHandler struct { *State } // NewHistoryHandler returns a new HistoryHandler func NewHistoryHandler(s *State) *HistoryHandler { return &HistoryHandler{State: s} } // HistoryRequest is the request sent to this endpoint. 
type HistoryRequest struct { Path string `json:"path"` } // Commit is the same as catfs.Commit, but JSON friendly // and with some omitted fields that are not used by the client. type Commit struct { Date int64 `json:"date"` Msg string `json:"msg"` Tags []string `json:"tags"` Hash string `json:"hash"` Index int64 `json:"index"` } // HistoryEntry is one entry in the response. type HistoryEntry struct { Head Commit `json:"head"` Path string `json:"path"` Change string `json:"change"` IsPinned bool `json:"is_pinned"` IsExplicit bool `json:"is_explicit"` } // HistoryResponse is the data that is sent back to the client. type HistoryResponse struct { Success bool `json:"success"` Entries []HistoryEntry `json:"entries"` } func toExternalCommit(cmt *catfs.Commit) Commit { ext := Commit{} ext.Date = cmt.Date.Unix() * 1000 ext.Hash = cmt.Hash.B58String() ext.Msg = cmt.Msg ext.Tags = cmt.Tags ext.Index = cmt.Index // Make sure we set an empty list, // otherwise .Tags gets serialized as null // which breaks frontend. 
if ext.Tags == nil { ext.Tags = []string{} } return ext } func toExternalChange(c catfs.Change) HistoryEntry { e := HistoryEntry{} e.Change = c.Change e.Head = toExternalCommit(c.Head) e.Path = c.Path e.IsPinned = c.IsPinned e.IsExplicit = c.IsExplicit return e } func (hh *HistoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsView) { return } histReq := HistoryRequest{} if err := json.NewDecoder(r.Body).Decode(&histReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } path := prefixRoot(histReq.Path) if !hh.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "path forbidden") return } hist, err := hh.fs.History(path) if err != nil { log.Debugf("failed to check history for %s: %v", path, err) jsonifyErrf(w, http.StatusBadRequest, "failed to check history") return } entries := []HistoryEntry{} for _, change := range hist { // Filter none changes, since they are only neat for debugging. if change.Change == "none" { continue } entries = append(entries, toExternalChange(change)) } jsonify(w, http.StatusOK, &HistoryResponse{ Success: true, Entries: entries, }) } ================================================ FILE: gateway/endpoints/history_test.go ================================================ package endpoints import ( "bytes" "net/http" "testing" "github.com/stretchr/testify/require" ) func TestHistoryEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Stage("/x", bytes.NewReader([]byte("hello")))) require.Nil(t, s.fs.MakeCommit("hello")) require.Nil(t, s.fs.Stage("/x", bytes.NewReader([]byte("world")))) require.Nil(t, s.fs.MakeCommit("world")) require.Nil(t, s.fs.Remove("/x")) require.Nil(t, s.fs.MakeCommit("remove")) resp := s.mustRun( t, NewHistoryHandler(s.State), "POST", "http://localhost:5000/api/v0/history", &HistoryRequest{ Path: "/x", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := &HistoryResponse{} mustDecodeBody(t, 
resp.Body, &data) require.Equal(t, true, data.Success) ents := data.Entries require.Len(t, ents, 3) require.Equal(t, "removed", ents[0].Change) require.Equal(t, "/x", ents[0].Path) require.Equal(t, "remove", ents[0].Head.Msg) require.Equal(t, []string{"head"}, ents[0].Head.Tags) require.Equal(t, "modified", ents[1].Change) require.Equal(t, "/x", ents[1].Path) require.Equal(t, "world", ents[1].Head.Msg) require.Equal(t, []string{}, ents[1].Head.Tags) require.Equal(t, "added", ents[2].Change) require.Equal(t, "/x", ents[2].Path) require.Equal(t, "hello", ents[2].Head.Msg) require.Equal(t, []string{"init"}, ents[2].Head.Tags) }) } func TestHistoryEndpointForbidden(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/public") resp := s.mustRun( t, NewHistoryHandler(s.State), "POST", "http://localhost:5000/api/v0/history", &HistoryRequest{ Path: "/x", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/index.go ================================================ package endpoints import ( "html/template" "io" "io/ioutil" "net/http" "os" "github.com/gorilla/csrf" "github.com/phogolabs/parcello" log "github.com/sirupsen/logrus" // Include static resources: _ "github.com/sahib/brig/gateway/templates" ) // IndexHandler implements http.Handler. // It serves index.html from either file or memory. type IndexHandler struct { *State } // NewIndexHandler returns a new IndexHandler. 
func NewIndexHandler(s *State) *IndexHandler { return &IndexHandler{State: s} } func (ih *IndexHandler) loadTemplateData() (io.ReadCloser, error) { if ih.cfg.Bool("ui.debug_mode") { return os.Open("./gateway/templates/index.html") } mgr := parcello.ManagerAt("/") return mgr.Open("index.html") } func (ih *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fd, err := ih.loadTemplateData() if err != nil { jsonifyErrf(w, http.StatusInternalServerError, "no index.html") return } defer fd.Close() data, err := ioutil.ReadAll(fd) if err != nil { jsonifyErrf(w, http.StatusInternalServerError, "could not load template: %v", err) return } t, err := template.New("index").Parse(string(data)) if err != nil { log.Errorf("could not parse template: %v", err) jsonifyErrf(w, http.StatusInternalServerError, "template contains errors") return } wsScheme := "ws://" if r.TLS != nil { wsScheme = "wss://" } httpScheme := "http://" if r.TLS != nil { httpScheme = "https://" } err = t.Execute(w, map[string]interface{}{ "csrfToken": csrf.Token(r), "wsAddr": wsScheme + r.Host + "/events", "httpAddr": httpScheme + r.Host, }) if err != nil { jsonifyErrf(w, http.StatusInternalServerError, "could not execute template") return } } ================================================ FILE: gateway/endpoints/log.go ================================================ package endpoints import ( "encoding/json" "errors" "fmt" "net/http" "strings" "github.com/sahib/brig/catfs" "github.com/sahib/brig/gateway/db" ) // LogHandler implements http.Handler. type LogHandler struct { *State } // NewLogHandler returns a new LogHandler func NewLogHandler(s *State) *LogHandler { return &LogHandler{State: s} } // LogRequest is the data sent to this endpoint. type LogRequest struct { Offset int64 `json:"offset"` Limit int64 `json:"limit"` Filter string `json:"filter"` } // LogResponse is the response sent back to the client. 
type LogResponse struct { Success bool `json:"success"` HaveStagedChanges bool `json:"have_staged_changes"` Commits []Commit `json:"commits"` } func matchCommit(cmt *catfs.Commit, filter string) bool { return strings.Contains(strings.ToLower(cmt.Msg), filter) } func (lh *LogHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsView) { return } logReq := LogRequest{} if err := json.NewDecoder(r.Body).Decode(&logReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } status, err := lh.fs.CommitInfo("curr") if err != nil { jsonifyErrf(w, http.StatusInternalServerError, "could not get status") return } if logReq.Offset < 0 { jsonifyErrf(w, http.StatusBadRequest, "negative offsets are not supported") return } if status.Index < logReq.Offset { jsonify(w, http.StatusOK, &LogResponse{ Success: true, Commits: []Commit{}, }) return } commits := []Commit{} errSkip := errors.New("skip") filter := strings.ToLower(logReq.Filter) head := fmt.Sprintf("commit[%d]", -(logReq.Offset + 1)) err = lh.fs.Log(head, func(cmt *catfs.Commit) error { if filter != "" && !matchCommit(cmt, filter) { return nil } if logReq.Limit >= 0 && int64(len(commits)) >= logReq.Limit { return errSkip } commits = append(commits, toExternalCommit(cmt)) return nil }) if err != nil && err != errSkip { jsonifyErrf(w, http.StatusBadRequest, "failed to query log: %v", err) return } haveStagedChanges, err := lh.fs.HaveStagedChanges() if err != nil { jsonifyErrf(w, http.StatusInternalServerError, "failed to check staged state: %v", err) return } jsonify(w, http.StatusOK, &LogResponse{ Success: true, HaveStagedChanges: haveStagedChanges, Commits: commits, }) } ================================================ FILE: gateway/endpoints/log_test.go ================================================ package endpoints import ( "bytes" "net/http" "testing" "github.com/stretchr/testify/require" ) func TestLogEndpointSuccess(t *testing.T) { withState(t, func(s 
*testState) { require.Nil(t, s.fs.Stage("/x", bytes.NewReader([]byte("hello")))) require.Nil(t, s.fs.MakeCommit("hello")) require.Nil(t, s.fs.Stage("/x", bytes.NewReader([]byte("world")))) require.Nil(t, s.fs.MakeCommit("world")) require.Nil(t, s.fs.Remove("/x")) require.Nil(t, s.fs.MakeCommit("remove")) resp := s.mustRun( t, NewLogHandler(s.State), "POST", "http://localhost:5000/api/v0/log", &LogRequest{ Offset: 0, Limit: -1, Filter: "", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := &LogResponse{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) require.Equal(t, 4, len(data.Commits)) require.Equal(t, "", data.Commits[0].Msg) require.Equal(t, []string{"curr"}, data.Commits[0].Tags) require.Equal(t, "remove", data.Commits[1].Msg) require.Equal(t, []string{"head"}, data.Commits[1].Tags) require.Equal(t, "world", data.Commits[2].Msg) require.Equal(t, []string{}, data.Commits[2].Tags) require.Equal(t, "hello", data.Commits[3].Msg) require.Equal(t, []string{"init"}, data.Commits[3].Tags) }) } ================================================ FILE: gateway/endpoints/login.go ================================================ package endpoints import ( "context" "encoding/json" "net/http" "github.com/gorilla/sessions" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) func getUserName(store *sessions.CookieStore, w http.ResponseWriter, r *http.Request) string { sess, err := store.Get(r, "sess") if err != nil { log.Warningf("failed to get session: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return "" } userNameIf, ok := sess.Values["name"] if !ok { return "" } userName, ok := userNameIf.(string) if !ok { log.Warningf("failed to convert user name to string: %v", userNameIf) http.Error(w, "internal error", http.StatusInternalServerError) return "" } return userName } func setSession(store *sessions.CookieStore, userName string, w http.ResponseWriter, r *http.Request) { // Ignore the error here, 
since it will usually trigger when there was a previously // outdated session that fails to decode. Since we overwrite the session anyways, it // doesn't really matter in this case. sess, _ := store.Get(r, "sess") isHTTPS := r.TLS != nil sess.Options = &sessions.Options{ Path: "/", MaxAge: 31 * 24 * 60 * 60, HttpOnly: true, Secure: isHTTPS, } sess.Values["name"] = userName if err := sess.Save(r, w); err != nil { log.Warningf("set: failed to save session: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } } func clearSession(store *sessions.CookieStore, w http.ResponseWriter, r *http.Request) { sess, err := store.Get(r, "sess") if err != nil { log.Warningf("failed to get session: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } sess.Options.MaxAge = -1 if err := sess.Save(r, w); err != nil { log.Warningf("clear: failed to save session: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } } /////// // LoginHandler implements http.Handler type LoginHandler struct { *State } // NewLoginHandler creates a new LoginHandler func NewLoginHandler(s *State) *LoginHandler { return &LoginHandler{State: s} } // LoginRequest is the request sent as JSON to this endpoint. type LoginRequest struct { Username string `json:"username"` Password string `json:"password"` } // LoginResponse is what the endpoint will return. 
type LoginResponse struct {
	// Success is true if the credentials were accepted.
	Success bool `json:"success"`
	// Username echoes back the name that was logged in.
	Username string `json:"username"`
	// Rights lists the rights granted to this user.
	Rights []string `json:"rights"`
	// IsAnon is true if the logged-in user is the configured anonymous user.
	IsAnon bool `json:"is_anon"`
	// AnonIsAllowed is true if anonymous access is enabled in the config.
	AnonIsAllowed bool `json:"anon_is_allowed"`
}

// ServeHTTP validates the supplied credentials and, on success, opens a
// session for the user via setSession (i.e. sets the "sess" cookie).
// All credential failures answer with the same generic message.
func (lih *LoginHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	loginReq := LoginRequest{}
	if err := json.NewDecoder(r.Body).Decode(&loginReq); err != nil {
		jsonifyErrf(w, http.StatusBadRequest, "bad json")
		return
	}

	if loginReq.Username == "" || loginReq.Password == "" {
		jsonifyErrf(w, http.StatusBadRequest, "empty password or username")
		return
	}

	dbUser, err := lih.userDb.Get(loginReq.Username)
	if err != nil {
		// No such user.
		jsonifyErrf(w, http.StatusForbidden, "bad credentials")
		return
	}

	if dbUser.Name != loginReq.Username {
		// Bad username. Might be a problem on our side.
		jsonifyErrf(w, http.StatusForbidden, "bad credentials")
		return
	}

	isValid, err := dbUser.CheckPassword(loginReq.Password)
	if err != nil || !isValid {
		// Only log real errors; a plain mismatch is not logged.
		if err != nil {
			log.Warningf("check password failed: %v", err)
		}

		jsonifyErrf(w, http.StatusForbidden, "bad credentials")
		return
	}

	anonIsAllowed := lih.cfg.Bool("auth.anon_allowed")
	anonUserName := lih.cfg.String("auth.anon_user")

	setSession(lih.store, dbUser.Name, w, r)
	jsonify(w, http.StatusOK, &LoginResponse{
		Success:       true,
		Username:      loginReq.Username,
		Rights:        dbUser.Rights,
		// The login counts as anonymous when it used the configured anon user.
		IsAnon:        anonUserName == loginReq.Username,
		AnonIsAllowed: anonIsAllowed,
	})
}

///////

// LogoutHandler implements http.Handler
type LogoutHandler struct {
	*State
}

// NewLogoutHandler returns a new LogoutHandler
func NewLogoutHandler(s *State) *LogoutHandler {
	return &LogoutHandler{State: s}
}

// ServeHTTP invalidates the session of the currently logged-in user.
// It answers with 400 when no user is logged in.
func (loh *LogoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	user := getUserName(loh.store, w, r)
	if user == "" {
		jsonifyErrf(w, http.StatusBadRequest, "not logged in")
		return
	}

	clearSession(loh.store, w, r)
	jsonifySuccess(w)
}

///////

// WhoamiHandler implements http.Handler.
// This handler checks if a user is already logged in.
type WhoamiHandler struct { *State } // NewWhoamiHandler returns a new WhoamiHandler. func NewWhoamiHandler(s *State) *WhoamiHandler { return &WhoamiHandler{State: s} } // WhoamiResponse is the response sent back by this endpoint. type WhoamiResponse struct { IsLoggedIn bool `json:"is_logged_in"` IsAnon bool `json:"is_anon"` AnonIsAllowed bool `json:"anon_is_allowed"` User string `json:"user"` Rights []string `json:"rights"` } func (wh *WhoamiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { rights := []string{} isAnon := false anonIsAllowed := wh.cfg.Bool("auth.anon_allowed") name := getUserName(wh.store, w, r) if name == "" && anonIsAllowed { isAnon = true name = wh.cfg.String("auth.anon_user") } if name != "" { possiblyAnonUser, err := wh.userDb.Get(name) if err != nil { log.Warningf("could not get user »%s« : %v", name, err) } else { rights = possiblyAnonUser.Rights setSession(wh.store, name, w, r) } } jsonify(w, http.StatusOK, WhoamiResponse{ IsLoggedIn: len(name) > 0, IsAnon: isAnon, AnonIsAllowed: anonIsAllowed, User: name, Rights: rights, }) } /////// type authMiddleware struct { *State SubHandler http.Handler } type dbUserKey string func (am *authMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { anonIsAllowed := am.cfg.Bool("auth.anon_allowed") name := getUserName(am.store, w, r) if name == "" { if !anonIsAllowed { // invalid token. jsonifyErrf(w, http.StatusUnauthorized, "not authorized") return } name = am.cfg.String("auth.anon_user") } user, err := am.userDb.Get(name) if err != nil { // valid token, but invalid user. 
// (user might have been deleted on our side) jsonifyErrf(w, http.StatusUnauthorized, "not authorized") return } r = r.WithContext( context.WithValue(r.Context(), dbUserKey("brig.db_user"), user, ), ) am.SubHandler.ServeHTTP(w, r) } func checkRights(w http.ResponseWriter, r *http.Request, rights ...string) bool { user, ok := r.Context().Value(dbUserKey("brig.db_user")).(db.User) if !ok { jsonifyErrf(w, http.StatusInternalServerError, "could not cast user") return false } rmap := make(map[string]bool) for _, right := range user.Rights { rmap[right] = true } for _, right := range rights { if !rmap[right] { jsonifyErrf(w, http.StatusUnauthorized, "insufficient rights") return false } } return true } // AuthMiddleware returns a new handler wrapper, that will require // all calls to the respective handler to have a "sess" cookie with // a valid user name. func AuthMiddleware(s *State) func(http.Handler) http.Handler { return func(h http.Handler) http.Handler { return &authMiddleware{State: s, SubHandler: h} } } ================================================ FILE: gateway/endpoints/login_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type loginResponse struct { Success bool `json:"success"` } func TestLoginEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { resp := s.mustRun( t, NewLoginHandler(s.State), "POST", "http://localhost:5000/api/v0/login", &LoginRequest{ Username: "ali", Password: "ila", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) loginResp := &loginResponse{} mustDecodeBody(t, resp.Body, &loginResp) require.Equal(t, true, loginResp.Success) cookies := resp.Cookies() require.Equal(t, "sess", cookies[0].Name) }) } ================================================ FILE: gateway/endpoints/ls.go ================================================ package endpoints import ( "encoding/json" "net/http" "sort" "strings" "github.com/sahib/brig/catfs" 
"github.com/sahib/brig/gateway/db" ) // LsHandler implements http.Handler. type LsHandler struct { *State } // NewLsHandler returns a new LsHandler func NewLsHandler(s *State) *LsHandler { return &LsHandler{State: s} } // LsRequest is the data that needs to be sent to this endpoint. type LsRequest struct { Root string `json:"root"` Filter string `json:"filter,omitempty"` } // StatInfo is a single node in the list response. // It is the same as catfs.StatInfo, but is more JSON friendly // and omits some fields like hashes that are not useful to the client. type StatInfo struct { Path string `json:"path"` User string `json:"user"` Size uint64 `json:"size"` Inode uint64 `json:"inode"` Depth int `json:"depth"` ModTime int64 `json:"last_modified_ms"` IsDir bool `json:"is_dir"` IsPinned bool `json:"is_pinned"` IsExplicit bool `json:"is_explicit"` } func toExternalStatInfo(i *catfs.StatInfo) *StatInfo { return &StatInfo{ Path: i.Path, User: i.User, Size: i.Size, Inode: i.Inode, Depth: i.Depth, ModTime: i.ModTime.Unix() * 1000, IsDir: i.IsDir, IsPinned: i.IsPinned, IsExplicit: i.IsExplicit, } } // LsResponse is the response sent back to the client. 
type LsResponse struct { Success bool `json:"success"` Self *StatInfo `json:"self"` Files []*StatInfo `json:"files"` IsFiltered bool `json:"is_filtered"` } func doQuery(fs *catfs.FS, root, filter string) ([]*catfs.StatInfo, error) { if filter == "" { return fs.List(root, 1) } return fs.Filter(root, filter) } func (lh *LsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsView) { return } lsReq := LsRequest{} if err := json.NewDecoder(r.Body).Decode(&lsReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } root := prefixRoot(lsReq.Root) info, err := lh.fs.Stat(root) if err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to stat root %s: %v", root, err) return } items, err := doQuery(lh.fs, root, lsReq.Filter) if err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to query: %v", err) return } files := []*StatInfo{} for _, item := range items { if !lh.pathIsVisible(item.Path, w, r) { continue } files = append(files, toExternalStatInfo(item)) } // Sort dirs before files and sort each part alphabetically sort.Slice(files, func(i, j int) bool { if files[i].IsDir != files[j].IsDir { return files[i].IsDir } return strings.ToLower(files[i].Path) < strings.ToLower(files[j].Path) }) jsonify(w, http.StatusOK, &LsResponse{ Success: true, Files: files, IsFiltered: len(lsReq.Filter) > 0, Self: toExternalStatInfo(info), }) } ================================================ FILE: gateway/endpoints/ls_test.go ================================================ package endpoints import ( "bytes" "net/http" "testing" "github.com/stretchr/testify/require" ) func TestLsEndpoint(t *testing.T) { withState(t, func(s *testState) { exampleData := bytes.NewReader([]byte("Hello world")) require.Nil(t, s.fs.Stage("/hello/world.png", exampleData)) resp := s.mustRun( t, NewLsHandler(s.State), "POST", "http://localhost:5000/api/v0/ls", &LsRequest{ Root: "/", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) lsResp := 
&LsResponse{} mustDecodeBody(t, resp.Body, &lsResp) require.Len(t, lsResp.Files, 1) require.Equal(t, lsResp.Files[0].Path, "/hello") }) } ================================================ FILE: gateway/endpoints/mkdir.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // MkdirHandler implements http.Handler. type MkdirHandler struct { *State } // NewMkdirHandler creates a new mkdir handler. func NewMkdirHandler(s *State) *MkdirHandler { return &MkdirHandler{State: s} } // MkdirRequest is the request that can be sent to this endpoint as JSON. type MkdirRequest struct { // Path to create. Path string `json:"path"` } func (mh *MkdirHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } mkdirReq := MkdirRequest{} if err := json.NewDecoder(r.Body).Decode(&mkdirReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } path := prefixRoot(mkdirReq.Path) if !mh.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "path forbidden") return } if err := mh.fs.Mkdir(path, true); err != nil { log.Debugf("failed to mkdir %s: %v", path, err) jsonifyErrf(w, http.StatusInternalServerError, "failed to mkdir") return } msg := fmt.Sprintf("mkdir'd »%s«", path) if !mh.commitChange(msg, w, r) { return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/mkdir_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type mkdirResponse struct { Success bool `json:"success"` Message string `json:"message"` } func TestMkdirEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { resp := s.mustRun( t, NewMkdirHandler(s.State), "POST", "http://localhost:5000/api/v0/mkdir", &MkdirRequest{ Path: "/test", }, ) require.Equal(t, http.StatusOK, 
resp.StatusCode) mkdirResp := &mkdirResponse{} mustDecodeBody(t, resp.Body, &mkdirResp) require.Equal(t, true, mkdirResp.Success) info, err := s.fs.Stat("/test") require.Nil(t, err) require.Equal(t, "/test", info.Path) }) } func TestMkdirEndpointInvalidPath(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/something/else") resp := s.mustRun( t, NewMkdirHandler(s.State), "POST", "http://localhost:5000/api/v0/mkdir", &MkdirRequest{ Path: "/test", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/move.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // MoveHandler implements http.Handler. type MoveHandler struct { *State } // NewMoveHandler creates a new move handler. func NewMoveHandler(s *State) *MoveHandler { return &MoveHandler{State: s} } // MoveRequest is the request that can be send to this endpoint. type MoveRequest struct { // Source is the path to the old node. Source string `json:"source"` // Destination is the path of the new node. 
Destination string `json:"destination"` } func (mh *MoveHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } moveReq := MoveRequest{} if err := json.NewDecoder(r.Body).Decode(&moveReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } src := prefixRoot(moveReq.Source) dst := prefixRoot(moveReq.Destination) if !mh.validatePath(src, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "source path forbidden") return } if !mh.validatePath(dst, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "destination path forbidden") return } // Move does some extended checking before actually moving the file: if err := mh.fs.Move(src, dst); err != nil { log.Debugf("failed to move %s -> %s: %v", src, dst, err) jsonifyErrf(w, http.StatusInternalServerError, "failed to move") return } msg := fmt.Sprintf("moved »%s« to »%s« via gateway", src, dst) if !mh.commitChange(msg, w, r) { return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/move_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type moveResponse struct { Success bool `json:"success"` } func TestMoveSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Mkdir("/hinz", true)) resp := s.mustRun( t, NewMoveHandler(s.State), "POST", "http://localhost:5000/api/v0/move", &MoveRequest{ Source: "/hinz", Destination: "/kunz", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) moveResp := &moveResponse{} mustDecodeBody(t, resp.Body, moveResp) require.Equal(t, true, moveResp.Success) _, err := s.fs.Stat("/hinz") require.NotNil(t, err) kunzInfo, err := s.fs.Stat("/kunz") require.Nil(t, err) require.Equal(t, "/kunz", kunzInfo.Path) }) } func TestMoveDisallowedSource(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/kunz") require.Nil(t, s.fs.Mkdir("/hinz", true)) resp := 
s.mustRun( t, NewMoveHandler(s.State), "POST", "http://localhost:5000/api/v0/move", &MoveRequest{ Source: "/hinz", Destination: "/kunz", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } func TestMoveDisallowedDest(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/hinz") require.Nil(t, s.fs.Mkdir("/hinz", true)) resp := s.mustRun( t, NewMoveHandler(s.State), "POST", "http://localhost:5000/api/v0/move", &MoveRequest{ Source: "/hinz", Destination: "/kunz", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/pin.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" ie "github.com/sahib/brig/catfs/errors" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // PinHandler implements http.Handler. type PinHandler struct { *State doPin bool } // NewPinHandler returns a new PinHandler func NewPinHandler(s *State) *PinHandler { return &PinHandler{State: s, doPin: true} } // NewUnpinHandler returns a new PinHandler func NewUnpinHandler(s *State) *PinHandler { return &PinHandler{State: s, doPin: false} } // PinRequest is the request that is being sent to the endpoint. 
type PinRequest struct { Path string `json:"path"` Revision string `json:"revision"` DoPin bool `json:"do_pin"` } func (ph *PinHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } pinReq := PinRequest{} if err := json.NewDecoder(r.Body).Decode(&pinReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } path := prefixRoot(pinReq.Path) if !ph.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "path forbidden") return } // Select the right operation: op, name := ph.fs.Pin, "pin" if ph.doPin == false { op, name = ph.fs.Unpin, "unpin" } if err := op(path, pinReq.Revision, true); err != nil { if !ie.IsNoSuchFileError(err) { log.Debugf("failed to %s %s: %v", name, path, err) jsonifyErrf(w, http.StatusBadRequest, fmt.Sprintf("failed to %s", name)) return } } ph.evHdl.Notify(r.Context(), "pin") jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/pin_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type pinResponse struct { Success bool `json:"success"` Message string `json:"message"` } func TestPinEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Touch("/file")) require.Nil(t, s.fs.Mkdir("/dir", true)) resp := s.mustRun( t, NewPinHandler(s.State), "POST", "http://localhost:5000/api/v0/pin", &PinRequest{ Path: "/file", Revision: "curr", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) pinResp := &pinResponse{} mustDecodeBody(t, resp.Body, &pinResp) require.Equal(t, true, pinResp.Success) stat, err := s.fs.Stat("/file") require.Nil(t, err) require.True(t, stat.IsPinned) require.True(t, stat.IsExplicit) stat, err = s.fs.Stat("/dir") require.Nil(t, err) require.True(t, stat.IsPinned) require.True(t, stat.IsExplicit) }) } func TestPinEndpointForbidden(t *testing.T) { withState(t, func(s *testState) { 
s.mustChangeFolders(t, "/public") resp := s.mustRun( t, NewPinHandler(s.State), "POST", "http://localhost:5000/api/v0/pin", &PinRequest{ Path: "/file", Revision: "curr", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) pinResp := &pinResponse{} mustDecodeBody(t, resp.Body, &pinResp) require.Equal(t, false, pinResp.Success) }) } ================================================ FILE: gateway/endpoints/ping.go ================================================ package endpoints import ( "net/http" ) // PingHandler implements http.Handler. // This handler checks if a user is already logged in. type PingHandler struct { *State } // NewPingHandler returns a new PingHandler. func NewPingHandler(s *State) *PingHandler { return &PingHandler{State: s} } // PingResponse is the response sent back by this endpoint. type PingResponse struct { IsOnline bool `json:"is_online"` } func (wh *PingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { jsonify(w, http.StatusOK, PingResponse{ IsOnline: true, }) } ================================================ FILE: gateway/endpoints/ping_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) func TestPingEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { resp := s.mustRun( t, NewPingHandler(s.State), "POST", "http://localhost:5000/api/v0/ping", nil, ) require.Equal(t, http.StatusOK, resp.StatusCode) pingResp := &PingResponse{} mustDecodeBody(t, resp.Body, &pingResp) require.Equal(t, true, pingResp.IsOnline) }) } ================================================ FILE: gateway/endpoints/redirect.go ================================================ package endpoints import ( "fmt" "net" "net/http" log "github.com/sirupsen/logrus" ) // RedirHandler implements http.Handler. // It redirects all of its requests to the respective https:// route. 
type RedirHandler struct { redirPort int64 } // NewHTTPRedirectHandler returns a new RedirHandler func NewHTTPRedirectHandler(redirPort int64) *RedirHandler { return &RedirHandler{ redirPort: redirPort, } } func (rh *RedirHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { // remove/add not default ports from req.Host host, _, err := net.SplitHostPort(req.Host) if err != nil { http.Error(w, "invalid request", http.StatusBadRequest) return } target := fmt.Sprintf("https://%s:%d%s", host, rh.redirPort, req.URL.Path) if len(req.URL.RawQuery) > 0 { target += "?" + req.URL.RawQuery } log.Debugf("redirect to: %s", target) http.Redirect(w, req, target, http.StatusTemporaryRedirect) } ================================================ FILE: gateway/endpoints/remotes_add.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" "strings" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/gateway/remotesapi" "github.com/sahib/brig/net/peer" log "github.com/sirupsen/logrus" ) // RemotesAddHandler implements http.Handler type RemotesAddHandler struct { *State } // NewRemotesAddHandler returns a new RemotesAddHandler func NewRemotesAddHandler(s *State) *RemotesAddHandler { return &RemotesAddHandler{State: s} } // RemoteAddRequest is the data being sent to this endpoint. 
type RemoteAddRequest struct { Name string `json:"name"` Folders []remotesapi.Folder `json:"folders"` Fingerprint string `json:"fingerprint"` AcceptAutoUpdates bool `json:"accept_auto_updates"` AcceptPush bool `json:"accept_push"` ConflictStrategy string `json:"conflict_strategy"` } func dedupeFolders(folders []remotesapi.Folder) []remotesapi.Folder { seen := make(map[string]bool) deduped := []remotesapi.Folder{} for _, folder := range folders { path := folder.Folder if !strings.HasPrefix(path, "/") { path = "/" + path } if seen[path] { continue } deduped = append(deduped, folder) seen[path] = true } return deduped } func validateFingerprint(fingerprint string, w http.ResponseWriter, r *http.Request) bool { if _, err := peer.CastFingerprint(fingerprint); err != nil { log.Debugf("invalid fingerprint: %v", err) jsonifyErrf(w, http.StatusBadRequest, "bad fingerprint format") return false } return true } func readRemoteRequest(w http.ResponseWriter, r *http.Request) (*remotesapi.Remote, error) { if !checkRights(w, r, db.RightRemotesEdit) { return nil, fmt.Errorf("bad rights") } remoteAddReq := RemoteAddRequest{} if err := json.NewDecoder(r.Body).Decode(&remoteAddReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return nil, fmt.Errorf("bad json") } if !validateFingerprint(remoteAddReq.Fingerprint, w, r) { return nil, fmt.Errorf("bad fingerprint") } return &remotesapi.Remote{ Name: remoteAddReq.Name, Folders: dedupeFolders(remoteAddReq.Folders), Fingerprint: remoteAddReq.Fingerprint, AcceptAutoUpdates: remoteAddReq.AcceptAutoUpdates, AcceptPush: remoteAddReq.AcceptPush, ConflictStrategy: remoteAddReq.ConflictStrategy, }, nil } func (rh *RemotesAddHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { rmt, err := readRemoteRequest(w, r) if err != nil { return } if _, err := rh.rapi.Get(rmt.Name); err == nil { jsonifyErrf(w, http.StatusBadRequest, "remote does exist already") return } if err := rh.rapi.Set(*rmt); err != nil { jsonifyErrf(w, 
http.StatusBadRequest, "failed to add") return } jsonifySuccess(w) } ////////////// // RemotesModifyHandler implements http.Handler type RemotesModifyHandler struct { *State } // NewRemotesModifyHandler returns a new RemotesModifyHandler func NewRemotesModifyHandler(s *State) *RemotesModifyHandler { return &RemotesModifyHandler{State: s} } func (rh *RemotesModifyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { rmt, err := readRemoteRequest(w, r) if err != nil { return } if _, err := rh.rapi.Get(rmt.Name); err != nil { jsonifyErrf(w, http.StatusBadRequest, "remote does not exist yet") return } if err := rh.rapi.Set(*rmt); err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to add") return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/remotes_add_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/sahib/brig/gateway/remotesapi" "github.com/stretchr/testify/require" ) const ( TestFingerprint = "QmgtEcRda8Nm4RMHQCBzGGXBE2zjQqvDXHfEye1zay3f1w:W1fKKbqVAUhEXkC3yoJ92fKK1aWAuVYMmneUUvUQdDRbMq" ) func TestRemoteAddEndpoint(t *testing.T) { withState(t, func(s *testState) { resp := s.mustRun( t, NewRemotesAddHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/add", &RemoteAddRequest{ Name: "bob", Folders: nil, Fingerprint: TestFingerprint, AcceptAutoUpdates: true, }, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := struct { Success bool `json:"success"` }{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) rmt, err := s.State.rapi.Get("bob") require.Nil(t, err) require.Equal(t, "bob", rmt.Name) require.Equal(t, TestFingerprint, rmt.Fingerprint) require.Equal(t, true, rmt.AcceptAutoUpdates) }) } func TestRemoteModifyEndpoint(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.State.rapi.Set(remotesapi.Remote{ Name: "bob", Fingerprint: TestFingerprint + "xxx", Folders: []remotesapi.Folder{ 
{ Folder: "/public", ReadOnly: false, ConflictStrategy: "", }, }, })) resp := s.mustRun( t, NewRemotesModifyHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/modify", &RemoteAddRequest{ Name: "bob", Folders: nil, Fingerprint: TestFingerprint, AcceptAutoUpdates: true, }, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := struct { Success bool `json:"success"` }{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) rmt, err := s.State.rapi.Get("bob") require.Nil(t, err) require.Equal(t, "bob", rmt.Name) require.Equal(t, TestFingerprint, rmt.Fingerprint) require.Equal(t, true, rmt.AcceptAutoUpdates) }) } ================================================ FILE: gateway/endpoints/remotes_diff.go ================================================ package endpoints import ( "encoding/json" "net/http" "github.com/sahib/brig/catfs" "github.com/sahib/brig/gateway/db" ) // RemotesDiffHandler implements http.Handler type RemotesDiffHandler struct { *State } // NewRemotesDiffHandler returns a new RemotesDiffHandler func NewRemotesDiffHandler(s *State) *RemotesDiffHandler { return &RemotesDiffHandler{State: s} } // RemoteDiffRequest is the data being sent to this endpoint. type RemoteDiffRequest struct { Name string `json:"name"` } // DiffPair is like catfs.DiffPair, but with some // fields removed and with json instructions. type DiffPair struct { Src *StatInfo `json:"src"` Dst *StatInfo `json:"dst"` } // Diff is like catfs.Diff, but json-ized. type Diff struct { Added []*StatInfo `json:"added"` Removed []*StatInfo `json:"removed"` Ignored []*StatInfo `json:"ignored"` Missing []*StatInfo `json:"missing"` Conflict []DiffPair `json:"conflict"` Moved []DiffPair `json:"moved"` Merged []DiffPair `json:"merged"` } // RemoteDiffResponse is the data being sent to this endpoint. 
type RemoteDiffResponse struct { Success bool `json:"success"` Diff *Diff `json:"diff"` } func convertSingles(infos []catfs.StatInfo) []*StatInfo { result := []*StatInfo{} for _, info := range infos { result = append(result, toExternalStatInfo(&info)) } return result } func convertPairs(pairs []catfs.DiffPair) []DiffPair { result := []DiffPair{} for _, pair := range pairs { result = append(result, DiffPair{ Src: toExternalStatInfo(&pair.Src), Dst: toExternalStatInfo(&pair.Dst), }) } return result } func (rh *RemotesDiffHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightRemotesView) { return } rmtDiffReq := RemoteDiffRequest{} if err := json.NewDecoder(r.Body).Decode(&rmtDiffReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } if rmtDiffReq.Name == "" { jsonifyErrf(w, http.StatusBadRequest, "empty remote name") return } rawDiff, err := rh.rapi.MakeDiff(rmtDiffReq.Name) if err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to diff") return } diff := &Diff{ Added: convertSingles(rawDiff.Added), Removed: convertSingles(rawDiff.Removed), Ignored: convertSingles(rawDiff.Ignored), Missing: convertSingles(rawDiff.Missing), Conflict: convertPairs(rawDiff.Conflict), Moved: convertPairs(rawDiff.Moved), Merged: convertPairs(rawDiff.Merged), } jsonify(w, http.StatusOK, RemoteDiffResponse{ Success: true, Diff: diff, }) } ================================================ FILE: gateway/endpoints/remotes_diff_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/sahib/brig/gateway/remotesapi" "github.com/stretchr/testify/require" ) func TestRemoteDiffEndpoint(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.State.rapi.Set(remotesapi.Remote{ Name: "bob", Fingerprint: "xxx", })) resp := s.mustRun( t, NewRemotesDiffHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/diff", RemoteDiffRequest{ Name: "bob", }, ) data := 
&RemoteDiffResponse{} require.Equal(t, http.StatusOK, resp.StatusCode) mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) require.Equal(t, 2, len(data.Diff.Added)) require.Equal(t, "/new_dir", data.Diff.Added[0].Path) require.Equal(t, "/new_file", data.Diff.Added[1].Path) require.Equal(t, 1, len(data.Diff.Removed)) require.Equal(t, "/removed_file", data.Diff.Removed[0].Path) require.Equal(t, 1, len(data.Diff.Ignored)) require.Equal(t, "/ignored", data.Diff.Ignored[0].Path) require.Equal(t, 1, len(data.Diff.Missing)) require.Equal(t, "/missing", data.Diff.Missing[0].Path) require.Equal(t, 1, len(data.Diff.Conflict)) require.Equal(t, "/conflict_src", data.Diff.Conflict[0].Src.Path) require.Equal(t, "/conflict_dst", data.Diff.Conflict[0].Dst.Path) require.Equal(t, 1, len(data.Diff.Moved)) require.Equal(t, "/moved_src", data.Diff.Moved[0].Src.Path) require.Equal(t, "/moved_dst", data.Diff.Moved[0].Dst.Path) require.Equal(t, 1, len(data.Diff.Merged)) require.Equal(t, "/merged_src", data.Diff.Merged[0].Src.Path) require.Equal(t, "/merged_dst", data.Diff.Merged[0].Dst.Path) }) } ================================================ FILE: gateway/endpoints/remotes_list.go ================================================ package endpoints import ( "net/http" "sort" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/gateway/remotesapi" ) // RemoteListHandler implements http.Handler type RemoteListHandler struct { *State } // NewRemotesListHandler returns a new RemoteListHandler func NewRemotesListHandler(s *State) *RemoteListHandler { return &RemoteListHandler{State: s} } // RemoteListResponse is the response given by this endpoint. 
type RemoteListResponse struct { Success bool `json:"success"` Remotes []*remotesapi.Remote `json:"remotes"` } func (rh *RemoteListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightRemotesView) { return } rmts, err := rh.rapi.List() if err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } sort.Slice(rmts, func(i, j int) bool { return rmts[i].Name < rmts[j].Name }) jsonify(w, http.StatusOK, &RemoteListResponse{ Success: true, Remotes: rmts, }) } ================================================ FILE: gateway/endpoints/remotes_list_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/sahib/brig/gateway/remotesapi" "github.com/stretchr/testify/require" ) func TestRemoteListEndpoint(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.State.rapi.Set(remotesapi.Remote{ Name: "bob", Fingerprint: "xxx", })) require.Nil(t, s.State.rapi.Set(remotesapi.Remote{ Name: "charlie", Fingerprint: "yyy", AcceptAutoUpdates: true, Folders: []remotesapi.Folder{ { Folder: "/public", ReadOnly: false, ConflictStrategy: "", }, }, })) resp := s.mustRun( t, NewRemotesListHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/list", nil, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := &RemoteListResponse{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) require.Equal(t, 2, len(data.Remotes)) require.Equal(t, "bob", data.Remotes[0].Name) require.Equal(t, "xxx", data.Remotes[0].Fingerprint) require.Equal(t, false, data.Remotes[0].AcceptAutoUpdates) require.Equal(t, "charlie", data.Remotes[1].Name) require.Equal(t, "yyy", data.Remotes[1].Fingerprint) require.Equal(t, true, data.Remotes[1].AcceptAutoUpdates) }) } ================================================ FILE: gateway/endpoints/remotes_remove.go ================================================ package endpoints import ( "encoding/json" "net/http" 
"github.com/sahib/brig/gateway/db" ) // RemotesRemoveHandler implements http.Handler type RemotesRemoveHandler struct { *State } // NewRemotesRemoveHandler returns a new RemotesRemoveHandler func NewRemotesRemoveHandler(s *State) *RemotesRemoveHandler { return &RemotesRemoveHandler{State: s} } // RemoteRemoveRequest is the data being sent to this endpoint. type RemoteRemoveRequest struct { Name string `json:"name"` } func (rh *RemotesRemoveHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightRemotesEdit) { return } rmtRmReq := RemoteRemoveRequest{} if err := json.NewDecoder(r.Body).Decode(&rmtRmReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } if rmtRmReq.Name == "" { jsonifyErrf(w, http.StatusBadRequest, "empty remote name") return } if err := rh.rapi.Remove(rmtRmReq.Name); err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to remove remote") return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/remotes_remove_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/sahib/brig/gateway/remotesapi" "github.com/stretchr/testify/require" ) func TestRemoteRemoveEndpoint(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.State.rapi.Set(remotesapi.Remote{ Name: "bob", Fingerprint: "xxx", })) resp := s.mustRun( t, NewRemotesRemoveHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/remove", RemoteRemoveRequest{ Name: "bob", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := struct { Success bool `json:"success"` }{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) resp = s.mustRun( t, NewRemotesRemoveHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/remove", RemoteRemoveRequest{ Name: "bob", }, ) require.Equal(t, http.StatusBadRequest, resp.StatusCode) }) } ================================================ FILE: 
gateway/endpoints/remotes_self.go ================================================ package endpoints import ( "net/http" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/gateway/remotesapi" ) // RemoteSelfHandler implements http.Handler type RemoteSelfHandler struct { *State } // NewRemotesSelfHandler returns a new RemoteSelfHandler func NewRemotesSelfHandler(s *State) *RemoteSelfHandler { return &RemoteSelfHandler{State: s} } // RemoteSelfResponse is the data being sent to this endpoint. type RemoteSelfResponse struct { Success bool `json:"success"` Self remotesapi.Identity `json:"self"` DefaultConflictStrategy string `json:"default_conflict_strategy"` } func (rh *RemoteSelfHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightRemotesView) { return } self, err := rh.rapi.Self() if err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to get self") return } jsonify(w, http.StatusOK, RemoteSelfResponse{ Success: true, Self: self, DefaultConflictStrategy: "marker", }) } ================================================ FILE: gateway/endpoints/remotes_self_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) func TestRemoteSelfEndpoint(t *testing.T) { withState(t, func(s *testState) { resp := s.mustRun( t, NewRemotesSelfHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/self", nil, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := &RemoteSelfResponse{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) require.Equal(t, "ali", data.Self.Name) require.Equal(t, "alisfingerprint", data.Self.Fingerprint) }) } ================================================ FILE: gateway/endpoints/remotes_sync.go ================================================ package endpoints import ( "encoding/json" "net/http" "github.com/sahib/brig/gateway/db" ) // RemotesSyncHandler implements http.Handler type 
RemotesSyncHandler struct { *State } // NewRemotesSyncHandler returns a new RemotesSyncHandler func NewRemotesSyncHandler(s *State) *RemotesSyncHandler { return &RemotesSyncHandler{State: s} } // RemoteSyncRequest is the data being sent to this endpoint. type RemoteSyncRequest struct { Name string `json:"name"` } func (rh *RemotesSyncHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightRemotesEdit, db.RightFsEdit) { return } rmtSyncReq := RemoteSyncRequest{} if err := json.NewDecoder(r.Body).Decode(&rmtSyncReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } if rmtSyncReq.Name == "" { jsonifyErrf(w, http.StatusBadRequest, "empty remote name") return } if err := rh.rapi.Sync(rmtSyncReq.Name); err != nil { jsonifyErrf(w, http.StatusBadRequest, "failed to sync") return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/remotes_sync_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/sahib/brig/gateway/remotesapi" "github.com/stretchr/testify/require" ) func TestRemoteSyncEndpoint(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.State.rapi.Set(remotesapi.Remote{ Name: "bob", Fingerprint: "xxx", })) resp := s.mustRun( t, NewRemotesSyncHandler(s.State), "POST", "http://localhost:5000/api/v0/remotes/sync", RemoteSyncRequest{ Name: "bob", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) data := struct { Success bool `json:"success"` }{} mustDecodeBody(t, resp.Body, &data) require.Equal(t, true, data.Success) }) } ================================================ FILE: gateway/endpoints/remove.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // RemoveHandler implements http.Handler. 
type RemoveHandler struct { *State } // NewRemoveHandler returns a new RemoveHandler func NewRemoveHandler(s *State) *RemoveHandler { return &RemoveHandler{State: s} } // RemoveRequest is the request that is being sent to the endpoint. type RemoveRequest struct { Paths []string `json:"paths"` } func (rh *RemoveHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } rmReq := RemoveRequest{} if err := json.NewDecoder(r.Body).Decode(&rmReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } for _, path := range rmReq.Paths { path = prefixRoot(path) if !rh.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "path forbidden") return } } paths := []string{} for _, path := range rmReq.Paths { path = prefixRoot(path) if err := rh.fs.Remove(path); err != nil { log.Debugf("failed to remove %s: %v", path, err) jsonifyErrf(w, http.StatusBadRequest, "failed to remove") return } paths = append(paths, path) } if len(paths) > 0 { msg := fmt.Sprintf("removed »%s«", paths[0]) if len(paths) > 1 { msg += fmt.Sprintf(" and %d others", len(paths)-1) } if !rh.commitChange(msg, w, r) { return } } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/remove_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type removeResponse struct { Success bool `json:"success"` Message string `json:"message"` } func TestRemoveEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Touch("/file")) require.Nil(t, s.fs.Mkdir("/dir", true)) resp := s.mustRun( t, NewRemoveHandler(s.State), "POST", "http://localhost:5000/api/v0/remove", &RemoveRequest{ Paths: []string{"/file", "/dir"}, }, ) require.Equal(t, http.StatusOK, resp.StatusCode) removeResp := &removeResponse{} mustDecodeBody(t, resp.Body, &removeResp) require.Equal(t, true, removeResp.Success) _, 
err := s.fs.Stat("/file") require.NotNil(t, err) _, err = s.fs.Stat("/dir") require.NotNil(t, err) }) } func TestRemoveEndpointForbidden(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/public") resp := s.mustRun( t, NewRemoveHandler(s.State), "POST", "http://localhost:5000/api/v0/remove", &RemoveRequest{ Paths: []string{"/file", "/dir"}, }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) removeResp := &removeResponse{} mustDecodeBody(t, resp.Body, &removeResp) require.Equal(t, false, removeResp.Success) }) } ================================================ FILE: gateway/endpoints/reset.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // ResetHandler implements http.Handler. type ResetHandler struct { *State } // NewResetHandler returns a new ResetHandler. func NewResetHandler(s *State) *ResetHandler { return &ResetHandler{State: s} } // ResetRequest is a request sent to this endpoint. 
type ResetRequest struct { Path string `json:"path"` Revision string `json:"revision"` Force bool `json:"force"` } func (rh *ResetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } resetReq := ResetRequest{} if err := json.NewDecoder(r.Body).Decode(&resetReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } path := prefixRoot(resetReq.Path) if !rh.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "path forbidden") return } var err error if resetReq.Path == "/" { err = rh.fs.Checkout(resetReq.Revision, true) } else { err = rh.fs.Reset(path, resetReq.Revision) } log.Debugf("reset %s to %s", path, resetReq.Revision) if err != nil { log.Debugf("failed to reset %s to %s: %v", path, resetReq.Revision, err) jsonifyErrf(w, http.StatusInternalServerError, "failed to reset") return } msg := fmt.Sprintf("reverted »%s« to »%s«", path, resetReq.Revision) if !rh.commitChange(msg, w, r) { return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/reset_test.go ================================================ package endpoints import ( "bytes" "io/ioutil" "net/http" "testing" "github.com/stretchr/testify/require" ) type resetResponse struct { Success bool `json:"success"` Message string `json:"message"` } func TestResetSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Stage("/file", bytes.NewReader([]byte("hello")))) require.Nil(t, s.fs.MakeCommit("add")) require.Nil(t, s.fs.Stage("/file", bytes.NewReader([]byte("world")))) require.Nil(t, s.fs.MakeCommit("modify")) resp := s.mustRun( t, NewResetHandler(s.State), "POST", "http://localhost:5000/api/v0/reset", &ResetRequest{ Path: "/file", Revision: "init", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) resetResp := &resetResponse{} mustDecodeBody(t, resp.Body, &resetResp) require.Equal(t, true, resetResp.Success) stream, err := s.fs.Cat("/file") 
require.Nil(t, err) data, err := ioutil.ReadAll(stream) require.Nil(t, err) require.Equal(t, []byte("hello"), data) }) } func TestResetForbidden(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/public") resp := s.mustRun( t, NewResetHandler(s.State), "POST", "http://localhost:5000/api/v0/reset", &ResetRequest{ Path: "/file", Revision: "init", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) resetResp := &resetResponse{} mustDecodeBody(t, resp.Body, &resetResp) require.Equal(t, false, resetResp.Success) }) } ================================================ FILE: gateway/endpoints/testing.go ================================================ package endpoints import ( "bytes" "context" "encoding/json" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "path/filepath" "testing" "github.com/sahib/brig/catfs" "github.com/sahib/brig/defaults" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/gateway/remotesapi" "github.com/sahib/config" "github.com/stretchr/testify/require" ) const ( testGwUser = "ali" ) type testState struct { *State } func withState(t *testing.T, fn func(state *testState)) { tmpDir, err := ioutil.TempDir("", "brig-endpoints-test-userdb") require.Nil(t, err) defer func() { os.RemoveAll(tmpDir) }() cfg, err := config.Open(nil, defaults.Defaults, config.StrictnessPanic) require.Nil(t, err) fs, err := catfs.NewFilesystem( catfs.NewMemFsBackend(), filepath.Join(tmpDir, "fs"), testGwUser, false, cfg.Section("fs"), nil, nil, ) require.Nil(t, err) dbPath := filepath.Join(tmpDir, "user") rapi := remotesapi.NewMock("ali", "alisfingerprint") userDb, err := db.NewUserDatabase(dbPath) require.Nil(t, err) state, err := NewState( fs, rapi, cfg.Section("gateway"), NewEventsHandler(rapi, nil), nil, userDb, ) require.Nil(t, err) state.UserDatabase().Add("ali", "ila", nil, nil) fn(&testState{state}) require.NoError(t, state.Close()) require.NoError(t, state.fs.Close()) require.NoError(t, userDb.Close()) } func 
mustEncodeBody(t *testing.T, v interface{}) io.Reader { buf := &bytes.Buffer{} require.Nil(t, json.NewEncoder(buf).Encode(v)) return buf } func mustDecodeBody(t *testing.T, body io.Reader, v interface{}) { data, err := ioutil.ReadAll(body) require.Nil(t, err) require.Nil(t, json.NewDecoder(bytes.NewReader(data)).Decode(v)) } func (s *testState) mustRun(t *testing.T, hdl http.Handler, verb, url string, jsonBody interface{}) *http.Response { req := httptest.NewRequest(verb, url, mustEncodeBody(t, jsonBody)) rsw := httptest.NewRecorder() user, err := s.userDb.Get("ali") require.Nil(t, err) req = req.WithContext(context.WithValue(req.Context(), dbUserKey("brig.db_user"), user)) setSession(s.store, "ali", rsw, req) hdl.ServeHTTP(rsw, req) return rsw.Result() } func (s *testState) mustChangeFolders(t *testing.T, folders ...string) { require.Nil(t, s.userDb.Remove("ali")) require.Nil(t, s.userDb.Add("ali", "ila", folders, nil)) } ================================================ FILE: gateway/endpoints/undelete.go ================================================ package endpoints import ( "encoding/json" "fmt" "net/http" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // UndeleteHandler implements http.Handler. type UndeleteHandler struct { *State } // NewUndeleteHandler creates a new undelete handler. func NewUndeleteHandler(s *State) *UndeleteHandler { return &UndeleteHandler{State: s} } // UndeleteRequest is the request that can be sent to this endpoint as JSON. type UndeleteRequest struct { // Path to create. 
Path string `json:"path"` } func (uh *UndeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { return } undelReq := UndeleteRequest{} if err := json.NewDecoder(r.Body).Decode(&undelReq); err != nil { jsonifyErrf(w, http.StatusBadRequest, "bad json") return } path := prefixRoot(undelReq.Path) if !uh.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "path forbidden") return } if err := uh.fs.Undelete(path); err != nil { log.Debugf("failed to undelete %s: %v", path, err) fmt.Println(err) jsonifyErrf(w, http.StatusInternalServerError, "failed to undelete") return } msg := fmt.Sprintf("undeleted »%s«", path) if !uh.commitChange(msg, w, r) { return } jsonifySuccess(w) } ================================================ FILE: gateway/endpoints/undelete_test.go ================================================ package endpoints import ( "net/http" "testing" "github.com/stretchr/testify/require" ) type undeleteResponse struct { Success bool `json:"success"` Message string `json:"message"` } func TestUndeleteEndpointSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Touch("/test")) require.Nil(t, s.fs.Touch("/dir")) require.Nil(t, s.fs.MakeCommit("create")) require.Nil(t, s.fs.Remove("/test")) require.Nil(t, s.fs.Remove("/dir")) require.Nil(t, s.fs.MakeCommit("remove")) resp := s.mustRun( t, NewUndeleteHandler(s.State), "POST", "http://localhost:5000/api/v0/undelete", &UndeleteRequest{ Path: "/test", }, ) require.Equal(t, http.StatusOK, resp.StatusCode) undeleteResp := &undeleteResponse{} mustDecodeBody(t, resp.Body, &undeleteResp) require.Equal(t, true, undeleteResp.Success) info, err := s.fs.Stat("/test") require.Nil(t, err) require.Equal(t, "/test", info.Path) require.Equal(t, false, info.IsDir) }) } func TestUndeleteEndpointInvalidPath(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/something/else") resp := s.mustRun( t, NewUndeleteHandler(s.State), 
"POST", "http://localhost:5000/api/v0/undelete", &UndeleteRequest{ Path: "/test", }, ) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/upload.go ================================================ package endpoints import ( "fmt" "net/http" "path" "github.com/sahib/brig/gateway/db" log "github.com/sirupsen/logrus" ) // UploadHandler implements http.Handler. type UploadHandler struct { *State } // NewUploadHandler returns a new UploadHandler. func NewUploadHandler(s *State) *UploadHandler { return &UploadHandler{State: s} } func (uh *UploadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !checkRights(w, r, db.RightFsEdit) { fmt.Println("BAD RIGHTS") return } root := r.URL.Query().Get("root") if root == "" { root = "/" } else { root = prefixRoot(root) } if err := r.ParseMultipartForm(1 * 1024 * 1024); err != nil { log.Debugf("upload: bad multipartform: %v", err) jsonifyErrf(w, http.StatusBadRequest, "failed to parse mutlipart form: %v", err) return } // Remove the cached files in /tmp defer r.MultipartForm.RemoveAll() paths := []string{} for _, headers := range r.MultipartForm.File { for _, header := range headers { path := path.Join(root, header.Filename) fd, err := header.Open() if err != nil { log.Debugf("upload: bad header: %v", err) jsonifyErrf(w, http.StatusBadRequest, "failed to open file: %v", header.Filename) return } if !uh.validatePath(path, w, r) { jsonifyErrf(w, http.StatusUnauthorized, "unauthorized") return } if err := uh.fs.Stage(path, fd); err != nil { log.Debugf("upload: could not stage: %v", err) jsonifyErrf(w, http.StatusBadRequest, "failed to insert file: %v", path) fd.Close() return } paths = append(paths, path) fd.Close() } } if len(paths) > 0 { msg := fmt.Sprintf("uploaded »%s«", paths[0]) if len(paths) > 1 { msg += fmt.Sprintf(" and %d more", len(paths)-1) } if !uh.commitChange(msg, w, r) { return } } jsonifySuccess(w) } 
================================================ FILE: gateway/endpoints/upload_test.go ================================================ package endpoints import ( "bytes" "context" "io/ioutil" "mime/multipart" "net/http" "net/http/httptest" "net/url" "path" "testing" "github.com/stretchr/testify/require" ) func mustDoUpload(t *testing.T, s *testState, name string, data []byte) *http.Response { body := &bytes.Buffer{} writer := multipart.NewWriter(body) part, err := writer.CreateFormFile("file", path.Base(name)) require.Nil(t, err) _, err = part.Write(data) require.Nil(t, err) require.Nil(t, writer.Close()) req := httptest.NewRequest( "POST", "/api/v0/upload?root="+url.QueryEscape(path.Dir(name)), body, ) user, err := s.userDb.Get("ali") require.Nil(t, err) req = req.WithContext(context.WithValue(req.Context(), dbUserKey("brig.db_user"), user)) req.Header.Set("Content-Type", writer.FormDataContentType()) rsw := httptest.NewRecorder() setSession(s.store, "ali", rsw, req) NewUploadHandler(s.State).ServeHTTP(rsw, req) return rsw.Result() } func TestUploadSuccess(t *testing.T) { withState(t, func(s *testState) { require.Nil(t, s.fs.Mkdir("/sub", true)) resp := mustDoUpload(t, s, "/sub/new_file.png", []byte("hello")) require.Equal(t, http.StatusOK, resp.StatusCode) entries, err := s.fs.List("/sub", 1) require.Nil(t, err) require.Len(t, entries, 1) stream, err := s.fs.Cat("/sub/new_file.png") require.Nil(t, err) data, err := ioutil.ReadAll(stream) require.Nil(t, err) require.Equal(t, []byte("hello"), data) }) } func TestUploadForbidden(t *testing.T) { withState(t, func(s *testState) { s.mustChangeFolders(t, "/public") resp := mustDoUpload(t, s, "/sub/new_file.png", []byte("hello")) require.Equal(t, http.StatusUnauthorized, resp.StatusCode) }) } ================================================ FILE: gateway/endpoints/util.go ================================================ package endpoints import ( "context" "encoding/base64" "encoding/json" "fmt" "net/http" "path" 
"strings" "time" "github.com/gorilla/securecookie" "github.com/gorilla/sessions" "github.com/sahib/brig/catfs" ie "github.com/sahib/brig/catfs/errors" "github.com/sahib/brig/events" "github.com/sahib/brig/gateway/db" "github.com/sahib/brig/gateway/remotesapi" "github.com/sahib/config" log "github.com/sirupsen/logrus" ) // State is a helper struct that contains all API objects that might be useful // to the endpoint implementation. It does not serve other purposes. type State struct { fs *catfs.FS rapi remotesapi.RemotesAPI cfg *config.Config ev *events.Listener evHdl *EventsHandler store *sessions.CookieStore userDb *db.UserDatabase } func readOrInitKeyFromConfig(cfg *config.Config, keyName string, keyLen int) ([]byte, error) { keyStr := cfg.String(keyName) if keyStr == "" { keyData := securecookie.GenerateRandomKey(keyLen) cfg.SetString(keyName, base64.StdEncoding.EncodeToString(keyData)) return keyData, nil } return base64.StdEncoding.DecodeString(keyStr) } // NewState creates a new state object. // events.Listener can be set later with SetEventListener. func NewState( fs *catfs.FS, rapi remotesapi.RemotesAPI, cfg *config.Config, evHdl *EventsHandler, ev *events.Listener, userDb *db.UserDatabase, ) (*State, error) { authKey, err := readOrInitKeyFromConfig(cfg, "auth.session-authentication-key", 64) if err != nil { return nil, err } encKey, err := readOrInitKeyFromConfig(cfg, "auth.session-encryption-key", 32) if err != nil { return nil, err } // Generated here, but used by the server: _, err = readOrInitKeyFromConfig(cfg, "auth.session-csrf-key", 32) if err != nil { return nil, err } return &State{ fs: fs, rapi: rapi, cfg: cfg, evHdl: evHdl, store: sessions.NewCookieStore(authKey, encKey), userDb: userDb, }, nil } // Close cleans up any potentially open resource. func (s *State) Close() error { s.evHdl.Shutdown() return nil } // UserDatabase returns the currently opened user database. 
func (s *State) UserDatabase() *db.UserDatabase { return s.userDb } func (s *State) publishFsEvent(req *http.Request) { if s.evHdl != nil { ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second) defer cancel() s.evHdl.Notify(ctx, "fs") } if s.ev == nil { return } log.Debugf("publishing fs event from gateway") ev := events.Event{ Type: events.FsEvent, } if err := s.ev.PublishEvent(ev); err != nil { log.Warningf("failed to publish filesystem change event: %v", err) } } func prefixRoot(nodePath string) string { if strings.HasPrefix(nodePath, "/") { return nodePath } return "/" + nodePath } func buildFolderCache(folders []string) map[string]bool { folderCache := make(map[string]bool) for _, folder := range folders { folderCache[prefixRoot(path.Clean(folder))] = true } return folderCache } func (s *State) pathIsVisible(nodePath string, w http.ResponseWriter, r *http.Request) bool { nodePath = prefixRoot(path.Clean(nodePath)) if s.validatePath(nodePath, w, r) { return true } name := getUserName(s.store, w, r) if name == "" { return false } user, err := s.userDb.Get(name) if err != nil { return false } folderCache := buildFolderCache(user.Folders) if err != nil { log.Debugf("failed to build folder cache: %v", err) return false } // Go over all folders, and see if we have some allowed folder // that we need to display "on the way". This could be probably // made faster if we ever need to. for folder, isValid := range folderCache { if !isValid { continue } // Example case: // folder = /nested/something // nodePath = /nested // (also handles if folder == nodePath) // // Other case (folder = /nested, nodePath = /nested/something) // is already handled by calling validatePath() above. if strings.HasPrefix(folder, nodePath) { return true } } // There is no valid prefix at all. 
return false } func (s *State) validatePath(nodePath string, w http.ResponseWriter, r *http.Request) bool { if !strings.HasPrefix(nodePath, "/") { return false } name := getUserName(s.store, w, r) if name == "" { return false } user, err := s.userDb.Get(name) if err != nil { return false } // At this point we know that the user is logged in. return s.validatePathForUser(nodePath, user, w, r) } func (s *State) validatePathForUser(nodePath string, user db.User, w http.ResponseWriter, r *http.Request) bool { curr := prefixRoot(nodePath) folderCache := buildFolderCache(user.Folders) for curr != "" { if folderCache[curr] { return true } next := path.Dir(curr) if curr == "/" && next == curr { // We've gone up too much: break } curr = next } // No fitting path found: return false } ////////////////////// func jsonify(w http.ResponseWriter, statusCode int, data interface{}) { w.WriteHeader(statusCode) w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(data); err != nil { log.Warningf("failed to encode json: %v", err) w.Write([]byte( "{\"success\": false, \"message\": \"failed to encode json response\"}", )) w.WriteHeader(500) return } } func jsonifyErrf(w http.ResponseWriter, statusCode int, format string, data ...interface{}) { msg := fmt.Sprintf(format, data...) success := false if statusCode >= 200 && statusCode < 400 { success = true } else { // TODO: also pass request and output the url in this log message. 
log.Debugf("failed to respond: %v", msg) } jsonify(w, statusCode, struct { Success bool `json:"success"` Message string `json:"message"` }{ Success: success, Message: msg, }) } func jsonifySuccess(w http.ResponseWriter) { jsonifyErrf(w, http.StatusOK, "success") } func (s *State) commitChange(msg string, w http.ResponseWriter, r *http.Request) bool { name := getUserName(s.store, w, r) fullMsg := fmt.Sprintf("gateway: »%s« %s", name, msg) if err := s.fs.MakeCommit(fullMsg); err != nil { if err != ie.ErrNoChange { log.Warningf("could not commit: %v", err) jsonifyErrf(w, http.StatusInternalServerError, "could not commit") return false } // There was no change. No need to notify. return true } s.evHdl.Notify(r.Context(), "fs") return true } /////// type secureMiddleware struct { *State SubHandler http.Handler } func (sm *secureMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { hdr := w.Header() // Do not let browsers guess the content type: // https://en.wikipedia.org/wiki/Content_sniffing hdr.Set("X-Content-Type-Options", "nosniff") // https://security.stackexchange.com/questions/121796/what-security-implications-does-dns-prefetching-have hdr.Set("X-DNS-Prefetch-Control", "off") // Do not allow