Repository: braid-org/braidjs
Branch: master
Commit: 52803d16617e
Files: 102
Total size: 895.6 KB
Directory structure:
gitextract_8cg5elin/
├── .gitignore
├── antimatter/
│ ├── antimatter.js
│ ├── doc.html
│ ├── package.json
│ ├── readme.md
│ └── test.html
├── antimatter_ts/
│ ├── antimatter.js
│ ├── doc.html
│ ├── package.json
│ ├── random002.js
│ ├── readme.md
│ ├── src/
│ │ ├── antimatter_crdt.ts
│ │ ├── json_crdt.ts
│ │ └── sequence_crdt.ts
│ ├── test.html
│ └── tsconfig.json
├── antimatter_wiki/
│ ├── client.html
│ ├── package.json
│ ├── readme.md
│ └── server.js
├── braid-http/
│ ├── braid-http-client.js
│ ├── braid-http-server.js
│ ├── contributing.md
│ ├── demos/
│ │ ├── blog/
│ │ │ ├── README
│ │ │ ├── certificate
│ │ │ ├── client.html
│ │ │ ├── package.json
│ │ │ ├── private-key
│ │ │ └── server.js
│ │ └── chat/
│ │ ├── README
│ │ ├── certificate
│ │ ├── client.html
│ │ ├── package.json
│ │ ├── private-key
│ │ └── server.js
│ ├── index.js
│ ├── index.mjs
│ ├── package.json
│ ├── package.md
│ ├── readme.md
│ └── test/
│ ├── client.html
│ ├── readme.md
│ ├── server.js
│ ├── test-request.txt
│ └── test-responses.txt
├── json-patch/
│ ├── apply-patch.js
│ ├── package.json
│ ├── readme.md
│ └── test.js
├── kernel/
│ ├── antimatter.js
│ ├── demos/
│ │ ├── simple/
│ │ │ ├── simple-client.html
│ │ │ └── simple-server.js
│ │ ├── sync9-chat/
│ │ │ ├── chat-server.js
│ │ │ ├── chat.css
│ │ │ ├── chat.html
│ │ │ ├── chat.js
│ │ │ ├── client.js
│ │ │ ├── mobile.css
│ │ │ ├── package.json
│ │ │ ├── settings.css
│ │ │ ├── settings.html
│ │ │ └── worker.js
│ │ └── wiki/
│ │ ├── wiki-client.html
│ │ └── wiki-server.js
│ ├── errors.js
│ ├── http-client.js
│ ├── http-server.js
│ ├── leadertab-shell.js
│ ├── llww.js
│ ├── node.js
│ ├── package.json
│ ├── pipe.js
│ ├── readme.md
│ ├── sqlite-store.js
│ ├── store.js
│ ├── test/
│ │ ├── tests.js
│ │ ├── virtual-p2p.js
│ │ ├── websocket-test.js
│ │ ├── wiki-perf.html
│ │ └── wiki-tester.js
│ ├── websocket-client.js
│ └── websocket-server.js
├── readme.md
├── simple_d_ton/
│ ├── index.js
│ └── package.json
├── simpleton/
│ ├── client.js
│ ├── demo.js
│ ├── index.js
│ ├── index.mjs
│ ├── package.json
│ └── server.js
├── sync9/
│ ├── old-vis/
│ │ ├── visualization.html
│ │ └── visualization.js
│ └── sync9.js
├── util/
│ ├── apply-patch.js
│ ├── braid-bundler.js
│ ├── diff.js
│ ├── require.js
│ └── utilities.js
└── yarnball/
├── server.js
├── yarnball.html
└── yarnball.js
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Any certificates
certs/
certificates/
*.pem
# Database stuff
db.sqlite*
# Builds
braid-bundle.js
builds/
# VS Code
.vscode/
# Basic Nodejs Gitignore
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public/ line if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
# Mike isn't into package-lock, but feel free to disagree with him
package-lock.json
# antimatter wiki db files
antimatter_wiki_db/
antimatter_wiki_db.*
antimatter_wiki.*
april-db-backup
jan-db-backup-2025
db
server.sh
# apple
.DS_Store
================================================
FILE: antimatter/antimatter.js
================================================
/// # Software Architecture
/// The software is architected into three objects:
///
/// ``` js
/// var {create_antimatter_crdt, create_json_crdt, sequence_crdt} = require('@braidjs/antimatter')
/// ```
// v522
/// - *antimatter_crdt*: created using `create_antimatter_crdt`, this object is a json_crdt with antimatter algorithm methods added to it so that it can communicate with other peers to learn which history can be pruned, and tells the underlying json_crdt object to prune it.
var create_antimatter_crdt;
/// - *json_crdt*: created using `create_json_crdt`, this object is a pruneable
/// JSON CRDT — "JSON" meaning it represents an arbitrary JSON data structure, and
/// "CRDT" and "pruneable" having the same meaning as for sequence_crdt below. The
/// json_crdt makes recursive use of sequence_crdt structures to represent
/// arbitrary JSON (for instance, a map is represented with a sequence_crdt
/// structure for each value, where the first element in the sequence is the
/// value).
var create_json_crdt;
/// - *sequence_crdt*: methods to manipulate a pruneable sequence CRDT —
/// "sequence" meaning it represents a javascript string or array, "CRDT" meaning
/// this structure can be merged with other ones, and "pruneable" meaning that it
/// supports an operation to remove meta-data when it is no longer needed (whereas
/// CRDT's often keep track of this meta-data forever).
var sequence_crdt = {};
(() => {
/// # create_antimatter_crdt(send[, init])
///
/// Creates and returns a new antimatter_crdt object (or adds antimatter_crdt methods and properties to `init`).
///
/// * `send`: A callback function to be called whenever this antimatter_crdt wants to send a
/// message over a connection registered with `subscribe`. The sole
/// parameter to this function is a JSONafiable object that hopes to be passed to
/// the `receive` method on the antimatter_crdt object at the other end of the
/// connection specified in the `conn` key.
/// * `get_time`: function that returns a number representing time (e.g. `Date.now()`)
/// * `set_timeout`: function that takes a callback and timeout length, and calls that callback after that amount of time; also returns an identifier that can be passed to `clear_timeout` to cancel the timeout (e.g. wrapping the javascript setTimeout)
/// * `clear_timeout`: function that takes a timeout identifier and cancels it (e.g. wrapping the javascript clearTimeout)
/// * `init`: (optional) An antimatter_crdt object to start with, which we'll add any properties to that it doesn't have, and we'll add all the antimatter_crdt methods to it. This option exists so you can serialize an antimatter_crdt instance as JSON, and then restore it later.
/// ``` js
/// var antimatter_crdt = create_antimatter_crdt(msg => {
/// websockets[msg.conn].send(JSON.stringify(msg))
/// },
/// () => Date.now(),
/// (func, t) => setTimeout(func, t),
/// (t) => clearTimeout(t)),
/// JSON.parse(fs.readFileSync('./antimatter.backup'))
/// )
/// ```
create_antimatter_crdt = (
send,
get_time,
set_timeout,
clear_timeout,
self
) => {
self = create_json_crdt(self);
self.send = send;
// purposely not:
// self.id = self.id || Math.random().toString(36).slice(2);
// to accommodate an id of numeric 0
if (self.id === undefined) self.id = Math.random().toString(36).slice(2);
self.next_seq = self.next_seq || 0; // counter for minting version ids: `${next_seq++}@${id}`
self.conns = self.conns || {}; // conn id -> { peer, seq } for established connections
self.proto_conns = self.proto_conns || {}; // connections we've subscribed on but that aren't established yet
self.conn_count = self.conn_count || 0; // monotonic stamp; orders connections and fissure records
self.fissures = self.fissures || {}; // "a:b:conn" -> fissure record
self.acked_boundary = self.acked_boundary || {}; // frontier of versions believed seen by everyone
self.ackmes = self.ackmes || {}; // in-flight ackme id -> bookkeeping record
self.forget_cbs = self.forget_cbs || {}; // conn id -> resolver invoked when a forget-ack arrives
self.version_groups = self.version_groups || {}; // version id -> its (shared, sorted) version-group array
self.ackme_map = self.ackme_map || {}; // versions-key (JSON string) -> set of ackme ids targeting those versions
self.ackme_time_est_1 = self.ackme_time_est_1 || 1000; // running estimate of local-ack round-trip time
self.ackme_time_est_2 = self.ackme_time_est_2 || 1000; // running estimate of global-ack propagation time
self.ackme_current_wait_time = self.ackme_current_wait_time || 1000; // randomized delay window before initiating an ackme
self.ackme_increases_allowed = 1;
self.ackme_timeout = self.ackme_timeout || null; // pending ackme timer id, if any
// Merge the given versions (plus any groups they already belong to) into one
// canonical version group: a sorted array of version ids. Every member of the
// group is pointed at the same shared array via self.version_groups.
// Returns the group array.
function raw_add_version_group(version_array) {
  const members = {};
  for (const v of version_array) {
    if (members[v]) continue;
    members[v] = true;
    const existing = self.version_groups[v];
    if (existing) for (const m of existing) members[m] = true;
  }
  const group = Object.keys(members).sort();
  for (const m of group) self.version_groups[m] = group;
  return group;
}
// Scan the DAG for candidate "sibling sets": groups of 2+ versions where every
// member has exactly the same parent set, and each of those parents has exactly
// this same child set — i.e. a fully-matched bipartite slice that can later be
// collapsed as a unit. Returns { parent_sets, child_sets }, each mapping a
// version id to a shared { members } container. The current_version set is
// pre-seeded as a parent set.
function get_parent_and_child_sets(children) {
let parent_sets = {};
let child_sets = {};
let done = {};
// Register set `s` into `sets` (only if it has 2+ members), pointing every
// member at one shared container; optionally mark members as processed.
function add_set_to_sets(s, sets, mark_done) {
let container = { members: s };
let array = Object.keys(s);
if (array.length < 2) return;
for (let v of array) {
sets[v] = container;
if (mark_done) done[v] = true;
}
}
add_set_to_sets(self.current_version, parent_sets, true);
for (let v of Object.keys(self.T)) {
if (done[v]) continue;
done[v] = true;
if (!children[v]) continue;
let first_child_set = children[v];
let first_child_array = Object.keys(first_child_set);
let first_parent_set = self.T[first_child_array[0]];
let first_parent_array = Object.keys(first_parent_set);
// Accept only if every child shares the exact same parent set, AND every
// parent shares the exact same child set (both directions must match).
if (
first_child_array.every((child) => {
let parent_set = self.T[child];
let parent_array = Object.keys(parent_set);
return (
parent_array.length == first_parent_array.length &&
parent_array.every((parent) => first_parent_set[parent])
);
}) &&
first_parent_array.every((parent) => {
let child_set = children[parent];
let child_array = Object.keys(child_set);
return (
child_array.length == first_child_array.length &&
child_array.every((child) => first_child_set[child])
);
})
) {
add_set_to_sets(first_parent_set, parent_sets, true);
add_set_to_sets(first_child_set, child_sets);
}
}
return { parent_sets, child_sets };
}
// Starting from the version set `bottom`, walk up the DAG (toward parents)
// looking for the nearest "bubble" top: a single version — or a whole sibling
// set from `child_sets` — that dominates everything in `bottom`, so the span
// between top and bottom can be collapsed.
// * `children` maps version id -> set of child version ids.
// * With no `restricted` set, the first top found is returned (and a missing
//   parent in self.T is a hard error).
// * With a `restricted` set, the walk stops at restricted versions and the
//   best top found so far (possibly null) is returned.
function find_one_bubble(bottom, children, child_sets, restricted) {
  let expecting = { ...bottom };
  let seen = {};
  // Pre-mark the immediate children of `bottom` as seen, so the upward walk
  // does not wait on them.
  Object.keys(bottom).forEach(
    (v) =>
      children[v] &&
      Object.keys(children[v]).forEach((v) => (seen[v] = true))
  );
  let q = Object.keys(expecting);
  let last_top = null;
  while (q.length) {
    // BUG FIX: `cur` was assigned without declaration, creating an implicit
    // global (a ReferenceError under strict mode / ES modules).
    let cur = q.shift();
    if (!self.T[cur]) {
      if (!restricted) throw "bad"; // NOTE(review): throws a string, not an Error (kept for compatibility)
      else return last_top;
    }
    if (restricted && restricted[cur]) return last_top;
    if (seen[cur]) continue;
    // Only process a version once all of its children have been seen.
    if (children[cur] && !Object.keys(children[cur]).every((c) => seen[c]))
      continue;
    seen[cur] = true;
    delete expecting[cur];
    if (!Object.keys(expecting).length) {
      // Everything we were waiting on converged at `cur` — candidate top.
      last_top = { [cur]: true };
      if (!restricted) return last_top;
    }
    Object.keys(self.T[cur]).forEach((p) => {
      expecting[p] = true;
      q.push(p);
    });
    // A whole sibling set can also serve as a top: all members seen, and our
    // remaining `expecting` set is exactly the set's common parent set.
    if (
      child_sets[cur] &&
      Object.keys(child_sets[cur].members).every((v) => seen[v])
    ) {
      let expecting_array = Object.keys(expecting);
      let parent_set = self.T[cur];
      let parent_array = Object.keys(parent_set);
      if (
        expecting_array.length == parent_array.length &&
        expecting_array.every((v) => parent_set[v])
      ) {
        last_top = child_sets[cur].members;
        if (!restricted) return last_top;
      }
    }
  }
  return last_top;
}
// Register `version_array` as a version group and, if any member is already in
// the DAG, collapse the in-DAG members (the bubble's bottom) up to their common
// dominator (the top) via apply_bubbles. Returns the group's canonical
// (first, sorted) version id.
function add_version_group(version_array) {
let version_group = raw_add_version_group(version_array);
if (!version_array.some((x) => self.T[x])) return version_group[0];
let children = self.get_child_map();
let { parent_sets, child_sets } = get_parent_and_child_sets(children);
let to_bubble = {};
// Assign `v` and all of its ancestors to `bubble`.
function mark_bubble(v, bubble) {
if (to_bubble[v]) return;
to_bubble[v] = bubble;
for (let vv of Object.keys(self.T[v])) mark_bubble(vv, bubble);
}
let bottom = Object.fromEntries(
version_group.filter((x) => self.T[x]).map((x) => [x, true])
);
let top = find_one_bubble(bottom, children, child_sets);
// A bubble is a pair: [bottom-most id, top-most id].
let bubble = [Object.keys(bottom).sort()[0], Object.keys(top)[0]];
for (let v of Object.keys(top)) to_bubble[v] = bubble;
for (let v of Object.keys(bottom)) mark_bubble(v, bubble);
self.apply_bubbles(to_bubble);
return version_group[0];
}
// Wrap the raw `send` callback so every outgoing message has its version ids
// translated through self.version_groups: a grouped version id is replaced by
// its whole group array, and parent sets are widened to include every member
// of any group a parent belongs to.
let orig_send = send;
send = (x) => {
  const groups = self.version_groups;
  if (groups[x.version]) x.version = groups[x.version];
  if (x.parents) {
    x.parents = { ...x.parents };
    for (const v of Object.keys(x.parents)) {
      const g = groups[v];
      if (g) for (const member of g) x.parents[member] = true;
    }
  }
  if (Array.isArray(x.versions)) {
    // Deep-copy so the caller's array is never mutated.
    x.versions = JSON.parse(JSON.stringify(x.versions));
    for (const v of x.versions) {
      if (groups[v.version]) v.version = groups[v.version];
      for (const p of Object.keys(v.parents)) {
        const g = groups[p];
        if (g) for (const member of g) v.parents[member] = true;
      }
    }
  }
  orig_send(x);
};
/// # antimatter_crdt.receive(message)
///
/// Let this antimatter object "receive" a message from another antimatter object, presumably from its `send` callback.
/// ``` js
/// websocket.on('message', data => {
/// antimatter_crdt.receive(JSON.parse(data)) });
/// ```
/// You generally do not need to mess with a message object directly, but below are the various message objects you might see, categorized by their `type` entry. Note that each object also
/// contains a `conn` entry with the id of the connection the message is sent
/// over.
self.receive = (x) => {
// Returns rebased_patches: the patches produced by any self.add_version calls below.
let {
type,
version,
parents,
patches,
versions,
fissure,
fissures,
seen,
forget,
ackme,
peer,
conn,
} = x;
// An array-valued version denotes a version group: register it if new, then
// collapse it to its canonical (first, sorted) member.
if (version && typeof version != "string") {
if (!self.T[version[0]]) version = add_version_group(version);
else version = version[0];
}
// Drop any parent that is a non-canonical member of a known version group.
if (parents) {
parents = { ...parents };
Object.keys(parents).forEach((v) => {
if (self.version_groups[v] && self.version_groups[v][0] != v)
delete parents[v];
});
}
if (versions && versions.forEach) versions.forEach((v) => {
if (typeof v.version != "string") {
if (!self.T[v.version[0]]) v.version = add_version_group(v.version);
else v.version = v.version[0];
}
v.parents = { ...v.parents };
Object.keys(v.parents).forEach((vv) => {
if (self.version_groups[vv] && self.version_groups[vv][0] != vv)
delete v.parents[vv];
});
});
// The version set this message implicitly acks (used by the ackme machinery below).
let ackme_versions_array = version
? [version]
: versions && !Array.isArray(versions)
? Object.keys(versions).sort()
: null;
let ackme_versions =
ackme_versions_array &&
Object.fromEntries(ackme_versions_array.map((v) => [v, true]));
if (versions && !Array.isArray(versions)) {
versions = { ...versions };
Object.keys(versions).forEach((v) => {
if (self.version_groups[v] && self.version_groups[v][0] != v)
delete versions[v];
});
if (!Object.keys(versions).length) return;
}
/// ## message `subscribe`
/// `subscribe` is the first message sent over a connection, and the peer at the other end will respond with `welcome`.
/// ``` js
/// { type: 'subscribe',
/// peer: 'SENDER_ID',
/// conn: 'CONN_ID',
/// parents: {'PARENT_VERSION_ID': true, ...} }
/// ```
/// The `parents` are optional, and describes which versions this peer already has. The other end will respond with versions since that set of parents.
if (type == "subscribe" || (type == "welcome" && peer != null)) {
if (self.conns[conn] != null) throw Error("bad");
self.conns[conn] = { peer, seq: ++self.conn_count };
}
/// ## message `fissure`
///
/// Sent to alert peers about a fissure. The `fissure` entry contains information about the two peers involved in the fissure, the specific connection id that broke, the `versions` that need to be protected, and the `time` of the fissure (in case we want to ignore it after some time). It is also possible to send multiple `fissures` in an array.
/// ``` js
/// { type: 'fissure',
/// fissure: { // or fissures: [{...}, {...}, ...],
/// a: 'PEER_A_ID',
/// b: 'PEER_B_ID',
/// conn: 'CONN_ID',
/// versions: {'VERSION_ID': true, ...},
/// time: Date.now()
/// },
/// conn: 'CONN_ID' }
/// ```
/// Note that `time` isn't used for anything critical, as it's just wallclock time.
// Normalize to an array, deep-copy, and stamp each fissure with our connection counter.
if (fissure) fissures = [fissure];
if (fissures) fissures = fissures.map((f) => {
f = JSON.parse(JSON.stringify(f));
f.t = self.conn_count;
return f;
});
if (versions && (type == "update" || type == "welcome"))
versions = Object.fromEntries(versions.map((v) => [v.version, v]));
if (version) versions = { [version]: true };
let rebased_patches = [];
let fissures_back = [];
let fissures_forward = [];
let fissures_done = {};
function copy_fissures(fs) {
return fs.map((f) => {
f = JSON.parse(JSON.stringify(f));
delete f.t;
return f;
});
}
// Merge incoming fissures with ours: send back the ones the sender is missing,
// and propagate forward the ones that are new to us.
if (fissures) {
let fiss_map = Object.fromEntries(
fissures.map((f) => [f.a + ":" + f.b + ":" + f.conn, f])
);
for (let [key, f] of Object.entries(fiss_map)) {
if (fissures_done[f.conn]) continue;
fissures_done[f.conn] = true;
let our_f = self.fissures[key];
let other_key = f.b + ":" + f.a + ":" + f.conn;
let their_other = fiss_map[other_key];
let our_other = self.fissures[other_key];
if (!our_f) self.fissures[key] = f;
if (their_other && !our_other) self.fissures[other_key] = their_other;
if (!their_other && !our_other && f.b == self.id && !self.conns[f.conn]) {
our_other = self.fissures[other_key] = {
...f,
a: f.b,
b: f.a,
t: self.conn_count,
};
}
if (!their_other && our_other) {
fissures_back.push(f);
fissures_back.push(our_other);
}
if (!our_f || (their_other && !our_other)) {
fissures_forward.push(f);
if (their_other || our_other)
fissures_forward.push(their_other || our_other);
}
}
}
/// ## message `welcome`
/// Sent in response to a `subscribe`, basically contains the initial state of the document; incoming `welcome` messages are also propagated over all our other connections but only with information that was new to us, so the propagation will eventually stop. When sent in response to a `subscribe` (rather than being propagated), we include a `peer` entry with the id of the sending peer, so they know who we are, and to trigger them to send us their own `welcome` message.
///
/// ``` js
/// {
/// type: 'welcome',
/// versions: [
/// //each version looks like an update message...
/// ],
/// fissures: [
/// //each fissure looks as it would in a fissure message...
/// ],
/// parents:
/// {
/// //versions you must have before consuming these new versions
/// 'PARENT_VERSION_ID': true,
/// ...
/// },
/// [peer: 'SENDER_ID'], // if responding to a subscribe
/// conn: 'CONN_ID'
/// }
/// ```
// _T collects parents of welcome versions; entries are deleted as versions get
// added, leaving only the versions we could NOT absorb.
let _T = {};
let added_versions = [];
if (type == "welcome") {
var versions_to_add = {};
let vs = Object.values(versions);
vs.forEach((v) => (versions_to_add[v.version] = v.parents));
vs.forEach((v) => {
if (
self.T[v.version] ||
(self.version_groups[v.version] &&
self.version_groups[v.version][0] != v.version)
) {
remove_ancestors(v.version);
function remove_ancestors(v) {
if (versions_to_add[v]) {
Object.keys(versions_to_add[v]).forEach(remove_ancestors);
delete versions_to_add[v];
}
}
}
});
for (let v of vs) _T[v.version] = v.parents;
// Add every version whose parents are all present; the l1 label skips a
// version when one of its parents is missing.
l1: for (var v of vs) {
if (versions_to_add[v.version]) {
let ps = Object.keys(v.parents);
if (!ps.length && Object.keys(self.T).length) continue;
// NOTE(review): loop variable `p` is never declared (implicit global;
// this would throw in strict mode).
for (p of ps) if (!self.T[p]) continue l1;
rebased_patches = rebased_patches.concat(
self.add_version(v.version, v.parents, v.patches, v.sort_keys)
);
added_versions.push(v);
delete _T[v.version];
}
}
}
// Handshake response: reply with our own welcome; when replying to a welcome,
// also record a fissure protecting the versions just exchanged, in case this
// link breaks.
if (type == "subscribe" || (type == "welcome" && peer != null)) {
let fissures_back = Object.values(self.fissures);
if (type == "welcome") {
var leaves = { ..._T };
Object.keys(_T).forEach((v) => {
Object.keys(_T[v]).forEach((p) => delete leaves[p]);
});
let f = {
a: self.id,
b: peer,
conn: "-" + conn,
versions: Object.fromEntries(
added_versions
.concat(Object.keys(leaves).map((v) => versions[v]))
.map((v) => [v.version, true])
),
time: get_time(),
t: self.conn_count,
};
if (Object.keys(f.versions).length) {
let key = f.a + ":" + f.b + ":" + f.conn;
self.fissures[key] = f;
fissures_back.push(f);
fissures_forward.push(f);
}
}
send({
type: "welcome",
versions: self.generate_braid(parents || versions),
fissures: copy_fissures(fissures_back),
parents:
parents &&
Object.keys(parents).length &&
self.get_leaves(self.ancestors(parents, true)),
...(type == "subscribe" ? { peer: self.id } : {}),
conn,
});
} else if (fissures_back.length) {
send({
type: "fissure",
fissures: copy_fissures(fissures_back),
conn,
});
}
/// ## message `forget`
/// Used to disconnect without creating a fissure, presumably meaning the sending peer doesn't plan to make any edits while they're disconnected.
/// ``` js
/// {type: 'forget', conn: 'CONN_ID'}
/// ```
if (type == "forget") {
if (self.conns[conn] == null) throw Error("bad");
send({ type: "ack", forget: true, conn });
delete self.conns[conn];
delete self.proto_conns[conn];
}
/// ## message forget `ack`
/// Sent in response to `forget`.. so they know we forgot them.
/// ``` js
/// {type: 'ack', forget: true, conn: 'CONN_ID'}
/// ```
if (type == "ack" && forget) {
self.forget_cbs[conn]();
}
/// ## message `update`
/// Sent to alert peers about a change in the document. The change is represented as a version, with a unique id, a set of parent versions (the most recent versions known before adding this version), and an array of patches, where the offsets in the patches do not take into account the application of other patches in the same array.
/// ``` js
/// { type: 'update',
/// version: 'VERSION_ID',
/// parents: {'PARENT_VERSION_ID': true, ...},
/// patches: [ {range: '.json.path.a.b', content: 42}, ... ],
/// conn: 'CONN_ID' }
/// ```
if (type == "update") {
if (conn == null || !self.T[version]) {
let ps = Object.keys(parents);
if (!ps.length && Object.keys(self.T).length) return;
// NOTE(review): `p` is an undeclared (implicit global) loop variable here too.
for (p of ps) if (!self.T[p]) return;
rebased_patches = self.add_version(version, parents, patches);
for (let c of Object.keys(self.conns))
if (c != conn)
send({ type: "update", version, parents, patches, ackme, conn: c });
}
}
/// ## message `ackme`
/// Sent for pruning purposes, to try and establish whether everyone has seen the most recent versions. Note that an `update` message is treated as a `ackme` message for the version in the update.
/// ``` js
/// { type: 'ackme',
/// version: 'ACKME_ID',
/// versions: {'VERSION_ID_A': true, ...},
/// conn: 'CONN_ID' }
/// ```
if (type == "ackme" || type == "update") {
if (!Object.keys(versions).every((v) => self.T[v])) return;
if (
self.ackme_timeout &&
ackme_versions_array.length ==
Object.keys(self.current_version).length &&
ackme_versions_array.every((x) => self.current_version[x])
) {
clear_timeout(self.ackme_timeout);
self.ackme_timeout = null;
}
let m = self.ackmes[ackme];
if (!m) {
m = self.ackmes[ackme] = {
id: ackme,
origin: conn,
count: Object.keys(self.conns).length - (conn != null ? 1 : 0),
versions: ackme_versions,
seq: self.conn_count,
time: get_time(),
};
m.orig_count = m.count;
m.real_ackme = type == "ackme";
m.key = JSON.stringify(Object.keys(m.versions).sort());
self.ackme_map[m.key] = self.ackme_map[m.key] || {};
let before = Object.keys(self.ackme_map[m.key]).length;
self.ackme_map[m.key][m.id] = true;
let after = Object.keys(self.ackme_map[m.key]).length;
if (before == 1 && after == 2 && self.ackme_increases_allowed > 0) {
self.ackme_current_wait_time *= 2;
self.ackme_increases_allowed--;
}
if (type == "ackme")
for (let c of Object.keys(self.conns))
if (c != conn)
send({
type: "ackme",
ackme,
versions: ackme_versions,
conn: c,
});
} else if (m.seq < self.conns[conn].seq) {
send({
type: "ack",
seen: "local",
ackme,
versions: ackme_versions,
conn,
});
return;
} else m.count--;
check_ackme_count(ackme);
}
/// ## message local `ack`
/// Sent in response to `update`, but not right away; a peer will first send the `update` over all its other connections, and only after they have all responded with a local `ack` – and we didn't see a `fissure` message while waiting – will the peer send a local `ack` over the originating connection.
/// ``` js
/// {type: 'ack', seen: 'local', version: 'VERSION_ID', conn: 'CONN_ID'}
/// ```
if (type == "ack" && seen == "local") {
let m = self.ackmes[ackme];
if (!m || m.cancelled) return;
m.count--;
check_ackme_count(ackme);
}
// Called whenever an ackme's outstanding-ack count may have changed: when it
// reaches zero, ack back toward the origin, or globally ack if we originated it.
function check_ackme_count(ackme) {
let m = self.ackmes[ackme];
if (m && m.count === 0 && !m.cancelled) {
m.time2 = get_time();
if (m.orig_count > 0) {
let t = m.time2 - m.time;
let weight = 0.1;
self.ackme_time_est_1 =
weight * t + (1 - weight) * self.ackme_time_est_1;
}
if (m.origin != null) {
if (self.conns[m.origin])
send({
type: "ack",
seen: "local",
ackme,
versions: ackme_versions,
conn: m.origin,
});
} else add_full_ack_leaves(ackme);
}
}
/// ## message global `ack`
/// Sent after an originating peer has received a local `ack` over all its connections, or after any peer receives a global `ack`, so that everyone may come to know that this version has been seen by everyone in this peer group.
/// ``` js
/// {type: 'ack', seen: 'global', version: 'VERSION_ID', conn: 'CONN_ID'}
/// ```
if (type == "ack" && seen == "global") {
let m = self.ackmes[ackme];
if (!m || m.cancelled) return;
let t = get_time() - m.time2;
let weight = 0.1;
self.ackme_time_est_2 =
weight * t + (1 - weight) * self.ackme_time_est_2;
if (m.real_ackme && Object.keys(self.ackme_map[m.key]).length == 1) {
self.ackme_current_wait_time *= 0.8;
}
add_full_ack_leaves(ackme, conn);
}
// On a global ack: notify other connections, extend the acked boundary to
// cover the acked versions (clearing old boundary entries among their
// ancestors), then prune.
function add_full_ack_leaves(ackme, conn) {
let m = self.ackmes[ackme];
if (!m || m.cancelled) return;
m.cancelled = true;
for (let [c, cc] of Object.entries(self.conns))
if (c != conn && cc.seq <= m.seq)
send({
type: "ack",
seen: "global",
ackme,
versions: ackme_versions,
conn: c,
});
for (let v of Object.keys(m.versions)) {
if (!self.T[v]) continue;
let marks = {};
let f = (v) => {
if (!marks[v]) {
marks[v] = true;
delete self.acked_boundary[v];
Object.keys(self.T[v]).forEach(f);
}
};
f(v);
self.acked_boundary[v] = true;
}
prune(false, m.seq);
}
// Propagate newly-learned versions and fissures to all other connections.
if (added_versions.length || fissures_forward.length) {
for (let c of Object.keys(self.conns))
if (c != conn)
send({
type: added_versions.length ? "welcome" : "fissure",
...(added_versions.length ? { versions: added_versions } : {}),
fissures: copy_fissures(fissures_forward),
conn: c,
});
}
if (fissures_forward.length) resolve_fissures();
// If pruning looks possible, schedule an ackme after a randomized wait.
if (
!self.ackme_timeout &&
type != "update" &&
type != "ackme" &&
prune(true)
) {
if (!self.ackme_current_wait_time) {
self.ackme_current_wait_time =
4 * (self.ackme_time_est_1 + self.ackme_time_est_2);
}
let t = Math.random() * self.ackme_current_wait_time;
self.ackme_timeout = set_timeout(() => {
self.ackme_increases_allowed = 1;
self.ackme_timeout = null;
if (prune(true)) self.ackme();
}, t);
}
if (type == "welcome" && peer == null && prune(true, null, true))
self.ackme();
return rebased_patches;
};
/// # antimatter_crdt.subscribe(conn)
///
/// Register a new connection with id `conn` – triggers this antimatter_crdt object to send a `subscribe` message over the given connection.
///
/// ``` js
/// alice_antimatter_crdt.subscribe('connection_to_bob')
/// ```
self.subscribe = (conn) => {
  // Track the connection as "proto" until the welcome handshake completes.
  self.proto_conns[conn] = true;
  const hello = { type: "subscribe", peer: self.id, conn };
  send(hello);
};
/// # antimatter_crdt.forget(conn)
///
/// Disconnect the given connection without creating a fissure – we don't need to reconnect with them.. it seems.. if we do, then we need to call `disconnect` instead, which will create a fissure allowing us to reconnect.
///
/// ``` js
/// alice_antimatter_crdt.forget('connection_to_bob')
/// ```
// Sends a `forget` message and resolves once the peer's forget-ack arrives
// (the ack handler in `receive` invokes the callback stored in forget_cbs).
self.forget = async (conn) => {
await new Promise((done) => {
// NOTE(review): if `conn` is not an established connection, `done` is never
// called and this promise never resolves — confirm that is intended.
if (self.conns[conn] != null) {
self.forget_cbs[conn] = done;
send({ type: "forget", conn });
}
self.disconnect(conn, false);
});
};
/// # antimatter_crdt.disconnect(conn)
///
/// If we detect that a connection has closed, let the antimatter_crdt object know by calling this method with the given connection id – this will create a fissure so we can reconnect with whoever was on the other end of the connection later on.
///
/// ``` js
/// alice_antimatter_crdt.disconnect('connection_to_bob')
/// ```
self.disconnect = (conn, fissure = true) => {
  const established = self.conns[conn];
  if (established == null && !self.proto_conns[conn]) return;
  delete self.proto_conns[conn];
  if (!established) return;
  delete self.conns[conn];
  if (!fissure) return;
  // Record a fissure so edits made while apart can be reconciled later.
  const f = create_fissure(established.peer, conn);
  if (f) self.receive({ type: "fissure", fissure: f });
};
/// # antimatter_crdt.update(...patches)
///
/// Modify this antimatter_crdt object by applying the given patches. Each patch looks like `{range: '.life.meaning', content: 42}`. Calling this method will trigger calling the `send` callback to let our peers know about this change.
///
/// ``` js
/// antimatter_crdt.update({
/// range: '.life.meaning',
/// content: 42
/// })
/// ```
self.update = (...patches) => {
  // Mint a fresh version id from our sequence counter and peer id.
  const version = `${self.next_seq++}@${self.id}`;
  const message = {
    type: "update",
    version,
    parents: { ...self.current_version },
    patches,
    ackme: Math.random().toString(36).slice(2),
  };
  self.receive(message);
  return version;
};
/// # antimatter_crdt.ackme()
///
/// Initiate sending a `ackme` message to try and establish whether certain versions can be pruned.
///
/// ``` js
/// antimatter_crdt.ackme()
/// ```
self.ackme = () => {
  // Start from the current frontier, widened with any version-group members.
  const versions = { ...self.current_version };
  for (const v of Object.keys(versions)) {
    const group = self.version_groups[v];
    if (group) for (const member of group) versions[member] = true;
  }
  const ackme = Math.random().toString(36).slice(2);
  self.receive({ type: "ackme", ackme, versions });
  return ackme;
};
// Mark every in-flight ackme as cancelled so late acks are ignored.
function cancel_ackmes() {
  Object.values(self.ackmes).forEach((m) => {
    m.cancelled = true;
  });
}
// Build a fissure record protecting every version that is not yet globally
// acked (versions on the acked boundary itself stay protected too).
// Returns undefined when there is nothing to protect.
function create_fissure(peer, conn) {
  const acked = self.ancestors(self.acked_boundary);
  const versions = {};
  for (const v of Object.keys(self.T)) {
    if (!acked[v] || self.acked_boundary[v]) versions[v] = true;
  }
  if (!Object.keys(versions).length) return;
  return { a: self.id, b: peer, conn, versions, time: get_time() };
}
// When both halves of a fissure (a:b and its twin b:a) are present, the
// fissure is healed: blank out the protected versions on both records, and
// re-expose those versions (plus their descendants) as un-acked so they can
// go through the ack process again.
function resolve_fissures() {
  const unfissured = {};
  const strip = (key, record) => {
    // Remember what this record was protecting, then clear its version set.
    for (const v of Object.keys(record.versions)) unfissured[v] = true;
    self.fissures[key] = { ...record, versions: {} };
  };
  for (const [key, f] of Object.entries(self.fissures)) {
    const twin_key = f.b + ":" + f.a + ":" + f.conn;
    const twin = self.fissures[twin_key]; // read fresh: may have been stripped already
    if (!twin) continue;
    if (Object.keys(f.versions).length) strip(key, f);
    if (Object.keys(twin.versions).length) strip(twin_key, twin);
  }
  if (!Object.keys(unfissured).length) return;
  cancel_ackmes();
  const acked = self.ancestors(self.acked_boundary);
  const freed = self.descendants(unfissured, true);
  for (const v of Object.keys(freed)) {
    if (acked[v]) delete acked[v];
  }
  self.acked_boundary = self.get_leaves(acked);
}
// Prune healed/expired fissures and collapse fully-acknowledged spans of the
// version DAG into "bubbles".
// * just_checking: do not mutate — just return true if any pruning is possible
//   (t is forced to Infinity and a copy of the fissure map is used).
// * t: only cancel matched fissure pairs whose records were stamped at or
//   before this connection counter.
// * just_versions: when checking, ignore prunable fissures and only report
//   prunable versions.
// Returns true when checking finds prunable state; otherwise returns undefined.
function prune(just_checking, t, just_versions) {
if (just_checking) t = Infinity;
let fissures = just_checking ? { ...self.fissures } : self.fissures;
// Cancel matched fissure pairs (a:b together with its twin b:a).
Object.entries(fissures).forEach((x) => {
var other_key = x[1].b + ":" + x[1].a + ":" + x[1].conn;
var other = fissures[other_key];
if (other && x[1].t <= t && other.t <= t) {
delete fissures[x[0]];
delete fissures[other_key];
}
});
// Expire fissures older than fissure_lifetime, if configured.
if (self.fissure_lifetime != null) {
var now = get_time();
// NOTE(review): `f.time = now` mutates the shared fissure record even when
// just_checking (the map copy above is shallow) — confirm intended.
Object.entries(fissures).forEach(([k, f]) => {
if (f.time == null) f.time = now;
if (f.time <= now - self.fissure_lifetime) {
delete fissures[k];
}
});
}
if (
just_checking &&
!just_versions &&
Object.keys(fissures).length < Object.keys(self.fissures).length
)
return true;
// Versions protected by surviving fissures (and, when actually mutating,
// anything not yet globally acked) must not be pruned away.
var restricted = {};
Object.values(fissures).forEach((f) => {
Object.keys(f.versions).forEach((v) => (restricted[v] = true));
});
if (!just_checking) {
var acked = self.ancestors(self.acked_boundary);
Object.keys(self.T).forEach((x) => {
if (!acked[x]) restricted[x] = true;
});
}
let children = self.get_child_map();
let { parent_sets, child_sets } = get_parent_and_child_sets(children);
let to_bubble = {};
// Assign `v` and all of its ancestors to `bubble`.
function mark_bubble(v, bubble) {
if (to_bubble[v]) return;
to_bubble[v] = bubble;
for (let vv of Object.keys(self.T[v])) mark_bubble(vv, bubble);
}
let visited = {};
// Depth-first walk from current_version toward the roots, looking for bubbles
// to collapse (or, when just_checking, merely detecting that one exists).
function f(cur) {
if (!self.T[cur] || visited[cur]) return;
visited[cur] = true;
if (
to_bubble[cur] == null &&
parent_sets[cur] &&
!parent_sets[cur].done
) {
parent_sets[cur].done = true;
let bottom = parent_sets[cur].members;
let top = find_one_bubble(bottom, children, child_sets, restricted);
if (top) {
if (just_checking) return true;
let bottom_array = Object.keys(bottom).sort();
let top_array = Object.keys(top);
raw_add_version_group(bottom_array);
let bubble = [bottom_array[0], top_array[0]];
for (let v of top_array) to_bubble[v] = bubble;
for (let v of bottom_array) mark_bubble(v, bubble);
}
}
if (to_bubble[cur] == null) {
let top = find_one_bubble(
{ [cur]: true },
children,
child_sets,
restricted
);
if (top && !top[cur]) {
if (just_checking) return true;
let bubble = [cur, Object.keys(top)[0]];
for (let v of Object.keys(top)) to_bubble[v] = bubble;
mark_bubble(bubble[0], bubble);
} else {
to_bubble[cur] = [cur, cur];
}
}
// cur may have been bubbled away above; presumably the fallback looks it up
// via its group's canonical id — TODO confirm.
return Object.keys(
self.T[cur] || self.T[self.version_groups[cur][0]]
).some(f);
}
if (Object.keys(self.current_version).some(f) && just_checking)
return true;
// NOTE(review): when just_checking found nothing, this still runs with only
// identity [cur, cur] bubbles — presumably a no-op in apply_bubbles; confirm.
self.apply_bubbles(to_bubble);
// Drop ackmes whose versions no longer exist, and clean up ackme_map.
for (let [k, m] of Object.entries(self.ackmes)) {
let vs = Object.keys(m.versions);
if (
!vs.length ||
!vs.every((v) => self.T[v] || self.version_groups[v])
) {
delete self.ackmes[k];
delete self.ackme_map[m.key][m.id];
if (!Object.keys(self.ackme_map[m.key]).length)
delete self.ackme_map[m.key];
}
}
// Drop version groups whose canonical member left the DAG.
for (let [v, vs] of Object.entries(self.version_groups)) {
if (!self.T[vs[0]]) delete self.version_groups[v];
}
}
return self;
};
/// ## create_json_crdt([init])
///
/// Create a new `json_crdt` object (or start with `init`, and add stuff to that).
///
/// ``` js
/// var json_crdt = create_json_crdt()
/// ```
create_json_crdt = (self) => {
  self = self || {};
  // S: root of the CRDT state tree (null until the first version arrives).
  if (self.S === undefined) self.S = null;
  // T: the time DAG — maps each version id to its set of parent versions.
  self.T = self.T || {};
  // root_version: the first version ever added (see add_version).
  if (self.root_version === undefined) self.root_version = null;
  // current_version: the current leaves of the time DAG.
  self.current_version = self.current_version || {};
  // version_cache: remembered update messages per version, rebuilt lazily.
  self.version_cache = self.version_cache || {};
  // "lit" nodes wrap plain JSON values that carry no edit history;
  // primitives are stored bare since they can't be confused with nodes.
  let is_lit = (x) => !x || typeof x != "object" || x.t == "lit";
  let get_lit = (x) => (x && typeof x == "object" && x.t == "lit" ? x.S : x);
  let make_lit = (x) => (x && typeof x == "object" ? { t: "lit", S: x } : x);
/// # json_crdt.read()
///
/// Returns an instance of the `json` object represented by this json_crdt data-structure.
///
/// ``` js
/// console.log(json_crdt.read())
/// ```
// Materialize the current JSON value. When no ancestor filter is
// supplied, every version is considered visible.
self.read = (is_anc) => raw_read(self.S, is_anc || (() => true));
// Reconstruct a plain JSON value from a CRDT subtree, considering only
// versions for which is_anc returns true.
function raw_read(x, is_anc) {
  // Primitives (and null) are already plain values.
  if (!x || typeof x !== "object") return x;
  switch (x.t) {
    case "lit":
      // Deep-copy so callers can't mutate our stored literal.
      return JSON.parse(JSON.stringify(x.S));
    case "val":
      // A register: read whatever single value currently wins.
      return raw_read(sequence_crdt.get(x.S, 0, is_anc), is_anc);
    case "obj": {
      const result = {};
      for (const [key, sub] of Object.entries(x.S)) {
        const value = raw_read(sub, is_anc);
        // A null/undefined value means the key has been deleted.
        if (value != null) result[key] = value;
      }
      return result;
    }
    case "arr": {
      const items = [];
      sequence_crdt.traverse(
        x.S,
        is_anc,
        (node, _, __, ___, ____, deleted) => {
          if (!deleted)
            node.elems.forEach((el) => items.push(raw_read(el, is_anc)));
        },
        true
      );
      return items;
    }
    case "str": {
      const chunks = [];
      sequence_crdt.traverse(
        x.S,
        is_anc,
        (node, _, __, ___, ____, deleted) => {
          if (!deleted) chunks.push(node.elems);
        },
        true
      );
      return chunks.join("");
    }
    default:
      throw Error("bad");
  }
}
/// # json_crdt.generate_braid(versions)
///
/// Returns an array of `update` messages that each look like this: `{version, parents, patches, sort_keys}`, such that if we pass all these messages to `antimatter_crdt.receive()`, we'll reconstruct the data in this `json_crdt` data-structure, assuming the recipient already has the given `versions` (each version is represented as a key in an object, and each value is `true`).
///
/// ``` js
/// json_crdt.generate_braid({
/// alice2: true,
/// bob3: true
/// })
/// ```
self.generate_braid = (versions) => {
  // Anything the recipient already has (plus its ancestry) can be skipped.
  var anc =
    versions && Object.keys(versions).length
      ? self.ancestors(versions, true)
      : {};
  var is_anc = (x) => anc[x];
  if (Object.keys(self.T).length === 0) return [];
  // Serve cached update messages where available; rebuild (and re-cache)
  // any that were invalidated (cached as null).
  return Object.entries(self.version_cache)
    .filter((x) => !is_anc(x[0]))
    .map(([version, update_message]) => {
      return (self.version_cache[version] =
        update_message || generate_update_message(version));
    });
  function generate_update_message(version) {
    // A parentless version is a snapshot: emit the whole state as seen
    // through that version alone.
    if (!Object.keys(self.T[version]).length) {
      return {
        version,
        parents: {},
        patches: [{ range: "", content: self.read((v) => v == version) }],
      };
    }
    var is_lit = (x) => !x || typeof x !== "object" || x.t === "lit";
    var get_lit = (x) =>
      x && typeof x === "object" && x.t === "lit" ? x.S : x;
    // For patch extraction, "ancestor" means strictly before `version`.
    var ancs = self.ancestors({ [version]: true });
    delete ancs[version];
    var is_anc = (x) => ancs[x];
    var path = [];
    var patches = [];
    var sort_keys = {};
    recurse(self.S);
    // Walk the CRDT tree collecting the splices contributed by `version`
    // at each node, building JSON-path ranges up in `path` as we descend.
    function recurse(x) {
      if (is_lit(x)) {
        // Literals carry no per-version edit history — nothing to emit.
      } else if (x.t === "val") {
        sequence_crdt
          .generate_braid(x.S, version, is_anc, raw_read)
          .forEach((s) => {
            if (s[2].length) {
              patches.push({ range: path.join(""), content: s[2][0] });
              if (s[3]) sort_keys[patches.length - 1] = s[3];
            }
          });
        sequence_crdt.traverse(x.S, is_anc, (node) => {
          node.elems.forEach(recurse);
        });
      } else if (x.t === "arr") {
        sequence_crdt.generate_braid(x.S, version, is_anc).forEach((s) => {
          patches.push({
            range: `${path.join("")}[${s[0]}:${s[0] + s[1]}]`,
            content: s[2],
          });
          if (s[3]) sort_keys[patches.length - 1] = s[3];
        });
        var i = 0;
        sequence_crdt.traverse(x.S, is_anc, (node) => {
          node.elems.forEach((e) => {
            path.push(`[${i++}]`);
            recurse(e);
            path.pop();
          });
        });
      } else if (x.t === "obj") {
        Object.entries(x.S).forEach((e) => {
          path.push("[" + JSON.stringify(e[0]) + "]");
          recurse(e[1]);
          path.pop();
        });
      } else if (x.t === "str") {
        sequence_crdt.generate_braid(x.S, version, is_anc).forEach((s) => {
          patches.push({
            range: `${path.join("")}[${s[0]}:${s[0] + s[1]}]`,
            content: s[2],
          });
          if (s[3]) sort_keys[patches.length - 1] = s[3];
        });
      }
    }
    return {
      version,
      parents: { ...self.T[version] },
      patches,
      sort_keys,
    };
  }
};
/// # json_crdt.apply_bubbles(to_bubble)
///
/// This method helps prune away meta data and compress stuff when we have determined that certain versions can be renamed to other versions – these renamings are expressed in `to_bubble`, where keys are versions and values are "bubbles", each bubble is represented with an array of two elements, the first element is the "bottom" of the bubble, and the second element is the "top" of the bubble. We will use the "bottom" as the new name for the version, and we'll use the "top" as the new parents.
///
/// ``` js
/// json_crdt.apply_bubbles({
/// alice4: ['bob5', 'alice4'],
/// bob5: ['bob5', 'alice4']
/// })
/// ```
self.apply_bubbles = (to_bubble) => {
  // Pass 1: push renamings down into the CRDT tree, collapsing any
  // subtree whose history has fully resolved into a plain literal.
  function recurse(x) {
    if (is_lit(x)) return x;
    if (x.t == "val") {
      sequence_crdt.apply_bubbles(x.S, to_bubble);
      // A register only needs its first (winning) element per node.
      sequence_crdt.traverse(
        x.S,
        () => true,
        (node) => {
          node.elems = node.elems.slice(0, 1).map(recurse);
        },
        true
      );
      // Single literal element and no branches left: collapse to it.
      if (
        x.S.nexts.length == 0 &&
        !x.S.next &&
        x.S.elems.length == 1 &&
        is_lit(x.S.elems[0])
      )
        return x.S.elems[0];
      return x;
    }
    if (x.t == "arr") {
      sequence_crdt.apply_bubbles(x.S, to_bubble);
      sequence_crdt.traverse(
        x.S,
        () => true,
        (node) => {
          node.elems = node.elems.map(recurse);
        },
        true
      );
      // All elements literal, nothing deleted, no branches: collapse.
      if (
        x.S.nexts.length == 0 &&
        !x.S.next &&
        x.S.elems.every(is_lit) &&
        !Object.keys(x.S.deleted_by).length
      )
        return { t: "lit", S: x.S.elems.map(get_lit) };
      return x;
    }
    if (x.t == "obj") {
      Object.entries(x.S).forEach((e) => {
        var y = (x.S[e[0]] = recurse(e[1]));
        // A null value means the key has been deleted.
        if (y == null) delete x.S[e[0]];
      });
      if (Object.values(x.S).every(is_lit)) {
        var o = {};
        Object.entries(x.S).forEach((e) => (o[e[0]] = get_lit(e[1])));
        return { t: "lit", S: o };
      }
      return x;
    }
    if (x.t == "str") {
      sequence_crdt.apply_bubbles(x.S, to_bubble);
      // Fully linear, nothing deleted: collapse to a plain string.
      if (
        x.S.nexts.length == 0 &&
        !x.S.next &&
        !Object.keys(x.S.deleted_by).length
      )
        return x.S.elems;
      return x;
    }
  }
  self.S = recurse(self.S);
  // Pass 2: rewrite the time DAG itself. Each bubble is [bottom, top]:
  // `bottom` is the surviving name, `top`'s parents become its parents.
  Object.entries(to_bubble).forEach(([version, bubble]) => {
    if (!self.T[version]) return;
    // Optional hook (set elsewhere) for tracking renamed versions.
    if (self.my_where_are_they_now)
      self.my_where_are_they_now[version] = bubble[0];
    // The bubble's bottom takes over the top's parents...
    if (version === bubble[1]) self.T[bubble[0]] = self.T[bubble[1]];
    // ...and every version other than the bottom disappears.
    if (version !== bubble[0]) {
      if (self.root_version == version) self.root_version = bubble[0];
      delete self.T[version];
      delete self.version_cache[version];
      delete self.acked_boundary[version];
      delete self.current_version[version];
      // If this version was a group representative, drop the whole group.
      if (
        self.version_groups[version] &&
        self.version_groups[version][0] == version
      ) {
        for (let v of self.version_groups[version]) {
          delete self.version_groups[v];
        }
      }
      // Remove the vanished version from every remaining parent set.
      for (let [k, parents] of Object.entries(self.T)) {
        self.T[k] = parents = { ...parents };
        for (let p of Object.keys(parents)) {
          if (p == version) delete parents[p];
        }
      }
    } else self.version_cache[version] = null; // renamed in place: cached message is stale
  });
  // If history has collapsed to one fully-acknowledged leaf and there are
  // no fissures, flatten the whole state to a single literal snapshot.
  var leaves = Object.keys(self.current_version);
  var acked_boundary = Object.keys(self.acked_boundary);
  var fiss = Object.keys(self.fissures);
  if (
    leaves.length == 1 &&
    acked_boundary.length == 1 &&
    leaves[0] == acked_boundary[0] &&
    fiss.length == 0
  ) {
    self.T = { [leaves[0]]: {} };
    self.S = make_lit(self.read());
  }
};
/// # json_crdt.add_version(version, parents, patches[, sort_keys])
///
/// The main method for modifying a `json_crdt` data structure.
///
/// * `version`: Unique string associated with this edit.
/// * `parents`: A set of versions that this version is aware of, represented as a map with versions as keys, and values of `true`.
/// * `patches`: An array of patches, each patch looks like this `{range: '.life.meaning', content: 42}`.
/// * `sort_keys`: (optional) An object where each key is an index, and the value is a sort_key to use with the patch at the given index in the `patches` array – a sort_key overrides the version for a patch for the purposes of sorting. This can be useful after doing some pruning.
///
/// ``` js
/// json_crdt.add_version(
/// 'alice6',
/// {
/// alice5: true,
/// bob7: true
/// },
/// [
/// {
/// range: '.a.b',
/// content: 'c'
/// }
/// ]
/// )
/// ```
self.add_version = (version, parents, patches, sort_keys) => {
  // Ignore versions we've already incorporated.
  if (self.T[version]) return;
  // The first version ever seen becomes the root of the DAG.
  if (self.root_version == null) self.root_version = version;
  // Record the version in the time DAG, and cache its raw update message
  // (deep-copied so later mutation of the arguments can't corrupt it).
  self.T[version] = { ...parents };
  self.version_cache[version] = JSON.parse(
    JSON.stringify({
      version,
      parents,
      patches,
      sort_keys,
    })
  );
  // This version replaces its parents among the DAG's leaves.
  Object.keys(parents).forEach((k) => {
    if (self.current_version[k]) delete self.current_version[k];
  });
  self.current_version[version] = true;
  if (!sort_keys) sort_keys = {};
  // A parentless version is a snapshot: its first patch IS the new state.
  if (!Object.keys(parents).length) {
    var parse = self.parse_patch(patches[0]);
    self.S = make_lit(parse.value);
    return patches;
  }
  // is_anc decides which existing versions this edit could "see" when it
  // was made; positions in its patches are interpreted in that view.
  let is_anc;
  if (parents == self.current_version) {
    // Fast path: editing on top of everything we have (all but itself).
    is_anc = (_version) => _version != version;
  } else {
    let ancs = self.ancestors(parents);
    is_anc = (_version) => ancs[_version];
  }
  // Patches re-expressed against the current fully-merged state.
  var rebased_patches = [];
  patches.forEach((patch, i) => {
    var sort_key = sort_keys[i];
    var parse = self.parse_patch(patch);
    var cur = resolve_path(parse);
    if (!parse.slice) {
      // Whole-value assignment (or deletion) of a register.
      if (cur.t != "val") throw Error("bad");
      var len = sequence_crdt.length(cur.S, is_anc);
      sequence_crdt.add_version(
        cur.S,
        version,
        [[0, len, [parse.delete ? null : make_lit(parse.value)], sort_key]],
        is_anc
      );
      rebased_patches.push(patch);
    } else {
      // Range splice into a string or array.
      if (typeof parse.value === "string" && cur.t !== "str")
        throw Error(
          `Cannot splice string ${JSON.stringify(
            parse.value
          )} into non-string`
        );
      if (parse.value instanceof Array && cur.t !== "arr")
        throw Error(
          `Cannot splice array ${JSON.stringify(
            parse.value
          )} into non-array`
        );
      if (parse.value instanceof Array)
        parse.value = parse.value.map((x) => make_lit(x));
      var r0 = parse.slice[0];
      var r1 = parse.slice[1];
      // Negative indices (including -0) count from the end.
      if (r0 < 0 || Object.is(r0, -0) || r1 < 0 || Object.is(r1, -0)) {
        let len = sequence_crdt.length(cur.S, is_anc);
        if (r0 < 0 || Object.is(r0, -0)) r0 = len + r0;
        if (r1 < 0 || Object.is(r1, -0)) r1 = len + r1;
      }
      var rebased_splices = sequence_crdt.add_version(
        cur.S,
        version,
        [[r0, r1 - r0, parse.value, sort_key]],
        is_anc
      );
      // Report where the splice landed in the fully-merged sequence.
      for (let rebased_splice of rebased_splices)
        rebased_patches.push({
          range: `${parse.path
            .map((x) => `[${JSON.stringify(x)}]`)
            .join("")}[${rebased_splice[0]}:${
            rebased_splice[0] + rebased_splice[1]
          }]`,
          content: rebased_splice[2],
        });
    }
  });
  // Walk parse.path through the tree, upgrading plain literals into CRDT
  // nodes along the way so the edit has something to attach to.
  function resolve_path(parse) {
    var cur = self.S;
    // The root must be a CRDT node, not a bare literal.
    if (!cur || typeof cur != "object" || cur.t == "lit")
      cur = self.S = {
        t: "val",
        S: sequence_crdt.create_node(self.root_version, [cur]),
      };
    // Remember where `cur` lives so we can replace it inside its parent.
    var prev_S = null;
    var prev_i = 0;
    for (var i = 0; i < parse.path.length; i++) {
      var key = parse.path[i];
      if (cur.t == "val")
        cur = sequence_crdt.get((prev_S = cur.S), (prev_i = 0), is_anc);
      if (cur.t == "lit") {
        // Expand a literal object/array into an editable CRDT node.
        var new_cur = {};
        if (cur.S instanceof Array) {
          new_cur.t = "arr";
          new_cur.S = sequence_crdt.create_node(
            self.root_version,
            cur.S.map((x) => make_lit(x))
          );
        } else {
          if (typeof cur.S != "object") throw Error("bad");
          new_cur.t = "obj";
          new_cur.S = {};
          Object.entries(cur.S).forEach(
            (e) => (new_cur.S[e[0]] = make_lit(e[1]))
          );
        }
        cur = new_cur;
        sequence_crdt.update(prev_S, prev_i, cur, is_anc);
      }
      if (cur.t == "obj") {
        // Object fields are registers ("val" nodes) wrapping their value.
        let x = cur.S[key];
        if (!x || typeof x != "object" || x.t == "lit")
          x = cur.S[key] = {
            t: "val",
            S: sequence_crdt.create_node(self.root_version, [
              x == null ? null : x,
            ]),
          };
        cur = x;
      } else if (i == parse.path.length - 1 && !parse.slice) {
        // Final numeric index into a sequence: treat as a 1-element slice.
        parse.slice = [key, key + 1];
        parse.value = cur.t == "str" ? parse.value : [parse.value];
      } else if (cur.t == "arr") {
        cur = sequence_crdt.get((prev_S = cur.S), (prev_i = key), is_anc);
      } else throw Error("bad");
    }
    if (parse.slice) {
      if (cur.t == "val")
        cur = sequence_crdt.get((prev_S = cur.S), (prev_i = 0), is_anc);
      if (typeof cur == "string") {
        // Upgrade a plain string into a "str" CRDT node.
        cur = {
          t: "str",
          S: sequence_crdt.create_node(self.root_version, cur),
        };
        sequence_crdt.update(prev_S, prev_i, cur, is_anc);
      } else if (cur.t == "lit") {
        // Upgrade a literal array into an "arr" CRDT node.
        if (!(cur.S instanceof Array)) throw Error("bad");
        cur = {
          t: "arr",
          S: sequence_crdt.create_node(
            self.root_version,
            cur.S.map((x) => make_lit(x))
          ),
        };
        sequence_crdt.update(prev_S, prev_i, cur, is_anc);
      }
    }
    return cur;
  }
  return rebased_patches;
};
/// # json_crdt.get_child_map()
///
/// Returns a map where each key is a version, and each value is a set of child versions, represented as a map with versions as keys, and values of `true`.
///
/// ``` js
/// json_crdt.get_child_map()
/// ```
// Invert the time DAG: for each version, collect the set of versions
// that list it as a parent.
self.get_child_map = () => {
  const children = {};
  for (const [version, parents] of Object.entries(self.T)) {
    for (const parent of Object.keys(parents)) {
      if (!children[parent]) children[parent] = {};
      children[parent][version] = true;
    }
  }
  return children;
};
/// # json_crdt.ancestors(versions, ignore_nonexistent=false)
///
/// Gather `versions` and all their ancestors into a set. `versions` is a set of versions, i.e. a map with version-keys and values of true – we'll basically return a larger set. If `ignore_nonexistent` is `true`, then we won't throw an exception if we encounter a version that we don't have in our data-structure.
///
/// ``` js
/// json_crdt.ancestors({
/// alice12: true,
/// bob10: true
/// })
/// ```
// Collect `versions` plus every ancestor reachable through the time DAG.
// Unknown versions throw unless ignore_nonexistent is set.
self.ancestors = (versions, ignore_nonexistent) => {
  const seen = {};
  const visit = (version) => {
    if (seen[version]) return;
    if (!self.T[version]) {
      if (ignore_nonexistent) return;
      throw Error(`The version ${version} no existo`);
    }
    seen[version] = true;
    for (const parent of Object.keys(self.T[version])) visit(parent);
  };
  for (const version of Object.keys(versions)) visit(version);
  return seen;
};
/// # json_crdt.descendants(versions, ignore_nonexistent=false)
///
/// Gather `versions` and all their descendants into a set. `versions` is a set of versions, i.e. a map with version-keys and values of true – we'll basically return a larger set. If `ignore_nonexistent` is `true`, then we won't throw an exception if we encounter a version that we don't have in our data-structure.
///
/// ``` js
/// json_crdt.descendants({
/// alice12: true,
/// bob10: true
/// })
/// ```
// Collect `versions` plus every descendant reachable through the time
// DAG. Unknown versions throw unless ignore_nonexistent is set.
self.descendants = (versions, ignore_nonexistent) => {
  const children = self.get_child_map();
  const seen = {};
  const visit = (version) => {
    if (seen[version]) return;
    if (!self.T[version]) {
      if (ignore_nonexistent) return;
      throw Error(`The version ${version} no existo`);
    }
    seen[version] = true;
    for (const child of Object.keys(children[version] || {})) visit(child);
  };
  for (const version of Object.keys(versions)) visit(version);
  return seen;
};
/// # json_crdt.get_leaves(versions)
///
/// Returns a set of versions from `versions` which don't also have a child in `versions`. `versions` is itself a set of versions, represented as an object with version keys and `true` values, and the return value is represented the same way.
// Keep only the versions in `versions` that are not the parent of some
// other version in the set.
self.get_leaves = (versions) => {
  const leaves = { ...versions };
  for (const v of Object.keys(versions))
    for (const parent of Object.keys(self.T[v])) delete leaves[parent];
  return leaves;
};
/// # json_crdt.parse_patch(patch)
///
/// Takes a patch in the form `{range, content}`, and returns an object of the form `{path: [...], [slice: [...]], [delete: true], content}`; basically calling `parse_json_path` on `patch.range`, and adding `patch.content` along for the ride.
// Parse patch.range into {path, slice?, delete?} and attach the patch's
// content as `value`.
self.parse_patch = (patch) =>
  Object.assign(self.parse_json_path(patch.range), { value: patch.content });
/// # json_crdt.parse_json_path(json_path)
///
/// Parses the string `json_path` into an object like: `{path: [...], [slice: [...]], [delete: true]}`.
///
/// * `a.b[3]` --> `{path: ['a', 'b', 3]}`
/// * `a.b[3:5]` --> `{path: ['a', 'b'], slice: [3, 5]}`
/// * `delete a.b` --> `{path: ['a', 'b'], delete: true}`
///
/// ``` js
/// console.log(json_crdt.parse_json_path('a.b.c'))
/// ```
// Tokenize a json path like `delete a.b[3:5]` into
// {path: [...], slice?: [lo, hi], delete?: true}.
self.parse_json_path = (json_path) => {
  const parsed = { path: [] };
  const token_re =
    /^(delete)\s+|\.?([^\.\[ =]+)|\[((\-?\d+)(:\-?\d+)?|"(\\"|[^"])*")\]/g;
  for (let match; (match = token_re.exec(json_path)); ) {
    const [, del_kw, bare_key, bracketed, slice_lo, slice_hi] = match;
    if (del_kw) parsed.delete = true;
    else if (bare_key) parsed.path.push(bare_key);
    else if (bracketed && slice_hi)
      parsed.slice = [JSON.parse(slice_lo), JSON.parse(slice_hi.substr(1))];
    else if (bracketed) parsed.path.push(JSON.parse(bracketed));
  }
  return parsed;
};
return self;
};
/// # sequence_crdt.create_node(version, elems, [end_cap, sort_key])
///
/// Creates a node for a `sequence_crdt` sequence CRDT with the given properties. The resulting node will look like this:
///
/// ``` js
/// {
/// version, // globally unique string
/// elems, // a string or array representing actual data elements of the underlying sequence
/// end_cap, // this is useful for dealing with replace operations
/// sort_key, // version to pretend this is for the purposes of sorting
/// deleted_by : {}, // if this node gets deleted, we'll mark it here
/// nexts : [], // array of nodes following this one
/// next : null // final node following this one (after all the nexts)
/// }
///
/// var sequence_node = sequence_crdt.create_node('alice1', 'hello')
/// ```
sequence_crdt.create_node = (version, elems, end_cap, sort_key) => {
  // A fresh node: no deletions recorded yet, and no successors.
  return {
    version,
    sort_key,
    elems,
    end_cap,
    deleted_by: {},
    nexts: [],
    next: null,
  };
};
/// # sequence_crdt.generate_braid(root_node, version, is_anc)
///
/// Reconstructs an array of splice-information which can be passed to `sequence_crdt.add_version` in order to add `version` to another `sequence_crdt` instance – the returned array looks like: `[[insert_pos, delete_count, insert_elems, sort_key, ...], ...]`. `is_anc` is a function which accepts a version string and returns `true` if and only if the given version is an ancestor of `version` (i.e. a version which the author of `version` knew about when they created that version).
///
/// ``` js
/// var root_node = sequence_crdt.create_node('root', '')
/// sequence_crdt.add_version(root_node, 'alice1', [[0, 0, 'hello']])
/// console.log(sequence_crdt.generate_braid(root_node, 'alice1', x => false)) // outputs [[0, 0, "hello", ...]]
/// ```
sequence_crdt.generate_braid = (S, version, is_anc, read_array_elements) => {
  if (!read_array_elements) read_array_elements = (x) => x;
  // Each splice is [offset, delete_count, insert_elems, sort_key, tag],
  // where the internal tag is "i" (pure insert), "r" (insert after an
  // end-capped node, i.e. part of a replace) or "d" (delete). Callers
  // only rely on the first four entries.
  var splices = [];
  function add_ins(offset, ins, sort_key, end_cap, is_row_header) {
    if (typeof ins !== "string")
      ins = ins.map((x) => read_array_elements(x, () => false));
    // Merge with the previous splice when this insert directly continues it.
    if (splices.length > 0) {
      var prev = splices[splices.length - 1];
      if (
        prev[0] + prev[1] === offset &&
        !end_cap &&
        (!is_row_header || prev[3] == sort_key) &&
        (prev[4] === "i" || (prev[4] === "r" && prev[1] === 0))
      ) {
        prev[2] = prev[2].concat(ins);
        return;
      }
    }
    splices.push([offset, 0, ins, sort_key, end_cap ? "r" : "i"]);
  }
  function add_del(offset, del, ins) {
    // Extend the previous delete/replace when contiguous.
    if (splices.length > 0) {
      var prev = splices[splices.length - 1];
      if (prev[0] + prev[1] === offset && prev[4] !== "i") {
        prev[1] += del;
        return;
      }
    }
    splices.push([offset, del, ins, null, "d"]);
  }
  // offset counts only elements visible to `is_anc` (the recipient's view).
  var offset = 0;
  function helper(node, _version, end_cap, is_row_header) {
    if (_version === version) {
      // This node was inserted by `version` itself.
      add_ins(
        offset,
        node.elems.slice(0),
        node.sort_key,
        end_cap,
        is_row_header
      );
    } else if (node.deleted_by[version] && node.elems.length > 0) {
      // This node's elements were deleted by `version`.
      add_del(offset, node.elems.length, node.elems.slice(0, 0));
    }
    // Only nodes visible to the recipient advance the offset.
    if (
      (!_version || is_anc(_version)) &&
      !Object.keys(node.deleted_by).some(is_anc)
    ) {
      offset += node.elems.length;
    }
    node.nexts.forEach((next) =>
      helper(next, next.version, node.end_cap, true)
    );
    if (node.next) helper(node.next, _version);
  }
  helper(S, null);
  splices.forEach((s) => {
    // if we have replaces with 0 deletes,
    // make them have at least 1 delete..
    // this can happen when there are multiple replaces of the same text,
    // and our code above will associate those deletes with only one of them
    if (s[4] === "r" && s[1] === 0) s[1] = 1;
  });
  return splices;
};
/// # sequence_crdt.apply_bubbles(root_node, to_bubble)
///
/// This method helps prune away meta data and compress stuff when we have determined that certain versions can be renamed to other versions – these renamings are expressed in `to_bubble`, where keys are versions and values are "bubbles", each bubble is represented with an array of two elements, the first element is the "bottom" of the bubble, and the second element is the "top" of the bubble. We will use the "bottom" as the new name for the version, and we'll use the "top" as the new parents.
///
/// ``` js
/// sequence_crdt.apply_bubbles(root_node, {
/// alice4: ['bob5', 'alice4'],
/// bob5: ['bob5', 'alice4']
/// })
/// ```
sequence_crdt.apply_bubbles = (S, to_bubble) => {
  // Pass 1: rename each node's version (and deleted_by entries) to its
  // bubble's bottom version, preserving sort order via sort_key.
  sequence_crdt.traverse(
    S,
    () => true,
    (node) => {
      if (
        to_bubble[node.version] &&
        to_bubble[node.version][0] != node.version
      ) {
        if (!node.sort_key) node.sort_key = node.version;
        node.version = to_bubble[node.version][0];
      }
      for (var x of Object.keys(node.deleted_by)) {
        if (to_bubble[x]) {
          delete node.deleted_by[x];
          node.deleted_by[to_bubble[x][0]] = true;
        }
      }
    },
    true
  );
  // Append `next` at the very end of node's `next` chain.
  function set_nnnext(node, next) {
    while (node.next) node = node.next;
    node.next = next;
  }
  do_line(S, S.version);
  // Pass 2: walk a chain of nodes belonging to `version`, flattening
  // now-redundant branches and merging mergeable neighbors.
  function do_line(node, version) {
    var prev = null;
    while (node) {
      // If the first branch now carries this line's own version, the
      // branches can be linearized into the `next` chain.
      if (node.nexts[0] && node.nexts[0].version == version) {
        for (let i = 0; i < node.nexts.length; i++) {
          delete node.nexts[i].version;
          delete node.nexts[i].sort_key;
          set_nnnext(
            node.nexts[i],
            i + 1 < node.nexts.length ? node.nexts[i + 1] : node.next
          );
        }
        node.next = node.nexts[0];
        node.nexts = [];
      }
      // A node deleted by this line's own version can drop its contents.
      if (node.deleted_by[version]) {
        node.elems = node.elems.slice(0, 0);
        node.deleted_by = {};
        if (prev) {
          // Step back: the previous node may now merge with this one.
          node = prev;
          continue;
        }
      }
      var next = node.next;
      // Merge with the successor when one of them is empty, or they agree
      // exactly on who deleted them.
      if (
        !node.nexts.length &&
        next &&
        (!node.elems.length ||
          !next.elems.length ||
          (Object.keys(node.deleted_by).every((x) => next.deleted_by[x]) &&
            Object.keys(next.deleted_by).every((x) => node.deleted_by[x])))
      ) {
        if (!node.elems.length) node.deleted_by = next.deleted_by;
        node.elems = node.elems.concat(next.elems);
        node.end_cap = next.end_cap;
        node.nexts = next.nexts;
        node.next = next.next;
        continue;
      }
      // Drop an empty successor that leads nowhere.
      if (next && !next.elems.length && !next.nexts.length) {
        node.next = next.next;
        continue;
      }
      // Recurse into remaining branches, then advance along this chain.
      for (let n of node.nexts) do_line(n, n.version);
      prev = node;
      node = next;
    }
  }
};
/// # sequence_crdt.get(root_node, i, is_anc)
///
/// Returns the element at the `i`th position (0-based) in the `sequence_crdt` rooted at `root_node`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// var x = sequence_crdt.get(root_node, 2, {
/// alice1: true
/// })
/// ```
// Fetch the element at visible position i (0-based); null if out of range.
sequence_crdt.get = (S, i, is_anc) => {
  let found = null;
  let pos = 0;
  sequence_crdt.traverse(S, is_anc || (() => true), (node) => {
    const idx = i - pos;
    if (idx < node.elems.length) {
      found = node.elems[idx];
      return false; // stop the traversal — we have our element
    }
    pos += node.elems.length;
  });
  return found;
};
/// # sequence_crdt.update(root_node, i, v, is_anc)
///
/// Sets the element at the `i`th position (0-based) in the `sequence_crdt` rooted at `root_node` to the value `v`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// sequence_crdt.update(root_node, 2, 'x', {
/// alice1: true
/// })
/// ```
// Overwrite the element at visible position i (0-based) with v.
sequence_crdt.update = (S, i, v, is_anc) => {
  let pos = 0;
  sequence_crdt.traverse(S, is_anc || (() => true), (node) => {
    const idx = i - pos;
    if (idx < node.elems.length) {
      if (typeof node.elems == "string")
        // Strings are immutable: rebuild with the one position replaced.
        node.elems = node.elems.slice(0, idx) + v + node.elems.slice(idx + 1);
      else node.elems[idx] = v;
      return false; // stop the traversal — element updated
    }
    pos += node.elems.length;
  });
};
/// # sequence_crdt.length(root_node, is_anc)
///
/// Returns the length of the `sequence_crdt` rooted at `root_node`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// console.log(sequence_crdt.length(root_node, {
/// alice1: true
/// }))
/// ```
// Count the elements visible under is_anc.
sequence_crdt.length = (S, is_anc) => {
  let total = 0;
  sequence_crdt.traverse(S, is_anc || (() => true), (node) => {
    total += node.elems.length;
  });
  return total;
};
/// # sequence_crdt.break_node(node, break_position, end_cap, new_next)
///
/// This method breaks apart a `sequence_crdt` node into two nodes, each representing a subsequence of the sequence represented by the original node. The `node` parameter is modified into the first node, and the second node is returned. The first node represents the elements of the sequence before `break_position`, and the second node represents the rest of the elements. If `end_cap` is truthy, then the first node will have `end_cap` set – this is generally done if the elements in the second node are being replaced. This method will add `new_next` to the first node's `nexts` array.
///
/// ``` js
/// var node = sequence_crdt.create_node('alice1', 'hello') // node.elems == 'hello'
/// var second = sequence_crdt.break_node(node, 2) // now node.elems == 'he', and second.elems == 'llo'
/// ```
// Split `node` at position x. `node` keeps the head elements; the tail
// (returned) inherits the remaining elements, the deletion record, and
// the original successors.
sequence_crdt.break_node = (node, x, end_cap, new_next) => {
  const tail = sequence_crdt.create_node(
    null,
    node.elems.slice(x),
    node.end_cap
  );
  Object.assign(tail.deleted_by, node.deleted_by);
  Object.assign(tail, { nexts: node.nexts, next: node.next });
  Object.assign(node, {
    elems: node.elems.slice(0, x),
    end_cap,
    nexts: new_next ? [new_next] : [],
    next: tail,
  });
  return tail;
};
/// # sequence_crdt.add_version(root_node, version, splices, [is_anc])
///
/// This is the main method in sequence_crdt, used to modify the sequence. The modification must be given a unique `version` string, and the modification itself is represented as an array of `splices`, where each splice looks like this: `[position, num_elements_to_delete, elements_to_insert, optional_sort_key]`.
///
/// Note that all positions are relative to the original sequence, before any splices have been applied. Positions are counted by only considering nodes with versions which result in `true` when passed to `is_anc`. (and are not `deleted_by` any versions which return `true` when passed to `is_anc`).
///
/// ``` js
/// var node = sequence_crdt.create_node('alice1', 'hello')
/// sequence_crdt.add_version(node, 'alice2', [[5, 0, ' world']], v => v == 'alice1')
/// ```
sequence_crdt.add_version = (S, version, splices, is_anc) => {
  // Splices re-expressed against the fully-merged sequence (every version
  // and every deletion counted), returned to the caller.
  var rebased_splices = [];
  // Insert `to` among `nexts`, ordered by sort_key/version, so concurrent
  // inserts at the same spot resolve identically on every peer.
  function add_to_nexts(nexts, to) {
    var i = binarySearch(nexts, function (x) {
      if ((to.sort_key || to.version) < (x.sort_key || x.version)) return -1;
      if ((to.sort_key || to.version) > (x.sort_key || x.version)) return 1;
      return 0;
    });
    nexts.splice(i, 0, to);
  }
  var si = 0; // index of the splice currently being applied
  var delete_up_to = 0; // visible position up to which we are deleting
  var process_patch = (node, offset, has_nexts, prev, _version, deleted) => {
    var s = splices[si];
    if (!s) return;
    var sort_key = s[3];
    if (deleted) {
      // Node is invisible in this version's view, but a pure insert that
      // lands exactly here may still need to attach to it.
      if (s[1] == 0 && s[0] == offset) {
        if (node.elems.length == 0 && !node.end_cap && has_nexts) return;
        var new_node = sequence_crdt.create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        if (node.elems.length == 0 && !node.end_cap)
          add_to_nexts(node.nexts, new_node);
        else sequence_crdt.break_node(node, 0, undefined, new_node);
        si++;
      }
      // A replace starting here may attach its insertion after an
      // end-capped deleted node.
      if (
        delete_up_to <= offset &&
        s[1] &&
        s[2] &&
        s[0] == offset &&
        node.end_cap &&
        !has_nexts &&
        (node.next && node.next.elems.length) &&
        !Object.keys(node.next.deleted_by).some((version) => f(version))
      ) {
        delete_up_to = s[0] + s[1];
        var new_node = sequence_crdt.create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        add_to_nexts(node.nexts, new_node);
      }
      return;
    }
    // Pure insertion into a visible node.
    if (s[1] == 0) {
      var d = s[0] - (offset + node.elems.length);
      if (d > 0) return; // insertion point lies further along
      if (d == 0 && !node.end_cap && has_nexts) return; // a later node will host it
      var new_node = sequence_crdt.create_node(version, s[2], null, sort_key);
      fresh_nodes.add(new_node);
      if (d == 0 && !node.end_cap) {
        add_to_nexts(node.nexts, new_node);
      } else {
        sequence_crdt.break_node(node, s[0] - offset, undefined, new_node);
      }
      si++;
      return;
    }
    // Start of a delete/replace.
    if (delete_up_to <= offset) {
      var d = s[0] - (offset + node.elems.length);
      let add_at_end =
        d == 0 &&
        s[2] &&
        node.end_cap &&
        !has_nexts &&
        (node.next && node.next.elems.length) &&
        !Object.keys(node.next.deleted_by).some((version) => f(version));
      if (d > 0 || (d == 0 && !add_at_end)) return;
      delete_up_to = s[0] + s[1];
      if (s[2]) {
        // Replace: insert the replacement, end-capping the broken node.
        var new_node = sequence_crdt.create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        if (add_at_end) {
          add_to_nexts(node.nexts, new_node);
        } else {
          sequence_crdt.break_node(node, s[0] - offset, true, new_node);
        }
        return;
      } else {
        // Pure delete: split so the deletion starts on a node boundary.
        if (s[0] == offset) {
        } else {
          sequence_crdt.break_node(node, s[0] - offset);
          return;
        }
      }
    }
    // Continue (and possibly finish) a deletion in progress.
    if (delete_up_to > offset) {
      if (delete_up_to <= offset + node.elems.length) {
        if (delete_up_to < offset + node.elems.length) {
          sequence_crdt.break_node(node, delete_up_to - offset);
        }
        si++; // this splice is fully applied
      }
      node.deleted_by[version] = true;
      return;
    }
  };
  var f = is_anc || (() => true);
  var offset = 0; // position in this version's (is_anc) view
  var rebase_offset = 0; // position in the fully-merged view
  let fresh_nodes = new Set(); // nodes created by this call, for rebasing
  function traverse(node, prev, version) {
    if (!version || f(version)) {
      var has_nexts = node.nexts.find((next) => f(next.version));
      var deleted = Object.keys(node.deleted_by).some((version) =>
        f(version)
      );
      let rebase_deleted = Object.keys(node.deleted_by).length;
      process_patch(node, offset, has_nexts, prev, version, deleted);
      if (!deleted) offset += node.elems.length;
      // Text newly deleted by this call appears as a delete in the rebase.
      if (!rebase_deleted && Object.keys(node.deleted_by).length)
        rebased_splices.push([rebase_offset, node.elems.length, ""]);
    }
    // Nodes newly inserted by this call appear as inserts in the rebase.
    if (fresh_nodes.has(node))
      rebased_splices.push([rebase_offset, 0, node.elems]);
    if (!Object.keys(node.deleted_by).length)
      rebase_offset += node.elems.length;
    for (var next of node.nexts) traverse(next, null, next.version);
    if (node.next) traverse(node.next, node, version);
  }
  traverse(S, null, S.version);
  return rebased_splices;
};
/// # sequence_crdt.traverse(root_node, is_anc, callback, [view_deleted, tail_callback])
///
/// Traverses the subset of nodes in the tree rooted at `root_node` whose versions return `true` when passed to `is_anc`. For each node, `callback` is called with these parameters: `node, offset, has_nexts, prev, version, deleted`,
///
/// Where
/// - `node` is the current node being traversed
/// - `offset` says how many elements we have passed so far
/// - `has_nexts` is true if some of this node's `nexts` will be traversed according to `is_anc`
/// - `prev` is a pointer to the node whose `next` points to this one, or `null` if this is the root node
/// - `version` is the version of this node, or this node's `prev` if our version is `null`, or that node's `prev` if it is also `null`, etc
/// - `deleted` is true if this node is deleted according to `is_anc`
///
/// Usually we skip deleted nodes when traversing, but we'll include them if `view_deleted` is `true`.
///
/// `tail_callback` is an optional callback that will get called with a single parameter `node` after all of that node's children `nexts` and `next` have been traversed.
///
/// ``` js
/// sequence_crdt.traverse(node, () => true, node =>
/// process.stdout.write(node.elems))
/// ```
sequence_crdt.traverse = (S, f, cb, view_deleted, tail_cb) => {
var offset = 0;
function helper(node, prev, version) {
var has_nexts = node.nexts.find((next) => f(next.version));
var deleted = Object.keys(node.deleted_by).some((version) => f(version));
if (view_deleted || !deleted) {
if (cb(node, offset, has_nexts, prev, version, deleted) == false)
return true;
offset += node.elems.length;
}
for (var next of node.nexts)
if (f(next.version)) {
if (helper(next, null, next.version)) return true;
}
if (node.next) {
if (helper(node.next, node, version)) return true;
} else if (tail_cb) tail_cb(node);
}
helper(S, null, S.version);
};
// Generic binary search, adapted from
// https://stackoverflow.com/questions/22697936/binary-search-in-javascript
// `compare_fn` receives an element and returns >0 to continue right of it,
// <0 to continue left, or 0 on an exact match. Returns the matching index,
// or — when nothing matches — the insertion point that keeps `ar` ordered.
function binarySearch(ar, compare_fn) {
  var lo = 0;
  var hi = ar.length - 1;
  while (lo <= hi) {
    var mid = (lo + hi) >> 1;
    var cmp = compare_fn(ar[mid]);
    if (cmp > 0) lo = mid + 1;
    else if (cmp < 0) hi = mid - 1;
    else return mid;
  }
  return lo;
}
})();
// Export the public API when running under CommonJS (e.g. node); in the
// browser the three names above remain plain script globals.
if (typeof module != "undefined")
  module.exports = {
    create_antimatter_crdt,
    create_json_crdt,
    sequence_crdt,
  };
================================================
FILE: antimatter/doc.html
================================================
================================================
FILE: antimatter/package.json
================================================
{
"name": "@braidjs/antimatter",
"version": "0.0.34",
"description": "antimatter: a pruning algorithm for CRDTs and other mergeables",
"main": "antimatter.js",
"scripts": {
"test": "node test.js"
},
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org/antimatter"
}
================================================
FILE: antimatter/readme.md
================================================
# MOVED TO https://github.com/braid-org/antimatter
--
# antimatter: an algorithm that prunes CRDT/OT history
[Antimatter](https://braid.org/antimatter) is the world's first peer-to-peer synchronization algorithm that can prune its history in a network where peers disconnect, reconnect, and merge offline edits. Antimatter supports arbitrary simultaneous edits, from arbitrary peers, under arbitrary network delays and partitions, and guarantees full CRDT/OT consistency, while pruning unnecessary history within each partitioned subnet, and across subnets once they reconnect. In steady state, it prunes down to zero overhead. This lets you put synchronizing data structures in more parts of your software, without worrying about memory overhead.
This package implements an antimatter peer composed of three objects:
```js
var {create_antimatter_crdt, create_json_crdt, sequence_crdt} = require('@braidjs/antimatter')
```
- *antimatter_crdt*: created using `create_antimatter_crdt`, this object is a json_crdt with antimatter algorithm methods added to it so that it can communicate with other peers to learn which history can be pruned, and tells the underlying json_crdt object to prune it.
- *json_crdt*: created using `create_json_crdt`, this object is a pruneable JSON CRDT — "JSON" meaning it represents an arbitrary JSON data structure, and "CRDT" and "pruneable" having the same meaning as for sequence_crdt below. The json_crdt makes recursive use of sequence_crdt structures to represent arbitrary JSON (for instance, a map is represented with a sequence_crdt structure for each value, where the first element in the sequence is the value).
- *sequence_crdt*: methods to manipulate a pruneable sequence CRDT — "sequence" meaning it represents a javascript string or array, "CRDT" meaning this structure can be merged with other ones, and "pruneable" meaning that it supports an operation to remove meta-data when it is no longer needed (whereas CRDT's often keep track of this meta-data forever).
The Antimatter Algorithm was invented by Michael Toomim and Greg Little in the
[Braid Project](https://braid.org) of [Invisible College](https://invisible.college/).
[Click here to see more details, and the API side-by-side with the source code.](https://braid.org/antimatter)
================================================
FILE: antimatter/test.html
================================================
================================================
FILE: antimatter_ts/antimatter.js
================================================
/// # Software Architecture
/// The software is architected into three objects:
///
/// ``` js
/// var {create_antimatter_crdt, create_json_crdt, sequence_crdt} = require('@braidjs/antimatter')
/// ```
// v522
/// - *antimatter_crdt*: created using `create_antimatter_crdt`, this object is a json_crdt with antimatter algorithm methods added to it so that it can communicate with other peers to learn which history can be pruned, and tells the underlying json_crdt object to prune it.
var create_antimatter_crdt;
/// - *json_crdt*: created using `create_json_crdt`, this object is a pruneable
/// JSON CRDT — "JSON" meaning it represents an arbitrary JSON data structure, and
/// "CRDT" and "pruneable" having the same meaning as for sequence_crdt below. The
/// json_crdt makes recursive use of sequence_crdt structures to represent
/// arbitrary JSON (for instance, a map is represented with a sequence_crdt
/// structure for each value, where the first element in the sequence is the
/// value).
var create_json_crdt;
/// - *sequence_crdt*: methods to manipulate a pruneable sequence CRDT —
/// "sequence" meaning it represents a javascript string or array, "CRDT" meaning
/// this structure can be merged with other ones, and "pruneable" meaning that it
/// supports an operation to remove meta-data when it is no longer needed (whereas
/// CRDT's often keep track of this meta-data forever).
var sequence_crdt = {};
(() => {
/// # create_antimatter_crdt(send[, init])
///
/// Creates and returns a new antimatter_crdt object (or adds antimatter_crdt methods and properties to `init`).
///
/// * `send`: A callback function to be called whenever this antimatter_crdt wants to send a
/// message over a connection registered with `get` or `connect`. The sole
/// parameter to this function is a JSONafiable object that hopes to be passed to
/// the `receive` method on the antimatter_crdt object at the other end of the
/// connection specified in the `conn` key.
/// * `get_time`: function that returns a number representing time (e.g. `Date.now()`)
/// * `set_timeout`: function that takes a callback and timeout length, and calls that callback after that amount of time; also returns an identifier that can be passed to `clear_timeout` to cancel the timeout (e.g. wrapping the javascript setTimeout)
/// * `clear_timeout`: function that takes a timeout identifier and cancels it (e.g. wrapping the javascript clearTimeout)
/// * `init`: (optional) An antimatter_crdt object to start with, which we'll add any properties to that it doesn't have, and we'll add all the antimatter_crdt methods to it. This option exists so you can serialize an antimatter_crdt instance as JSON, and then restore it later.
/// ``` js
/// var antimatter_crdt = create_antimatter_crdt(msg => {
/// websockets[msg.conn].send(JSON.stringify(msg))
/// },
/// () => Date.now(),
/// (func, t) => setTimeout(func, t),
/// (t) => clearTimeout(t)),
///   JSON.parse(fs.readFileSync('./antimatter.backup'))
/// )
/// ```
create_antimatter_crdt = (
send,
get_time,
set_timeout,
clear_timeout,
self
) => {
self = create_json_crdt(self);
self.send = send;
self.id = self.id || Math.random().toString(36).slice(2);
self.next_seq = self.next_seq || 0;
self.conns = self.conns || {};
self.proto_conns = self.proto_conns || {};
self.conn_count = self.conn_count || 0;
self.fissures = self.fissures || {};
self.acked_boundary = self.acked_boundary || {};
self.marcos = self.marcos || {};
self.forget_cbs = self.forget_cbs || {};
self.version_groups = self.version_groups || {};
self.marco_map = self.marco_map || {};
self.marco_time_est_1 = self.marco_time_est_1 || 1000;
self.marco_time_est_2 = self.marco_time_est_2 || 1000;
self.marco_current_wait_time = self.marco_current_wait_time || 1000;
self.marco_increases_allowed = 1;
self.marco_timeout = self.marco_timeout || null;
// Union the given versions with any version-groups they already belong to,
// producing one sorted group; register that group in self.version_groups
// for every member, and return it.
function raw_add_version_group(version_array) {
  let members = {};
  for (let v of version_array) {
    if (members[v]) continue;
    members[v] = true;
    let existing = self.version_groups[v];
    if (existing) for (let vv of existing) members[vv] = true;
  }
  let group = Object.keys(members).sort();
  for (let v of group) self.version_groups[v] = group;
  return group;
}
// Scan the version DAG (self.T plus the `children` reverse-map) for
// "parallel" groups: a set of parents all sharing exactly the same child
// set, and vice versa. Such groups can be collapsed during pruning.
// Returns { parent_sets, child_sets }, each mapping version id ->
// { members: {id: true, ...} } shared-container objects (only sets with
// 2+ members are recorded). self.current_version is always registered as
// a parent set.
function get_parent_and_child_sets(children) {
  let parent_sets = {};
  let child_sets = {};
  let done = {};
  // Register set `s` in `sets`, pointing every member at one shared
  // container; optionally mark members as processed.
  function add_set_to_sets(s, sets, mark_done) {
    let container = { members: s };
    let array = Object.keys(s);
    if (array.length < 2) return;
    for (let v of array) {
      sets[v] = container;
      if (mark_done) done[v] = true;
    }
  }
  add_set_to_sets(self.current_version, parent_sets, true);
  for (let v of Object.keys(self.T)) {
    if (done[v]) continue;
    done[v] = true;
    if (!children[v]) continue;
    let first_child_set = children[v];
    let first_child_array = Object.keys(first_child_set);
    let first_parent_set = self.T[first_child_array[0]];
    let first_parent_array = Object.keys(first_parent_set);
    if (
      // Every child of v must have exactly this same parent set...
      first_child_array.every((child) => {
        let parent_set = self.T[child];
        let parent_array = Object.keys(parent_set);
        return (
          parent_array.length == first_parent_array.length &&
          parent_array.every((parent) => first_parent_set[parent])
        );
      }) &&
      // ...and every parent must have exactly this same child set.
      first_parent_array.every((parent) => {
        let child_set = children[parent];
        let child_array = Object.keys(child_set);
        return (
          child_array.length == first_child_array.length &&
          child_array.every((child) => first_child_set[child])
        );
      })
    ) {
      add_set_to_sets(first_parent_set, parent_sets, true);
      add_set_to_sets(first_child_set, child_sets);
    }
  }
  return { parent_sets, child_sets };
}
// Search upward from the version set `bottom` for a "bubble": the search
// ascends parent links breadth-first and succeeds when a single frontier
// dominates everything seen so far, so that the span [bottom..top] could be
// collapsed to one version without changing the DAG's external shape.
//
// * `bottom`: {version: true, ...} set to start from.
// * `children`: reverse map of self.T (version -> set of children).
// * `child_sets`: shared child-set containers from get_parent_and_child_sets.
// * `restricted`: optional set of versions the bubble may not cross; when
//   given, the search keeps going and returns the LAST top found before
//   hitting a restricted/unknown version (or null if none was found).
//
// Returns a {version: true, ...} set for the bubble top, or null/undefined.
function find_one_bubble(bottom, children, child_sets, restricted) {
  let expecting = { ...bottom };
  let seen = {};
  // Pre-mark the children of `bottom` as seen so the bottom itself is
  // immediately eligible for processing.
  Object.keys(bottom).forEach(
    (v) =>
      children[v] &&
      Object.keys(children[v]).forEach((v) => (seen[v] = true))
  );
  let q = Object.keys(expecting);
  let last_top = null;
  while (q.length) {
    // Bug fix: `cur` was assigned without declaration, creating an implicit
    // global (and a ReferenceError under strict mode / ES modules).
    let cur = q.shift();
    if (!self.T[cur]) {
      // NOTE(review): throwing a string rather than an Error — kept as-is in
      // case callers match on the value, but Error("bad") is the file's
      // convention elsewhere.
      if (!restricted) throw "bad";
      else return last_top;
    }
    if (restricted && restricted[cur]) return last_top;
    if (seen[cur]) continue;
    // Only process a version once all of its children have been seen.
    if (children[cur] && !Object.keys(children[cur]).every((c) => seen[c]))
      continue;
    seen[cur] = true;
    delete expecting[cur];
    // If nothing is outstanding, `cur` alone dominates everything below.
    if (!Object.keys(expecting).length) {
      last_top = { [cur]: true };
      if (!restricted) return last_top;
    }
    Object.keys(self.T[cur]).forEach((p) => {
      expecting[p] = true;
      q.push(p);
    });
    // A fully-seen shared child-set whose members' parents are exactly the
    // outstanding frontier also forms a (multi-version) top.
    if (
      child_sets[cur] &&
      Object.keys(child_sets[cur].members).every((v) => seen[v])
    ) {
      let expecting_array = Object.keys(expecting);
      let parent_set = self.T[cur];
      let parent_array = Object.keys(parent_set);
      if (
        expecting_array.length == parent_array.length &&
        expecting_array.every((v) => parent_set[v])
      ) {
        last_top = child_sets[cur].members;
        if (!restricted) return last_top;
      }
    }
  }
  return last_top;
}
// Register `version_array` as a version group and, if any member already
// exists in the DAG (self.T), immediately collapse the bubble spanning
// those members via self.apply_bubbles. Returns the group's canonical
// (first sorted) version id.
function add_version_group(version_array) {
  let version_group = raw_add_version_group(version_array);
  // No member is in the DAG yet: nothing to collapse.
  if (!version_array.some((x) => self.T[x])) return version_group[0];
  let children = self.get_child_map();
  let { parent_sets, child_sets } = get_parent_and_child_sets(children);
  let to_bubble = {};
  // Tag `v` and all of its ancestors with `bubble` (stopping at versions
  // already tagged).
  function mark_bubble(v, bubble) {
    if (to_bubble[v]) return;
    to_bubble[v] = bubble;
    for (let vv of Object.keys(self.T[v])) mark_bubble(vv, bubble);
  }
  // The bubble's bottom is the subset of the group present in the DAG.
  let bottom = Object.fromEntries(
    version_group.filter((x) => self.T[x]).map((x) => [x, true])
  );
  let top = find_one_bubble(bottom, children, child_sets);
  // A bubble is represented as [bottom_id, top_id].
  let bubble = [Object.keys(bottom).sort()[0], Object.keys(top)[0]];
  for (let v of Object.keys(top)) to_bubble[v] = bubble;
  for (let v of Object.keys(bottom)) mark_bubble(v, bubble);
  self.apply_bubbles(to_bubble);
  return version_group[0];
}
// Wrap the user-supplied `send` so every outgoing message is rewritten in
// terms of version groups: a grouped version id is replaced by its full
// group array, and parent sets are expanded to include all group members.
let orig_send = send;
send = (x) => {
  // Single version field -> replace with the whole group array (the
  // receiver resolves it back via add_version_group).
  if (self.version_groups[x.version])
    x.version = self.version_groups[x.version];
  if (x.parents) {
    // Copy before mutating so we don't alter caller-owned state.
    x.parents = { ...x.parents };
    Object.keys(x.parents).forEach((v) =>
      self.version_groups[v] && self.version_groups[v].forEach((v) => (x.parents[v] = true))
    );
  }
  // `versions` as an array (set/welcome payloads): deep-copy, then expand
  // each entry's version id and parent set the same way.
  if (Array.isArray(x.versions)) {
    x.versions = JSON.parse(JSON.stringify(x.versions));
    x.versions.forEach(
      (v) =>
        self.version_groups[v.version] &&
        (v.version = self.version_groups[v.version])
    );
    x.versions.forEach((v) => {
      Object.keys(v.parents).forEach((vv) =>
        self.version_groups[vv] && self.version_groups[vv].forEach((vv) => (v.parents[vv] = true))
      );
    });
  }
  orig_send(x);
};
/// # antimatter_crdt.receive(message)
///
/// Let this antimatter object "receive" a message from another antimatter object, presumably from its `send` callback.
/// ``` js
/// websocket.on('message', data => {
/// antimatter_crdt.receive(JSON.parse(data)) });
/// ```
/// You generally do not need to mess with a message object directly, but below are the various message objects you might see, categorized by their `cmd` entry. Note that each object also
/// contains a `conn` entry with the id of the connection the message is sent
/// over.
self.receive = (x) => {
let {
cmd,
version,
parents,
patches,
versions,
fissure,
fissures,
seen,
forget,
marco,
peer,
conn,
} = x;
if (version && typeof version != "string") {
if (!self.T[version[0]]) version = add_version_group(version);
else version = version[0];
}
if (parents) {
parents = { ...parents };
Object.keys(parents).forEach((v) => {
if (self.version_groups[v] && self.version_groups[v][0] != v)
delete parents[v];
});
}
if (versions && versions.forEach) versions.forEach((v) => {
if (typeof v.version != "string") {
if (!self.T[v.version[0]]) v.version = add_version_group(v.version);
else v.version = v.version[0];
}
v.parents = { ...v.parents };
Object.keys(v.parents).forEach((vv) => {
if (self.version_groups[vv] && self.version_groups[vv][0] != vv)
delete v.parents[vv];
});
});
let marco_versions_array = version
? [version]
: versions && !Array.isArray(versions)
? Object.keys(versions).sort()
: null;
let marco_versions =
marco_versions_array &&
Object.fromEntries(marco_versions_array.map((v) => [v, true]));
if (versions && !Array.isArray(versions)) {
versions = { ...versions };
Object.keys(versions).forEach((v) => {
if (self.version_groups[v] && self.version_groups[v][0] != v)
delete versions[v];
});
if (!Object.keys(versions).length) return;
}
/// ## message `get`
/// `get` is the first message sent over a connection, and the peer at the other end will respond with `welcome`.
/// ``` js
/// { cmd: 'get',
/// peer: 'SENDER_ID',
/// conn: 'CONN_ID',
/// parents: {'PARENT_VERSION_ID': true, ...} }
/// ```
/// The `parents` are optional, and describes which versions this peer already has. The other end will respond with versions since that set of parents.
if (cmd == "get" || (cmd == "welcome" && peer != null)) {
if (self.conns[conn] != null) throw Error("bad");
self.conns[conn] = { peer, seq: ++self.conn_count };
}
/// ## message `fissure`
///
/// Sent to alert peers about a fissure. The `fissure` entry contains information about the two peers involved in the fissure, the specific connection id that broke, the `versions` that need to be protected, and the `time` of the fissure (in case we want to ignore it after some time). It is also possible to send multiple `fissures` in an array.
/// ``` js
/// { cmd: 'fissure',
/// fissure: { // or fissures: [{...}, {...}, ...],
/// a: 'PEER_A_ID',
/// b: 'PEER_B_ID',
/// conn: 'CONN_ID',
/// versions: {'VERSION_ID': true, ...},
/// time: Date.now()
/// },
/// conn: 'CONN_ID' }
/// ```
/// Note that `time` isn't used for anything critical, as it's just wallclock time.
if (fissure) fissures = [fissure];
if (fissures) fissures.forEach((f) => (f.t = self.conn_count));
if (versions && (cmd == "set" || cmd == "welcome"))
versions = Object.fromEntries(versions.map((v) => [v.version, v]));
if (version) versions = { [version]: true };
let rebased_patches = [];
let fissures_back = [];
let fissures_forward = [];
let fissures_done = {};
// Deep-copy an array of fissures for putting on the wire, stripping the
// local-only `t` (connection-sequence) field from each copy.
function copy_fissures(fs) {
  return fs.map((fissure) => {
    let copy = JSON.parse(JSON.stringify(fissure));
    delete copy.t;
    return copy;
  });
}
if (fissures) {
let fiss_map = Object.fromEntries(
fissures.map((f) => [f.a + ":" + f.b + ":" + f.conn, f])
);
for (let [key, f] of Object.entries(fiss_map)) {
if (fissures_done[f.conn]) continue;
fissures_done[f.conn] = true;
let our_f = self.fissures[key];
let other_key = f.b + ":" + f.a + ":" + f.conn;
let their_other = fiss_map[other_key];
let our_other = self.fissures[other_key];
if (!our_f) self.fissures[key] = f;
if (their_other && !our_other) self.fissures[other_key] = their_other;
if (!their_other && !our_other && f.b == self.id) {
if (self.conns[f.conn]) delete self.conns[f.conn];
our_other = self.fissures[other_key] = {
...f,
a: f.b,
b: f.a,
t: self.conn_count,
};
}
if (!their_other && our_other) {
fissures_back.push(f);
fissures_back.push(our_other);
}
if (!our_f || (their_other && !our_other)) {
fissures_forward.push(f);
if (their_other || our_other)
fissures_forward.push(their_other || our_other);
}
}
}
/// ## message `welcome`
/// Sent in response to a `get`, basically contains the initial state of the document; incoming `welcome` messages are also propagated over all our other connections but only with information that was new to us, so the propagation will eventually stop. When sent in response to a `get` (rather than being propagated), we include a `peer` entry with the id of the sending peer, so they know who we are, and to trigger them to send us their own `welcome` message.
///
/// ``` js
/// {
/// cmd: 'welcome',
/// versions: [
/// //each version looks like a set message...
/// ],
/// fissures: [
/// //each fissure looks as it would in a fissure message...
/// ],
/// parents:
/// {
/// //versions you must have before consuming these new versions
/// 'PARENT_VERSION_ID': true,
/// ...
/// },
/// [peer: 'SENDER_ID'], // if responding to a get
/// conn: 'CONN_ID'
/// }
/// ```
let _T = {};
let added_versions = [];
if (cmd == "welcome") {
var versions_to_add = {};
let vs = Object.values(versions);
vs.forEach((v) => (versions_to_add[v.version] = v.parents));
vs.forEach((v) => {
if (
self.T[v.version] ||
(self.version_groups[v.version] &&
self.version_groups[v.version][0] != v.version)
) {
remove_ancestors(v.version);
function remove_ancestors(v) {
if (versions_to_add[v]) {
Object.keys(versions_to_add[v]).forEach(remove_ancestors);
delete versions_to_add[v];
}
}
}
});
for (let v of vs) _T[v.version] = v.parents;
l1: for (var v of vs) {
if (versions_to_add[v.version]) {
let ps = Object.keys(v.parents);
if (!ps.length && Object.keys(self.T).length) continue;
for (p of ps) if (!self.T[p]) continue l1;
rebased_patches = rebased_patches.concat(
self.add_version(v.version, v.parents, v.patches, v.sort_keys)
);
added_versions.push(v);
delete _T[v.version];
}
}
}
if (cmd == "get" || (cmd == "welcome" && peer != null)) {
let fissures_back = Object.values(self.fissures);
if (cmd == "welcome") {
var leaves = { ..._T };
Object.keys(_T).forEach((v) => {
Object.keys(_T[v]).forEach((p) => delete leaves[p]);
});
let f = {
a: self.id,
b: peer,
conn: "-" + conn,
versions: Object.fromEntries(
added_versions
.concat(Object.keys(leaves).map((v) => versions[v]))
.map((v) => [v.version, true])
),
time: get_time(),
t: self.conn_count,
};
if (Object.keys(f.versions).length) {
let key = f.a + ":" + f.b + ":" + f.conn;
self.fissures[key] = f;
fissures_back.push(f);
fissures_forward.push(f);
}
}
send({
cmd: "welcome",
versions: self.generate_braid(parents || versions),
fissures: copy_fissures(fissures_back),
parents:
parents &&
Object.keys(parents).length &&
self.get_leaves(self.ancestors(parents, true)),
...(cmd == "get" ? { peer: self.id } : {}),
conn,
});
} else if (fissures_back.length) {
send({
cmd: "fissure",
fissures: copy_fissures(fissures_back),
conn,
});
}
/// ## message `forget`
/// Used to disconnect without creating a fissure, presumably meaning the sending peer doesn't plan to make any edits while they're disconnected.
/// ``` js
/// {cmd: 'forget', conn: 'CONN_ID'}
/// ```
if (cmd == "forget") {
if (self.conns[conn] == null) throw Error("bad");
send({ cmd: "ack", forget: true, conn });
delete self.conns[conn];
delete self.proto_conns[conn];
}
/// ## message forget `ack`
/// Sent in response to `forget`.. so they know we forgot them.
/// ``` js
/// {cmd: 'ack', forget: true, conn: 'CONN_ID'}
/// ```
if (cmd == "ack" && forget) {
self.forget_cbs[conn]();
}
/// ## message `set`
/// Sent to alert peers about a change in the document. The change is represented as a version, with a unique id, a set of parent versions (the most recent versions known before adding this version), and an array of patches, where the offsets in the patches do not take into account the application of other patches in the same array.
/// ``` js
/// { cmd: 'set',
/// version: 'VERSION_ID',
/// parents: {'PARENT_VERSION_ID': true, ...},
/// patches: [ {range: '.json.path.a.b', content: 42}, ... ],
/// conn: 'CONN_ID' }
/// ```
if (cmd == "set") {
if (conn == null || !self.T[version]) {
let ps = Object.keys(parents);
if (!ps.length && Object.keys(self.T).length) return;
for (p of ps) if (!self.T[p]) return;
rebased_patches = self.add_version(version, parents, patches);
for (let c of Object.keys(self.conns))
if (c != conn)
send({ cmd: "set", version, parents, patches, marco, conn: c });
}
}
/// ## message `marco`
/// Sent for pruning purposes, to try and establish whether everyone has seen the most recent versions. Note that a `set` message is treated as a `marco` message for the version being set.
/// ``` js
/// { cmd: 'marco',
/// version: 'MARCO_ID',
/// versions: {'VERSION_ID_A': true, ...},
/// conn: 'CONN_ID' }
/// ```
if (cmd == "marco" || cmd == "set") {
if (!Object.keys(versions).every((v) => self.T[v])) return;
if (
self.marco_timeout &&
marco_versions_array.length ==
Object.keys(self.current_version).length &&
marco_versions_array.every((x) => self.current_version[x])
) {
clear_timeout(self.marco_timeout);
self.marco_timeout = null;
}
let m = self.marcos[marco];
if (!m) {
m = self.marcos[marco] = {
id: marco,
origin: conn,
count: Object.keys(self.conns).length - (conn != null ? 1 : 0),
versions: marco_versions,
seq: self.conn_count,
time: get_time(),
};
m.orig_count = m.count;
m.real_marco = cmd == "marco";
m.key = JSON.stringify(Object.keys(m.versions).sort());
self.marco_map[m.key] = self.marco_map[m.key] || {};
let before = Object.keys(self.marco_map[m.key]).length;
self.marco_map[m.key][m.id] = true;
let after = Object.keys(self.marco_map[m.key]).length;
if (before == 1 && after == 2 && self.marco_increases_allowed > 0) {
self.marco_current_wait_time *= 2;
self.marco_increases_allowed--;
}
if (cmd == "marco")
for (let c of Object.keys(self.conns))
if (c != conn)
send({
cmd: "marco",
marco,
versions: marco_versions,
conn: c,
});
} else if (m.seq < self.conns[conn].seq) {
send({
cmd: "ack",
seen: "local",
marco,
versions: marco_versions,
conn,
});
return;
} else m.count--;
check_marco_count(marco);
}
/// ## message local `ack`
/// Sent in response to `set`, but not right away; a peer will first send the `set` over all its other connections, and only after they have all responded with a local `ack` – and we didn't see a `fissure` message while waiting – will the peer send a local `ack` over the originating connection.
/// ``` js
/// {cmd: 'ack', seen: 'local', version: 'VERSION_ID', conn: 'CONN_ID'}
/// ```
if (cmd == "ack" && seen == "local") {
let m = self.marcos[marco];
if (!m || m.cancelled) return;
m.count--;
check_marco_count(marco);
}
// Called whenever a marco's outstanding-ack count may have reached zero.
// When it does: update the round-trip time estimate, then either forward a
// local ack back to the marco's originating connection, or — if we are the
// origin — promote it to a global ack via add_full_ack_leaves.
function check_marco_count(marco) {
  let m = self.marcos[marco];
  if (m && m.count === 0 && !m.cancelled) {
    m.time2 = get_time();
    if (m.orig_count > 0) {
      // Exponential moving average of the local-ack round trip.
      let t = m.time2 - m.time;
      let weight = 0.1;
      self.marco_time_est_1 =
        weight * t + (1 - weight) * self.marco_time_est_1;
    }
    if (m.origin != null) {
      if (self.conns[m.origin])
        send({
          cmd: "ack",
          seen: "local",
          marco,
          // NOTE(review): `marco_versions` comes from the enclosing
          // receive() call, not from `m.versions` — presumably they match
          // for the message that completed the count; verify.
          versions: marco_versions,
          conn: m.origin,
        });
    } else add_full_ack_leaves(marco);
  }
}
/// ## message global `ack`
/// Sent after an originating peer has received a local `ack` over all its connections, or after any peer receives a global `ack`, so that everyone may come to know that this version has been seen by everyone in this peer group.
/// ``` js
/// {cmd: 'ack', seen: 'global', version: 'VERSION_ID', conn: 'CONN_ID'}
/// ```
if (cmd == "ack" && seen == "global") {
let m = self.marcos[marco];
if (!m || m.cancelled) return;
let t = get_time() - m.time2;
let weight = 0.1;
self.marco_time_est_2 =
weight * t + (1 - weight) * self.marco_time_est_2;
if (m.real_marco && Object.keys(self.marco_map[m.key]).length == 1) {
self.marco_current_wait_time *= 0.8;
}
add_full_ack_leaves(marco, conn);
}
// A marco has been globally acknowledged: cancel it, propagate the global
// ack to every other connection that predates the marco, advance the acked
// boundary to the marco's versions (removing their ancestors from the
// boundary), and attempt a prune.
function add_full_ack_leaves(marco, conn) {
  let m = self.marcos[marco];
  if (!m || m.cancelled) return;
  m.cancelled = true;
  // Skip the connection the ack arrived on, and connections newer than the
  // marco (they never saw it).
  for (let [c, cc] of Object.entries(self.conns))
    if (c != conn && cc.seq <= m.seq)
      send({
        cmd: "ack",
        seen: "global",
        marco,
        // NOTE(review): uses the enclosing receive() call's
        // `marco_versions` rather than `m.versions` — confirm intent.
        versions: marco_versions,
        conn: c,
      });
  for (let v of Object.keys(m.versions)) {
    if (!self.T[v]) continue;
    // Remove v and all its ancestors from the acked boundary, then mark v
    // itself as the new boundary leaf.
    let marks = {};
    let f = (v) => {
      if (!marks[v]) {
        marks[v] = true;
        delete self.acked_boundary[v];
        Object.keys(self.T[v]).forEach(f);
      }
    };
    f(v);
    self.acked_boundary[v] = true;
  }
  prune(false, m.seq);
}
if (added_versions.length || fissures_forward.length) {
for (let c of Object.keys(self.conns))
if (c != conn)
send({
cmd: added_versions.length ? "welcome" : "fissure",
...(added_versions.length ? { versions: added_versions } : {}),
fissures: copy_fissures(fissures_forward),
conn: c,
});
}
if (fissures_forward.length) resolve_fissures();
if (
!self.marco_timeout &&
cmd != "set" &&
cmd != "marco" &&
prune(true)
) {
if (!self.marco_current_wait_time) {
self.marco_current_wait_time =
4 * (self.marco_time_est_1 + self.marco_time_est_2);
}
let t = Math.random() * self.marco_current_wait_time;
self.marco_timeout = set_timeout(() => {
self.marco_increases_allowed = 1;
self.marco_timeout = null;
if (prune(true)) self.marco();
}, t);
}
if (cmd == "welcome" && peer == null && prune(true, null, true))
self.marco();
return rebased_patches;
};
/// # antimatter_crdt.get(conn) or connect(conn)
///
/// Registers a new connection with id `conn` and opens the handshake by
/// sending a `get` message over it; the peer at the other end responds
/// with a `welcome`.
///
/// ``` js
/// alice_antimatter_crdt.get('connection_to_bob')
/// ```
self.get = (conn) => {
  self.proto_conns[conn] = true;
  let msg = { cmd: "get", peer: self.id, conn };
  send(msg);
};
self.connect = self.get;
/// # antimatter_crdt.forget(conn)
///
/// Disconnect the given connection without creating a fissure – we don't need to reconnect with them.. it seems.. if we do, then we need to call `disconnect` instead, which will create a fissure allowing us to reconnect.
///
/// ``` js
/// alice_antimatter_crdt.forget('connection_to_bob')
/// ```
self.forget = async (conn) => {
  // Resolves once the peer acknowledges the forget (see the forget-`ack`
  // handler in receive, which invokes self.forget_cbs[conn]).
  await new Promise((done) => {
    if (self.conns[conn] != null) {
      self.forget_cbs[conn] = done;
      send({ cmd: "forget", conn });
      self.disconnect(conn, false);
    } else {
      // Bug fix: with no registered connection there is no ack coming, and
      // previously `done` was never called, so awaiting callers hung
      // forever. Clean up and resolve immediately instead.
      self.disconnect(conn, false);
      done();
    }
  });
};
/// # antimatter_crdt.disconnect(conn)
///
/// If we detect that a connection has closed, let the antimatter_crdt object know by calling this method with the given connection id – this will create a fissure so we can reconnect with whoever was on the other end of the connection later on.
///
/// ``` js
/// alice_antimatter_crdt.disconnect('connection_to_bob')
/// ```
self.disconnect = (conn, fissure = true) => {
  // Ignore connections we never knew about.
  let known = self.conns[conn] != null;
  if (!known && !self.proto_conns[conn]) return;
  delete self.proto_conns[conn];
  if (!known) return;
  let peer = self.conns[conn].peer;
  delete self.conns[conn];
  if (!fissure) return;
  // Record the break so we can heal history when this peer reconnects.
  let f = create_fissure(peer, conn);
  if (f) self.receive({ cmd: "fissure", fissure: f });
};
/// # antimatter_crdt.set(...patches)
///
/// Modify this antimatter_crdt object by applying the given patches. Each patch looks like `{range: '.life.meaning', content: 42}`. Calling this method will trigger calling the `send` callback to let our peers know about this change.
///
/// ``` js
/// antimatter_crdt.set({
/// range: '.life.meaning',
/// content: 42
/// })
/// ```
self.set = (...patches) => {
  // Mint a fresh version id from our per-peer sequence counter.
  let version = `${self.next_seq++}@${self.id}`;
  let message = {
    cmd: "set",
    version,
    parents: { ...self.current_version },
    patches,
    marco: Math.random().toString(36).slice(2),
  };
  // Route through receive() so the edit is applied locally and broadcast.
  self.receive(message);
  return version;
};
/// # antimatter_crdt.marco()
///
/// Initiate sending a `marco` message to try and establish whether certain versions can be pruned.
///
/// ``` js
/// antimatter_crdt.marco()
/// ```
self.marco = () => {
  // Announce our current frontier, expanded to include every member of any
  // version group a frontier version belongs to.
  let versions = { ...self.current_version };
  for (let v of Object.keys(versions)) {
    let group = self.version_groups[v];
    if (group) for (let member of group) versions[member] = true;
  }
  let marco = Math.random().toString(36).slice(2);
  self.receive({ cmd: "marco", marco, versions });
  return marco;
};
// Flag every in-flight marco as cancelled so stale acks are ignored.
function cancel_marcos() {
  Object.values(self.marcos).forEach((m) => (m.cancelled = true));
}
// Build a fissure record protecting every version that is either not yet
// fully acked or sits on the acked boundary itself. Returns undefined when
// there is nothing worth protecting.
function create_fissure(peer, conn) {
  let acked = self.ancestors(self.acked_boundary);
  let versions = {};
  for (let v of Object.keys(self.T))
    if (!acked[v] || self.acked_boundary[v]) versions[v] = true;
  if (!Object.keys(versions).length) return;
  return { a: self.id, b: peer, conn, versions, time: get_time() };
}
// When both halves (a->b and b->a) of a fissure are present, the fissure is
// healed: clear the protected versions from both records, and roll the
// acked boundary back before the formerly-protected versions so they get
// re-acknowledged (and can be pruned) through the normal marco flow.
function resolve_fissures() {
  let unfissured = {};
  Object.entries(self.fissures).forEach(([fk, f]) => {
    var other_key = f.b + ":" + f.a + ":" + f.conn;
    var other = self.fissures[other_key];
    if (other) {
      if (Object.keys(f.versions).length) {
        for (let v of Object.keys(f.versions)) unfissured[v] = true;
        // Replace rather than mutate, keeping the original record intact.
        self.fissures[fk] = { ...f, versions: {} };
      }
      if (Object.keys(other.versions).length) {
        for (let v of Object.keys(other.versions)) unfissured[v] = true;
        self.fissures[other_key] = { ...other, versions: {} };
      }
    }
  });
  if (Object.keys(unfissured).length) {
    // Outstanding marcos may reference now-stale state; drop them.
    cancel_marcos();
    let ack_versions = self.ancestors(self.acked_boundary);
    let unfissured_descendants = self.descendants(unfissured, true);
    // Un-ack everything at or above the unfissured versions.
    for (let un of Object.keys(unfissured_descendants))
      if (ack_versions[un]) delete ack_versions[un];
    self.acked_boundary = self.get_leaves(ack_versions);
  }
}
// Attempt to prune history. With `just_checking` true, nothing is mutated
// (fissures are copied first) and the function returns true as soon as any
// pruning opportunity exists; otherwise it actually collapses bubbles via
// self.apply_bubbles and cleans up marcos and version groups.
// `t` limits fissure cancellation to fissures whose connection-sequence is
// <= t; `just_versions` (checking mode only) ignores fissure-only pruning.
function prune(just_checking, t, just_versions) {
  if (just_checking) t = Infinity;
  let fissures = just_checking ? { ...self.fissures } : self.fissures;
  // Matched fissure pairs (a->b and b->a) old enough per `t` cancel out.
  Object.entries(fissures).forEach((x) => {
    var other_key = x[1].b + ":" + x[1].a + ":" + x[1].conn;
    var other = fissures[other_key];
    if (other && x[1].t <= t && other.t <= t) {
      delete fissures[x[0]];
      delete fissures[other_key];
    }
  });
  // Optionally expire fissures older than self.fissure_lifetime.
  if (self.fissure_lifetime != null) {
    var now = get_time();
    Object.entries(fissures).forEach(([k, f]) => {
      if (f.time == null) f.time = now;
      if (f.time <= now - self.fissure_lifetime) {
        delete fissures[k];
      }
    });
  }
  // In checking mode, a removable fissure already counts as prunable work.
  if (
    just_checking &&
    !just_versions &&
    Object.keys(fissures).length < Object.keys(self.fissures).length
  )
    return true;
  // Versions protected by surviving fissures must not be collapsed.
  var restricted = {};
  Object.values(fissures).forEach((f) => {
    Object.keys(f.versions).forEach((v) => (restricted[v] = true));
  });
  // Real pruning additionally restricts everything not yet acked.
  if (!just_checking) {
    var acked = self.ancestors(self.acked_boundary);
    Object.keys(self.T).forEach((x) => {
      if (!acked[x]) restricted[x] = true;
    });
  }
  let children = self.get_child_map();
  let { parent_sets, child_sets } = get_parent_and_child_sets(children);
  let to_bubble = {};
  // Tag `v` and its ancestors with `bubble`, stopping at versions already
  // tagged.
  function mark_bubble(v, bubble) {
    if (to_bubble[v]) return;
    to_bubble[v] = bubble;
    for (let vv of Object.keys(self.T[v])) mark_bubble(vv, bubble);
  }
  let visited = {};
  // Walk down from the current frontier looking for collapsible bubbles;
  // in checking mode, returns true on the first one found.
  function f(cur) {
    if (!self.T[cur] || visited[cur]) return;
    visited[cur] = true;
    // First try a multi-version bubble rooted at cur's shared parent set.
    if (
      to_bubble[cur] == null &&
      parent_sets[cur] &&
      !parent_sets[cur].done
    ) {
      parent_sets[cur].done = true;
      let bottom = parent_sets[cur].members;
      let top = find_one_bubble(bottom, children, child_sets, restricted);
      if (top) {
        if (just_checking) return true;
        let bottom_array = Object.keys(bottom).sort();
        let top_array = Object.keys(top);
        // The bottom versions merge into one version group.
        raw_add_version_group(bottom_array);
        let bubble = [bottom_array[0], top_array[0]];
        for (let v of top_array) to_bubble[v] = bubble;
        for (let v of bottom_array) mark_bubble(v, bubble);
      }
    }
    // Then try a single-version bubble starting at cur itself.
    if (to_bubble[cur] == null) {
      let top = find_one_bubble(
        { [cur]: true },
        children,
        child_sets,
        restricted
      );
      if (top && !top[cur]) {
        if (just_checking) return true;
        let bubble = [cur, Object.keys(top)[0]];
        for (let v of Object.keys(top)) to_bubble[v] = bubble;
        mark_bubble(bubble[0], bubble);
      } else {
        // Degenerate bubble: the version stands alone.
        to_bubble[cur] = [cur, cur];
      }
    }
    // Recurse into parents (through the version group's canonical id when
    // cur was grouped away).
    return Object.keys(
      self.T[cur] || self.T[self.version_groups[cur][0]]
    ).some(f);
  }
  if (Object.keys(self.current_version).some(f) && just_checking)
    return true;
  self.apply_bubbles(to_bubble);
  // Drop marcos whose versions no longer exist, and tidy marco_map.
  for (let [k, m] of Object.entries(self.marcos)) {
    let vs = Object.keys(m.versions);
    if (
      !vs.length ||
      !vs.every((v) => self.T[v] || self.version_groups[v])
    ) {
      delete self.marcos[k];
      delete self.marco_map[m.key][m.id];
      if (!Object.keys(self.marco_map[m.key]).length)
        delete self.marco_map[m.key];
    }
  }
  // Drop version groups whose canonical version left the DAG.
  for (let [v, vs] of Object.entries(self.version_groups)) {
    if (!self.T[vs[0]]) delete self.version_groups[v];
  }
}
return self;
};
/// ## create_json_crdt([init])
///
/// Create a new `json_crdt` object (or start with `init`, and add stuff to that).
///
/// ``` js
/// var json_crdt = create_json_crdt()
/// ```
create_json_crdt = (self) => {
self = self || {};
self.S = self.S || null;
self.T = self.T || {};
self.root_version = null;
self.current_version = self.current_version || {};
self.version_cache = self.version_cache || {};
let is_lit = (x) => !x || typeof x != "object" || x.t == "lit";
let get_lit = (x) => (x && typeof x == "object" && x.t == "lit" ? x.S : x);
let make_lit = (x) => (x && typeof x == "object" ? { t: "lit", S: x } : x);
self = self || {};
/// # json_crdt.read()
///
/// Returns an instance of the `json` object represented by this json_crdt data-structure.
///
/// ``` js
/// console.log(json_crdt.read())
/// ```
// Materialize the CRDT into a plain JSON value, considering only versions
// accepted by `is_anc` (defaults to accepting every version).
self.read = (is_anc) => raw_read(self.S, is_anc || (() => true));
// Recursive worker for read(): converts each node kind back to plain JSON.
// Also handed to sequence_crdt.generate_braid as an element reader, so its
// name and (x, is_anc) signature are part of this closure's interface.
function raw_read(x, is_anc) {
  // Primitives and null are already plain values.
  if (!x || typeof x != "object") return x;
  switch (x.t) {
    case "lit":
      // Deep-copy so callers cannot mutate the stored literal.
      return JSON.parse(JSON.stringify(x.S));
    case "val":
      // A register: read whatever single element is currently visible.
      return raw_read(sequence_crdt.get(x.S, 0, is_anc), is_anc);
    case "obj": {
      var out = {};
      for (var [key, child] of Object.entries(x.S)) {
        var value = raw_read(child, is_anc);
        if (value != null) out[key] = value;
      }
      return out;
    }
    case "arr": {
      var items = [];
      sequence_crdt.traverse(
        x.S,
        is_anc,
        (node, _, __, ___, ____, deleted) => {
          if (!deleted)
            node.elems.forEach((e) => items.push(raw_read(e, is_anc)));
        },
        true
      );
      return items;
    }
    case "str": {
      var chunks = [];
      sequence_crdt.traverse(
        x.S,
        is_anc,
        (node, _, __, ___, ____, deleted) => {
          if (!deleted) chunks.push(node.elems);
        },
        true
      );
      return chunks.join("");
    }
    default:
      throw Error("bad");
  }
}
/// # json_crdt.generate_braid(versions)
///
/// Returns an array of `set` messages that each look like this: `{version, parents, patches, sort_keys}`, such that if we pass all these messages to `antimatter_crdt.receive()`, we'll reconstruct the data in this `json_crdt` data-structure, assuming the recipient already has the given `versions` (each version is represented as an object with a version, and each value is `true`).
///
/// ``` js
/// json_crdt.generate_braid({
/// alice2: true,
/// bob3: true
/// })
/// ```
self.generate_braid = (versions) => {
  // Everything at-or-below `versions` is already known to the recipient
  // and can be skipped.
  var anc =
    versions && Object.keys(versions).length
      ? self.ancestors(versions, true)
      : {};
  var is_anc = (x) => anc[x];
  if (Object.keys(self.T).length === 0) return [];
  // Reuse cached set-messages where available; otherwise reconstruct the
  // message from the CRDT and store it back into the cache.
  return Object.entries(self.version_cache)
    .filter((x) => !is_anc(x[0]))
    .map(([version, set_message]) => {
      return (self.version_cache[version] =
        set_message || generate_set_message(version));
    });
  // Rebuild the {version, parents, patches, sort_keys} message for one
  // version by replaying the CRDT relative to that version's ancestors.
  function generate_set_message(version) {
    // A parentless (root) version simply sets the whole document to the
    // snapshot visible to that version alone.
    if (!Object.keys(self.T[version]).length) {
      return {
        version,
        parents: {},
        patches: [{ range: "", content: self.read((v) => v == version) }],
      };
    }
    var is_lit = (x) => !x || typeof x !== "object" || x.t === "lit";
    var get_lit = (x) =>
      x && typeof x === "object" && x.t === "lit" ? x.S : x;
    // For patch extraction, "ancestor" means strictly before `version`.
    var ancs = self.ancestors({ [version]: true });
    delete ancs[version];
    var is_anc = (x) => ancs[x];
    var path = []; // JSON-path segments accumulated while descending
    var patches = [];
    var sort_keys = {}; // patch index -> sort_key override
    recurse(self.S);
    // Walk the CRDT tree, emitting a patch (with a JSON-path range) for
    // every splice this version contributed to a sequence node.
    function recurse(x) {
      if (is_lit(x)) {
      } else if (x.t === "val") {
        sequence_crdt
          .generate_braid(x.S, version, is_anc, raw_read)
          .forEach((s) => {
            if (s[2].length) {
              patches.push({ range: path.join(""), content: s[2][0] });
              if (s[3]) sort_keys[patches.length - 1] = s[3];
            }
          });
        sequence_crdt.traverse(x.S, is_anc, (node) => {
          node.elems.forEach(recurse);
        });
      } else if (x.t === "arr") {
        sequence_crdt.generate_braid(x.S, version, is_anc).forEach((s) => {
          patches.push({
            range: `${path.join("")}[${s[0]}:${s[0] + s[1]}]`,
            content: s[2],
          });
          if (s[3]) sort_keys[patches.length - 1] = s[3];
        });
        var i = 0;
        sequence_crdt.traverse(x.S, is_anc, (node) => {
          node.elems.forEach((e) => {
            path.push(`[${i++}]`);
            recurse(e);
            path.pop();
          });
        });
      } else if (x.t === "obj") {
        Object.entries(x.S).forEach((e) => {
          path.push("[" + JSON.stringify(e[0]) + "]");
          recurse(e[1]);
          path.pop();
        });
      } else if (x.t === "str") {
        sequence_crdt.generate_braid(x.S, version, is_anc).forEach((s) => {
          patches.push({
            range: `${path.join("")}[${s[0]}:${s[0] + s[1]}]`,
            content: s[2],
          });
          if (s[3]) sort_keys[patches.length - 1] = s[3];
        });
      }
    }
    return {
      version,
      parents: { ...self.T[version] },
      patches,
      sort_keys,
    };
  }
};
/// # json_crdt.apply_bubbles(to_bubble)
///
/// This method helps prune away meta data and compress stuff when we have determined that certain versions can be renamed to other versions – these renamings are expressed in `to_bubble`, where keys are versions and values are "bubbles", each bubble is represented with an array of two elements, the first element is the "bottom" of the bubble, and the second element is the "top" of the bubble. We will use the "bottom" as the new name for the version, and we'll use the "top" as the new parents.
///
/// ``` js
/// json_crdt.apply_bubbles({
/// alice4: ['bob5', 'alice4'],
/// bob5: ['bob5', 'alice4']
/// })
/// ```
self.apply_bubbles = (to_bubble) => {
  // Pass 1: walk the CRDT tree and collapse any subtree whose history has
  // become trivial (single undeleted node) back into a plain literal.
  function recurse(x) {
    if (is_lit(x)) return x;
    if (x.t == "val") {
      sequence_crdt.apply_bubbles(x.S, to_bubble);
      // A register keeps only its first (current) element per node.
      sequence_crdt.traverse(
        x.S,
        () => true,
        (node) => {
          node.elems = node.elems.slice(0, 1).map(recurse);
        },
        true
      );
      // Fully collapsed register -> unwrap to its single literal value.
      if (
        x.S.nexts.length == 0 &&
        !x.S.next &&
        x.S.elems.length == 1 &&
        is_lit(x.S.elems[0])
      )
        return x.S.elems[0];
      return x;
    }
    if (x.t == "arr") {
      sequence_crdt.apply_bubbles(x.S, to_bubble);
      sequence_crdt.traverse(
        x.S,
        () => true,
        (node) => {
          node.elems = node.elems.map(recurse);
        },
        true
      );
      // One node, all-literal elements, nothing deleted -> literal array.
      if (
        x.S.nexts.length == 0 &&
        !x.S.next &&
        x.S.elems.every(is_lit) &&
        !Object.keys(x.S.deleted_by).length
      )
        return { t: "lit", S: x.S.elems.map(get_lit) };
      return x;
    }
    if (x.t == "obj") {
      Object.entries(x.S).forEach((e) => {
        var y = (x.S[e[0]] = recurse(e[1]));
        if (y == null) delete x.S[e[0]];
      });
      // All values literal -> the whole object becomes one literal.
      if (Object.values(x.S).every(is_lit)) {
        var o = {};
        Object.entries(x.S).forEach((e) => (o[e[0]] = get_lit(e[1])));
        return { t: "lit", S: o };
      }
      return x;
    }
    if (x.t == "str") {
      sequence_crdt.apply_bubbles(x.S, to_bubble);
      // Single undeleted node -> plain string.
      if (
        x.S.nexts.length == 0 &&
        !x.S.next &&
        !Object.keys(x.S.deleted_by).length
      )
        return x.S.elems;
      return x;
    }
  }
  self.S = recurse(self.S);
  // Pass 2: rewrite the version DAG — each version is renamed to its
  // bubble's bottom, and the bubble's top donates its parents.
  Object.entries(to_bubble).forEach(([version, bubble]) => {
    if (!self.T[version]) return;
    // NOTE(review): my_where_are_they_now is not initialized by this
    // factory — presumably supplied by the antimatter wrapper; confirm
    // before using json_crdt stand-alone.
    self.my_where_are_they_now[version] = bubble[0];
    if (version === bubble[1]) self.T[bubble[0]] = self.T[bubble[1]];
    if (version !== bubble[0]) {
      if (self.root_version == version) self.root_version = bubble[0];
      delete self.T[version];
      delete self.version_cache[version];
      delete self.acked_boundary[version];
      delete self.current_version[version];
      if (
        self.version_groups[version] &&
        self.version_groups[version][0] == version
      ) {
        for (let v of self.version_groups[version]) {
          delete self.version_groups[v];
        }
      }
      // Remove the renamed version from every remaining parent set
      // (parent sets are copied before mutation).
      for (let [k, parents] of Object.entries(self.T)) {
        self.T[k] = parents = { ...parents };
        for (let p of Object.keys(parents)) {
          if (p == version) delete parents[p];
        }
      }
    } else self.version_cache[version] = null; // force regeneration later
  });
  // If history has fully converged — one leaf, fully acked, no open
  // fissures — flatten everything into a single literal snapshot.
  // NOTE(review): acked_boundary / fissures / version_groups also come
  // from the enclosing antimatter_crdt, not this factory.
  var leaves = Object.keys(self.current_version);
  var acked_boundary = Object.keys(self.acked_boundary);
  var fiss = Object.keys(self.fissures);
  if (
    leaves.length == 1 &&
    acked_boundary.length == 1 &&
    leaves[0] == acked_boundary[0] &&
    fiss.length == 0
  ) {
    self.T = { [leaves[0]]: {} };
    self.S = make_lit(self.read());
  }
};
/// # json_crdt.add_version(version, parents, patches[, sort_keys])
///
/// The main method for modifying a `json_crdt` data structure.
///
/// * `version`: Unique string associated with this edit.
/// * `parents`: A set of versions that this version is aware of, represented as a map with versions as keys, and values of `true`.
/// * `patches`: An array of patches, each patch looks like this `{range: '.life.meaning', content: 42}`.
/// * `sort_keys`: (optional) An object where each key is an index, and the value is a sort_key to use with the patch at the given index in the `patches` array – a sort_key overrides the version for a patch for the purposes of sorting. This can be useful after doing some pruning.
///
/// ``` js
/// json_crdt.add_version(
/// 'alice6',
/// {
/// alice5: true,
/// bob7: true
/// },
/// [
/// {
/// range: '.a.b',
/// content: 'c'
/// }
/// ]
/// )
/// ```
self.add_version = (version, parents, patches, sort_keys) => {
  // Ignore duplicate versions.
  if (self.T[version]) return;
  if (self.root_version == null) self.root_version = version;
  self.T[version] = { ...parents };
  // Cache a deep copy of the raw message so generate_braid can replay it
  // without recomputation.
  self.version_cache[version] = JSON.parse(
    JSON.stringify({
      version,
      parents,
      patches,
      sort_keys,
    })
  );
  // The new version supersedes its parents in the current-leaf set.
  Object.keys(parents).forEach((k) => {
    if (self.current_version[k]) delete self.current_version[k];
  });
  self.current_version[version] = true;
  if (!sort_keys) sort_keys = {};
  // A parentless version wholesale-replaces the document with a literal.
  if (!Object.keys(parents).length) {
    var parse = self.parse_patch(patches[0]);
    self.S = make_lit(parse.value);
    return patches;
  }
  // Choose the ancestry test. Note the reference-equality fast path: if
  // the caller passed self.current_version itself, every version except
  // the new one is an ancestor.
  let is_anc;
  if (parents == self.current_version) {
    is_anc = (_version) => _version != version;
  } else {
    let ancs = self.ancestors(parents);
    is_anc = (_version) => ancs[_version];
  }
  // Apply each patch, collecting "rebased" patches re-expressed against
  // the current merged state (positions can shift under concurrency).
  var rebased_patches = [];
  patches.forEach((patch, i) => {
    var sort_key = sort_keys[i];
    var parse = self.parse_patch(patch);
    var cur = resolve_path(parse);
    if (!parse.slice) {
      // Whole-value assignment: replace the register's full contents.
      if (cur.t != "val") throw Error("bad");
      var len = sequence_crdt.length(cur.S, is_anc);
      sequence_crdt.add_version(
        cur.S,
        version,
        [[0, len, [parse.delete ? null : make_lit(parse.value)], sort_key]],
        is_anc
      );
      rebased_patches.push(patch);
    } else {
      // Range splice into a string or array.
      if (typeof parse.value === "string" && cur.t !== "str")
        throw Error(
          `Cannot splice string ${JSON.stringify(
            parse.value
          )} into non-string`
        );
      if (parse.value instanceof Array && cur.t !== "arr")
        throw Error(
          `Cannot splice array ${JSON.stringify(
            parse.value
          )} into non-array`
        );
      if (parse.value instanceof Array)
        parse.value = parse.value.map((x) => make_lit(x));
      var r0 = parse.slice[0];
      var r1 = parse.slice[1];
      // Negative indices (including -0) count back from the end.
      if (r0 < 0 || Object.is(r0, -0) || r1 < 0 || Object.is(r1, -0)) {
        let len = sequence_crdt.length(cur.S, is_anc);
        if (r0 < 0 || Object.is(r0, -0)) r0 = len + r0;
        if (r1 < 0 || Object.is(r1, -0)) r1 = len + r1;
      }
      var rebased_splices = sequence_crdt.add_version(
        cur.S,
        version,
        [[r0, r1 - r0, parse.value, sort_key]],
        is_anc
      );
      // Re-express each returned splice as a JSON-path range patch.
      for (let rebased_splice of rebased_splices)
        rebased_patches.push({
          range: `${parse.path
            .map((x) => `[${JSON.stringify(x)}]`)
            .join("")}[${rebased_splice[0]}:${
            rebased_splice[0] + rebased_splice[1]
          }]`,
          content: rebased_splice[2],
        });
    }
  });
  // Walk the parsed path down the tree, upgrading plain literals into
  // editable CRDT nodes along the way, and return the target node.
  function resolve_path(parse) {
    var cur = self.S;
    if (!cur || typeof cur != "object" || cur.t == "lit")
      cur = self.S = {
        t: "val",
        S: sequence_crdt.create_node(self.root_version, [cur]),
      };
    var prev_S = null; // sequence holding `cur`, for write-back via set()
    var prev_i = 0; // index of `cur` within prev_S
    for (var i = 0; i < parse.path.length; i++) {
      var key = parse.path[i];
      // Dereference a register to the value it currently holds.
      if (cur.t == "val")
        cur = sequence_crdt.get((prev_S = cur.S), (prev_i = 0), is_anc);
      // Expand a literal object/array into a real CRDT node in place.
      if (cur.t == "lit") {
        var new_cur = {};
        if (cur.S instanceof Array) {
          new_cur.t = "arr";
          new_cur.S = sequence_crdt.create_node(
            self.root_version,
            cur.S.map((x) => make_lit(x))
          );
        } else {
          if (typeof cur.S != "object") throw Error("bad");
          new_cur.t = "obj";
          new_cur.S = {};
          Object.entries(cur.S).forEach(
            (e) => (new_cur.S[e[0]] = make_lit(e[1]))
          );
        }
        cur = new_cur;
        sequence_crdt.set(prev_S, prev_i, cur, is_anc);
      }
      if (cur.t == "obj") {
        // Ensure object members are wrapped in a register ("val") node.
        let x = cur.S[key];
        if (!x || typeof x != "object" || x.t == "lit")
          x = cur.S[key] = {
            t: "val",
            S: sequence_crdt.create_node(self.root_version, [
              x == null ? null : x,
            ]),
          };
        cur = x;
      } else if (i == parse.path.length - 1 && !parse.slice) {
        // Trailing numeric index: treat a[i] = v as the splice a[i:i+1].
        parse.slice = [key, key + 1];
        parse.value = cur.t == "str" ? parse.value : [parse.value];
      } else if (cur.t == "arr") {
        cur = sequence_crdt.get((prev_S = cur.S), (prev_i = key), is_anc);
      } else throw Error("bad");
    }
    if (parse.slice) {
      if (cur.t == "val")
        cur = sequence_crdt.get((prev_S = cur.S), (prev_i = 0), is_anc);
      // Promote a bare string / literal array to a spliceable CRDT node.
      if (typeof cur == "string") {
        cur = {
          t: "str",
          S: sequence_crdt.create_node(self.root_version, cur),
        };
        sequence_crdt.set(prev_S, prev_i, cur, is_anc);
      } else if (cur.t == "lit") {
        if (!(cur.S instanceof Array)) throw Error("bad");
        cur = {
          t: "arr",
          S: sequence_crdt.create_node(
            self.root_version,
            cur.S.map((x) => make_lit(x))
          ),
        };
        sequence_crdt.set(prev_S, prev_i, cur, is_anc);
      }
    }
    return cur;
  }
  return rebased_patches;
};
/// # json_crdt.get_child_map()
///
/// Returns a map where each key is a version, and each value is a set of child versions, represented as a map with versions as keys, and values of `true`.
///
/// ``` js
/// json_crdt.get_child_map()
/// ```
// Invert the parent DAG T: returns a map from each version to the set of
// versions that list it as a parent ({child: true, ...}).
self.get_child_map = () => {
  var result = {};
  for (var [child, parents] of Object.entries(self.T)) {
    for (var parent of Object.keys(parents)) {
      if (!result[parent]) result[parent] = {};
      result[parent][child] = true;
    }
  }
  return result;
};
/// # json_crdt.ancestors(versions, ignore_nonexistent=false)
///
/// Gather `versions` and all their ancestors into a set. `versions` is a set of versions, i.e. a map with version-keys and values of true – we'll basically return a larger set. If `ignore_nonexistent` is `true`, then we won't throw an exception if we encounter a version that we don't have in our data-structure.
///
/// ``` js
/// json_crdt.ancestors({
/// alice12: true,
/// bob10: true
/// })
/// ```
// Collect `versions` together with all of their ancestors (following the
// parent links in T) into a set. Unknown versions throw unless
// `ignore_nonexistent` is truthy, in which case they are skipped.
self.ancestors = (versions, ignore_nonexistent) => {
  var collected = {};
  function visit(version) {
    if (collected[version]) return;
    if (!self.T[version]) {
      if (ignore_nonexistent) return;
      throw Error(`The version ${version} no existo`);
    }
    collected[version] = true;
    for (var parent of Object.keys(self.T[version])) visit(parent);
  }
  for (var v of Object.keys(versions)) visit(v);
  return collected;
};
/// # json_crdt.descendants(versions, ignore_nonexistent=false)
///
/// Gather `versions` and all their descendants into a set. `versions` is a set of versions, i.e. a map with version-keys and values of true – we'll basically return a larger set. If `ignore_nonexistent` is `true`, then we won't throw an exception if we encounter a version that we don't have in our data-structure.
///
/// ``` js
/// json_crdt.descendants({
/// alice12: true,
/// bob10: true
/// })
/// ```
// Collect `versions` together with all of their descendants (via the
// inverted child map) into a set. Unknown versions throw unless
// `ignore_nonexistent` is truthy, in which case they are skipped.
self.descendants = (versions, ignore_nonexistent) => {
  let children = self.get_child_map();
  var collected = {};
  function visit(version) {
    if (collected[version]) return;
    if (!self.T[version]) {
      if (ignore_nonexistent) return;
      throw Error(`The version ${version} no existo`);
    }
    collected[version] = true;
    for (var child of Object.keys(children[version] || {})) visit(child);
  }
  for (var v of Object.keys(versions)) visit(v);
  return collected;
};
/// # json_crdt.get_leaves(versions)
///
/// Returns a set of versions from `versions` which don't also have a child in `versions`. `versions` is itself a set of versions, represented as an object with version keys and `true` values, and the return value is represented the same way.
// Return the subset of `versions` that no other member of `versions`
// descends from: start with everything, then knock out every parent.
self.get_leaves = (versions) => {
  var result = { ...versions };
  for (var v of Object.keys(versions)) {
    for (var parent of Object.keys(self.T[v])) delete result[parent];
  }
  return result;
};
/// # json_crdt.parse_patch(patch)
///
/// Takes a patch in the form `{range, content}`, and returns an object of the form `{path: [...], [slice: [...]], [delete: true], content}`; basically calling `parse_json_path` on `patch.range`, and adding `patch.content` along for the ride.
// Parse a {range, content} patch: decode the range via parse_json_path and
// carry the content along as `value`.
self.parse_patch = (patch) => {
  var parsed = self.parse_json_path(patch.range);
  parsed.value = patch.content;
  return parsed;
};
/// # json_crdt.parse_json_path(json_path)
///
/// Parses the string `json_path` into an object like: `{path: [...], [slice: [...]], [delete: true]}`.
///
/// * `a.b[3]` --> `{path: ['a', 'b', 3]}`
/// * `a.b[3:5]` --> `{path: ['a', 'b'], slice: [3, 5]}`
/// * `delete a.b` --> `{path: ['a', 'b'], delete: true}`
///
/// ``` js
/// console.log(json_crdt.parse_json_path('a.b.c'))
/// ```
// Tokenize a JSON-path string into {path: [...], [slice], [delete]}.
// Matches, in order: a leading "delete " keyword, bare .key segments,
// [n] / [n:m] numeric indices and slices, and ["quoted"] keys.
self.parse_json_path = (json_path) => {
  var result = { path: [] };
  var re =
    /^(delete)\s+|\.?([^\.\[ =]+)|\[((\-?\d+)(:\-?\d+)?|"(\\"|[^"])*")\]/g;
  for (var m = re.exec(json_path); m; m = re.exec(json_path)) {
    if (m[1]) result.delete = true;
    else if (m[2]) result.path.push(m[2]);
    else if (m[3] && m[5])
      result.slice = [JSON.parse(m[4]), JSON.parse(m[5].substr(1))];
    else if (m[3]) result.path.push(JSON.parse(m[3]));
  }
  return result;
};
return self;
};
/// # sequence_crdt.create_node(version, elems, [end_cap, sort_key])
///
/// Creates a node for a `sequence_crdt` sequence CRDT with the given properties. The resulting node will look like this:
///
/// ``` js
/// {
/// version, // globally unique string
/// elems, // a string or array representing actual data elements of the underlying sequence
/// end_cap, // this is useful for dealing with replace operations
/// sort_key, // version to pretend this is for the purposes of sorting
/// deleted_by : {}, // if this node gets deleted, we'll mark it here
/// nexts : [], // array of nodes following this one
/// next : null // final node following this one (after all the nexts)
/// }
///
/// var sequence_node = sequence_crdt.create_node('alice1', 'hello')
/// ```
// Build a fresh sequence node: no branches (nexts), no continuation
// (next), and nothing deleted yet. Property order matches the historical
// literal so serialized nodes are unchanged.
sequence_crdt.create_node = (version, elems, end_cap, sort_key) => {
  var node = {
    version: version,
    sort_key: sort_key,
    elems: elems,
    end_cap: end_cap,
    deleted_by: {},
    nexts: [],
    next: null,
  };
  return node;
};
/// # sequence_crdt.generate_braid(root_node, version, is_anc)
///
/// Reconstructs an array of splice-information which can be passed to `sequence_crdt.add_version` in order to add `version` to another `sequence_crdt` instance – the returned array looks like: `[[insert_pos, delete_count, insert_elems, sort_key], ...]`. `is_anc` is a function which accepts a version string and returns `true` if and only if the given version is an ancestor of `version` (i.e. a version which the author of `version` knew about when they created that version).
///
/// ``` js
/// var root_node = sequence_crdt.create_node('alice1', 'hello')
/// console.log(sequence_crdt.generate_braid(root_node, 'alice1', x => false)) // outputs [0, 0, "hello"]
/// ```
sequence_crdt.generate_braid = (S, version, is_anc, read_array_elements) => {
  if (!read_array_elements) read_array_elements = (x) => x;
  // Splices accumulate as [offset, delete_count, insert, sort_key, tag].
  // The internal 5th element tags each splice "i" (plain insert), "r"
  // (replace: insert behind an end_cap) or "d" (delete), and is only used
  // to decide whether adjacent splices may be merged.
  var splices = [];
  function add_ins(offset, ins, sort_key, end_cap, is_row_header) {
    if (typeof ins !== "string")
      ins = ins.map((x) => read_array_elements(x, () => false));
    // Try to extend the previous splice instead of starting a new one.
    if (splices.length > 0) {
      var prev = splices[splices.length - 1];
      if (
        prev[0] + prev[1] === offset &&
        !end_cap &&
        (!is_row_header || prev[3] == sort_key) &&
        (prev[4] === "i" || (prev[4] === "r" && prev[1] === 0))
      ) {
        prev[2] = prev[2].concat(ins);
        return;
      }
    }
    splices.push([offset, 0, ins, sort_key, end_cap ? "r" : "i"]);
  }
  function add_del(offset, del, ins) {
    // Fold contiguous deletions into the previous non-insert splice.
    if (splices.length > 0) {
      var prev = splices[splices.length - 1];
      if (prev[0] + prev[1] === offset && prev[4] !== "i") {
        prev[1] += del;
        return;
      }
    }
    splices.push([offset, del, ins, null, "d"]);
  }
  // Walk the tree: nodes authored by `version` become inserts, nodes
  // deleted by `version` become deletes, and `offset` counts only elements
  // visible to `version`'s ancestors.
  var offset = 0;
  function helper(node, _version, end_cap, is_row_header) {
    if (_version === version) {
      add_ins(
        offset,
        node.elems.slice(0),
        node.sort_key,
        end_cap,
        is_row_header
      );
    } else if (node.deleted_by[version] && node.elems.length > 0) {
      add_del(offset, node.elems.length, node.elems.slice(0, 0));
    }
    if (
      (!_version || is_anc(_version)) &&
      !Object.keys(node.deleted_by).some(is_anc)
    ) {
      offset += node.elems.length;
    }
    node.nexts.forEach((next) =>
      helper(next, next.version, node.end_cap, true)
    );
    if (node.next) helper(node.next, _version);
  }
  helper(S, null);
  splices.forEach((s) => {
    // if we have replaces with 0 deletes,
    // make them have at least 1 delete..
    // this can happen when there are multiple replaces of the same text,
    // and our code above will associate those deletes with only one of them
    if (s[4] === "r" && s[1] === 0) s[1] = 1;
  });
  return splices;
};
/// # sequence_crdt.apply_bubbles(root_node, to_bubble)
///
/// This method helps prune away meta data and compress stuff when we have determined that certain versions can be renamed to other versions – these renamings are expressed in `to_bubble`, where keys are versions and values are "bubbles", each bubble is represented with an array of two elements, the first element is the "bottom" of the bubble, and the second element is the "top" of the bubble. We will use the "bottom" as the new name for the version, and we'll use the "top" as the new parents.
///
/// ``` js
/// sequence_crdt.apply_bubbles(root_node, {
/// alice4: ['bob5', 'alice4'],
/// bob5: ['bob5', 'alice4']
/// })
/// ```
sequence_crdt.apply_bubbles = (S, to_bubble) => {
  // Pass 1: rename each node's version (and deleted_by entries) to the
  // bottom of its bubble; the old version is preserved as a sort_key so
  // ordering between sibling branches is unchanged.
  sequence_crdt.traverse(
    S,
    () => true,
    (node) => {
      if (
        to_bubble[node.version] &&
        to_bubble[node.version][0] != node.version
      ) {
        if (!node.sort_key) node.sort_key = node.version;
        node.version = to_bubble[node.version][0];
      }
      for (var x of Object.keys(node.deleted_by)) {
        if (to_bubble[x]) {
          delete node.deleted_by[x];
          node.deleted_by[to_bubble[x][0]] = true;
        }
      }
    },
    true
  );
  // Append `next` after the last link of `node`'s next-chain.
  function set_nnnext(node, next) {
    while (node.next) node = node.next;
    node.next = next;
  }
  do_line(S, S.version);
  // Pass 2: walk a "line" (a node plus its next-chain), splicing in
  // branches that now share the line's version, erasing elements deleted
  // by that version, and merging or skipping redundant neighbor nodes.
  function do_line(node, version) {
    var prev = null;
    while (node) {
      // Branches authored by this same (renamed) version can be folded
      // inline into the main chain.
      if (node.nexts[0] && node.nexts[0].version == version) {
        for (let i = 0; i < node.nexts.length; i++) {
          delete node.nexts[i].version;
          delete node.nexts[i].sort_key;
          set_nnnext(
            node.nexts[i],
            i + 1 < node.nexts.length ? node.nexts[i + 1] : node.next
          );
        }
        node.next = node.nexts[0];
        node.nexts = [];
      }
      // A node deleted by its own line's version drops its contents;
      // back up to `prev` so the merge rules below get another chance.
      if (node.deleted_by[version]) {
        node.elems = node.elems.slice(0, 0);
        node.deleted_by = {};
        if (prev) {
          node = prev;
          continue;
        }
      }
      var next = node.next;
      // Merge `node` with `next` when they are indistinguishable: no
      // branch point between them and either one is empty or both carry
      // identical deleted_by sets.
      if (
        !node.nexts.length &&
        next &&
        (!node.elems.length ||
          !next.elems.length ||
          (Object.keys(node.deleted_by).every((x) => next.deleted_by[x]) &&
            Object.keys(next.deleted_by).every((x) => node.deleted_by[x])))
      ) {
        if (!node.elems.length) node.deleted_by = next.deleted_by;
        node.elems = node.elems.concat(next.elems);
        node.end_cap = next.end_cap;
        node.nexts = next.nexts;
        node.next = next.next;
        continue;
      }
      // Skip an empty next node that has no branches of its own.
      if (next && !next.elems.length && !next.nexts.length) {
        node.next = next.next;
        continue;
      }
      for (let n of node.nexts) do_line(n, n.version);
      prev = node;
      node = next;
    }
  }
};
/// # sequence_crdt.get(root_node, i, is_anc)
///
/// Returns the element at the `i`th position (0-based) in the `sequence_crdt` rooted at `root_node`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// var x = sequence_crdt.get(root_node, 2, {
/// alice1: true
/// })
/// ```
// Return the element at visible position `i` (0-based) in the sequence
// rooted at S, counting only nodes whose versions pass `is_anc`
// (defaults to all). Returns null when i is past the end.
sequence_crdt.get = (S, i, is_anc) => {
  var result = null;
  var seen = 0;
  sequence_crdt.traverse(S, is_anc || (() => true), (node) => {
    var local = i - seen;
    if (local < node.elems.length) {
      result = node.elems[local];
      return false; // found it: abort the traversal
    }
    seen += node.elems.length;
  });
  return result;
};
/// # sequence_crdt.set(root_node, i, v, is_anc)
///
/// Sets the element at the `i`th position (0-based) in the `sequence_crdt` rooted at `root_node` to the value `v`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// sequence_crdt.set(root_node, 2, 'x', {
/// alice1: true
/// })
/// ```
// Overwrite the element at visible position `i` (0-based) with `v`,
// counting only nodes whose versions pass `is_anc` (defaults to all).
// String nodes are rebuilt around the replaced character; array nodes are
// assigned in place.
sequence_crdt.set = (S, i, v, is_anc) => {
  var seen = 0;
  sequence_crdt.traverse(S, is_anc || (() => true), (node) => {
    var local = i - seen;
    if (local < node.elems.length) {
      if (typeof node.elems == "string") {
        node.elems =
          node.elems.slice(0, local) + v + node.elems.slice(local + 1);
      } else {
        node.elems[local] = v;
      }
      return false; // done: abort the traversal
    }
    seen += node.elems.length;
  });
};
/// # sequence_crdt.length(root_node, is_anc)
///
/// Returns the length of the `sequence_crdt` rooted at `root_node`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// console.log(sequence_crdt.length(root_node, {
/// alice1: true
/// }))
/// ```
// Count the visible elements in the sequence rooted at S, considering
// only nodes whose versions pass `is_anc` (defaults to all).
sequence_crdt.length = (S, is_anc) => {
  var total = 0;
  sequence_crdt.traverse(S, is_anc || (() => true), (node) => {
    total += node.elems.length;
  });
  return total;
};
/// # sequence_crdt.break_node(node, break_position, end_cap, new_next)
///
/// This method breaks apart a `sequence_crdt` node into two nodes, each representing a subsequence of the sequence represented by the original node. The `node` parameter is modified into the first node, and the second node is returned. The first node represents the elements of the sequence before `break_position`, and the second node represents the rest of the elements. If `end_cap` is truthy, then the first node will have `end_cap` set – this is generally done if the elements in the second node are being replaced. This method will add `new_next` to the first node's `nexts` array.
///
/// ``` js
/// var node = sequence_crdt.create_node('alice1', 'hello') // node.elems == 'hello'
/// var second = sequence_crdt.break_node(node, 2) // now node.elems == 'he', and second.elems == 'llo'
/// ```
// Split `node` at element index `x`: `node` keeps elems[0..x) and the
// returned tail node takes the rest, inheriting the original deleted_by
// set, nexts and next. `end_cap` becomes the first half's end_cap (used
// when the tail is being replaced), and `new_next`, when given, becomes
// the first half's sole branch.
sequence_crdt.break_node = (node, x, end_cap, new_next) => {
  var second_half = sequence_crdt.create_node(
    null,
    node.elems.slice(x),
    node.end_cap
  );
  Object.assign(second_half.deleted_by, node.deleted_by);
  second_half.nexts = node.nexts;
  second_half.next = node.next;
  node.elems = node.elems.slice(0, x);
  node.end_cap = end_cap;
  node.nexts = new_next ? [new_next] : [];
  node.next = second_half;
  return second_half;
};
/// # sequence_crdt.add_version(root_node, version, splices, [is_anc])
///
/// This is the main method in sequence_crdt, used to modify the sequence. The modification must be given a unique `version` string, and the modification itself is represented as an array of `splices`, where each splice looks like this: `[position, num_elements_to_delete, elements_to_insert, optional_sort_key]`.
///
/// Note that all positions are relative to the original sequence, before any splices have been applied. Positions are counted by only considering nodes with versions which result in `true` when passed to `is_anc`. (and are not `deleted_by` any versions which return `true` when passed to `is_anc`).
///
/// ``` js
/// var node = sequence_crdt.create_node('alice1', 'hello')
/// sequence_crdt.add_version(node, 'alice2', [[5, 0, ' world']], null, v => v == 'alice1')
/// ```
sequence_crdt.add_version = (S, version, splices, is_anc) => {
  // Splices re-expressed against the all-versions view of the sequence;
  // returned so callers can report what effectively changed after merging.
  var rebased_splices = [];
  // Insert `to` among `nexts`, keeping siblings ordered by
  // sort_key-or-version.
  function add_to_nexts(nexts, to) {
    var i = binarySearch(nexts, function (x) {
      if ((to.sort_key || to.version) < (x.sort_key || x.version)) return -1;
      if ((to.sort_key || to.version) > (x.sort_key || x.version)) return 1;
      return 0;
    });
    nexts.splice(i, 0, to);
  }
  var si = 0; // index of the splice currently being applied
  var delete_up_to = 0; // absolute offset through which deletion extends
  var process_patch = (node, offset, has_nexts, prev, _version, deleted) => {
    var s = splices[si];
    if (!s) return;
    var sort_key = s[3];
    if (deleted) {
      // Pure insert landing on the boundary of an already-deleted node.
      if (s[1] == 0 && s[0] == offset) {
        if (node.elems.length == 0 && !node.end_cap && has_nexts) return;
        var new_node = sequence_crdt.create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        if (node.elems.length == 0 && !node.end_cap)
          add_to_nexts(node.nexts, new_node);
        else sequence_crdt.break_node(node, 0, undefined, new_node);
        si++;
      }
      // Replace landing on a deleted, end-capped node with live content
      // following it: attach the replacement as a sibling branch.
      if (
        delete_up_to <= offset &&
        s[1] &&
        s[2] &&
        s[0] == offset &&
        node.end_cap &&
        !has_nexts &&
        (node.next && node.next.elems.length) &&
        !Object.keys(node.next.deleted_by).some((version) => f(version))
      ) {
        delete_up_to = s[0] + s[1];
        var new_node = sequence_crdt.create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        add_to_nexts(node.nexts, new_node);
      }
      return;
    }
    // Pure insert into a visible node; d is the distance from the node's
    // end to the insert position.
    if (s[1] == 0) {
      var d = s[0] - (offset + node.elems.length);
      if (d > 0) return;
      if (d == 0 && !node.end_cap && has_nexts) return;
      var new_node = sequence_crdt.create_node(version, s[2], null, sort_key);
      fresh_nodes.add(new_node);
      if (d == 0 && !node.end_cap) {
        add_to_nexts(node.nexts, new_node);
      } else {
        sequence_crdt.break_node(node, s[0] - offset, undefined, new_node);
      }
      si++;
      return;
    }
    // Start of a delete/replace range.
    if (delete_up_to <= offset) {
      var d = s[0] - (offset + node.elems.length);
      let add_at_end =
        d == 0 &&
        s[2] &&
        node.end_cap &&
        !has_nexts &&
        (node.next && node.next.elems.length) &&
        !Object.keys(node.next.deleted_by).some((version) => f(version));
      if (d > 0 || (d == 0 && !add_at_end)) return;
      delete_up_to = s[0] + s[1];
      if (s[2]) {
        // Replacement content goes in as a new node at the break point.
        var new_node = sequence_crdt.create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        if (add_at_end) {
          add_to_nexts(node.nexts, new_node);
        } else {
          sequence_crdt.break_node(node, s[0] - offset, true, new_node);
        }
        return;
      } else {
        if (s[0] == offset) {
        } else {
          // Split so the deletion starts exactly at s[0].
          sequence_crdt.break_node(node, s[0] - offset);
          return;
        }
      }
    }
    // Continue (and possibly finish) marking the active delete range.
    if (delete_up_to > offset) {
      if (delete_up_to <= offset + node.elems.length) {
        if (delete_up_to < offset + node.elems.length) {
          sequence_crdt.break_node(node, delete_up_to - offset);
        }
        si++;
      }
      node.deleted_by[version] = true;
      return;
    }
  };
  var f = is_anc || (() => true);
  var offset = 0; // position counting only nodes visible to is_anc
  var rebase_offset = 0; // position counting every non-deleted node
  let fresh_nodes = new Set(); // nodes created by this call
  function traverse(node, prev, version) {
    if (!version || f(version)) {
      var has_nexts = node.nexts.find((next) => f(next.version));
      var deleted = Object.keys(node.deleted_by).some((version) =>
        f(version)
      );
      let rebase_deleted = Object.keys(node.deleted_by).length;
      process_patch(node, offset, has_nexts, prev, version, deleted);
      if (!deleted) offset += node.elems.length;
      // Node was alive before this call and just got deleted by it:
      // record a rebased deletion.
      if (!rebase_deleted && Object.keys(node.deleted_by).length)
        rebased_splices.push([rebase_offset, node.elems.length, ""]);
    }
    // Node inserted by this call: record a rebased insertion.
    if (fresh_nodes.has(node))
      rebased_splices.push([rebase_offset, 0, node.elems]);
    if (!Object.keys(node.deleted_by).length)
      rebase_offset += node.elems.length;
    for (var next of node.nexts) traverse(next, null, next.version);
    if (node.next) traverse(node.next, node, version);
  }
  traverse(S, null, S.version);
  return rebased_splices;
};
/// # sequence_crdt.traverse(root_node, is_anc, callback, [view_deleted, tail_callback])
///
/// Traverses the subset of nodes in the tree rooted at `root_node` whose versions return `true` when passed to `is_anc`. For each node, `callback` is called with these parameters: `node, offset, has_nexts, prev, version, deleted`,
///
/// Where
/// - `node` is the current node being traversed
/// - `offset` says how many elements we have passed so far
/// - `has_nexts` is true if some of this node's `nexts` will be traversed according to `is_anc`
/// - `prev` is a pointer to the node whos `next` points to this one, or `null` if this is the root node
/// - `version` is the version of this node, or this node's `prev` if our version is `null`, or that node's `prev` if it is also `null`, etc
/// - `deleted` is true if this node is deleted according to `is_anc`
///
/// Usually we skip deleted nodes when traversing, but we'll include them if `view_deleted` is `true`.
///
/// `tail_callback` is an optional callback that will get called with a single parameter `node` after all of that node's children `nexts` and `next` have been traversed.
///
/// ``` js
/// sequence_crdt.traverse(node, () => true, node =>
/// process.stdout.write(node.elems))
/// ```
// Walk the sequence tree rooted at S, visiting only nodes whose versions
// pass `f`. `cb` receives (node, offset, has_nexts, prev, version,
// deleted); a loose-equality `false` return aborts the whole walk.
// Deleted nodes are skipped unless `view_deleted` is truthy; `tail_cb`,
// if given, fires on each node whose `next` chain has ended.
sequence_crdt.traverse = (S, f, cb, view_deleted, tail_cb) => {
  var offset = 0;
  // Returns truthy when the traversal should be aborted.
  function walk(node, prev, version) {
    var has_nexts = node.nexts.find((n) => f(n.version));
    var deleted = Object.keys(node.deleted_by).some((v) => f(v));
    if (view_deleted || !deleted) {
      if (cb(node, offset, has_nexts, prev, version, deleted) == false)
        return true;
      offset += node.elems.length;
    }
    for (var branch of node.nexts) {
      if (!f(branch.version)) continue;
      if (walk(branch, null, branch.version)) return true;
    }
    if (!node.next) {
      if (tail_cb) tail_cb(node);
      return;
    }
    // A continuation node inherits the current line's version.
    return walk(node.next, node, version);
  }
  walk(S, null, S.version);
};
// modified from https://stackoverflow.com/questions/22697936/binary-search-in-javascript
// Returns the index of the element for which compare_fn yields 0, or —
// when no such element exists — the insertion index that would keep the
// array ordered. compare_fn(x) > 0 means "the target lies after x".
function binarySearch(ar, compare_fn) {
  var lo = 0;
  var hi = ar.length - 1;
  while (lo <= hi) {
    var mid = (lo + hi) >> 1;
    var cmp = compare_fn(ar[mid]);
    if (cmp > 0) lo = mid + 1;
    else if (cmp < 0) hi = mid - 1;
    else return mid;
  }
  return lo;
}
})();
// CommonJS export of the library's three public entry points; in a
// browser (where there is no `module` global) they simply remain as
// bindings in the enclosing scope.
if (typeof module != "undefined")
  module.exports = {
    create_antimatter_crdt,
    create_json_crdt,
    sequence_crdt,
  };
================================================
FILE: antimatter_ts/doc.html
================================================
================================================
FILE: antimatter_ts/package.json
================================================
{
"name": "@braidjs/antimatter",
"version": "0.0.23",
"description": "antimatter: a pruning algorithm for CRDTs and other mergeables",
"main": "antimatter.js",
"scripts": {
"test": "node test.js"
},
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org/antimatter",
"packageManager": "pnpm@9.0.4+sha256.caa915eaae9d9aefccf50ee8aeda25a2f8684d8f9d5c6e367eaf176d97c1f89e",
"dependencies": {
"typescript": "^5.6.2"
}
}
================================================
FILE: antimatter_ts/random002.js
================================================
// the next two functions added by me
// Builds a deterministic [0,1) PRNG from an optional seed: a string seed
// is fed into the twister character-code by character-code, a numeric seed
// is passed straight through, and no seed falls back to the twister's own
// default (time-based) seeding.
function create_rand(seed) {
  var twister
  if (typeof seed == 'string') {
    twister = new MersenneTwister(0)
    var codes = []
    for (var i = 0; i < seed.length; i++)
      codes.push(seed.charCodeAt(i))
    twister.init_by_array(codes, codes.length)
  } else if (typeof seed == 'number') {
    twister = new MersenneTwister(seed)
  } else {
    twister = new MersenneTwister()
  }
  return function () { return twister.random() }
}
// Replaces Math.random with a seeded, reproducible generator built by
// create_rand (global monkey-patch; affects all subsequent callers).
Math.randomSeed = function (seed) {
  var rand = create_rand(seed)
  Math.random = function () { return rand() }
}
/* The following piece of code is an implementation of MersenneTwister object
taken from https://gist.github.com/banksean/300494, with one method
xor_array(array, size) added.
*/
/*
I've wrapped Makoto Matsumoto and Takuji Nishimura's code in a namespace
so it's better encapsulated. Now you can have multiple random number generators
and they won't stomp all over eachother's state.
If you want to use this as a substitute for Math.random(), use the random()
method like so:
var m = new MersenneTwister();
var randomNumber = m.random();
You can also call the other genrand_{foo}() methods on the instance.
If you want to use a specific seed in order to get a repeatable random
sequence, pass an integer into the constructor:
var m = new MersenneTwister(123);
and that will always produce the same random sequence.
Sean McCullough (banksean@gmail.com)
*/
/*
A C-program for MT19937, with initialization improved 2002/1/26.
Coded by Takuji Nishimura and Makoto Matsumoto.
Before using, initialize the state by using init_genrand(seed)
or init_by_array(init_key, key_length).
Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Any feedback is very welcome.
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
*/
/* Mersenne Twister (MT19937) state container. Falls back to the current
   wall-clock time when no seed is supplied, then seeds via init_genrand. */
var MersenneTwister = function(seed) {
  if (seed == undefined) seed = new Date().getTime();
  /* Period parameters */
  this.N = 624;
  this.M = 397;
  this.MATRIX_A = 0x9908b0df;   /* constant vector a */
  this.UPPER_MASK = 0x80000000; /* most significant w-r bits */
  this.LOWER_MASK = 0x7fffffff; /* least significant r bits */
  this.mt = new Array(this.N);  /* the state vector */
  this.mti = this.N + 1;        /* mti==N+1 means mt[N] is not initialized */
  this.init_genrand(seed);
}
/* initializes mt[N] with a seed */
/* NOTE: restored from the canonical MT19937 source — the for-loop header
   and the first line of its body had been corrupted (mangled into
   `for (this.mti=1; this.mti>> 30);`, a syntax error) by what looks like
   an HTML-tag stripper eating the `<this.N ...>` span. */
MersenneTwister.prototype.init_genrand = function(s) {
  this.mt[0] = s >>> 0;
  for (this.mti = 1; this.mti < this.N; this.mti++) {
    var s = this.mt[this.mti-1] ^ (this.mt[this.mti-1] >>> 30);
    this.mt[this.mti] = (((((s & 0xffff0000) >>> 16) * 1812433253) << 16) + (s & 0x0000ffff) * 1812433253)
      + this.mti;
    /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
    /* In the previous versions, MSBs of the seed affect   */
    /* only MSBs of the array mt[].                        */
    /* 2002/01/09 modified by Makoto Matsumoto             */
    this.mt[this.mti] >>>= 0;
    /* for >32 bit machines */
  }
}
/* initialize by an array with array-length */
/* init_key is the array for initializing keys */
/* key_length is its length */
/* slight change for C++, 2004/2/26 */
MersenneTwister.prototype.init_by_array = function(init_key, key_length) {
var i, j, k;
this.init_genrand(19650218);
i=1; j=0;
/* run max(N, key_length) steps so every key word and every state word
   gets mixed at least once */
k = (this.N>key_length ? this.N : key_length);
for (; k; k--) {
var s = this.mt[i-1] ^ (this.mt[i-1] >>> 30)
this.mt[i] = (this.mt[i] ^ (((((s & 0xffff0000) >>> 16) * 1664525) << 16) + ((s & 0x0000ffff) * 1664525)))
+ init_key[j] + j; /* non linear */
this.mt[i] >>>= 0; /* for WORDSIZE > 32 machines */
i++; j++;
if (i>=this.N) { this.mt[0] = this.mt[this.N-1]; i=1; }
if (j>=key_length) j=0;
}
/* second pass: N-1 additional mixing steps without the key */
for (k=this.N-1; k; k--) {
var s = this.mt[i-1] ^ (this.mt[i-1] >>> 30);
this.mt[i] = (this.mt[i] ^ (((((s & 0xffff0000) >>> 16) * 1566083941) << 16) + (s & 0x0000ffff) * 1566083941))
- i; /* non linear */
this.mt[i] >>>= 0; /* for WORDSIZE > 32 machines */
i++;
if (i>=this.N) { this.mt[0] = this.mt[this.N-1]; i=1; }
}
this.mt[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
}
/* XORs the internal state array mt with xor_key, cycling the key so it
   covers all N state words (key_length is xor_key's usable length).
   Non-standard extension to the original MT19937 code. */
MersenneTwister.prototype.xor_array = function(xor_key, key_length) {
  var j = 0;
  for (var i = 0; i < this.N; i++) {
    this.mt[i] = (this.mt[i] ^ xor_key[j]) >>> 0;
    j = (j + 1) % key_length;
  }
}
/* generates a random number on [0,0xffffffff]-interval */
/* NOTE: restored from the canonical MT19937 source — both generation
   for-loop headers had been corrupted (e.g. `for (kk=0;kk>> 1) ...`,
   a syntax error) by what looks like an HTML-tag stripper eating the
   `<this.N ...>` spans. */
MersenneTwister.prototype.genrand_int32 = function() {
  var y;
  var mag01 = new Array(0x0, this.MATRIX_A);
  /* mag01[x] = x * MATRIX_A for x=0,1 */
  if (this.mti >= this.N) { /* generate N words at one time */
    var kk;
    if (this.mti == this.N+1)  /* if init_genrand() has not been called, */
      this.init_genrand(5489); /* a default initial seed is used */
    for (kk = 0; kk < this.N - this.M; kk++) {
      y = (this.mt[kk]&this.UPPER_MASK)|(this.mt[kk+1]&this.LOWER_MASK);
      this.mt[kk] = this.mt[kk+this.M] ^ (y >>> 1) ^ mag01[y & 0x1];
    }
    for (; kk < this.N - 1; kk++) {
      y = (this.mt[kk]&this.UPPER_MASK)|(this.mt[kk+1]&this.LOWER_MASK);
      this.mt[kk] = this.mt[kk+(this.M-this.N)] ^ (y >>> 1) ^ mag01[y & 0x1];
    }
    y = (this.mt[this.N-1]&this.UPPER_MASK)|(this.mt[0]&this.LOWER_MASK);
    this.mt[this.N-1] = this.mt[this.M-1] ^ (y >>> 1) ^ mag01[y & 0x1];
    this.mti = 0;
  }
  y = this.mt[this.mti++];
  /* Tempering */
  y ^= (y >>> 11);
  y ^= (y << 7) & 0x9d2c5680;
  y ^= (y << 15) & 0xefc60000;
  y ^= (y >>> 18);
  return y >>> 0;
}
/* generates a random number on [0,0x7fffffff]-interval */
MersenneTwister.prototype.genrand_int31 = function() {
  var word = this.genrand_int32();
  return word >>> 1; /* drop the low bit to fit 31 bits */
}
/* generates a random number on the closed [0,1] real interval */
MersenneTwister.prototype.genrand_real1 = function() {
  var scale = 1.0/4294967295.0; /* 1/(2^32-1), so 0xffffffff maps to 1.0 */
  return this.genrand_int32() * scale;
}
/* generates a random number on the half-open [0,1) real interval
   (same contract as Math.random) */
MersenneTwister.prototype.random = function() {
  var scale = 1.0/4294967296.0; /* 1/2^32 */
  return this.genrand_int32() * scale;
}
/* generates a random number on the open (0,1) real interval */
MersenneTwister.prototype.genrand_real3 = function() {
  var scale = 1.0/4294967296.0; /* 1/2^32; +0.5 keeps endpoints excluded */
  return (this.genrand_int32() + 0.5) * scale;
}
/* generates a random number on [0,1) with full 53-bit resolution,
   combining two 32-bit draws (due to Isaku Wada, 2002/01/09) */
MersenneTwister.prototype.genrand_res53 = function() {
  var hi = this.genrand_int32() >>> 5; /* top 27 bits */
  var lo = this.genrand_int32() >>> 6; /* top 26 bits */
  return (hi*67108864.0 + lo) * (1.0/9007199254740992.0); /* /2^53 */
}
/* These real versions are due to Isaku Wada, 2002/01/09 added */
================================================
FILE: antimatter_ts/readme.md
================================================
# antimatter: an algorithm that prunes CRDT/OT history
[Antimatter](https://braid.org/antimatter) is the world's first peer-to-peer synchronization algorithm that can prune its history in a network where peers disconnect, reconnect, and merge offline edits. Antimatter supports arbitrary simultaneous edits, from arbitrary peers, under arbitrary network delays and partitions, and guarantees full CRDT/OT consistency, while pruning unnecessary history within each partitioned subnet, and across subnets once they reconnect. In steady state, it prunes down to zero overhead. This lets you put synchronizing data structures in more parts of your software, without worrying about memory overhead.
This package implements an antimatter peer composed of three objects:
```js
var {create_antimatter_crdt, create_json_crdt, sequence_crdt} = require('@braidjs/antimatter')
```
- *antimatter_crdt*: created using `create_antimatter_crdt`, this object is a json_crdt with antimatter algorithm methods added to it so that it can communicate with other peers to learn which history can be pruned, and tells the underlying json_crdt object to prune it.
- *json_crdt*: created using `create_json_crdt`, this object is a pruneable JSON CRDT — "JSON" meaning it represents an arbitrary JSON data structure, and "CRDT" and "pruneable" having the same meaning as for sequence_crdt below. The json_crdt makes recursive use of sequence_crdt structures to represent arbitrary JSON (for instance, a map is represented with a sequence_crdt structure for each value, where the first element in the sequence is the value).
- *sequence_crdt*: methods to manipulate a pruneable sequence CRDT — "sequence" meaning it represents a javascript string or array, "CRDT" meaning this structure can be merged with other ones, and "pruneable" meaning that it supports an operation to remove meta-data when it is no longer needed (whereas CRDT's often keep track of this meta-data forever).
The Antimatter Algorithm was invented by Michael Toomim and Greg Little in the
[Braid Project](https://braid.org) of [Invisible College](https://invisible.college/).
[Click here to see more details, and the API side-by-side with the source code.](https://braid.org/antimatter)
================================================
FILE: antimatter_ts/src/antimatter_crdt.ts
================================================
/// # Software Architecture
/// The software is architected into three objects:
///
/// ``` js
/// let {create_antimatter_crdt, create_json_crdt, sequence_crdt} = require('@braidjs/antimatter')
/// ```
import { create_json_crdt } from "./json_crdt.ts";
// v522
/// - *antimatter_crdt*: created using `create_antimatter_crdt`, this object is a json_crdt with antimatter algorithm methods added to it so that it can communicate with other peers to learn which history can be pruned, and tells the underlying json_crdt object to prune it.
export let create_antimatter_crdt;
/// # create_antimatter_crdt(send[, init])
///
/// Creates and returns a new antimatter_crdt object (or adds antimatter_crdt methods and properties to `init`).
///
/// * `send`: A callback function to be called whenever this antimatter_crdt wants to send a
/// message over a connection registered with `get` or `connect`. The sole
/// parameter to this function is a JSONafiable object that hopes to be passed to
/// the `receive` method on the antimatter_crdt object at the other end of the
/// connection specified in the `conn` key.
/// * `get_time`: function that returns a number representing time (e.g. `Date.now()`)
/// * `set_timeout`: function that takes a callback and timeout length, and calls that callback after that amount of time; also returns an identifier that can be passed to `clear_timeout` to cancel the timeout (e.g. wrapping the javascript setTimeout)
/// * `clear_timeout`: function that takes a timeout identifier and cancels it (e.g. wrapping the javascript clearTimeout)
/// * `init`: (optional) An antimatter_crdt object to start with, which we'll add any properties to that it doesn't have, and we'll add all the antimatter_crdt methods to it. This option exists so you can serialize an antimatter_crdt instance as JSON, and then restore it later.
/// ``` js
/// let antimatter_crdt = create_antimatter_crdt(msg => {
/// websockets[msg.conn].send(JSON.stringify(msg))
/// },
/// () => Date.now(),
/// (func, t) => setTimeout(func, t),
/// (t) => clearTimeout(t)),
///   JSON.parse(fs.readFileSync('./antimatter.backup'))
/// )
/// ```
create_antimatter_crdt = (
send,
get_time,
set_timeout,
clear_timeout,
self
) => {
self = create_json_crdt(self);
self.send = send;
// Peer identity and per-connection bookkeeping. Every field below is
// preserved when `self` was restored from a JSON backup (the `|| default`
// pattern only fills in what is missing).
self.id = self.id || Math.random().toString(36).slice(2);
self.next_seq = self.next_seq || 0;
self.conns = self.conns || {};
self.proto_conns = self.proto_conns || {};
self.conn_count = self.conn_count || 0;
// Fissure records and pruning/ack state.
self.fissures = self.fissures || {};
self.acked_boundary = self.acked_boundary || {};
self.marcos = self.marcos || {};
self.forget_cbs = self.forget_cbs || {};
self.version_groups = self.version_groups || {};
self.marco_map = self.marco_map || {};
// Marco round-trip time estimates and wait-time pacing (presumably
// milliseconds, matching Date.now()-style get_time — TODO confirm).
self.marco_time_est_1 = self.marco_time_est_1 || 1000;
self.marco_time_est_2 = self.marco_time_est_2 || 1000;
self.marco_current_wait_time = self.marco_current_wait_time || 1000;
self.marco_increases_allowed = 1;
self.marco_timeout = self.marco_timeout || null;
// Merges the given versions — plus every member of any group those
// versions already belong to — into one sorted version group, records
// that group in self.version_groups for each member, and returns it.
function raw_add_version_group(version_array) {
  let members = {};
  for (let v of version_array) {
    if (members[v]) continue;
    members[v] = true;
    let existing = self.version_groups[v];
    if (existing) for (let vv of existing) members[vv] = true;
  }
  let group = Object.keys(members).sort();
  for (let v of group) self.version_groups[v] = group;
  return group;
}
// Scans the version DAG for "diamond" regions: groups of two-or-more
// sibling versions that all share exactly the same parent set and exactly
// the same child set. Returns { parent_sets, child_sets }, each mapping a
// member version to a shared container { members } (singleton sets are
// never recorded). `children` is the child map from self.get_child_map().
function get_parent_and_child_sets(children) {
let parent_sets = {};
let child_sets = {};
let done = {};
// Registers the set `s` in `sets` under each of its members; sets with
// fewer than two members are skipped.
function add_set_to_sets(s, sets, mark_done) {
let container = { members: s };
let array = Object.keys(s);
if (array.length < 2) return;
for (let v of array) {
sets[v] = container;
if (mark_done) done[v] = true;
}
}
// The current frontier always counts as a parent set.
add_set_to_sets(self.current_version, parent_sets, true);
for (let v of Object.keys(self.T)) {
if (done[v]) continue;
done[v] = true;
if (!children[v]) continue;
let first_child_set = children[v];
let first_child_array = Object.keys(first_child_set);
let first_parent_set = self.T[first_child_array[0]];
let first_parent_array = Object.keys(first_parent_set);
// Accept only if every child has exactly this parent set AND every
// parent has exactly this child set (a clean bipartite diamond).
if (
first_child_array.every((child) => {
let parent_set = self.T[child];
let parent_array = Object.keys(parent_set);
return (
parent_array.length == first_parent_array.length &&
parent_array.every((parent) => first_parent_set[parent])
);
}) &&
first_parent_array.every((parent) => {
let child_set = children[parent];
let child_array = Object.keys(child_set);
return (
child_array.length == first_child_array.length &&
child_array.every((child) => first_child_set[child])
);
})
) {
add_set_to_sets(first_parent_set, parent_sets, true);
add_set_to_sets(first_child_set, child_sets);
}
}
return { parent_sets, child_sets };
}
// Walks upward from the version-set `bottom` through the parent DAG
// looking for a "bubble" top: a single version — or an equivalent sibling
// group from `child_sets` — that dominates everything reachable from
// `bottom`, so the whole region can be collapsed. Returns a
// {version: true, ...} set for the top, or null if none was found.
// With `restricted` given, the search stops at restricted versions (or at
// versions missing from self.T) and returns the last top found so far;
// without it, a version missing from self.T throws.
// FIX: `cur` was assigned without a declaration (an implicit global) —
// that is a compile error in TypeScript and a ReferenceError in strict
// (module) code; it is now a proper block-scoped local.
function find_one_bubble(bottom, children, child_sets, restricted) {
  let expecting = { ...bottom };
  let seen = {};
  // Pre-mark the children of the bottom set as visited, so the bottom
  // versions themselves are immediately eligible.
  Object.keys(bottom).forEach(
    (v) =>
      children[v] &&
      Object.keys(children[v]).forEach((v) => (seen[v] = true))
  );
  let q = Object.keys(expecting);
  let last_top = null;
  while (q.length) {
    let cur = q.shift();
    if (!self.T[cur]) {
      if (!restricted) throw "bad"; // kept as a string throw for compatibility
      else return last_top;
    }
    if (restricted && restricted[cur]) return last_top;
    if (seen[cur]) continue;
    // Visit a version only after all of its children have been visited.
    if (children[cur] && !Object.keys(children[cur]).every((c) => seen[c]))
      continue;
    seen[cur] = true;
    delete expecting[cur];
    if (!Object.keys(expecting).length) {
      // Everything expected so far is dominated by `cur` alone.
      last_top = { [cur]: true };
      if (!restricted) return last_top;
    }
    Object.keys(self.T[cur]).forEach((p) => {
      expecting[p] = true;
      q.push(p);
    });
    // A fully-seen sibling group whose pending set matches our parents
    // exactly can act as a collective bubble top.
    if (
      child_sets[cur] &&
      Object.keys(child_sets[cur].members).every((v) => seen[v])
    ) {
      let expecting_array = Object.keys(expecting);
      let parent_set = self.T[cur];
      let parent_array = Object.keys(parent_set);
      if (
        expecting_array.length == parent_array.length &&
        expecting_array.every((v) => parent_set[v])
      ) {
        last_top = child_sets[cur].members;
        if (!restricted) return last_top;
      }
    }
  }
  return last_top;
}
// Registers `version_array` as a single version group, then — if any of
// those versions already exist in the time DAG — finds the bubble they
// enclose and asks the underlying json_crdt to collapse it via
// apply_bubbles. Returns the canonical (first, sorted) group member.
function add_version_group(version_array) {
let version_group = raw_add_version_group(version_array);
if (!version_array.some((x) => self.T[x])) return version_group[0];
let children = self.get_child_map();
let { parent_sets, child_sets } = get_parent_and_child_sets(children);
let to_bubble = {};
// Marks v and all of its ancestors as belonging to `bubble`.
function mark_bubble(v, bubble) {
if (to_bubble[v]) return;
to_bubble[v] = bubble;
for (let vv of Object.keys(self.T[v])) mark_bubble(vv, bubble);
}
let bottom = Object.fromEntries(
version_group.filter((x) => self.T[x]).map((x) => [x, true])
);
// NOTE(review): assumes find_one_bubble always returns a non-null top
// here (no `restricted` argument, so it either returns or throws) —
// confirm `bottom` is always reachable to a common dominator.
let top = find_one_bubble(bottom, children, child_sets);
let bubble = [Object.keys(bottom).sort()[0], Object.keys(top)[0]];
for (let v of Object.keys(top)) to_bubble[v] = bubble;
for (let v of Object.keys(bottom)) mark_bubble(v, bubble);
self.apply_bubbles(to_bubble);
return version_group[0];
}
// Wrap the outgoing `send` so that every message is rewritten in terms of
// version groups before it leaves this peer: a grouped version id is
// replaced by its whole group (an array), and `parents` / per-version
// parent sets are expanded to include all members of any group they touch.
let orig_send = send;
send = (x) => {
if (self.version_groups[x.version])
x.version = self.version_groups[x.version];
if (x.parents) {
// Copy before mutating so the caller's object is left intact.
x.parents = { ...x.parents };
Object.keys(x.parents).forEach((v) =>
self.version_groups[v] && self.version_groups[v].forEach((v) => (x.parents[v] = true))
);
}
if (Array.isArray(x.versions)) {
// Deep-copy the braid-format version list before rewriting it.
x.versions = JSON.parse(JSON.stringify(x.versions));
x.versions.forEach(
(v) =>
self.version_groups[v.version] &&
(v.version = self.version_groups[v.version])
);
x.versions.forEach((v) => {
Object.keys(v.parents).forEach((vv) =>
self.version_groups[vv] && self.version_groups[vv].forEach((vv) => (v.parents[vv] = true))
);
});
}
orig_send(x);
};
/// # antimatter_crdt.receive(message)
///
/// Let this antimatter object "receive" a message from another antimatter object, presumably from its `send` callback.
/// ``` js
/// websocket.on('message', data => {
/// antimatter_crdt.receive(JSON.parse(data)) });
/// ```
/// You generally do not need to mess with a message object directly, but below are the various message objects you might see, categorized by their `cmd` entry. Note that each object also
/// contains a `conn` entry with the id of the connection the message is sent
/// over.
self.receive = (x) => {
let {
cmd,
version,
parents,
patches,
versions,
fissure,
fissures,
seen,
forget,
marco,
peer,
conn,
} = x;
if (version && typeof version != "string") {
if (!self.T[version[0]]) version = add_version_group(version);
else version = version[0];
}
if (parents) {
parents = { ...parents };
Object.keys(parents).forEach((v) => {
if (self.version_groups[v] && self.version_groups[v][0] != v)
delete parents[v];
});
}
if (versions && versions.forEach) versions.forEach((v) => {
if (typeof v.version != "string") {
if (!self.T[v.version[0]]) v.version = add_version_group(v.version);
else v.version = v.version[0];
}
v.parents = { ...v.parents };
Object.keys(v.parents).forEach((vv) => {
if (self.version_groups[vv] && self.version_groups[vv][0] != vv)
delete v.parents[vv];
});
});
let marco_versions_array = version
? [version]
: versions && !Array.isArray(versions)
? Object.keys(versions).sort()
: null;
let marco_versions =
marco_versions_array &&
Object.fromEntries(marco_versions_array.map((v) => [v, true]));
if (versions && !Array.isArray(versions)) {
versions = { ...versions };
Object.keys(versions).forEach((v) => {
if (self.version_groups[v] && self.version_groups[v][0] != v)
delete versions[v];
});
if (!Object.keys(versions).length) return;
}
/// ## message `get`
/// `get` is the first message sent over a connection, and the peer at the other end will respond with `welcome`.
/// ``` js
/// { cmd: 'get',
/// peer: 'SENDER_ID',
/// conn: 'CONN_ID',
/// parents: {'PARENT_VERSION_ID': true, ...} }
/// ```
/// The `parents` are optional, and describes which versions this peer already has. The other end will respond with versions since that set of parents.
if (cmd == "get" || (cmd == "welcome" && peer != null)) {
if (self.conns[conn] != null) throw Error("bad");
self.conns[conn] = { peer, seq: ++self.conn_count };
}
/// ## message `fissure`
///
/// Sent to alert peers about a fissure. The `fissure` entry contains information about the two peers involved in the fissure, the specific connection id that broke, the `versions` that need to be protected, and the `time` of the fissure (in case we want to ignore it after some time). It is also possible to send multiple `fissures` in an array.
/// ``` js
/// { cmd: 'fissure',
/// fissure: { // or fissures: [{...}, {...}, ...],
/// a: 'PEER_A_ID',
/// b: 'PEER_B_ID',
/// conn: 'CONN_ID',
/// versions: {'VERSION_ID': true, ...},
/// time: Date.now()
/// },
/// conn: 'CONN_ID' }
/// ```
/// Note that `time` isn't used for anything critical, as it's just wallclock time.
if (fissure) fissures = [fissure];
if (fissures) fissures.forEach((f) => (f.t = self.conn_count));
if (versions && (cmd == "set" || cmd == "welcome"))
versions = Object.fromEntries(versions.map((v) => [v.version, v]));
if (version) versions = { [version]: true };
let rebased_patches = [];
let fissures_back = [];
let fissures_forward = [];
let fissures_done = {};
// Deep-copies an array of fissures for the wire, stripping the
// peer-local ordering field `t` (which must never be sent).
function copy_fissures(fs) {
  return fs.map((fissure) => {
    let copy = JSON.parse(JSON.stringify(fissure));
    delete copy.t;
    return copy;
  });
}
if (fissures) {
let fiss_map = Object.fromEntries(
fissures.map((f) => [f.a + ":" + f.b + ":" + f.conn, f])
);
for (let [key, f] of Object.entries(fiss_map)) {
if (fissures_done[f.conn]) continue;
fissures_done[f.conn] = true;
let our_f = self.fissures[key];
let other_key = f.b + ":" + f.a + ":" + f.conn;
let their_other = fiss_map[other_key];
let our_other = self.fissures[other_key];
if (!our_f) self.fissures[key] = f;
if (their_other && !our_other) self.fissures[other_key] = their_other;
if (!their_other && !our_other && f.b == self.id) {
if (self.conns[f.conn]) delete self.conns[f.conn];
our_other = self.fissures[other_key] = {
...f,
a: f.b,
b: f.a,
t: self.conn_count,
};
}
if (!their_other && our_other) {
fissures_back.push(f);
fissures_back.push(our_other);
}
if (!our_f || (their_other && !our_other)) {
fissures_forward.push(f);
if (their_other || our_other)
fissures_forward.push(their_other || our_other);
}
}
}
/// ## message `welcome`
/// Sent in response to a `get`, basically contains the initial state of the document; incoming `welcome` messages are also propagated over all our other connections but only with information that was new to us, so the propagation will eventually stop. When sent in response to a `get` (rather than being propagated), we include a `peer` entry with the id of the sending peer, so they know who we are, and to trigger them to send us their own `welcome` message.
///
/// ``` js
/// {
/// cmd: 'welcome',
/// versions: [
/// //each version looks like a set message...
/// ],
/// fissures: [
/// //each fissure looks as it would in a fissure message...
/// ],
/// parents:
/// {
/// //versions you must have before consuming these new versions
/// 'PARENT_VERSION_ID': true,
/// ...
/// },
/// [peer: 'SENDER_ID'], // if responding to a get
/// conn: 'CONN_ID'
/// }
/// ```
let _T = {};
let added_versions = [];
if (cmd == "welcome") {
let versions_to_add = {};
let vs = Object.values(versions);
vs.forEach((v) => (versions_to_add[v.version] = v.parents));
vs.forEach((v) => {
if (
self.T[v.version] ||
(self.version_groups[v.version] &&
self.version_groups[v.version][0] != v.version)
) {
remove_ancestors(v.version);
function remove_ancestors(v) {
if (versions_to_add[v]) {
Object.keys(versions_to_add[v]).forEach(remove_ancestors);
delete versions_to_add[v];
}
}
}
});
for (let v of vs) _T[v.version] = v.parents;
l1: for (let v of vs) {
if (versions_to_add[v.version]) {
let ps = Object.keys(v.parents);
if (!ps.length && Object.keys(self.T).length) continue;
for (p of ps) if (!self.T[p]) continue l1;
rebased_patches = rebased_patches.concat(
self.add_version(v.version, v.parents, v.patches, v.sort_keys)
);
added_versions.push(v);
delete _T[v.version];
}
}
}
if (cmd == "get" || (cmd == "welcome" && peer != null)) {
let fissures_back = Object.values(self.fissures);
if (cmd == "welcome") {
let leaves = { ..._T };
Object.keys(_T).forEach((v) => {
Object.keys(_T[v]).forEach((p) => delete leaves[p]);
});
let f = {
a: self.id,
b: peer,
conn: "-" + conn,
versions: Object.fromEntries(
added_versions
.concat(Object.keys(leaves).map((v) => versions[v]))
.map((v) => [v.version, true])
),
time: get_time(),
t: self.conn_count,
};
if (Object.keys(f.versions).length) {
let key = f.a + ":" + f.b + ":" + f.conn;
self.fissures[key] = f;
fissures_back.push(f);
fissures_forward.push(f);
}
}
send({
cmd: "welcome",
versions: self.generate_braid(parents || versions),
fissures: copy_fissures(fissures_back),
parents:
parents &&
Object.keys(parents).length &&
self.get_leaves(self.ancestors(parents, true)),
...(cmd == "get" ? { peer: self.id } : {}),
conn,
});
} else if (fissures_back.length) {
send({
cmd: "fissure",
fissures: copy_fissures(fissures_back),
conn,
});
}
/// ## message `forget`
/// Used to disconnect without creating a fissure, presumably meaning the sending peer doesn't plan to make any edits while they're disconnected.
/// ``` js
/// {cmd: 'forget', conn: 'CONN_ID'}
/// ```
if (cmd == "forget") {
if (self.conns[conn] == null) throw Error("bad");
send({ cmd: "ack", forget: true, conn });
delete self.conns[conn];
delete self.proto_conns[conn];
}
/// ## message forget `ack`
/// Sent in response to `forget`.. so they know we forgot them.
/// ``` js
/// {cmd: 'ack', forget: true, conn: 'CONN_ID'}
/// ```
if (cmd == "ack" && forget) {
self.forget_cbs[conn]();
}
/// ## message `set`
/// Sent to alert peers about a change in the document. The change is represented as a version, with a unique id, a set of parent versions (the most recent versions known before adding this version), and an array of patches, where the offsets in the patches do not take into account the application of other patches in the same array.
/// ``` js
/// { cmd: 'set',
/// version: 'VERSION_ID',
/// parents: {'PARENT_VERSION_ID': true, ...},
/// patches: [ {range: '.json.path.a.b', content: 42}, ... ],
/// conn: 'CONN_ID' }
/// ```
if (cmd == "set") {
if (conn == null || !self.T[version]) {
let ps = Object.keys(parents);
if (!ps.length && Object.keys(self.T).length) return;
for (p of ps) if (!self.T[p]) return;
rebased_patches = self.add_version(version, parents, patches);
for (let c of Object.keys(self.conns))
if (c != conn)
send({ cmd: "set", version, parents, patches, marco, conn: c });
}
}
/// ## message `marco`
/// Sent for pruning purposes, to try and establish whether everyone has seen the most recent versions. Note that a `set` message is treated as a `marco` message for the version being set.
/// ``` js
/// { cmd: 'marco',
/// version: 'MARCO_ID',
/// versions: {'VERSION_ID_A': true, ...},
/// conn: 'CONN_ID' }
/// ```
if (cmd == "marco" || cmd == "set") {
if (!Object.keys(versions).every((v) => self.T[v])) return;
if (
self.marco_timeout &&
marco_versions_array.length ==
Object.keys(self.current_version).length &&
marco_versions_array.every((x) => self.current_version[x])
) {
clear_timeout(self.marco_timeout);
self.marco_timeout = null;
}
let m = self.marcos[marco];
if (!m) {
m = self.marcos[marco] = {
id: marco,
origin: conn,
count: Object.keys(self.conns).length - (conn != null ? 1 : 0),
versions: marco_versions,
seq: self.conn_count,
time: get_time(),
};
m.orig_count = m.count;
m.real_marco = cmd == "marco";
m.key = JSON.stringify(Object.keys(m.versions).sort());
self.marco_map[m.key] = self.marco_map[m.key] || {};
let before = Object.keys(self.marco_map[m.key]).length;
self.marco_map[m.key][m.id] = true;
let after = Object.keys(self.marco_map[m.key]).length;
if (before == 1 && after == 2 && self.marco_increases_allowed > 0) {
self.marco_current_wait_time *= 2;
self.marco_increases_allowed--;
}
if (cmd == "marco")
for (let c of Object.keys(self.conns))
if (c != conn)
send({
cmd: "marco",
marco,
versions: marco_versions,
conn: c,
});
} else if (m.seq < self.conns[conn].seq) {
send({
cmd: "ack",
seen: "local",
marco,
versions: marco_versions,
conn,
});
return;
} else m.count--;
check_marco_count(marco);
}
/// ## message local `ack`
/// Sent in response to `set`, but not right away; a peer will first send the `set` over all its other connections, and only after they have all responded with a local `ack` – and we didn't see a `fissure` message while waiting – will the peer send a local `ack` over the originating connection.
/// ``` js
/// {cmd: 'ack', seen: 'local', version: 'VERSION_ID', conn: 'CONN_ID'}
/// ```
if (cmd == "ack" && seen == "local") {
let m = self.marcos[marco];
if (!m || m.cancelled) return;
m.count--;
check_marco_count(marco);
}
// Called whenever a marco's outstanding-ack count may have hit zero.
// When it does (and the marco wasn't cancelled), updates the round-trip
// time estimate, then either forwards a local ack back toward the
// marco's originating connection, or — if we originated it — promotes
// it to a global ack via add_full_ack_leaves.
// NOTE: reads `marco_versions` from the enclosing receive() invocation.
function check_marco_count(marco) {
let m = self.marcos[marco];
if (m && m.count === 0 && !m.cancelled) {
m.time2 = get_time();
if (m.orig_count > 0) {
// Exponentially-weighted moving average of the local-ack round trip.
let t = m.time2 - m.time;
let weight = 0.1;
self.marco_time_est_1 =
weight * t + (1 - weight) * self.marco_time_est_1;
}
if (m.origin != null) {
// Reply only if the originating connection still exists.
if (self.conns[m.origin])
send({
cmd: "ack",
seen: "local",
marco,
versions: marco_versions,
conn: m.origin,
});
} else add_full_ack_leaves(marco);
}
}
/// ## message global `ack`
/// Sent after an originating peer has received a local `ack` over all its connections, or after any peer receives a global `ack`, so that everyone may come to know that this version has been seen by everyone in this peer group.
/// ``` js
/// {cmd: 'ack', seen: 'global', version: 'VERSION_ID', conn: 'CONN_ID'}
/// ```
if (cmd == "ack" && seen == "global") {
let m = self.marcos[marco];
if (!m || m.cancelled) return;
let t = get_time() - m.time2;
let weight = 0.1;
self.marco_time_est_2 =
weight * t + (1 - weight) * self.marco_time_est_2;
if (m.real_marco && Object.keys(self.marco_map[m.key]).length == 1) {
self.marco_current_wait_time *= 0.8;
}
add_full_ack_leaves(marco, conn);
}
// A marco has been globally acknowledged: cancel it, propagate the global
// ack to every other connection that predates the marco, move the acked
// boundary down to the marco's versions (clearing any ancestor entries),
// and attempt a prune. `conn` is the connection the global ack arrived on
// (undefined when we originated the marco ourselves).
// NOTE: reads `marco_versions` from the enclosing receive() invocation.
function add_full_ack_leaves(marco, conn) {
let m = self.marcos[marco];
if (!m || m.cancelled) return;
m.cancelled = true;
for (let [c, cc] of Object.entries(self.conns))
if (c != conn && cc.seq <= m.seq)
send({
cmd: "ack",
seen: "global",
marco,
versions: marco_versions,
conn: c,
});
for (let v of Object.keys(m.versions)) {
if (!self.T[v]) continue;
// Remove all ancestors of v from the acked boundary, then make v
// itself the boundary.
let marks = {};
let f = (v) => {
if (!marks[v]) {
marks[v] = true;
delete self.acked_boundary[v];
Object.keys(self.T[v]).forEach(f);
}
};
f(v);
self.acked_boundary[v] = true;
}
prune(false, m.seq);
}
// Relay newly-learned versions (as a "welcome") or forwarded fissures to
// every connection other than the one this message arrived on.
if (added_versions.length || fissures_forward.length) {
    for (let c of Object.keys(self.conns))
    if (c != conn)
    send({
    cmd: added_versions.length ? "welcome" : "fissure",
    ...(added_versions.length ? { versions: added_versions } : {}),
    fissures: copy_fissures(fissures_forward),
    conn: c,
    });
}
if (fissures_forward.length) resolve_fissures();
// If there is prunable work and no marco already scheduled, schedule one
// after a randomized wait so peers don't all fire marcos simultaneously.
if (
    !self.marco_timeout &&
    cmd != "set" &&
    cmd != "marco" &&
    prune(true)
) {
    if (!self.marco_current_wait_time) {
    // Seed the wait window from the ack round-trip estimates.
    self.marco_current_wait_time =
    4 * (self.marco_time_est_1 + self.marco_time_est_2);
    }
    let t = Math.random() * self.marco_current_wait_time;
    self.marco_timeout = set_timeout(() => {
    self.marco_increases_allowed = 1;
    self.marco_timeout = null;
    // Re-check: the prunable state may have changed while we waited.
    if (prune(true)) self.marco();
    }, t);
}
// A welcome we initiated ourselves (peer == null) that leaves prunable
// versions triggers an immediate marco.
if (cmd == "welcome" && peer == null && prune(true, null, true))
    self.marco();
return rebased_patches;
};
/// # antimatter_crdt.get(conn) or connect(conn)
///
/// Register a new connection with id `conn` – triggers this antimatter_crdt object to send a `get` message over the given connection.
///
/// ``` js
/// alice_antimatter_crdt.get('connection_to_bob')
/// ```
self.get = (conn) => {
    // Track this connection as pending until the peer's welcome arrives.
    self.proto_conns[conn] = true;
    // Ask whoever is on the other end for their current state.
    send({ conn, peer: self.id, cmd: "get" });
};
// `connect` is just an alias for `get`.
self.connect = self.get;
/// # antimatter_crdt.forget(conn)
///
/// Disconnect the given connection without creating a fissure – use this when we never intend to reconnect with that peer. If we might reconnect later, call `disconnect` instead, which creates a fissure that lets us reconcile when we do.
///
/// ``` js
/// alice_antimatter_crdt.forget('connection_to_bob')
/// ```
// Disconnect `conn` without creating a fissure (we never intend to resume
// with that peer). Resolves once the peer acknowledges the "forget" via
// forget_cbs, or immediately when the connection is not active.
//
// Bug fix: previously, when `conn` was not in self.conns, `done` was never
// called and the awaited Promise stayed pending forever.
self.forget = async (conn) => {
    await new Promise((done) => {
    if (self.conns[conn] != null) {
    // The peer's "forget" ack will invoke `done` via forget_cbs.
    self.forget_cbs[conn] = done;
    send({ cmd: "forget", conn });
    self.disconnect(conn, false);
    } else {
    // Nothing to wait for: clean up any proto-connection and resolve now.
    self.disconnect(conn, false);
    done();
    }
    });
};
/// # antimatter_crdt.disconnect(conn)
///
/// If we detect that a connection has closed, let the antimatter_crdt object know by calling this method with the given connection id – this will create a fissure so we can reconnect with whoever was on the other end of the connection later on.
///
/// ``` js
/// alice_antimatter_crdt.disconnect('connection_to_bob')
/// ```
// Tear down `conn`. When `fissure` is true (the default), record a fissure
// so we can reconcile with that peer if we ever reconnect.
self.disconnect = (conn, fissure = true) => {
    // Unknown connection and no pending proto-connection: nothing to do.
    if (self.conns[conn] == null && !self.proto_conns[conn]) return;
    delete self.proto_conns[conn];
    let entry = self.conns[conn];
    if (!entry) return;
    let peer = entry.peer;
    delete self.conns[conn];
    if (fissure) {
    // Only announce a fissure when there are unacked versions to protect.
    let f = create_fissure(peer, conn);
    if (f) self.receive({ cmd: "fissure", fissure: f });
    }
};
/// # antimatter_crdt.set(...patches)
///
/// Modify this antimatter_crdt object by applying the given patches. Each patch looks like `{range: '.life.meaning', content: 42}`. Calling this method will trigger calling the `send` callback to let our peers know about this change.
///
/// ``` js
/// antimatter_crdt.set({
/// range: '.life.meaning',
/// content: 42
/// })
/// ```
// Apply `patches` locally and broadcast them; returns the new version id.
self.set = (...patches) => {
    // Mint a fresh version id from our sequence counter and peer id.
    let version = `${self.next_seq++}@${self.id}`;
    // A random marco id rides along so peers can acknowledge this change.
    let marco = Math.random().toString(36).slice(2);
    self.receive({
    cmd: "set",
    version,
    parents: { ...self.current_version },
    patches,
    marco,
    });
    return version;
};
/// # antimatter_crdt.marco()
///
/// Initiate sending a `marco` message to try and establish whether certain versions can be pruned.
///
/// ``` js
/// antimatter_crdt.marco()
/// ```
// Kick off a marco round over our current leaves to probe whether those
// versions can be pruned; returns the marco id.
self.marco = () => {
    // Start from the current leaves, then pull in any grouped aliases.
    let versions = { ...self.current_version };
    for (let leaf of Object.keys(versions)) {
    let group = self.version_groups[leaf];
    if (group) for (let alias of group) versions[alias] = true;
    }
    let marco = Math.random().toString(36).slice(2);
    self.receive({ cmd: "marco", marco, versions });
    return marco;
};
// Flag every in-flight marco as cancelled so late acks are ignored.
function cancel_marcos() {
    Object.keys(self.marcos).forEach((k) => {
    self.marcos[k].cancelled = true;
    });
}
// Build a fissure record for a broken connection to `peer`, covering every
// version that is not yet fully acknowledged (plus the acked boundary
// itself). Returns undefined when there is nothing at risk.
function create_fissure(peer, conn) {
    let ack_versions = self.ancestors(self.acked_boundary);
    let versions = {};
    for (let v of Object.keys(self.T))
    if (!ack_versions[v] || self.acked_boundary[v]) versions[v] = true;
    if (!Object.keys(versions).length) return;
    return { a: self.id, b: peer, conn, versions, time: get_time() };
}
// When both halves of a fissure pair (A->B and B->A over the same conn) are
// present, the partition has healed: empty out both fissures' version sets
// and re-expose ("unfissure") those versions for acknowledgement again.
function resolve_fissures() {
    let unfissured = {};
    Object.entries(self.fissures).forEach(([fk, f]) => {
    // The matching fissure is keyed with the endpoints swapped.
    let other_key = f.b + ":" + f.a + ":" + f.conn;
    let other = self.fissures[other_key];
    if (other) {
    if (Object.keys(f.versions).length) {
    for (let v of Object.keys(f.versions)) unfissured[v] = true;
    self.fissures[fk] = { ...f, versions: {} };
    }
    if (Object.keys(other.versions).length) {
    for (let v of Object.keys(other.versions)) unfissured[v] = true;
    self.fissures[other_key] = { ...other, versions: {} };
    }
    }
    });
    if (Object.keys(unfissured).length) {
    // Pending marcos were computed against the old boundary — drop them.
    cancel_marcos();
    // Pull the acked boundary back below every unfissured version (and its
    // descendants) so those versions go through the ack cycle again.
    let ack_versions = self.ancestors(self.acked_boundary);
    let unfissured_descendants = self.descendants(unfissured, true);
    for (let un of Object.keys(unfissured_descendants))
    if (ack_versions[un]) delete ack_versions[un];
    self.acked_boundary = self.get_leaves(ack_versions);
    }
}
// Prune resolved fissures and collapse prunable version "bubbles".
// With just_checking=true, performs no mutation of fissures (works on a
// copy) and returns true as soon as any prunable work is found.
// `t` bounds which fissure pairs may be removed; `just_versions` restricts
// the just_checking early-exit to version bubbles only.
function prune(just_checking, t, just_versions) {
    if (just_checking) t = Infinity;
    let fissures = just_checking ? { ...self.fissures } : self.fissures;
    // Remove healed fissure pairs (both directions present).
    // NOTE(review): this matches on `.t`, but create_fissure stores the
    // timestamp in `.time` — `undefined <= t` is always false, so this
    // branch may never fire; confirm which field is intended.
    Object.entries(fissures).forEach((x) => {
    let other_key = x[1].b + ":" + x[1].a + ":" + x[1].conn;
    let other = fissures[other_key];
    if (other && x[1].t <= t && other.t <= t) {
    delete fissures[x[0]];
    delete fissures[other_key];
    }
    });
    // Expire fissures older than the configured lifetime.
    if (self.fissure_lifetime != null) {
    let now = get_time();
    Object.entries(fissures).forEach(([k, f]) => {
    if (f.time == null) f.time = now;
    if (f.time <= now - self.fissure_lifetime) {
    delete fissures[k];
    }
    });
    }
    // just_checking: any fissure removal already counts as prunable work.
    if (
    just_checking &&
    !just_versions &&
    Object.keys(fissures).length < Object.keys(self.fissures).length
    )
    return true;
    // Versions referenced by surviving fissures must not be bubbled away.
    let restricted = {};
    Object.values(fissures).forEach((f) => {
    Object.keys(f.versions).forEach((v) => (restricted[v] = true));
    });
    // When actually pruning, unacked versions are also off-limits.
    if (!just_checking) {
    let acked = self.ancestors(self.acked_boundary);
    Object.keys(self.T).forEach((x) => {
    if (!acked[x]) restricted[x] = true;
    });
    }
    let children = self.get_child_map();
    let { parent_sets, child_sets } = get_parent_and_child_sets(children);
    // to_bubble maps version -> [bottom, top]; bottom becomes the new name,
    // top supplies the new parents.
    let to_bubble = {};
    // Assign `bubble` to v and all of its ancestors not already assigned.
    function mark_bubble(v, bubble) {
    if (to_bubble[v]) return;
    to_bubble[v] = bubble;
    for (let vv of Object.keys(self.T[v])) mark_bubble(vv, bubble);
    }
    let visited = {};
    // DFS from the current leaves, trying first to bubble whole sibling
    // groups (parent_sets), then single versions.
    function f(cur) {
    if (!self.T[cur] || visited[cur]) return;
    visited[cur] = true;
    if (
    to_bubble[cur] == null &&
    parent_sets[cur] &&
    !parent_sets[cur].done
    ) {
    parent_sets[cur].done = true;
    let bottom = parent_sets[cur].members;
    let top = find_one_bubble(bottom, children, child_sets, restricted);
    if (top) {
    if (just_checking) return true;
    let bottom_array = Object.keys(bottom).sort();
    let top_array = Object.keys(top);
    // The bottom versions merge into one named version group.
    raw_add_version_group(bottom_array);
    let bubble = [bottom_array[0], top_array[0]];
    for (let v of top_array) to_bubble[v] = bubble;
    for (let v of bottom_array) mark_bubble(v, bubble);
    }
    }
    if (to_bubble[cur] == null) {
    // Try a single-version bubble rooted at cur.
    let top = find_one_bubble(
    { [cur]: true },
    children,
    child_sets,
    restricted
    );
    if (top && !top[cur]) {
    if (just_checking) return true;
    let bubble = [cur, Object.keys(top)[0]];
    for (let v of Object.keys(top)) to_bubble[v] = bubble;
    mark_bubble(bubble[0], bubble);
    } else {
    // No bubble: cur maps to itself.
    to_bubble[cur] = [cur, cur];
    }
    }
    // Recurse into parents; cur may have been renamed into a group already.
    return Object.keys(
    self.T[cur] || self.T[self.version_groups[cur][0]]
    ).some(f);
    }
    if (Object.keys(self.current_version).some(f) && just_checking)
    return true;
    self.apply_bubbles(to_bubble);
    // Drop marcos whose versions no longer exist after bubbling.
    for (let [k, m] of Object.entries(self.marcos)) {
    let vs = Object.keys(m.versions);
    if (
    !vs.length ||
    !vs.every((v) => self.T[v] || self.version_groups[v])
    ) {
    delete self.marcos[k];
    delete self.marco_map[m.key][m.id];
    if (!Object.keys(self.marco_map[m.key]).length)
    delete self.marco_map[m.key];
    }
    }
    // Drop version-group aliases whose canonical version is gone.
    for (let [v, vs] of Object.entries(self.version_groups)) {
    if (!self.T[vs[0]]) delete self.version_groups[v];
    }
}
return self;
};
================================================
FILE: antimatter_ts/src/json_crdt.ts
================================================
/// - *json_crdt*: created using `create_json_crdt`, this object is a pruneable
/// JSON CRDT — "JSON" meaning it represents an arbitrary JSON data structure, and
/// "CRDT" and "pruneable" having the same meaning as for sequence_crdt below. The
/// json_crdt makes recursive use of sequence_crdt structures to represent
/// arbitrary JSON (for instance, a map is represented with a sequence_crdt
/// structure for each value, where the first element in the sequence is the
/// value).
import {
create_node as sequence_crdt_create_node,
generate_braid as sequence_crdt_generate_braid,
apply_bubbles as sequence_crdt_apply_bubbles,
get as sequence_crdt_get,
set as sequence_crdt_set,
length as sequence_crdt_length,
break_node as sequence_crdt_break_node,
add_version as sequence_crdt_add_version,
traverse as sequence_crdt_traverse,
} from "./sequence_crdt.ts";
/// ## create_json_crdt([init])
///
/// Create a new `json_crdt` object (or start with `init`, and add stuff to that).
///
/// ``` js
/// let json_crdt = create_json_crdt()
/// ```
export const create_json_crdt = (self) => {
self = self || {};
// Default the core state, preserving anything supplied by the caller:
// S is the CRDT document tree, T the version DAG (version -> parent set).
self.S = self.S || null;
self.T = self.T || {};
// NOTE(review): root_version is reset unconditionally (not preserved like
// the fields above) — presumably re-derived by the first add_version;
// confirm this is correct when rehydrating saved state.
self.root_version = null;
self.current_version = self.current_version || {};
self.version_cache = self.version_cache || {};
// Literal helpers: a "lit" node wraps a plain JSON value with no metadata.
let is_lit = (x) => !x || typeof x != "object" || x.t == "lit";
let get_lit = (x) => (x && typeof x == "object" && x.t == "lit" ? x.S : x);
let make_lit = (x) => (x && typeof x == "object" ? { t: "lit", S: x } : x);
/// # json_crdt.read()
///
/// Returns an instance of the `json` object represented by this json_crdt data-structure.
///
/// ``` js
/// console.log(json_crdt.read())
/// ```
// Materialize the plain JSON value, considering only versions accepted by
// is_anc (defaults to "all versions").
self.read = (is_anc) => raw_read(self.S, is_anc || (() => true));
// Recursively materialize the plain JSON value represented by CRDT node x,
// considering only versions for which is_anc returns true.
function raw_read(x, is_anc) {
    if (x && typeof x == "object") {
    // "lit": a literal value — deep-copied so callers can't mutate our state.
    if (x.t == "lit") return JSON.parse(JSON.stringify(x.S));
    // "val": a single value stored as element 0 of a sequence CRDT.
    if (x.t == "val")
    return raw_read(sequence_crdt_get(x.S, 0, is_anc), is_anc);
    // "obj": one CRDT value per key; keys whose value reads as null are omitted.
    if (x.t == "obj") {
    let o = {};
    Object.entries(x.S).forEach(([k, v]) => {
    let x = raw_read(v, is_anc);
    if (x != null) o[k] = x;
    });
    return o;
    }
    // "arr": flatten the visible (non-deleted) elements of the sequence.
    if (x.t == "arr") {
    let a = [];
    sequence_crdt_traverse(
    x.S,
    is_anc,
    (node, _, __, ___, ____, deleted) => {
    if (!deleted)
    node.elems.forEach((e) => a.push(raw_read(e, is_anc)));
    },
    true
    );
    return a;
    }
    // "str": join the visible text chunks.
    if (x.t == "str") {
    let s = [];
    sequence_crdt_traverse(
    x.S,
    is_anc,
    (node, _, __, ___, ____, deleted) => {
    if (!deleted) s.push(node.elems);
    },
    true
    );
    return s.join("");
    }
    throw Error("bad");
    }
    // Primitives pass through unchanged.
    return x;
}
/// # json_crdt.generate_braid(versions)
///
/// Returns an array of `set` messages that each look like this: `{version, parents, patches, sort_keys}`, such that if we pass all these messages to `antimatter_crdt.receive()`, we'll reconstruct the data in this `json_crdt` data-structure, assuming the recipient already has the given `versions` (each version is represented as an object with a version, and each value is `true`).
///
/// ``` js
/// json_crdt.generate_braid({
/// alice2: true,
/// bob3: true
/// })
/// ```
// Reconstruct the set-messages needed to bring a recipient (who already has
// `versions` and their ancestors) up to date.
self.generate_braid = (versions) => {
    // Everything the recipient already knows can be skipped.
    let anc =
    versions && Object.keys(versions).length
    ? self.ancestors(versions, true)
    : {};
    let is_anc = (x) => anc[x];
    if (Object.keys(self.T).length === 0) return [];
    // Serve cached messages where available; rebuild (and cache) the rest.
    return Object.entries(self.version_cache)
    .filter((x) => !is_anc(x[0]))
    .map(([version, set_message]) => {
    return (self.version_cache[version] =
    set_message || generate_set_message(version));
    });
    // Rebuild the {version, parents, patches, sort_keys} message for one
    // version by diffing the underlying sequence CRDTs.
    function generate_set_message(version) {
    // A parentless version is a root: one patch replacing the whole doc.
    if (!Object.keys(self.T[version]).length) {
    return {
    version,
    parents: {},
    patches: [{ range: "", content: self.read((v) => v == version) }],
    };
    }
    let is_lit = (x) => !x || typeof x !== "object" || x.t === "lit";
    let get_lit = (x) =>
    x && typeof x === "object" && x.t === "lit" ? x.S : x;
    // This version's view of the document is its strict ancestors.
    let ancs = self.ancestors({ [version]: true });
    delete ancs[version];
    let is_anc = (x) => ancs[x];
    let path = [];
    let patches = [];
    let sort_keys = {};
    recurse(self.S);
    // Walk the CRDT collecting the splices this version introduced;
    // `path` tracks the JSON path to the current node.
    function recurse(x) {
    if (is_lit(x)) {
    } else if (x.t === "val") {
    // A "val" edit becomes a whole-value patch at the current path.
    sequence_crdt_generate_braid(x.S, version, is_anc, raw_read)
    .forEach((s) => {
    if (s[2].length) {
    patches.push({ range: path.join(""), content: s[2][0] });
    if (s[3]) sort_keys[patches.length - 1] = s[3];
    }
    });
    sequence_crdt_traverse(x.S, is_anc, (node) => {
    node.elems.forEach(recurse);
    });
    } else if (x.t === "arr") {
    // Array edits become range patches like path[from:to].
    sequence_crdt_generate_braid(x.S, version, is_anc).forEach((s) => {
    patches.push({
    range: `${path.join("")}[${s[0]}:${s[0] + s[1]}]`,
    content: s[2],
    });
    if (s[3]) sort_keys[patches.length - 1] = s[3];
    });
    let i = 0;
    sequence_crdt_traverse(x.S, is_anc, (node) => {
    node.elems.forEach((e) => {
    path.push(`[${i++}]`);
    recurse(e);
    path.pop();
    });
    });
    } else if (x.t === "obj") {
    Object.entries(x.S).forEach((e) => {
    path.push("[" + JSON.stringify(e[0]) + "]");
    recurse(e[1]);
    path.pop();
    });
    } else if (x.t === "str") {
    // String edits also become range patches.
    sequence_crdt_generate_braid(x.S, version, is_anc).forEach((s) => {
    patches.push({
    range: `${path.join("")}[${s[0]}:${s[0] + s[1]}]`,
    content: s[2],
    });
    if (s[3]) sort_keys[patches.length - 1] = s[3];
    });
    }
    }
    return {
    version,
    parents: { ...self.T[version] },
    patches,
    sort_keys,
    };
    }
};
/// # json_crdt.apply_bubbles(to_bubble)
///
/// This method helps prune away meta data and compress stuff when we have determined that certain versions can be renamed to other versions – these renamings are expressed in `to_bubble`, where keys are versions and values are "bubbles", each bubble is represented with an array of two elements, the first element is the "bottom" of the bubble, and the second element is the "top" of the bubble. We will use the "bottom" as the new name for the version, and we'll use the "top" as the new parents.
///
/// ``` js
/// json_crdt.apply_bubbles({
/// alice4: ['bob5', 'alice4'],
/// bob5: ['bob5', 'alice4']
/// })
/// ```
// Rename/collapse versions per `to_bubble` (version -> [bottom, top]):
// "bottom" is the version's new name, "top" supplies the new parents.
// Collapses CRDT nodes into plain literals wherever no metadata remains.
self.apply_bubbles = (to_bubble) => {
    // Pass 1: rewrite every sequence CRDT in the document tree under the
    // new names, simplifying nodes to literals where possible.
    function recurse(x) {
    if (is_lit(x)) return x;
    if (x.t == "val") {
    sequence_crdt_apply_bubbles(x.S, to_bubble);
    sequence_crdt_traverse(
    x.S,
    () => true,
    (node) => {
    node.elems = node.elems.slice(0, 1).map(recurse);
    },
    true
    );
    // A single-node, single-literal "val" degenerates to the literal.
    if (
    x.S.nexts.length == 0 &&
    !x.S.next &&
    x.S.elems.length == 1 &&
    is_lit(x.S.elems[0])
    )
    return x.S.elems[0];
    return x;
    }
    if (x.t == "arr") {
    sequence_crdt_apply_bubbles(x.S, to_bubble);
    sequence_crdt_traverse(
    x.S,
    () => true,
    (node) => {
    node.elems = node.elems.map(recurse);
    },
    true
    );
    // A single-node array of literals with no deletions becomes a literal.
    if (
    x.S.nexts.length == 0 &&
    !x.S.next &&
    x.S.elems.every(is_lit) &&
    !Object.keys(x.S.deleted_by).length
    )
    return { t: "lit", S: x.S.elems.map(get_lit) };
    return x;
    }
    if (x.t == "obj") {
    Object.entries(x.S).forEach((e) => {
    let y = (x.S[e[0]] = recurse(e[1]));
    if (y == null) delete x.S[e[0]];
    });
    if (Object.values(x.S).every(is_lit)) {
    let o = {};
    Object.entries(x.S).forEach((e) => (o[e[0]] = get_lit(e[1])));
    return { t: "lit", S: o };
    }
    return x;
    }
    if (x.t == "str") {
    sequence_crdt_apply_bubbles(x.S, to_bubble);
    // A single-node, undeleted string degenerates to the raw string.
    if (
    x.S.nexts.length == 0 &&
    !x.S.next &&
    !Object.keys(x.S.deleted_by).length
    )
    return x.S.elems;
    return x;
    }
    }
    self.S = recurse(self.S);
    // Pass 2: rename versions in the time DAG and drop bookkeeping for
    // versions that no longer exist under their old names.
    // NOTE(review): my_where_are_they_now / acked_boundary / fissures /
    // version_groups are supplied by the enclosing antimatter wrapper, not
    // initialized here — confirm when using json_crdt standalone.
    Object.entries(to_bubble).forEach(([version, bubble]) => {
    if (!self.T[version]) return;
    self.my_where_are_they_now[version] = bubble[0];
    // The bubble's top donates its parents to the renamed version.
    if (version === bubble[1]) self.T[bubble[0]] = self.T[bubble[1]];
    if (version !== bubble[0]) {
    if (self.root_version == version) self.root_version = bubble[0];
    delete self.T[version];
    delete self.version_cache[version];
    delete self.acked_boundary[version];
    delete self.current_version[version];
    // A group keyed by its canonical (first) member dissolves entirely.
    if (
    self.version_groups[version] &&
    self.version_groups[version][0] == version
    ) {
    for (let v of self.version_groups[version]) {
    delete self.version_groups[v];
    }
    }
    // Scrub the renamed version out of every parent set.
    for (let [k, parents] of Object.entries(self.T)) {
    self.T[k] = parents = { ...parents };
    for (let p of Object.keys(parents)) {
    if (p == version) delete parents[p];
    }
    }
    } else self.version_cache[version] = null;
    });
    // If we're down to one fully-acked leaf and no fissures, flatten the
    // whole document into a single literal snapshot.
    let leaves = Object.keys(self.current_version);
    let acked_boundary = Object.keys(self.acked_boundary);
    let fiss = Object.keys(self.fissures);
    if (
    leaves.length == 1 &&
    acked_boundary.length == 1 &&
    leaves[0] == acked_boundary[0] &&
    fiss.length == 0
    ) {
    self.T = { [leaves[0]]: {} };
    self.S = make_lit(self.read());
    }
};
/// # json_crdt.add_version(version, parents, patches[, sort_keys])
///
/// The main method for modifying a `json_crdt` data structure.
///
/// * `version`: Unique string associated with this edit.
/// * `parents`: A set of versions that this version is aware of, represented as a map with versions as keys, and values of `true`.
/// * `patches`: An array of patches, each patch looks like this `{range: '.life.meaning', content: 42}`.
/// * `sort_keys`: (optional) An object where each key is an index, and the value is a sort_key to use with the patch at the given index in the `patches` array – a sort_key overrides the version for a patch for the purposes of sorting. This can be useful after doing some pruning.
///
/// ``` js
/// json_crdt.add_version(
/// 'alice6',
/// {
/// alice5: true,
/// bob7: true
/// },
/// [
/// {
/// range: '.a.b',
/// content: 'c'
/// }
/// ]
/// )
/// ```
// Apply an edit: record `version` (with `parents`) in the DAG and splice
// `patches` into the document. Returns the patches rebased against the
// current state (for forwarding to local subscribers).
self.add_version = (version, parents, patches, sort_keys) => {
    // Ignore duplicates.
    if (self.T[version]) return;
    if (self.root_version == null) self.root_version = version;
    // Record the DAG edge and cache the raw message for generate_braid.
    self.T[version] = { ...parents };
    self.version_cache[version] = JSON.parse(
    JSON.stringify({
    version,
    parents,
    patches,
    sort_keys,
    })
    );
    // This version supersedes its parents among the current leaves.
    Object.keys(parents).forEach((k) => {
    if (self.current_version[k]) delete self.current_version[k];
    });
    self.current_version[version] = true;
    if (!sort_keys) sort_keys = {};
    // A parentless version wholesale-replaces the document with a literal.
    if (!Object.keys(parents).length) {
    let parse = self.parse_patch(patches[0]);
    self.S = make_lit(parse.value);
    return patches;
    }
    // is_anc decides which versions are visible while applying patches:
    // editing at the current frontier sees everything except the new version.
    let is_anc;
    if (parents == self.current_version) {
    is_anc = (_version) => _version != version;
    } else {
    let ancs = self.ancestors(parents);
    is_anc = (_version) => ancs[_version];
    }
    let rebased_patches = [];
    patches.forEach((patch, i) => {
    let sort_key = sort_keys[i];
    let parse = self.parse_patch(patch);
    let cur = resolve_path(parse);
    if (!parse.slice) {
    // Whole-value assignment: splice over the entire "val" sequence.
    if (cur.t != "val") throw Error("bad");
    let len = sequence_crdt_length(cur.S, is_anc);
    sequence_crdt_add_version(
    cur.S,
    version,
    [[0, len, [parse.delete ? null : make_lit(parse.value)], sort_key]],
    is_anc
    );
    rebased_patches.push(patch);
    } else {
    // Range splice: validate the content type against the target node.
    if (typeof parse.value === "string" && cur.t !== "str")
    throw Error(
    `Cannot splice string ${JSON.stringify(
    parse.value
    )} into non-string`
    );
    if (parse.value instanceof Array && cur.t !== "arr")
    throw Error(
    `Cannot splice array ${JSON.stringify(
    parse.value
    )} into non-array`
    );
    if (parse.value instanceof Array)
    parse.value = parse.value.map((x) => make_lit(x));
    // Negative indices (including -0) count back from the end.
    let r0 = parse.slice[0];
    let r1 = parse.slice[1];
    if (r0 < 0 || Object.is(r0, -0) || r1 < 0 || Object.is(r1, -0)) {
    let len = sequence_crdt_length(cur.S, is_anc);
    if (r0 < 0 || Object.is(r0, -0)) r0 = len + r0;
    if (r1 < 0 || Object.is(r1, -0)) r1 = len + r1;
    }
    let rebased_splices = sequence_crdt_add_version(
    cur.S,
    version,
    [[r0, r1 - r0, parse.value, sort_key]],
    is_anc
    );
    // Re-express the splices the CRDT actually performed as patches.
    for (let rebased_splice of rebased_splices)
    rebased_patches.push({
    range: `${parse.path
    .map((x) => `[${JSON.stringify(x)}]`)
    .join("")}[${rebased_splice[0]}:${rebased_splice[0] + rebased_splice[1]
    }]`,
    content: rebased_splice[2],
    });
    }
    });
    // Walk parse.path down the CRDT, upgrading literals to CRDT nodes along
    // the way so the final target can be spliced.
    function resolve_path(parse) {
    let cur = self.S;
    // The root must be a "val" node to be editable.
    if (!cur || typeof cur != "object" || cur.t == "lit")
    cur = self.S = {
    t: "val",
    S: sequence_crdt_create_node(self.root_version, [cur]),
    };
    // Track where `cur` lives in its parent so upgrades can be written back.
    let prev_S = null;
    let prev_i = 0;
    for (let i = 0; i < parse.path.length; i++) {
    let key = parse.path[i];
    if (cur.t == "val")
    cur = sequence_crdt_get((prev_S = cur.S), (prev_i = 0), is_anc);
    if (cur.t == "lit") {
    // Upgrade a literal container to a live "arr"/"obj" node in place.
    let new_cur = {};
    if (cur.S instanceof Array) {
    new_cur.t = "arr";
    new_cur.S = sequence_crdt_create_node(
    self.root_version,
    cur.S.map((x) => make_lit(x))
    );
    } else {
    if (typeof cur.S != "object") throw Error("bad");
    new_cur.t = "obj";
    new_cur.S = {};
    Object.entries(cur.S).forEach(
    (e) => (new_cur.S[e[0]] = make_lit(e[1]))
    );
    }
    cur = new_cur;
    sequence_crdt_set(prev_S, prev_i, cur, is_anc);
    }
    if (cur.t == "obj") {
    // Descend into (or create) the keyed slot as a "val" node.
    let x = cur.S[key];
    if (!x || typeof x != "object" || x.t == "lit")
    x = cur.S[key] = {
    t: "val",
    S: sequence_crdt_create_node(self.root_version, [
    x == null ? null : x,
    ]),
    };
    cur = x;
    } else if (i == parse.path.length - 1 && !parse.slice) {
    // Trailing numeric index with no slice: treat as a one-element slice.
    parse.slice = [key, key + 1];
    parse.value = cur.t == "str" ? parse.value : [parse.value];
    } else if (cur.t == "arr") {
    cur = sequence_crdt_get((prev_S = cur.S), (prev_i = key), is_anc);
    } else throw Error("bad");
    }
    if (parse.slice) {
    if (cur.t == "val")
    cur = sequence_crdt_get((prev_S = cur.S), (prev_i = 0), is_anc);
    if (typeof cur == "string") {
    // Upgrade a raw string to a "str" node so it can be spliced.
    cur = {
    t: "str",
    S: sequence_crdt_create_node(self.root_version, cur),
    };
    sequence_crdt_set(prev_S, prev_i, cur, is_anc);
    } else if (cur.t == "lit") {
    // Upgrade a literal array to an "arr" node.
    if (!(cur.S instanceof Array)) throw Error("bad");
    cur = {
    t: "arr",
    S: sequence_crdt_create_node(
    self.root_version,
    cur.S.map((x) => make_lit(x))
    ),
    };
    sequence_crdt_set(prev_S, prev_i, cur, is_anc);
    }
    }
    return cur;
    }
    return rebased_patches;
};
/// # json_crdt.get_child_map()
///
/// Returns a map where each key is a version, and each value is a set of child versions, represented as a map with versions as keys, and values of `true`.
///
/// ``` js
/// json_crdt.get_child_map()
/// ```
// Invert the parent relation in self.T: produce parent -> {child: true}.
self.get_child_map = () => {
    let children = {};
    for (let [child, parents] of Object.entries(self.T))
    for (let parent of Object.keys(parents)) {
    if (!children[parent]) children[parent] = {};
    children[parent][child] = true;
    }
    return children;
};
/// # json_crdt.ancestors(versions, ignore_nonexistent=false)
///
/// Gather `versions` and all their ancestors into a set. `versions` is a set of versions, i.e. a map with version-keys and values of true – we'll basically return a larger set. If `ignore_nonexistent` is `true`, then we won't throw an exception if we encounter a version that we don't have in our data-structure.
///
/// ``` js
/// json_crdt.ancestors({
/// alice12: true,
/// bob10: true
/// })
/// ```
// Collect `versions` plus all their ancestors into a set. Throws on a
// version missing from self.T unless ignore_nonexistent is true.
self.ancestors = (versions, ignore_nonexistent) => {
    let result = {};
    let visit = (version) => {
    if (result[version]) return;
    if (!self.T[version]) {
    if (ignore_nonexistent) return;
    throw Error(`The version ${version} no existo`);
    }
    result[version] = true;
    for (let parent of Object.keys(self.T[version])) visit(parent);
    };
    for (let v of Object.keys(versions)) visit(v);
    return result;
};
/// # json_crdt.descendants(versions, ignore_nonexistent=false)
///
/// Gather `versions` and all their descendants into a set. `versions` is a set of versions, i.e. a map with version-keys and values of true – we'll basically return a larger set. If `ignore_nonexistent` is `true`, then we won't throw an exception if we encounter a version that we don't have in our data-structure.
///
/// ``` js
/// json_crdt.descendants({
/// alice12: true,
/// bob10: true
/// })
/// ```
// Collect `versions` plus all their descendants into a set. Throws on a
// version missing from self.T unless ignore_nonexistent is true.
self.descendants = (versions, ignore_nonexistent) => {
    let children = self.get_child_map();
    let result = {};
    let visit = (version) => {
    if (result[version]) return;
    if (!self.T[version]) {
    if (ignore_nonexistent) return;
    throw Error(`The version ${version} no existo`);
    }
    result[version] = true;
    for (let child of Object.keys(children[version] || {})) visit(child);
    };
    for (let v of Object.keys(versions)) visit(v);
    return result;
};
/// # json_crdt.get_leaves(versions)
///
/// Returns a set of versions from `versions` which don't also have a child in `versions`. `versions` is itself a set of versions, represented as an object with version keys and `true` values, and the return value is represented the same way.
// Keep only versions with no child inside `versions`: start with the whole
// set and knock out anything that is some member's parent.
self.get_leaves = (versions) => {
    let leaves = { ...versions };
    for (let v of Object.keys(versions))
    for (let parent of Object.keys(self.T[v])) delete leaves[parent];
    return leaves;
};
/// # json_crdt.parse_patch(patch)
///
/// Takes a patch in the form `{range, content}`, and returns an object of the form `{path: [...], [slice: [...]], [delete: true], content}`; basically calling `parse_json_path` on `patch.range`, and adding `patch.content` along for the ride.
// Parse patch.range and carry patch.content along as `value`.
self.parse_patch = (patch) => {
    let parsed = self.parse_json_path(patch.range);
    parsed.value = patch.content;
    return parsed;
};
/// # json_crdt.parse_json_path(json_path)
///
/// Parses the string `json_path` into an object like: `{path: [...], [slice: [...]], [delete: true]}`.
///
/// * `a.b[3]` --> `{path: ['a', 'b', 3]}`
/// * `a.b[3:5]` --> `{path: ['a', 'b'], slice: [3, 5]}`
/// * `delete a.b` --> `{path: ['a', 'b'], delete: true}`
///
/// ``` js
/// console.log(json_crdt.parse_json_path('a.b.c'))
/// ```
// Parse a path like `a.b[3]`, `a.b[3:5]`, or `delete a.b` into
// {path: [...], [slice: [lo, hi]], [delete: true]}.
self.parse_json_path = (json_path) => {
    let ret = { path: [] };
    let re =
    /^(delete)\s+|\.?([^\.\[ =]+)|\[((\-?\d+)(:\-?\d+)?|"(\\"|[^"])*")\]/g;
    for (let m of json_path.matchAll(re)) {
    if (m[1]) ret.delete = true;               // leading "delete " keyword
    else if (m[2]) ret.path.push(m[2]);        // bare key segment
    else if (m[3] && m[5])
    ret.slice = [JSON.parse(m[4]), JSON.parse(m[5].substr(1))]; // [lo:hi]
    else if (m[3]) ret.path.push(JSON.parse(m[3])); // [index] or ["key"]
    }
    return ret;
};
return self;
};
================================================
FILE: antimatter_ts/src/sequence_crdt.ts
================================================
type Version = string;
type Node = {
/// globally unique string
version: Version,
/// a string or array representing actual data elements of the underlying sequence
elems: string | any[],
/// this is useful for dealing with replace operations
end_cap: any | undefined,
/// version to pretend this is for the purposes of sorting
sort_key: any | undefined,
/// if this node gets deleted, we'll mark it here
/// (fix: `Record` requires type arguments — keys are version ids)
deleted_by: Record<string, boolean>,
/// array of nodes following this one
nexts: any[],
/// final node following this one (after all the nexts)
next: null | any,
};
/// # sequence_crdt_create_node(version, elems, [end_cap, sort_key])
///
/// Creates a node for a `sequence_crdt` sequence CRDT with the given properties. The resulting node will look like this:
///
/// ``` js
/// let sequence_node = sequence_crdt_create_node('alice1', 'hello')
/// ```
// Build a fresh sequence node: it carries its elements and version, and
// starts with no deletions and no successors.
const sequence_crdt_create_node = (version: Version, elems: string | any[], end_cap: any = undefined, sort_key: any = undefined): Node => {
    return {
    version,
    elems,
    end_cap,
    sort_key,
    deleted_by: {},
    nexts: [],
    next: null,
    };
};
/// # sequence_crdt_generate_braid(root_node, version, is_anc)
///
/// Reconstructs an array of splice-information which can be passed to `sequence_crdt_add_version` in order to add `version` to another `sequence_crdt` instance – the returned array looks like: `[[insert_pos, delete_count, insert_elems, sort_key], ...]`. `is_anc` is a function which accepts a version string and returns `true` if and only if the given version is an ancestor of `version` (i.e. a version which the author of `version` knew about when they created that version).
///
/// ``` js
/// let root_node = sequence_crdt_create_node('alice1', 'hello')
/// console.log(sequence_crdt_generate_braid(root_node, 'alice1', x => false)) // outputs an array of splices like [[0, 0, "hello", ...]]
/// ```
// Reconstruct the splices `version` introduced, as
// [[offset, delete_count, inserted_elems, sort_key, kind], ...] where kind
// is "i" (insert), "r" (replace), or "d" (delete).
// Fix: the original signature referenced an undeclared generic `T`
// (`(x: T, ...) => T`), a TypeScript compile error — widened to `any`.
const sequence_crdt_generate_braid = (S: Node, version: Version, is_anc: (v: Version) => boolean, read_array_elements: ((x: any, cb?: () => boolean) => any) | undefined = undefined) => {
    if (!read_array_elements) read_array_elements = (x) => x;
    let splices = [];
    // Record an insertion, merging into the previous splice when adjacent.
    function add_ins(offset, ins, sort_key, end_cap, is_row_header) {
    if (typeof ins !== "string")
    ins = ins.map((x) => read_array_elements(x, () => false));
    if (splices.length > 0) {
    let prev = splices[splices.length - 1];
    if (
    prev[0] + prev[1] === offset &&
    !end_cap &&
    (!is_row_header || prev[3] == sort_key) &&
    (prev[4] === "i" || (prev[4] === "r" && prev[1] === 0))
    ) {
    prev[2] = prev[2].concat(ins);
    return;
    }
    }
    splices.push([offset, 0, ins, sort_key, end_cap ? "r" : "i"]);
    }
    // Record a deletion, extending the previous splice when adjacent.
    function add_del(offset, del, ins) {
    if (splices.length > 0) {
    let prev = splices[splices.length - 1];
    if (prev[0] + prev[1] === offset && prev[4] !== "i") {
    prev[1] += del;
    return;
    }
    }
    splices.push([offset, del, ins, null, "d"]);
    }
    // `offset` counts only elements visible to is_anc as we walk the tree.
    let offset = 0;
    function helper(node, _version, end_cap = undefined, is_row_header = undefined) {
    if (_version === version) {
    // This node was inserted by the target version.
    add_ins(
    offset,
    node.elems.slice(0),
    node.sort_key,
    end_cap,
    is_row_header
    );
    } else if (node.deleted_by[version] && node.elems.length > 0) {
    // This node was deleted by the target version.
    add_del(offset, node.elems.length, node.elems.slice(0, 0));
    }
    // Advance the offset only past nodes visible to the recipient.
    if (
    (!_version || is_anc(_version)) &&
    !Object.keys(node.deleted_by).some(is_anc)
    ) {
    offset += node.elems.length;
    }
    node.nexts.forEach((next) =>
    helper(next, next.version, node.end_cap, true)
    );
    if (node.next) helper(node.next, _version);
    }
    helper(S, null);
    splices.forEach((s) => {
    // if we have replaces with 0 deletes,
    // make them have at least 1 delete..
    // this can happen when there are multiple replaces of the same text,
    // and our code above will associate those deletes with only one of them
    if (s[4] === "r" && s[1] === 0) s[1] = 1;
    });
    return splices;
};
/// # sequence_crdt_apply_bubbles(root_node, to_bubble)
///
/// This method helps prune away meta data and compress stuff when we have determined that certain versions can be renamed to other versions – these renamings are expressed in `to_bubble`, where keys are versions and values are "bubbles", each bubble is represented with an array of two elements, the first element is the "bottom" of the bubble, and the second element is the "top" of the bubble. We will use the "bottom" as the new name for the version, and we'll use the "top" as the new parents.
///
/// ``` js
/// sequence_crdt_apply_bubbles(root_node, {
/// alice4: ['bob5', 'alice4'],
/// bob5: ['bob5', 'alice4']
/// })
/// ```
// Rename versions per `to_bubble` (version -> [bottom, top]) inside the
// sequence rooted at S, then compact: collapse single-branch forks, drop
// deleted/empty nodes, and merge adjacent nodes with identical metadata.
const sequence_crdt_apply_bubbles = (S, to_bubble) => {
    // Pass 1: rewrite node versions and deleted_by entries to bubble bottoms.
    sequence_crdt_traverse(
    S,
    () => true,
    (node) => {
    if (
    to_bubble[node.version] &&
    to_bubble[node.version][0] != node.version
    ) {
    // Preserve the old version as the sort key so ordering is stable.
    if (!node.sort_key) node.sort_key = node.version;
    node.version = to_bubble[node.version][0];
    }
    for (let x of Object.keys(node.deleted_by)) {
    if (to_bubble[x]) {
    delete node.deleted_by[x];
    node.deleted_by[to_bubble[x][0]] = true;
    }
    }
    },
    true
    );
    // Append `next` after the last node in `node`'s next-chain.
    function set_nnnext(node, next) {
    while (node.next) node = node.next;
    node.next = next;
    }
    do_line(S, S.version);
    // Pass 2: walk one "line" (next-chain) compacting it in place.
    function do_line(node, version) {
    let prev = null;
    while (node) {
    // Forks whose branches now share our version can be linearized:
    // chain the nexts together and fold them into the main line.
    if (node.nexts[0] && node.nexts[0].version == version) {
    for (let i = 0; i < node.nexts.length; i++) {
    delete node.nexts[i].version;
    delete node.nexts[i].sort_key;
    set_nnnext(
    node.nexts[i],
    i + 1 < node.nexts.length ? node.nexts[i + 1] : node.next
    );
    }
    node.next = node.nexts[0];
    node.nexts = [];
    }
    // A node deleted by our own version becomes empty; step back to
    // `prev` so the merge rules below can absorb it.
    if (node.deleted_by[version]) {
    node.elems = node.elems.slice(0, 0);
    node.deleted_by = {};
    if (prev) {
    node = prev;
    continue;
    }
    }
    let next = node.next;
    // Merge with the following node when one side is empty or both
    // carry exactly the same deleted_by sets.
    if (
    !node.nexts.length &&
    next &&
    (!node.elems.length ||
    !next.elems.length ||
    (Object.keys(node.deleted_by).every((x) => next.deleted_by[x]) &&
    Object.keys(next.deleted_by).every((x) => node.deleted_by[x])))
    ) {
    if (!node.elems.length) node.deleted_by = next.deleted_by;
    node.elems = node.elems.concat(next.elems);
    node.end_cap = next.end_cap;
    node.nexts = next.nexts;
    node.next = next.next;
    continue;
    }
    // Drop an empty, childless successor outright.
    if (next && !next.elems.length && !next.nexts.length) {
    node.next = next.next;
    continue;
    }
    // Recurse into remaining forks, then advance along the line.
    for (let n of node.nexts) do_line(n, n.version);
    prev = node;
    node = next;
    }
    }
};
/// # sequence_crdt_get(root_node, i, is_anc)
///
/// Returns the element at the `i`th position (0-based) in the `sequence_crdt` rooted at `root_node`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// let x = sequence_crdt_get(root_node, 2, {
/// alice1: true
/// })
/// ```
// Return the element at visible position i (0-based), counting only nodes
// accepted by is_anc; null when i is out of range.
const sequence_crdt_get = (S, i, is_anc) => {
    let result = null;
    let seen = 0;
    sequence_crdt_traverse(S, is_anc || (() => true), (node) => {
    let index = i - seen;
    if (index < node.elems.length) {
    result = node.elems[index];
    return false; // found it — stop traversing
    }
    seen += node.elems.length;
    });
    return result;
};
/// # sequence_crdt_set(root_node, i, v, is_anc)
///
/// Sets the element at the `i`th position (0-based) in the `sequence_crdt` rooted at `root_node` to the value `v`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// sequence_crdt_set(root_node, 2, 'x', {
/// alice1: true
/// })
/// ```
// Overwrite the element at visible position i (0-based) with v, counting
// only nodes accepted by is_anc.
const sequence_crdt_set = (S, i, v, is_anc) => {
    let seen = 0;
    sequence_crdt_traverse(S, is_anc || (() => true), (node) => {
    let index = i - seen;
    if (index < node.elems.length) {
    if (typeof node.elems == "string") {
    // Strings are immutable: rebuild with one unit replaced at index.
    node.elems =
    node.elems.slice(0, index) + v + node.elems.slice(index + 1);
    } else {
    node.elems[index] = v;
    }
    return false; // done — stop traversing
    }
    seen += node.elems.length;
    });
};
/// # sequence_crdt_length(root_node, is_anc)
///
/// Returns the length of the `sequence_crdt` rooted at `root_node`, when only considering versions which result in `true` when passed to `is_anc`.
///
/// ``` js
/// console.log(sequence_crdt_length(root_node, {
/// alice1: true
/// }))
/// ```
// Count the visible elements of the sequence, considering only nodes
// accepted by is_anc.
const sequence_crdt_length = (S, is_anc) => {
    let total = 0;
    sequence_crdt_traverse(S, is_anc || (() => true), (node) => {
    total += node.elems.length;
    });
    return total;
};
/// # sequence_crdt_break_node(node, break_position, end_cap, new_next)
///
/// This method breaks apart a `sequence_crdt` node into two nodes, each representing a subsequence of the sequence represented by the original node. The `node` parameter is modified into the first node, and the second node is returned. The first node represents the elements of the sequence before `break_position`, and the second node represents the rest of the elements. If `end_cap` is truthy, then the first node will have `end_cap` set – this is generally done if the elements in the second node are being replaced. This method will add `new_next` to the first node's `nexts` array.
///
/// ``` js
/// let node = sequence_crdt_create_node('alice1', 'hello') // node.elems == 'hello'
/// let second = sequence_crdt_break_node(node, 2) // now node.elems == 'he', and second.elems == 'llo'
/// ```
// Split `node` at element index `x`: `node` keeps the prefix, and a new
// node holding the suffix is chained in after it (and returned). The new
// node has a null version — it inherits its identity from its parent via
// the `next` chain. `end_cap` marks the prefix as capped (its tail is
// being replaced), and `new_next` (if given) becomes the prefix's sole
// entry in `nexts`.
const sequence_crdt_break_node = (node, x, end_cap = undefined, new_next = undefined) => {
  // Build the suffix node first, while `node` still describes the whole run.
  const second = sequence_crdt_create_node(null, node.elems.slice(x), node.end_cap);
  Object.assign(second.deleted_by, node.deleted_by);
  second.nexts = node.nexts;
  second.next = node.next;
  // Now shrink the original down to the prefix and chain the halves.
  node.elems = node.elems.slice(0, x);
  node.end_cap = end_cap;
  node.nexts = new_next ? [new_next] : [];
  node.next = second;
  return second;
};
/// # sequence_crdt_add_version(root_node, version, splices, [is_anc])
///
/// This is the main method in sequence_crdt, used to modify the sequence. The modification must be given a unique `version` string, and the modification itself is represented as an array of `splices`, where each splice looks like this: `[position, num_elements_to_delete, elements_to_insert, optional_sort_key]`.
///
/// Note that all positions are relative to the original sequence, before any splices have been applied. Positions are counted by only considering nodes with versions which result in `true` when passed to `is_anc`. (and are not `deleted_by` any versions which return `true` when passed to `is_anc`).
///
/// ``` js
/// let node = sequence_crdt_create_node('alice1', 'hello')
/// sequence_crdt_add_version(node, 'alice2', [[5, 0, ' world']], null, v => v == 'alice1')
/// ```
// Main mutation entry point: applies `splices` — positions measured in
// the pre-edit sequence as seen through `is_anc` — under the new
// `version`, and returns those splices rebased onto the full,
// unfiltered sequence (useful for updating parallel data structures).
const sequence_crdt_add_version = (S: Node, version: Version, splices, is_anc) => {
  // Splices re-expressed against the complete (all-versions) sequence
  let rebased_splices = [];
  // Insert `to` into `nexts` keeping the array ordered by
  // sort_key-or-version, so concurrent inserts at the same spot get a
  // deterministic order on every peer.
  function add_to_nexts(nexts: Node[], to: Node) {
    let i = binarySearch(nexts, function (x: Node) {
      if ((to.sort_key || to.version) < (x.sort_key || x.version)) return -1;
      if ((to.sort_key || to.version) > (x.sort_key || x.version)) return 1;
      return 0;
    });
    nexts.splice(i, 0, to);
  }
  let si = 0;             // index of the splice currently being applied
  let delete_up_to = 0;   // visible position up to which a deletion is still pending
  // Called once per traversed node: decides whether the current splice
  // inserts at, deletes from, or splits this node.
  let process_patch = (node, offset, has_nexts, prev, _version, deleted) => {
    let s = splices[si];
    if (!s) return; // all splices consumed
    let sort_key = s[3];
    if (deleted) {
      // This node is invisible under is_anc, but pure insertions that
      // land exactly here must still be anchored into the structure.
      if (s[1] == 0 && s[0] == offset) {
        // An empty, uncapped node with live nexts is skipped here —
        // presumably a later node offers the anchor point instead.
        if (node.elems.length == 0 && !node.end_cap && has_nexts) return;
        let new_node = sequence_crdt_create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        if (node.elems.length == 0 && !node.end_cap)
          add_to_nexts(node.nexts, new_node);
        else sequence_crdt_break_node(node, 0, undefined, new_node);
        si++;
      }
      // Replacement (delete + insert) starting at a capped deleted node:
      // hang the replacement text off this node's nexts.
      if (
        delete_up_to <= offset &&
        s[1] &&
        s[2] &&
        s[0] == offset &&
        node.end_cap &&
        !has_nexts &&
        (node.next && node.next.elems.length) &&
        !Object.keys(node.next.deleted_by).some((version) => f(version))
      ) {
        delete_up_to = s[0] + s[1];
        let new_node = sequence_crdt_create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        add_to_nexts(node.nexts, new_node);
      }
      return;
    }
    // Pure insertion (no deletion) into a visible node
    if (s[1] == 0) {
      // d > 0: the insertion point is further right; d == 0: exactly at
      // this node's end.
      let d = s[0] - (offset + node.elems.length);
      if (d > 0) return;
      if (d == 0 && !node.end_cap && has_nexts) return;
      let new_node = sequence_crdt_create_node(version, s[2], null, sort_key);
      fresh_nodes.add(new_node);
      if (d == 0 && !node.end_cap) {
        add_to_nexts(node.nexts, new_node);
      } else {
        sequence_crdt_break_node(node, s[0] - offset, undefined, new_node);
      }
      si++;
      return;
    }
    // Start of a deletion (possibly with replacement text)
    if (delete_up_to <= offset) {
      let d = s[0] - (offset + node.elems.length);
      // Can the replacement text be appended after this capped node?
      let add_at_end =
        d == 0 &&
        s[2] &&
        node.end_cap &&
        !has_nexts &&
        (node.next && node.next.elems.length) &&
        !Object.keys(node.next.deleted_by).some((version) => f(version));
      if (d > 0 || (d == 0 && !add_at_end)) return;
      delete_up_to = s[0] + s[1];
      if (s[2]) {
        let new_node = sequence_crdt_create_node(
          version,
          s[2],
          null,
          sort_key
        );
        fresh_nodes.add(new_node);
        if (add_at_end) {
          add_to_nexts(node.nexts, new_node);
        } else {
          // end_cap=true marks that the text following the break is
          // being replaced by new_node's elements
          sequence_crdt_break_node(node, s[0] - offset, true, new_node);
        }
        return;
      } else {
        if (s[0] == offset) {
          // Deletion starts exactly at this node's start: fall through
          // to the delete_up_to handling below.
        } else {
          // Split so the deletion can begin at a node boundary
          sequence_crdt_break_node(node, s[0] - offset);
          return;
        }
      }
    }
    // Continue an in-progress deletion across this node
    if (delete_up_to > offset) {
      if (delete_up_to <= offset + node.elems.length) {
        if (delete_up_to < offset + node.elems.length) {
          // Deletion ends inside this node: split off the surviving tail
          sequence_crdt_break_node(node, delete_up_to - offset);
        }
        si++; // this splice is now fully applied
      }
      node.deleted_by[version] = true;
      return;
    }
  };
  let f = is_anc || (() => true);
  let offset = 0;              // position counting only is_anc-visible elements
  let rebase_offset = 0;       // position counting ALL elements
  let fresh_nodes = new Set(); // nodes created by this call (for rebasing)
  // A custom traversal (rather than sequence_crdt_traverse) because we
  // must also visit deleted nodes and track the unfiltered offset.
  function traverse(node, prev, version) {
    if (!version || f(version)) {
      let has_nexts = node.nexts.find((next) => f(next.version));
      let deleted = Object.keys(node.deleted_by).some((version) =>
        f(version)
      );
      let rebase_deleted = Object.keys(node.deleted_by).length;
      process_patch(node, offset, has_nexts, prev, version, deleted);
      if (!deleted) offset += node.elems.length;
      // If this call just deleted the node, record a deletion splice
      // against the unfiltered sequence
      if (!rebase_deleted && Object.keys(node.deleted_by).length)
        rebased_splices.push([rebase_offset, node.elems.length, ""]);
    }
    // Nodes created by this call appear as insertions in the rebase
    if (fresh_nodes.has(node))
      rebased_splices.push([rebase_offset, 0, node.elems]);
    if (!Object.keys(node.deleted_by).length)
      rebase_offset += node.elems.length;
    for (let next of node.nexts) traverse(next, null, next.version);
    if (node.next) traverse(node.next, node, version);
  }
  traverse(S, null, S.version);
  return rebased_splices;
};
/// # sequence_crdt_traverse(root_node, is_anc, callback, [view_deleted, tail_callback])
///
/// Traverses the subset of nodes in the tree rooted at `root_node` whose versions return `true` when passed to `is_anc`. For each node, `callback` is called with these parameters: `node, offset, has_nexts, prev, version, deleted`,
///
/// Where
/// - `node` is the current node being traversed
/// - `offset` says how many elements we have passed so far
/// - `has_nexts` is true if some of this node's `nexts` will be traversed according to `is_anc`
/// - `prev` is a pointer to the node whose `next` points to this one, or `null` if this is the root node
/// - `version` is the version of this node, or this node's `prev` if our version is `null`, or that node's `prev` if it is also `null`, etc
/// - `deleted` is true if this node is deleted according to `is_anc`
///
/// Usually we skip deleted nodes when traversing, but we'll include them if `view_deleted` is `true`.
///
/// `tail_callback` is an optional callback that will get called with a single parameter `node` after all of that node's children `nexts` and `next` have been traversed.
///
/// ``` js
/// sequence_crdt_traverse(node, () => true, node =>
/// process.stdout.write(node.elems))
/// ```
// Depth-first walk over nodes whose version satisfies `f`. For each
// visible node (or every node when `view_deleted` is set), `cb` is
// called with (node, offset, has_nexts, prev, version, deleted); a
// `false` return aborts the whole traversal. `tail_cb`, if given, fires
// on each node once its entire subtree has been visited.
const sequence_crdt_traverse = (S, f, cb, view_deleted = undefined, tail_cb = undefined) => {
  let offset = 0;
  // Returns true when the callback asked us to stop, so the recursion
  // can unwind immediately.
  const walk = (node, prev, version) => {
    const has_nexts = node.nexts.find((n) => f(n.version));
    const deleted = Object.keys(node.deleted_by).some((v) => f(v));
    if (view_deleted || !deleted) {
      if (cb(node, offset, has_nexts, prev, version, deleted) == false)
        return true;
      offset += node.elems.length;
    }
    for (const n of node.nexts) {
      if (!f(n.version)) continue;
      if (walk(n, null, n.version)) return true;
    }
    if (node.next) {
      if (walk(node.next, node, version)) return true;
    } else if (tail_cb) {
      tail_cb(node);
    }
  };
  walk(S, null, S.version);
};
// modified from https://stackoverflow.com/questions/22697936/binary-search-in-javascript
// Binary search over a sorted array. Returns the index of an element for
// which `compare_fn` returns 0, or — when there is no exact match — the
// index at which such an element should be inserted to keep `ar` sorted.
// Fix: the type parameter `T` was referenced but never declared, which
// is a TypeScript compile error (TS2304); declare it as a generic.
function binarySearch<T>(ar: T[], compare_fn: (x: T) => number): number {
  let lo = 0;
  let hi = ar.length - 1;
  while (lo <= hi) {
    let mid = (hi + lo) >> 1;
    let cmp = compare_fn(ar[mid]);
    if (cmp > 0) {
      lo = mid + 1;   // target is in the upper half
    } else if (cmp < 0) {
      hi = mid - 1;   // target is in the lower half
    } else {
      return mid;     // exact match
    }
  }
  return lo;          // insertion point
}
/// - *sequence_crdt*: methods to manipulate a pruneable sequence CRDT —
/// "sequence" meaning it represents a javascript string or array, "CRDT" meaning
/// this structure can be merged with other ones, and "pruneable" meaning that it
/// supports an operation to remove meta-data when it is no longer needed (whereas
/// CRDT's often keep track of this meta-data forever).
// Re-exported under short aliases so importers can write e.g.
// `sequence_crdt.get(...)` or `sequence_crdt.add_version(...)`.
export {
  sequence_crdt_create_node as create_node,
  sequence_crdt_generate_braid as generate_braid,
  sequence_crdt_apply_bubbles as apply_bubbles,
  sequence_crdt_get as get,
  sequence_crdt_set as set,
  sequence_crdt_length as length,
  sequence_crdt_break_node as break_node,
  sequence_crdt_add_version as add_version,
  sequence_crdt_traverse as traverse,
};
================================================
FILE: antimatter_ts/test.html
================================================
================================================
FILE: antimatter_ts/tsconfig.json
================================================
{
"compilerOptions": {
"lib": ["ES2017"],
}
}
================================================
FILE: antimatter_wiki/client.html
================================================
================================================
FILE: antimatter_wiki/package.json
================================================
{
"name": "@braidjs/antimatter_wiki",
"version": "0.1.5",
"description": "collaborative wiki using antimatter sync algorithm",
"main": "server.js",
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org/antimatter",
"dependencies": {
"@braidjs/antimatter": "^0.0.12",
"ws": "^8.16.0"
}
}
================================================
FILE: antimatter_wiki/readme.md
================================================
# MOVED TO https://github.com/braid-org/antimatter_wiki
# Antimatter Wiki
A collaborative wiki based on the [Antimatter Algorithm](https://braid.org/antimatter).
To use:
```bash
npm install @braidjs/antimatter_wiki
```
Then put this into an app.js:
```javascript
require('@braidjs/antimatter_wiki').serve({
port: 60509,
domain: 'localhost:60509',
ws_prefix: 'wss://' // Or 'ws://' for insecure websocket
})
```
And run it with `node app.js`.
================================================
FILE: antimatter_wiki/server.js
================================================
console.log(require('./package.json').version)
var fs = require('fs')
var fs_p = require('fs/promises')
var {antimatter} = require('@braidjs/antimatter')
// CLI: node server.js [port] [ws_url] [fissure_lifetime_ms]
var port = process.argv[2] || 1001
var ws_url = process.argv[3] || `ws://localhost:${port}`
// `1*` coerces the CLI string to a number; default is one hour (in ms)
var fissure_lifetime = 1*(process.argv[4] || 1000 * 60 * 60)
console.log(`port = ${port}`)
console.log(`ws_url = ${ws_url}`)
console.log(`fissure_lifetime = ${fissure_lifetime / (1000 * 60 * 60)} hours`)
// Persistence root; one subdirectory per wiki page key
if (!fs.existsSync('./antimatter_wiki_db')) fs.mkdirSync('./antimatter_wiki_db')
let conns = {}       // conn id -> websocket
let antimatters = {} // page key -> Promise resolving to an antimatter instance
// Lazily create (or restore from disk) the antimatter instance for the
// wiki page `key`. The instance is cached as a promise in `antimatters`
// so concurrent callers share a single initialization.
async function ensure_antimatter(key) {
  console.log('finding db at ', JSON.stringify(key))
  if (!antimatters[key]) antimatters[key] = new Promise(async done => {
    let dir = `./antimatter_wiki_db/${encodeURIComponent(key)}`
    if (!fs.existsSync(dir))
      fs.mkdirSync(dir)
    // The db directory holds snapshot files ("d<N>") and write-ahead
    // logs ("w<N>"); collect them sorted by sequence number
    let files = []
    for (let filename of await fs_p.readdir(dir)) {
      let m = filename.match(/^([dw])(\d+)$/)
      if (m) files.push({t: m[1], i: 1*m[2]})
    }
    files.sort((a, b) => a.i - b.i)
    let file_i = files[files.length - 1]?.i ?? -1
    console.log('files: ', files)
    // Delete everything older than the most recent snapshot — only the
    // latest "d" file plus any later logs are needed to rebuild state
    await Promise.all(files.splice(0, files.reduce((a, b, i) => b.t == 'd' ? i : a, 0)).map(x => fs_p.rm(`${dir}/${x.t}${x.i}`)))
    let a
    function create_antimatter(prev) {
      // The callback is antimatter's outgoing-message hook: forward each
      // message over the websocket registered under x.conn (best-effort)
      let a = antimatter.create(x => {
        try {
          console.log(`key=${key}, sending to [${x.conn}]: ` + JSON.stringify(x).slice(0, 100))
          conns[x.conn].send(JSON.stringify(x))
        } catch (e) {
          console.log(`key=${key}, failed to send: ` + e)
        }
      }, prev)
      a.fissure_lifetime = fissure_lifetime
      // Seed brand-new documents with an empty page
      if (a.S == null) a.set({range: '', content: ''})
      return a
    }
    // Replay: a "d" file recreates the instance wholesale from a JSON
    // snapshot; a "w" file replays receive/disconnect events line by line
    for (let file of files) {
      console.log(`file: `, file)
      let s = await fs_p.readFile(`${dir}/${file.t}${file.i}`)
      if (file.t == 'd') {
        a = create_antimatter(JSON.parse(s))
      } else {
        for (let line of ('' + s).split(/\n/)) {
          let x = JSON.parse(line || '{}')
          if (x.receive) {
            // best-effort: a message that failed once will fail again
            try {
              a.receive(x.receive)
            } catch (e) {}
          }
          if (x.disconnect) a.disconnect(x.disconnect)
        }
      }
    }
    if (!a) a = create_antimatter()
    // Any connections recorded in the restored state are stale now
    for (let c of Object.keys(a.conns)) a.disconnect(c)
    for (let c of Object.keys(a.proto_conns)) a.disconnect(c)
    let dirty = true
    let wol_filename
    await compactor()
    // Once a minute: if anything was logged since the last pass, write a
    // fresh snapshot, start a new write-ahead log, and delete old files
    async function compactor() {
      if (dirty) {
        dirty = false
        wol_filename = `${dir}/w${file_i + 2}`
        await fs_p.writeFile(`${dir}/d${file_i + 1}`, JSON.stringify(a))
        await Promise.all(files.map(x => fs_p.rm(`${dir}/${x.t}${x.i}`)))
        files = [{t: 'd', i: file_i + 1}, {t: 'w', i: file_i + 2}]
        file_i += 2
      }
      setTimeout(compactor, 1000 * 60)
    }
    // Synchronous append so an event is durable before it is applied
    a.write_to_log = (obj) => {
      fs.appendFileSync(wol_filename, JSON.stringify(obj) + '\n')
      dirty = true
    }
    done(a)
  })
  return await antimatters[key]
}
// Serve client.html with its template placeholders substituted,
// honoring ETag conditional requests. The file is re-read from disk on
// every request, so edits show up without a server restart.
function respond_with_client (req, res) {
  var html = '' + fs.readFileSync('./client.html')
  html = html.replace(/__VERSION__/, `${require('./package.json').version}`)
  html = html.replace(/__WIKI_HOST__/, `${ws_url}`)
  var etag = require('crypto').createHash('md5').update(html).digest('hex')
  if (req.headers['if-none-match'] === etag) {
    // Client already has this exact payload cached
    res.writeHead(304)
    res.end()
    return
  }
  res.writeHead(200, {
    'Content-Type': 'text/html',
    'Cache-Control': 'public, max-age=31536000',
    'ETag': etag,
  })
  res.end(html)
}
// HTTP side: every request (any path, any method) gets the client page,
// with permissive CORS headers so other origins can fetch it
var server = require('http').createServer(async function (req, res) {
  console.log('GET: ', {method: req.method, url: req.url})
  res.setHeader('Access-Control-Allow-Origin', '*')
  res.setHeader('Access-Control-Allow-Headers', '*')
  res.setHeader('Access-Control-Allow-Methods', '*')
  respond_with_client(req, res)
})
// WebSocket side: one connection per client, keyed by the URL path
var wss = new (require('ws').Server)({server})
wss.on('connection', (ws, req) => {
  console.log(`new connection! ${req.url}`)
  // The page key is the URL path; keys that are empty or begin with '_'
  // are escaped with a '_' prefix (reserving that namespace)
  let key = decodeURIComponent(req.url.slice(1))
  if (key === '' || key[0] === '_')
    key = '_' + key
  let a_p = ensure_antimatter(key)
  let conn
  // Application-level keepalive: terminate the socket if the client
  // misses a ping/pong round trip (12 second period)
  let pong = true
  ping()
  function ping() {
    if (ws.readyState > 1) return // socket is closing or closed
    if (!pong) {
      console.log(`ping timeout! conn ${conn} key=${key}`)
      ws.terminate()
      return
    }
    pong = false
    ws.send('ping')
    setTimeout(ping, 12000)
  }
  ws.on('message', async x => {
    pong = true
    if (x == 'pong') return
    if (x == 'ping') return ws.send('pong')
    console.log(`RECV: ${x.slice(0, 100)}`)
    x = JSON.parse(x)
    // Messages carrying a conn id register this socket for outbound sends
    if (x.conn) conns[conn = x.conn] = ws
    var a = await a_p
    // Log before applying, so a crash mid-apply can be replayed from disk
    a.write_to_log({receive: x})
    try {
      a.receive(x)
    } catch (e) {
      ws.send(JSON.stringify({type: 'error', message: e.message}))
    }
  })
  ws.on('close', async () => {
    if (!conn) return
    console.log(`close: ` + conn)
    var a = await a_p
    a.write_to_log({disconnect: conn})
    a.disconnect(conn)
    delete conns[conn]
  })
})
server.listen(port)
console.log(`listening on port ${port}`)
================================================
FILE: braid-http/braid-http-client.js
================================================
// var peer = Math.random().toString(36).substr(2)
// ***************************
// http
// ***************************
// Wrap a node `http`-style module so that `http.get` understands Braid
// subscriptions: it adds the `Subscribe: true` header when
// `options.subscribe` is set, and augments the response object with an
// `.on('update', cb)` event that yields parsed Braid updates.
// Returns the (mutated) module; the original getter survives as
// `http.normal_get`.
function braidify_http (http) {
    http.normal_get = http.get
    http.get = function braid_req (arg1, arg2, arg3) {
        var url, options, cb

        // http.get() supports two forms:
        //
        //  - http.get(url[, options][, callback])
        //  - http.get(options[, callback])
        //
        // We need to know which arguments are which, so let's detect which
        // form we are looking at.

        // Detect form #1: http.get(url[, options][, callback])
        if (typeof arg1 === 'string' || arg1 instanceof URL) {
            url = arg1
            if (typeof arg2 === 'function')
                cb = arg2
            else {
                options = arg2
                cb = arg3
            }
        }

        // Otherwise it's form #2: http.get(options[, callback])
        else {
            // Bugfix: in this form the options are the FIRST argument and
            // the callback the second. The previous code read arg2/arg3
            // here, which silently dropped the caller's options object.
            options = arg1
            cb = arg2
        }

        options = options || {}

        // Now we know where the `options` are specified, let's set headers.
        if (!options.headers)
            options.headers = {}

        // Add the subscribe header if this is a subscription
        if (options.subscribe)
            options.headers.subscribe = 'true'

        // Wrap the callback to provide our new .on('update', ...) feature
        // on nodejs servers
        var on_update,
            on_error,
            orig_cb = cb
        cb = (res) => {
            res.orig_on = res.on
            res.on = (key, f) => {
                // Define .on('update', cb)
                if (key === 'update'
                    || key === 'version' /* Deprecated API calls it 'version' */ ) {
                    // If we have an 'update' handler, let's remember it
                    on_update = f

                    // And set up a subscription parser
                    var parser = subscription_parser((update, error) => {
                        if (!error)
                            on_update && on_update(update)
                        else
                            on_error && on_error(error)
                    })

                    // That will run each time we get new data
                    res.orig_on('data', (chunk) => {
                        parser.read(chunk)
                    })
                }

                // Forward .on('error', cb) and remember the error function
                else if (key === 'error') {
                    on_error = f
                    res.orig_on(key, f)
                }

                // Forward all other .on(*, cb) calls
                else res.orig_on(key, f)
            }
            orig_cb && orig_cb(res)
        }

        // Now put the parameters back in their prior order and call the
        // underlying .get() function
        if (url) {
            arg1 = url
            if (options) {
                arg2 = options
                arg3 = cb
            } else {
                arg2 = cb
            }
        } else {
            arg1 = options
            arg2 = cb
        }
        return http.normal_get(arg1, arg2, arg3)
    }
    return http
}
// ***************************
// Fetch
// ***************************
// Pick the platform's fetch primitives once at load time: node uses
// node-fetch + abort-controller polyfills, browsers use the globals.
var normal_fetch,
    AbortController,
    Headers,
    is_nodejs = typeof window === 'undefined'
if (is_nodejs) {
    // Nodejs
    // Note that reconnect logic doesn't work in node-fetch, because it
    // doesn't call the .catch() handler when the stream fails.
    //
    // See https://github.com/node-fetch/node-fetch/issues/753
    normal_fetch = require('node-fetch')
    AbortController = require('abort-controller')
    Headers = normal_fetch.Headers
    // Converts node streams to whatwg streams (used by handle_fetch_stream)
    var to_whatwg_stream = require('web-streams-node').toWebReadableStream
} else {
    // Web Browser
    normal_fetch = window.fetch
    AbortController = window.AbortController
    Headers = window.Headers
    // window.fetch = braid_fetch
}
// Braid-flavored fetch(): accepts extra params — `version` and `parents`
// (arrays of version strings), `subscribe`, `peer`, and `patches` — which
// are encoded as Braid HTTP headers/body. Resolves to a response object
// augmented with `.subscribe(cb, on_error)` and an async-iterable
// `.subscription` for consuming streamed updates.
async function braid_fetch (url, params = {}) {
    params = {...params} // Copy params, because we'll mutate it

    // Initialize the headers object
    if (!params.headers)
        params.headers = new Headers()
    else
        params.headers = new Headers(params.headers)

    // Sanity check inputs
    if (params.version)
        console.assert(Array.isArray(params.version),
                       'fetch(): `version` must be an array')
    if (params.parents)
        console.assert(Array.isArray(params.parents),
                       'fetch(): `parents` must be an array')

    // We provide some shortcuts for Braid params
    if (params.version)
        params.headers.set('version', params.version.map(JSON.stringify).join(', '))
    if (params.parents)
        params.headers.set('parents', params.parents.map(JSON.stringify).join(', '))
    if (params.subscribe)
        params.headers.set('subscribe', 'true')
    if (params.peer)
        params.headers.set('peer', params.peer)

    // Prevent browsers from going to disk cache
    params.cache = 'no-cache'

    // Prepare patches
    if (params.patches) {
        console.assert(!params.body, 'Cannot send both patches and body')
        console.assert(typeof params.patches === 'object', 'Patches must be object or array')

        // We accept a single patch as an array of one patch
        if (!Array.isArray(params.patches))
            params.patches = [params.patches]

        // If just one patch, send it directly!
        if (params.patches.length === 1) {
            let patch = params.patches[0]
            params.headers.set('Content-Range', `${patch.unit} ${patch.range}`)
            params.headers.set('Content-Length', `${(new TextEncoder().encode(patch.content)).length}`)
            params.body = patch.content
        }

        // Multiple patches get sent within a Patches: N block
        else {
            params.headers.set('Patches', params.patches.length)
            params.body = (params.patches).map(patch => {
                var length = `content-length: ${(new TextEncoder().encode(patch.content)).length}`
                var range = `content-range: ${patch.unit} ${patch.range}`
                return `${length}\r\n${range}\r\n\r\n${patch.content}\r\n`
            }).join('\r\n')
        }
    }

    // Wrap the AbortController with a new one that we control.
    //
    // This is because we want to be able to abort the fetch that the user
    // passes in. However, the fetch() command uses a silly "AbortController"
    // abstraction to abort fetches, which has both a `signal` and a
    // `controller`, and only passes the signal to fetch(), but we need the
    // `controller` to abort the fetch itself.
    var original_signal = params.signal
    var underlying_aborter = new AbortController()
    params.signal = underlying_aborter.signal
    if (original_signal)
        original_signal.addEventListener(
            'abort',
            () => underlying_aborter.abort()
        )

    // Now we run the original fetch....
    var res = await normal_fetch(url, params)

    // And customize the response with a couple methods for getting
    // the braid subscription data:
    res.subscribe = start_subscription
    res.subscription = {[Symbol.asyncIterator]: iterator}

    // Now we define the subscription function we just used:
    function start_subscription (cb, error) {
        // Bugfix: Error() takes a single message string; the old code
        // passed extra data as a second argument, which was discarded.
        if (!res.ok)
            throw new Error(`Request returned not ok status: ${res.status}`)
        if (res.bodyUsed)
            // TODO: check if this needs a return
            throw new Error('This response\'s body has already been read')

        // Parse the streamed response
        handle_fetch_stream(
            res.body,

            // Each time something happens, we'll either get a new
            // version back, or an error.
            (result, err) => {
                if (!err)
                    // Yay! We got a new version! Tell the callback!
                    cb(result)
                else {
                    // This error handling code runs if the connection
                    // closes, or if there is unparseable stuff in the
                    // streamed response.

                    // In any case, we want to be sure to abort the
                    // underlying fetch.
                    underlying_aborter.abort()

                    // Then send the error upstream.
                    if (error)
                        error(err)
                    else
                        // Throw a real Error (not a bare string) so stack
                        // traces survive.
                        throw new Error('Unhandled network error in subscription')
                }
            }
        )
    }

    // And the iterator for use with "for async (...)"
    function iterator () {
        // We'll keep this state while our iterator runs
        var initialized = false,
            inbox = [],
            resolve = null,
            reject = null
        return {
            async next() {
                // If we've already received a version, return it
                if (inbox.length > 0)
                    return {done: false, value: inbox.shift()}

                // Otherwise, let's set up a promise to resolve when we get the next item
                var promise = new Promise((_resolve, _reject) => {
                    resolve = _resolve
                    reject = _reject
                })

                // Start the subscription, if we haven't already
                if (!initialized) {
                    initialized = true
                    // The subscription will call whichever resolve and
                    // reject functions the current promise is waiting for
                    start_subscription(x => resolve(x),
                                       x => reject(x) )
                }

                // Now wait for the subscription to resolve or reject the promise.
                var result = await promise

                // Anything we get from here out we should add to the inbox
                resolve = (new_version) => inbox.push(new_version)
                reject = (err) => {throw err}

                return { done: false, value: result }
            }
        }
    }

    return res
}
// Parse a stream of versions from the incoming bytes
// Pump a byte stream through a subscription parser, invoking cb(update)
// for each parsed update and cb(null, err) on stream failure or close.
// Fix: removed the unused `versions` array that was allocated on every
// loop iteration and never read.
async function handle_fetch_stream (stream, cb) {
    // node-fetch hands us a node stream; convert it to a whatwg stream
    if (is_nodejs)
        stream = to_whatwg_stream(stream)

    // Set up a reader
    var reader = stream.getReader(),
        parser = subscription_parser(cb)

    while (true) {
        // Read the next chunk of stream!
        try {
            var {done, value} = await reader.read()
        }
        catch (e) {
            cb(null, e)
            return
        }

        // Check if this connection has been closed!
        if (done) {
            console.debug("Connection closed.")
            cb(null, 'Connection closed')
            return
        }

        // Tell the parser to process some more stream
        parser.read(value)
    }
}
// ****************************
// Braid-HTTP Subscription Parser
// ****************************
// Incremental Braid subscription parser. Feed it raw bytes via .read();
// it calls cb(update) for each complete update it can assemble, or
// cb(null, error) when parsing fails.
var subscription_parser = (cb) => ({
    // Parse state carried between reads: unconsumed input bytes plus any
    // partially-parsed update fields
    state: {input: []},
    cb: cb,

    // Accept a new chunk of bytes and emit as many updates as possible
    read (input) {
        // Buffer the incoming bytes
        for (let byte of input) this.state.input.push(byte)

        // Keep going until we run out of parseable input
        while (this.state.input.length) {
            try {
                this.state = parse_update (this.state)
            } catch (e) {
                this.cb(null, e)
                return
            }

            if (this.state.result === 'success') {
                // A full update was parsed — assemble and deliver it,
                // dropping any fields that weren't present
                var update = {
                    version: this.state.version,
                    parents: this.state.parents,
                    body: this.state.body,
                    patches: this.state.patches,
                    extra_headers: extra_headers(this.state.headers)
                }
                for (var field in update)
                    if (update[field] === undefined) delete update[field]
                this.cb(update)

                // Reset for the next update, keeping unconsumed bytes
                this.state = {input: this.state.input}
            }
            else if (this.state.result === 'error') {
                this.cb(null, this.state.message)
                return
            }

            // Out of complete data — wait for the next read()
            if (this.state.result == 'waiting') break
        }
    }
})
// ****************************
// General parsing functions
// ****************************
//
// Each of these functions takes parsing state as input, mutates the state,
// and returns the new state.
//
// Depending on the parse result, each parse function returns:
//
// parse_ (state)
// => {result: 'waiting', ...} If it parsed part of an item, but needs more input
// => {result: 'success', ...} If it parses an entire item
// => {result: 'error', ...} If there is a syntax error in the input
// Parse one update from `state`: first the header block, then the body.
// Returns the mutated state with result 'success'/'waiting', or a fresh
// error object.
function parse_update (state) {
    // Phase 1: headers (skipped if a previous call already parsed them)
    if (!state.headers) {
        var parsed = parse_headers(state.input)

        // Propagate errors and incomplete input straight through
        if (parsed.result === 'error')
            return parsed
        if (parsed.result === 'waiting') {
            state.result = 'waiting'
            return state
        }

        state.headers = parsed.headers
        state.version = state.headers.version
        state.parents = state.headers.parents

        // Consume the header bytes from the buffer
        state.input = parsed.input
    }

    // Phase 2: body (snapshot or patches)
    return parse_body(state)
}
// Parsing helpers

// Parse a headers block out of `input` (a byte array). Returns
// {result: 'success', headers, input} with the leftover bytes,
// {result: 'waiting'} if the block hasn't fully arrived, or an
// {result: 'error', ...} object on a syntax error.
function parse_headers (input) {
    var h = extractHeader(input)
    if (!h) return {result: 'waiting'}
    var headers_source = h.header_string
    var headers_length = headers_source.length

    // Match one "Name: value" line at a time with a sticky regex
    var headers = {},
        header_regex = /(:?[\w-_]+):\s?(.*)\r?\n?/gy, // Parses one line a time
        match,
        reached_end = false
    while (match = header_regex.exec(headers_source)) {
        headers[match[1].toLowerCase()] = match[2]
        // Did this match consume the final line of the block?
        if (header_regex.lastIndex === headers_length)
            reached_end = true
    }

    // If the regex stalled before the end of the block, that's an error
    if (!reached_end)
        return {
            result: 'error',
            message: 'Parse error in headers: "'
                + JSON.stringify(headers_source.substr(header_regex.lastIndex)) + '"',
            headers_so_far: headers,
            last_index: header_regex.lastIndex, headers_length
        }

    // Success! Decode the headers that carry braid-specific JSON syntax
    if ('version' in headers)
        headers.version = JSON.parse('['+headers.version+']')
    if ('parents' in headers)
        headers.parents = JSON.parse('['+headers.parents+']')
    if ('patches' in headers)
        headers.patches = JSON.parse(headers.patches)

    // Hand back the bytes that follow the header block
    input = h.remaining_bytes
    return { result: 'success', headers, input }
}
// Content-range is of the form '<unit> <range>' e.g. 'json .index'
// Split a content-range header value into its unit and range parts;
// returns null if the string contains no non-space characters.
function parse_content_range (range_string) {
    var m = range_string.match(/(\S+)( (.*))?/)
    if (!m) return m
    return {unit: m[1], range: m[3] || ''}
}
// Parse the body portion of an update (headers must already be parsed
// into state.headers). Two wire forms are handled: a single block sized
// by Content-Length — a snapshot, or one patch if Content-Range is also
// present — or a "Patches: N" sequence of header+content sub-blocks.
function parse_body (state) {
    // Parse Body Snapshot
    var content_length = parseInt(state.headers['content-length'])
    if (!isNaN(content_length)) {
        // We've read a Content-Length, so we have a block to parse
        if (content_length > state.input.length) {
            // But we haven't received the whole block yet
            state.result = 'waiting'
            return state
        }

        // We have the whole block!
        state.result = 'success'

        // If we have a content-range, then this is a patch
        if (state.headers['content-range']) {
            var match = parse_content_range(state.headers['content-range'])
            if (!match)
                return {
                    result: 'error',
                    message: 'cannot parse content-range',
                    range: state.headers['content-range']
                }
            state.patches = [{
                unit: match.unit,
                range: match.range,
                content: (new TextDecoder('utf-8')).decode(new Uint8Array(state.input.slice(0, content_length))),
                // Question: Perhaps we should include headers here, like we do for
                // the Patches: N headers below?
                // headers: state.headers
            }]
        }

        // Otherwise, this is a snapshot body
        else
            state.body = (new TextDecoder('utf-8')).decode(new Uint8Array(state.input.slice(0, content_length)))

        // Consume the parsed bytes from the buffer
        state.input = state.input.slice(content_length)
        return state
    }

    // Parse Patches
    else if (state.headers.patches != null) {
        state.patches = state.patches || []
        var last_patch = state.patches[state.patches.length-1]

        // Parse patches until the final patch has its content filled
        while (!(state.patches.length === state.headers.patches
                 && (state.patches.length === 0 || 'content' in last_patch))) {
            // Are we starting a new patch?
            if (!last_patch || 'content' in last_patch) {
                last_patch = {}
                state.patches.push(last_patch)
            }

            // Parse patch headers
            if (!('headers' in last_patch)) {
                var parsed = parse_headers(state.input)

                // If header-parsing fails, send the error upstream
                if (parsed.result === 'error')
                    return parsed
                if (parsed.result === 'waiting') {
                    state.result = 'waiting'
                    return state
                }

                // We parsed patch headers! Update state.
                last_patch.headers = parsed.headers
                state.input = parsed.input
            }

            // Todo: support custom patches, not just range-patch

            // Parse Range Patch format: each patch must declare its own
            // content-length and content-range
            {
                if (!('content-length' in last_patch.headers))
                    return {
                        result: 'error',
                        message: 'no content-length in patch',
                        patch: last_patch, input: (new TextDecoder('utf-8')).decode(new Uint8Array(state.input))
                    }
                if (!('content-range' in last_patch.headers))
                    return {
                        result: 'error',
                        message: 'no content-range in patch',
                        patch: last_patch, input: (new TextDecoder('utf-8')).decode(new Uint8Array(state.input))
                    }

                var content_length = parseInt(last_patch.headers['content-length'])

                // Does input have the entire patch contents yet?
                if (state.input.length < content_length) {
                    state.result = 'waiting'
                    return state
                }

                var match = parse_content_range(last_patch.headers['content-range'])
                if (!match)
                    return {
                        result: 'error',
                        message: 'cannot parse content-range in patch',
                        patch: last_patch, input: (new TextDecoder('utf-8')).decode(new Uint8Array(state.input))
                    }

                last_patch.unit = match.unit
                last_patch.range = match.range
                last_patch.content = (new TextDecoder('utf-8')).decode(new Uint8Array(state.input.slice(0, content_length)))
                last_patch.extra_headers = extra_headers(last_patch.headers)
                delete last_patch.headers // We only keep the extra headers ^^

                // Consume the parsed input
                state.input = state.input.slice(content_length)
            }
        }
        state.result = 'success'
        return state
    }

    // Neither form applies: the update can't declare a body
    return {
        result: 'error',
        message: 'cannot parse body without content-length or patches header'
    }
}
// The "extra_headers" field is returned to the client on any *update* or
// *patch* to include any headers that we've received, but don't have braid
// semantics for.
//
// Builds that hash from a headers object by filtering out every header
// the braid protocol itself understands; returns undefined when nothing
// extra remains.
function extra_headers (headers) {
    // Start from a copy so we never mutate the caller's object
    var result = Object.assign({}, headers)

    // Strip every header that has braid semantics
    for (var known of ['version', 'parents', 'patches',
                       'content-length', 'content-range'])
        delete result[known]

    // An empty result means there were no extra headers at all
    return Object.keys(result).length === 0 ? undefined : result
}
// Scan a byte array for a complete header block: optional leading
// newline bytes, then header text, terminated by a blank line (two
// newlines, each optionally preceded by a carriage return). Returns
// {header_string, remaining_bytes}, or null until the whole block —
// including its terminator — has arrived.
function extractHeader(input) {
    // Skip any leading CR (13) / LF (10) bytes
    let start = 0;
    while (input[start] === 13 || input[start] === 10) start++;
    if (start === input.length) {
        return null; // nothing but newlines so far — incomplete
    }

    // Find the blank line that ends the headers, remembering how many
    // bytes the terminator itself occupies so we can skip past it
    let end = start;
    let tail = 0;
    while (end < input.length) {
        const a = input[end], b = input[end + 1], c = input[end + 2], d = input[end + 3];
        if (a === 10 && b === 10) { tail = 2; break; }                        // \n\n
        if (a === 10 && b === 13 && c === 10) { tail = 3; break; }            // \n\r\n
        if (a === 13 && b === 10 && c === 10) { tail = 3; break; }            // \r\n\n
        if (a === 13 && b === 10 && c === 13 && d === 10) { tail = 4; break; } // \r\n\r\n
        end++;
    }
    if (end === input.length) {
        return null; // terminator not seen yet — incomplete
    }

    // Decode the header text and hand back the unconsumed bytes
    const headerBytes = input.slice(start, end);
    return {
        remaining_bytes: input.slice(end + tail),
        header_string: new TextDecoder('utf-8').decode(new Uint8Array(headerBytes))
    };
}
// ****************************
// Exports
// ****************************
// Export for commonjs/node. In browsers (no `module`), the names above are
// simply left as script globals instead.
if (typeof module !== 'undefined' && module.exports)
    module.exports = {
        fetch: braid_fetch,        // Braid-aware drop-in for fetch()
        http: braidify_http,       // Wraps a node `http` module for clients
        subscription_parser,
        parse_update,
        parse_headers,
        parse_body
    }
================================================
FILE: braid-http/braid-http-server.js
================================================
var assert = require('assert')
// Return a string of patches in pseudoheader format.
//
// `patches` is either one patch object {unit, range, content} or an array of
// them. An array — even of length one — is prefixed with a "Patches: N"
// header; a bare object is rendered as a single inline patch:
//
//    Patches: n
//
//    content-length: 21
//    content-range: json .range
//
//    {"some": "json object"}
//
//    content-length: x
//    ...
//
// versus, for a single bare patch:
//
//    content-length: 21
//    content-range: json .range
//
//    {"some": "json object"}
//
function generate_patches(res, patches) {
    // `patches` must be a patch object or an array of patch objects
    assert(typeof patches === 'object') // An array is also an object
    var out = []
    if (Array.isArray(patches))
        // Arrays get the `Patches: N` header
        out.push(`Patches: ${patches.length}\r\n\r\n`)
    else
        // A bare object is formatted as a single patch
        patches = [patches]
    patches.forEach((patch, i) => {
        assert(typeof patch.unit === 'string')
        assert(typeof patch.range === 'string')
        assert(typeof patch.content === 'string')
        // Consecutive patches are separated by a blank line
        if (i > 0)
            out.push('\r\n\r\n')
        // Content-Length counts UTF-8 bytes, not JS string length
        var byte_length = new TextEncoder().encode(patch.content).length
        // Any field beyond unit/range/content rides along as an extra header
        var extras = ''
        for (var [k, v] of Object.entries(patch))
            if (k !== 'unit' && k !== 'range' && k !== 'content')
                extras += `${k}: ${v}\r\n`
        out.push(`Content-Length: ${byte_length}\r\n`
                 + `Content-Range: ${patch.unit} ${patch.range}\r\n`
                 + extras
                 + '\r\n'
                 + patch.content)
    })
    return out.join('')
}
// Deprecated method for legacy support. Collapses a parsed update into a
// bare array of patches: a string body is returned as one big "everything"
// patch; otherwise the update's patches are passed through.
function parse_patches (req, cb) {
    parse_update(req, (update) => {
        var whole_body = typeof update.body === 'string'
        cb(whole_body
            ? [{unit: 'everything', range: '', content: update.body}]
            : update.patches)
    })
}
// This function reads an update (either a set of patches, or a body) from a
// ReadableStream and then fires a callback when finished.
//
// The callback receives {body, patches}; exactly one of the two is set:
//   - No `Patches` and no `Content-Range` header: the raw body is returned.
//   - `Content-Range` without `Patches: N`: one range patch.
//   - `Patches: N`: N pseudoheader-formatted patches.
function parse_update (req, cb) {
    var num_patches = req.headers.patches

    // Case 1: plain body, no patch headers at all
    if (!num_patches && !req.headers['content-range']) {
        var body = ''
        req.on('data', chunk => {body += chunk.toString()})
        req.on('end', () => {
            cb({ body, patches: undefined })
        })
    }

    // Case 2: parse a single patch, lacking Patches: N
    else if (num_patches === undefined && req.headers['content-range']) {
        // We only support range patches right now, so there must be a
        // Content-Range header.
        assert(req.headers['content-range'], 'No patches to parse: need `Patches: N` or `Content-Range:` header in ' + JSON.stringify(req.headers))

        // Parse the Content-Range header.
        // Content-range is of the form '<unit> <range>', e.g. 'json .index'
        var [unit, range] = parse_content_range(req.headers['content-range'])

        // The contents of the patch is in the request body
        var buffer = []

        // Read the body one chunk at a time
        req.on('data', chunk => buffer.push(chunk))

        // Then return it
        req.on('end', () => {
            // Bug fix: `patches` used to be assigned without var/let here,
            // leaking an implicit global shared across concurrent requests.
            var patches = [{unit, range, content: Buffer.concat(buffer).toString('utf8')}]
            cb({ patches, body: undefined })
        })
    }

    // Case 3: parse multiple patches within a Patches: N block
    else {
        num_patches = parseInt(num_patches)
        let patches = []
        let buffer = []

        // We check to send patches each time we parse one. But if there
        // are zero to parse, we will never check to send them.
        if (num_patches === 0)
            return cb({ patches: [], body: undefined })

        req.on('data', function parse (chunk) {
            // Merge the latest chunk into our buffer
            for (let x of chunk) buffer.push(x)

            while (patches.length < num_patches) {
                // Find this patch's pseudoheader block (ends at a blank line)
                let h = extractHeader(buffer)
                if (!h) return  // Headers incomplete: wait for more data

                // Now let's parse those headers.
                var headers = require('parse-headers')(h.header_string)

                // We require `content-length` to declare the length of the patch.
                if (!('content-length' in headers)) {
                    // Print a nice error if it's missing
                    console.error('No content-length in', JSON.stringify(headers),
                                  'from', {buffer})
                    process.exit(1)
                }

                var body_length = parseInt(headers['content-length'])

                // Give up if we don't have the full patch yet.
                if (h.remaining_bytes.length < body_length)
                    return

                // XX Todo: support custom patch types beyond content-range.
                // Content-range is of the form '<unit> <range>', e.g. 'json .index'
                var [unit, range] = parse_content_range(headers['content-range'])
                var patch_content = new TextDecoder('utf-8').decode(new Uint8Array(h.remaining_bytes.slice(0, body_length)))

                // We've got our patch!
                patches.push({unit, range, content: patch_content})
                // Consume the parsed bytes before looking for the next patch
                buffer = h.remaining_bytes.slice(body_length)
            }

            // We got all the patches! Pause the stream and tell the callback!
            req.pause()
            cb({ patches, body: undefined })
        })

        req.on('end', () => {
            // If the stream ends before we get everything, then report what
            // we did receive
            console.error('Request stream ended!')
            if (patches.length !== num_patches)
                console.error(`Got an incomplete PUT: ${patches.length}/${num_patches} patches were received`)
        })
    }
}
// Parse a Content-Range header of the form '<unit> <range>', e.g.
// 'json .messages[0]'. The range part is optional and defaults to ''.
// Returns [unit, range]; throws on an unparseable (e.g. empty) string.
function parse_content_range (range_string) {
    var match = range_string.match(/(\S+)( (.*))?/)
    // Bug fix: the message used to reference an undefined variable `string`,
    // so this path threw a ReferenceError instead of the intended message.
    if (!match) throw 'Cannot parse Content-Range in ' + range_string
    var [unit, range] = [match[1], match[3] || '']
    return [unit, range]
}
// Node/Express middleware that teaches a (req, res) pair to speak braid.
//
// Reads the braid request headers (Version, Parents, Subscribe) into
// convenience fields on `req`, and attaches helpers:
//   req.parseUpdate() / req.patches() / req.patchesJSON() — parse the request
//   res.sendUpdate() (alias res.sendVersion())            — write an update
//   req.startSubscription() / res.startSubscription()     — begin a 209
//                                                           subscription response
function braidify (req, res, next) {
    // console.log('\n## Braidifying', req.method, req.url, req.headers.peer)

    // First, declare that we support Patches and JSON ranges.
    res.setHeader('Range-Request-Allow-Methods', 'PATCH, PUT')
    res.setHeader('Range-Request-Allow-Units', 'json')

    // Extract braid info from headers. Version/Parents arrive as
    // comma-separated lists of JSON strings; wrapping them in brackets lets
    // JSON.parse split them into arrays. When a header is absent the
    // variable is left as `false`.
    var version = ('version' in req.headers) && JSON.parse('['+req.headers.version+']'),
        parents = ('parents' in req.headers) && JSON.parse('['+req.headers.parents+']'),
        peer = req.headers['peer'],
        url = req.url.substr(1)   // NOTE(review): `url` appears unused below

    // Parse the subscribe header: the string 'true' becomes boolean true;
    // any other value is passed through untouched
    var subscribe = req.headers.subscribe
    if (subscribe === 'true')
        subscribe = true

    // Define convenience variables
    req.version = version
    req.parents = parents
    req.subscribe = subscribe

    // Add the braidly request/response helper methods
    res.sendUpdate = (stuff) => send_update(res, stuff, req.url, peer)
    res.sendVersion = res.sendUpdate
    req.parseUpdate = () => new Promise(
        (done, err) => parse_update(req, (update) => done(update))
    )
    req.patches = () => new Promise(
        (done, err) => parse_patches(req, (patches) => done(patches))
    )
    // Like req.patches(), but with each patch's content JSON-parsed
    req.patchesJSON = () => new Promise(
        (done, err) => parse_patches(
            req,
            (patches) => done(patches.map(
                p => ({...p, content: JSON.parse(p.content)})
            ))
        )
    )
    req.startSubscription = res.startSubscription =
        function startSubscription (args = {}) {
            // console.log('Starting subscription!')
            // console.log('Timeouts are:',
            //             req.socket.server.timeout,
            //             req.socket.server.keepAliveTimeout)

            res.isSubscription = true

            // Let's disable the timeouts (if it exists), since the
            // subscription response never ends
            if (req.socket.server)
                req.socket.server.timeout = 0.0

            // We have a subscription!
            res.statusCode = 209
            res.setHeader("subscribe", req.headers.subscribe)
            res.setHeader('cache-control', 'no-cache, no-transform')

            // Note: I used to explicitly disable transfer-encoding chunked
            // here by setting the header to empty string. This is the only
            // way I know to disable it in nodejs. We don't need chunked
            // encoding in subscriptions, because chunked encoding is used to
            // signal the end of a response, and subscriptions don't end. I
            // disabled them to make responses cleaner. However, it turns out
            // the Caddy proxy throws an error if it receives a response with
            // transfer-encoding: set to the empty string. So I'm disabling
            // it now.
            // if (req.httpVersionMajor == 1) {
            //     // Explicitly disable transfer-encoding chunked for http 1
            //     res.setHeader('transfer-encoding', '')
            // }

            // Tell nginx not to buffer the subscription
            res.setHeader('X-Accel-Buffering', 'no')

            var connected = true
            // Fire args.onClose at most once, however many of the
            // close/finish/abort events end up firing
            function disconnected (x) {
                if (!connected) return
                connected = false
                // console.log(`Connection closed on ${req.url} from`, x, 'event')

                // Now call the callback
                if (args.onClose)
                    args.onClose()
            }
            res.on('close', x => disconnected('close'))
            res.on('finish', x => disconnected('finish'))
            req.on('abort', x => disconnected('abort'))
        }

    // Check the Useragent to work around Firefox bugs (send_update pads
    // subscription responses with extra newlines when this is set)
    if (req.headers['user-agent']
        && typeof req.headers['user-agent'] === 'string'
        && req.headers['user-agent'].toLowerCase().indexOf('firefox') > -1)
        res.is_firefox = true

    next && next()
}
// Write one update (a braid version) onto `res`.
//
// `data` carries the braid fields — version, parents, and exactly one of
// body / patch / patches — plus any extra fields, which are passed through
// as headers. On a subscription response, headers are written as "virtual
// headers" inside the response body; on a plain response they become real
// HTTP headers.
function send_update(res, data, url, peer) {
    var {version, parents, patches, patch, body} = data

    // Subscriptions stream their headers into the body; normal responses
    // use real HTTP headers
    function set_header (key, val) {
        if (res.isSubscription)
            res.write(`${key}: ${val}\r\n`)
        else
            res.setHeader(key, val)
    }
    function write_body (body) {
        if (res.isSubscription)
            // The blank line separates virtual headers from the body
            res.write('\r\n' + body)
        else
            res.write(body)
    }

    // console.log('sending version', {url, peer, version, parents, patches, body,
    //             subscription: res.isSubscription})

    // Validate that the body and patches are strings
    if (body !== undefined)
        assert(typeof body === 'string')
    else {
        // Only one of patch or patches can be set
        assert(!(patch && patches))
        assert((patch || patches) !== undefined)
        assert((patch || patches) !== null)

        // Patches must be an array
        if (patches)
            assert(Array.isArray(patches))

        // But if using `patch`, then we set `patches` to just that object
        if (patch)
            patches = patch

        // Now `patches` will be an array of patches or a single patch object.
        //
        // This distinction is used in generate_patches() to determine whether
        // to inline a single patch in the update body vs. writing out a
        // Patches: N block.
        assert(typeof patches === 'object')
        if (Array.isArray(patches))
            patches.forEach(p => {
                assert('unit' in p)
                assert('range' in p)
                assert('content' in p)
                assert(typeof p.content === 'string')
            })
    }

    // An empty-string body still counts as a body
    var body_exists = body || body === ''
    assert(body_exists || patches, 'Missing body or patches')
    assert(!(body_exists && patches), 'Cannot send both body and patches')

    // Write the headers or virtual headers
    for (var [header, value] of Object.entries(data)) {
        header = header.toLowerCase()

        // A header set to undefined acts like it wasn't set
        if (value === undefined)
            continue

        // Version and Parents get output in the Structured Headers format,
        // so we convert `value` from array to comma-separated strings.
        if (header === 'version') {
            header = 'Version' // Capitalize for prettiness
            value = value.map(JSON.stringify).join(", ")
        } else if (header === 'parents') {
            header = 'Parents' // Capitalize for prettiness
            value = value.map(JSON.stringify).join(", ")
        }

        // We don't output patches or body yet — they're written below
        else if (header === 'patches' || header === 'body' || header === 'patch')
            continue

        set_header(header, value)
    }

    // Write the patches or body
    if (typeof body === 'string') {
        // Content-Length counts UTF-8 bytes, not JS string length
        set_header('Content-Length', (new TextEncoder().encode(body)).length)
        write_body(body)
    } else
        res.write(generate_patches(res, patches))

    // Add a newline to prepare for the next version
    // See also https://github.com/braid-org/braid-spec/issues/73
    if (res.isSubscription) {
        var extra_newlines = 1
        if (res.is_firefox)
            // Work around Firefox network buffering bug
            // See https://github.com/braid-org/braidjs/issues/15
            extra_newlines = 240
        for (var i = 0; i < 1 + extra_newlines; i++)
            res.write("\r\n")
    }
}
// A parsing utility that inspects a byte array of incoming data to see if a
// complete header block sits at the beginning: some non-newline bytes
// terminated by a blank line.
//
// Returns null while the headers are still incomplete; otherwise returns
// {header_string, remaining_bytes} where remaining_bytes is everything after
// the blank-line terminator.
function extractHeader(input) {
    // Skip any CR (13) / LF (10) bytes left over before the headers begin
    let start = 0;
    while (input[start] === 13 || input[start] === 10)
        start++;
    if (start === input.length)
        return null; // Nothing but newlines so far: incomplete

    // Scan for the blank line that ends the headers. Accept all four
    // encodings: \n\n, \n\r\n, \r\n\n, and \r\n\r\n.
    for (let i = start; i < input.length; i++) {
        let tail = 0;
        if (input[i] === 10) {
            if (input[i + 1] === 10)
                tail = 2;                                     // \n\n
            else if (input[i + 1] === 13 && input[i + 2] === 10)
                tail = 3;                                     // \n\r\n
        } else if (input[i] === 13 && input[i + 1] === 10) {
            if (input[i + 2] === 10)
                tail = 3;                                     // \r\n\n
            else if (input[i + 2] === 13 && input[i + 3] === 10)
                tail = 4;                                     // \r\n\r\n
        }
        if (tail) {
            // Decode the header bytes and hand back the rest untouched
            const header_bytes = input.slice(start, i);
            return {
                header_string: new TextDecoder('utf-8').decode(new Uint8Array(header_bytes)),
                remaining_bytes: input.slice(i + tail)
            };
        }
    }
    return null; // No terminator found yet: incomplete headers
}
// The server library exports a single node/express middleware
module.exports = braidify
================================================
FILE: braid-http/contributing.md
================================================
# Contributing to Braid-HTTP
This is core code, and I'd like it to meet everyone's needs! I welcome
suggestions and improvements on all aspects, including style, features, API
design, and algorithm design. I'd like to refine this code into a great
reference library.
I also promise to fix bugs and generally help you get your needs met. Just
let me know what you want!
—Michael
================================================
FILE: braid-http/demos/blog/README
================================================
This is a demo blog / chat.
To run the demo:
git clone https://github.com/braid-org/braidjs.git
cd braidjs/braid-http/demos/blog
npm install express http2-express-bridge
node server.js
Then open https://localhost:3009/ in your browser, and then click through the
self-signed certificate warnings to "proceed anyway".
================================================
FILE: braid-http/demos/blog/certificate
================================================
-----BEGIN CERTIFICATE-----
MIIDXTCCAkWgAwIBAgIJALgm2/aRZmh6MA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMjAwOTEzMDU1NDI1WhcNMjEwOTEzMDU1NDI1WjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAvt7W6Y6IKMhIi/PGLWPMj+jWC/Ne2P2Yhqx9kuUN+hkXF8ZeFfIXpKI+
JRLObDB7Me/8y4bgGSUQAPEowi75gk/jShvEhoMoV3G4B1ERmJ2xe02hUY3AiRuT
Qj7Z26y6C1/zJ+sCyPbeHBOyuCo341qjTYZMKKWfKDX0CGex+pLQupSYXsSq4NPY
aVOlvxoqw4FBprxNXyjIs2PgX3LoGMNYVuBT4/F1B+Jwn62HNJdjGNFMW1272kC7
/NAd+V0cIcvDWojpX3DQXCOty+EVc58grjIfSi1N4Gq9OsjM49BV3neNknHSlxZ1
mNbthBCMc/7xWD0AVZhYdrquhDsKdQIDAQABo1AwTjAdBgNVHQ4EFgQUxcq1RR+P
dDFTGHLBdHU4s5WmYaIwHwYDVR0jBBgwFoAUxcq1RR+PdDFTGHLBdHU4s5WmYaIw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAgp03sPUbQO73H4iZq87h
u+zDCiVjB88KTrXvkB4jQ7nLaGOwicMQKDp/fDl19HrF4gUdRZIRadPGxMS25yNz
COQ+kRy/wVLGU6G6XfCjkhF/zuI+X6Bgku+trYqaL/bZsIe3VHLnUdpGetrDR0yO
n6YyGPkPm0uYOF8apt2/BZ4jRNACAThnKMf+0cyTi5xI3sz0E84adLcU5CIV3fcq
xeREOP7QUnnXR/isR39HgUAtJ1JGKB/KK7PbUxzqD5iTGZU9KGLxNw+zDmnJA5BQ
MdF3u583S7W7xLsZf6rAx3FQBUJ5puyC+qMeU/JhS0rzBR/auUXGpkBU2ZPb846H
zg==
-----END CERTIFICATE-----
================================================
FILE: braid-http/demos/blog/client.html
================================================
================================================
FILE: braid-http/demos/blog/package.json
================================================
{
"dependencies": {
"express": "^4.19.2",
"http2-express-bridge": "^1.0.7"
}
}
================================================
FILE: braid-http/demos/blog/private-key
================================================
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAvt7W6Y6IKMhIi/PGLWPMj+jWC/Ne2P2Yhqx9kuUN+hkXF8Ze
FfIXpKI+JRLObDB7Me/8y4bgGSUQAPEowi75gk/jShvEhoMoV3G4B1ERmJ2xe02h
UY3AiRuTQj7Z26y6C1/zJ+sCyPbeHBOyuCo341qjTYZMKKWfKDX0CGex+pLQupSY
XsSq4NPYaVOlvxoqw4FBprxNXyjIs2PgX3LoGMNYVuBT4/F1B+Jwn62HNJdjGNFM
W1272kC7/NAd+V0cIcvDWojpX3DQXCOty+EVc58grjIfSi1N4Gq9OsjM49BV3neN
knHSlxZ1mNbthBCMc/7xWD0AVZhYdrquhDsKdQIDAQABAoIBAQCwg+S8mdPR42c0
Bn1//ItxiyJoaumMQvqLkXmQH8SNRibVFd5s7TZwSpquLnY4F53W0No4XsNgfaYP
OPc4nbihir/oCUX2H9VvCPvnyFE9kphQL7rAcRddtOK6oOVZAqQLYPC+OdiZgbRm
wtDFAEYvO+NKDqjf7ksCJONJ4pv13bczI7UDFhB/js1AhoLw+f65frdUw0smDNeO
0wAWlEMym0fXFMIil0VGq5EMRsc1bGlDmYpjZfwD5VPacpq/BjmdUO2IHRYH24G1
mPXQgphEgHYK4zgOltPkXDhVQzg79oCb9jwKCLoxHDeAtRMGKRohjjY7g6+Tcbok
86L0TxABAoGBAOl8HQFL+rna/Edm6jWW7+FbxVFnCTv4UC35XGA+suc0s8glLCeV
J3PM/VrvTqGNjvmpjmUMJuHvpGR879VuyKcjnq2VlPax3lIN5t8M4dMe02jX3E8q
GoREde/ElUv78/IdopNNV82KgLYVsCCfdYv7YpUr1r6JA5Mi1KraNbQFAoGBANFG
u9A4DBbG/nCBYdqzqsLVxKctnDnGtgM5/BVxfe9KYtj4RgsYE12OOD7QKy6oKp0i
ef3CcJGP6QOy8NGocm1GCeWLmULPJ2dBa2QP22dWCv9tEjf9rc9OcH3GxwuWS4I4
5V2/z2Q6/JI+1z2jvkdAe4zKkQ6cDUeHm6QR4rexAoGARFTaeEKwQixgoNTxvnVK
Bv5ApS4ueaqWbJ4J9vDikt/NrcmTPpJcVXusuixHKuiu60pALjp8NqtXxUD7P8+I
UcIO8mZQjlJH6mO/KZAvlwXygUCLbW+5CvSuP1mdB/vYzQb3SXzdMX5TZPa78RA5
6pnLpDSMSH742NrcupPaG7ECgYB7/AcLPhlTaUOXDeAWfPk/AvzF+syioip1UOYb
Iij1GsHjP3vn2LSoabjC5fufYbwVajaR859TbcokCeCIpd5dBPCaERGUtjvm/agN
GSPoXd6YI1t21pEhz5vvuFN2du13UhP5bckF3biDQD2u5BK4DejkqaI04JkxdpLs
KjiCkQKBgBV0DAhG+mOsMegv1LkK5Sg1XQt+zG8c11ESVPISEpLvaBZjVxVLdrKs
R5ohNxC25kVbaxvfOTP4GWrJ1l8u7HKPVdqfY2j68JV3qyIZJ40a8HdEm5mr24HA
fKQXk8BRRZb8OWrb6O1jeTuYkYvzwUGtqF7UzqY440YHxFED3V9j
-----END RSA PRIVATE KEY-----
================================================
FILE: braid-http/demos/blog/server.js
================================================
// Bug fix: `assert` was assigned without `var`, creating an implicit global
var assert = require('assert')

// Blog Data: an index of links at /blog, plus the posts themselves
var resources = {
    '/blog': [
        {link: '/post/1'},
        {link: '/post/2'},
        {link: '/post/3'}
    ],
    '/post/1': {body: 'First post OMGGG!!!!'},
    '/post/2': {body: `Once upon a time,
I ate a big fish.
It was really tasty.`},
    '/post/3': {body: "It's nice when things come in threes."}
}

// The blog's version is just the number of posts, as a string
var curr_version = () => [ resources['/blog'].length + '' ]

// Subscription data: maps JSON.stringify([peer, url]) -> response object
var subscriptions = {}
var rhash = (req) => JSON.stringify([req.headers.peer, req.url])

// Create our HTTP bindings!
var braidify = require('../../index.js').http_server
var app = require('http2-express-bridge')(require('express'))

// Middleware
app.use(log_request)
app.use(free_the_cors)
app.use(braidify)
// HTTP Routes

// Shared GET handler for /blog and /post/:id: serves the current state, and
// opens a subscription when the request asks for one
function getter (req, res) {
    // Unknown URLs get a 404
    if (!(req.url in resources)) {
        res.statusCode = 404
        res.end()
        return
    }

    // Honor any subscription request: remember the open response stream so
    // future PUTs can push to it, and forget it when the connection dies
    if (req.subscribe) {
        var key = rhash(req)
        res.startSubscription({ onClose: _=> delete subscriptions[key] })
        subscriptions[key] = res
    } else
        res.statusCode = 200

    // Either way, send the current version
    res.sendUpdate({
        version: curr_version(),
        body: JSON.stringify(resources[req.url])
    })

    // Plain GETs are complete; subscriptions stay open
    if (!req.subscribe)
        res.end()
}
app.get('/blog', getter)
app.get('/post/:id', getter)

// Append a new post link to the blog index, then fan the patch out to every
// other subscriber
app.put('/blog', async (req, res) => {
    var patches = (await req.parseUpdate()).patches
    console.log('Extending /blog with!', patches)
    // assert(patches.length === 1)
    // assert(patches[0].range === '[-0:-0]')
    resources['/blog'].push(JSON.parse(patches[0].content))
    for (var k in subscriptions) {
        var [peer, url] = JSON.parse(k)
        // Notify subscribers of this URL, skipping the peer that sent the PUT
        if (peer !== req.headers.peer && url === req.url)
            subscriptions[k].sendUpdate({
                version: curr_version(),
                patches
            })
    }
    res.statusCode = 200
    res.end()
})

// Replace a post's contents wholesale, then fan the new body out to every
// other subscriber
app.put('/post/:id', async (req, res) => {
    var update = await req.parseUpdate()
    console.log('Setting', req.url, 'with', update)
    assert(typeof update.body === 'string')
    resources[req.url] = JSON.parse(update.body)
    for (var k in subscriptions) {
        var [peer, url] = JSON.parse(k)
        if (peer !== req.headers.peer && url === req.url)
            subscriptions[k].sendUpdate({
                version: curr_version(),
                body: update.body
            })
    }
    res.end()
})

// Now serve the HTML and client files.
// Bug fix: `sendfile` was assigned without `var`, creating an implicit global
var sendfile = (f) => (req, res) => res.sendFile(f, {root:'../..'})
app.get('/', sendfile('demos/blog/client.html'));
app.get('/braid-http-client.js', sendfile('braid-http-client.js'))
// Define Middleware
// Trace every incoming request as "<METHOD> <URL>", then continue the chain
function log_request (req, res, next) {
    console.log(`${req.method} ${req.url}`)
    next()
}
// Middleware that opens up CORS completely, advertises braid range-patch
// support, and short-circuits OPTIONS preflights
function free_the_cors (req, res, next) {
    res.setHeader('Range-Request-Allow-Methods', 'PATCH, PUT')
    res.setHeader('Range-Request-Allow-Units', 'json')
    res.setHeader("Patches", "OK")

    // Let any origin talk braid to this server
    var cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "OPTIONS, HEAD, GET, PUT, UNSUBSCRIBE",
        "Access-Control-Allow-Headers": "subscribe, peer, version, parents, merge-type, content-type, patches, cache-control"
    }
    for (var [header, value] of Object.entries(cors_headers))
        res.setHeader(header, value)

    // Preflights are answered immediately; everything else proceeds
    if (req.method === 'OPTIONS') {
        res.writeHead(200)
        res.end()
    } else
        next()
}
// Launch the https server
var server = require('http2').createSecureServer(
    {
        // Self-signed cert/key checked into the repo — demo use only
        cert: require('fs').readFileSync('./certificate'),
        key: require('fs').readFileSync('./private-key'),
        allowHTTP1: true   // Let http/1.1 clients connect too
    },
    app
)
// server.setTimeout(0, x => console.log('Server timeout!', x))
// console.log('Server timeouts:', server.timeout, server.keepAliveTimeout)
server.listen(3009, _=> console.log('listening on port 3009...'))
================================================
FILE: braid-http/demos/chat/README
================================================
To run the braidjs chat demo:
git clone https://github.com/braid-org/braidjs.git
cd braidjs/braid-http/demos/chat
npm install express http2-express-bridge parse-headers
node server.js
Then open braid-http/demos/chat/client.html in your browser. It won't work yet
because it doesn't trust the self-signed certificate. To get around this,
open https://localhost:3009/ in your browser, and then:
- In Chrome, type the magic phrase "thisisunsafe" into the page
- In Firefox, click "advanced" and "accept and continue"
- In Safari, click through the warnings to "proceed anyway"
================================================
FILE: braid-http/demos/chat/certificate
================================================
-----BEGIN CERTIFICATE-----
MIIDXTCCAkWgAwIBAgIJALgm2/aRZmh6MA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMjAwOTEzMDU1NDI1WhcNMjEwOTEzMDU1NDI1WjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAvt7W6Y6IKMhIi/PGLWPMj+jWC/Ne2P2Yhqx9kuUN+hkXF8ZeFfIXpKI+
JRLObDB7Me/8y4bgGSUQAPEowi75gk/jShvEhoMoV3G4B1ERmJ2xe02hUY3AiRuT
Qj7Z26y6C1/zJ+sCyPbeHBOyuCo341qjTYZMKKWfKDX0CGex+pLQupSYXsSq4NPY
aVOlvxoqw4FBprxNXyjIs2PgX3LoGMNYVuBT4/F1B+Jwn62HNJdjGNFMW1272kC7
/NAd+V0cIcvDWojpX3DQXCOty+EVc58grjIfSi1N4Gq9OsjM49BV3neNknHSlxZ1
mNbthBCMc/7xWD0AVZhYdrquhDsKdQIDAQABo1AwTjAdBgNVHQ4EFgQUxcq1RR+P
dDFTGHLBdHU4s5WmYaIwHwYDVR0jBBgwFoAUxcq1RR+PdDFTGHLBdHU4s5WmYaIw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAgp03sPUbQO73H4iZq87h
u+zDCiVjB88KTrXvkB4jQ7nLaGOwicMQKDp/fDl19HrF4gUdRZIRadPGxMS25yNz
COQ+kRy/wVLGU6G6XfCjkhF/zuI+X6Bgku+trYqaL/bZsIe3VHLnUdpGetrDR0yO
n6YyGPkPm0uYOF8apt2/BZ4jRNACAThnKMf+0cyTi5xI3sz0E84adLcU5CIV3fcq
xeREOP7QUnnXR/isR39HgUAtJ1JGKB/KK7PbUxzqD5iTGZU9KGLxNw+zDmnJA5BQ
MdF3u583S7W7xLsZf6rAx3FQBUJ5puyC+qMeU/JhS0rzBR/auUXGpkBU2ZPb846H
zg==
-----END CERTIFICATE-----
================================================
FILE: braid-http/demos/chat/client.html
================================================
================================================
FILE: braid-http/demos/chat/package.json
================================================
{
"dependencies": {
"express": "^4.19.2",
"http2-express-bridge": "^1.0.7"
}
}
================================================
FILE: braid-http/demos/chat/private-key
================================================
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAvt7W6Y6IKMhIi/PGLWPMj+jWC/Ne2P2Yhqx9kuUN+hkXF8Ze
FfIXpKI+JRLObDB7Me/8y4bgGSUQAPEowi75gk/jShvEhoMoV3G4B1ERmJ2xe02h
UY3AiRuTQj7Z26y6C1/zJ+sCyPbeHBOyuCo341qjTYZMKKWfKDX0CGex+pLQupSY
XsSq4NPYaVOlvxoqw4FBprxNXyjIs2PgX3LoGMNYVuBT4/F1B+Jwn62HNJdjGNFM
W1272kC7/NAd+V0cIcvDWojpX3DQXCOty+EVc58grjIfSi1N4Gq9OsjM49BV3neN
knHSlxZ1mNbthBCMc/7xWD0AVZhYdrquhDsKdQIDAQABAoIBAQCwg+S8mdPR42c0
Bn1//ItxiyJoaumMQvqLkXmQH8SNRibVFd5s7TZwSpquLnY4F53W0No4XsNgfaYP
OPc4nbihir/oCUX2H9VvCPvnyFE9kphQL7rAcRddtOK6oOVZAqQLYPC+OdiZgbRm
wtDFAEYvO+NKDqjf7ksCJONJ4pv13bczI7UDFhB/js1AhoLw+f65frdUw0smDNeO
0wAWlEMym0fXFMIil0VGq5EMRsc1bGlDmYpjZfwD5VPacpq/BjmdUO2IHRYH24G1
mPXQgphEgHYK4zgOltPkXDhVQzg79oCb9jwKCLoxHDeAtRMGKRohjjY7g6+Tcbok
86L0TxABAoGBAOl8HQFL+rna/Edm6jWW7+FbxVFnCTv4UC35XGA+suc0s8glLCeV
J3PM/VrvTqGNjvmpjmUMJuHvpGR879VuyKcjnq2VlPax3lIN5t8M4dMe02jX3E8q
GoREde/ElUv78/IdopNNV82KgLYVsCCfdYv7YpUr1r6JA5Mi1KraNbQFAoGBANFG
u9A4DBbG/nCBYdqzqsLVxKctnDnGtgM5/BVxfe9KYtj4RgsYE12OOD7QKy6oKp0i
ef3CcJGP6QOy8NGocm1GCeWLmULPJ2dBa2QP22dWCv9tEjf9rc9OcH3GxwuWS4I4
5V2/z2Q6/JI+1z2jvkdAe4zKkQ6cDUeHm6QR4rexAoGARFTaeEKwQixgoNTxvnVK
Bv5ApS4ueaqWbJ4J9vDikt/NrcmTPpJcVXusuixHKuiu60pALjp8NqtXxUD7P8+I
UcIO8mZQjlJH6mO/KZAvlwXygUCLbW+5CvSuP1mdB/vYzQb3SXzdMX5TZPa78RA5
6pnLpDSMSH742NrcupPaG7ECgYB7/AcLPhlTaUOXDeAWfPk/AvzF+syioip1UOYb
Iij1GsHjP3vn2LSoabjC5fufYbwVajaR859TbcokCeCIpd5dBPCaERGUtjvm/agN
GSPoXd6YI1t21pEhz5vvuFN2du13UhP5bckF3biDQD2u5BK4DejkqaI04JkxdpLs
KjiCkQKBgBV0DAhG+mOsMegv1LkK5Sg1XQt+zG8c11ESVPISEpLvaBZjVxVLdrKs
R5ohNxC25kVbaxvfOTP4GWrJ1l8u7HKPVdqfY2j68JV3qyIZJ40a8HdEm5mr24HA
fKQXk8BRRZb8OWrb6O1jeTuYkYvzwUGtqF7UzqY440YHxFED3V9j
-----END RSA PRIVATE KEY-----
================================================
FILE: braid-http/demos/chat/server.js
================================================
var assert = require('assert')

// Chat Data: the single '/chat' resource is an append-only list of messages
var resources = {
    '/chat': [
        {text: 'Hello!'},
        {text: 'This is a post!'},
        {text: 'This is a post-modern!'}
    ]
}

// A resource's version is just the length of the chat history, as a string
var chat_version = () => [resources['/chat'].length.toString()]
var post_versions = {}   // NOTE(review): appears unused in this file

// Subscription data: maps JSON.stringify([peer, url]) -> response object
var subscriptions = {}
var subscription_hash = (req) => JSON.stringify([req.headers.peer, req.url])

// Create our HTTP bindings!
//var braidify = require('../../braid-http-server')
var braidify = require('../../index.js').http_server
var app = require('http2-express-bridge')(require('express'))

// Middleware
app.use(free_the_cors)
app.use(braidify)
// GET /chat: serve the current messages, and open a subscription if asked
app.get('/chat', (req, res) => {
    console.log('get for peer', req.headers.peer)

    // Honor any subscription request
    if (req.subscribe) { // Using the new subscription feature braidify is adding to req & res
        // Remember the open response stream so PUTs can push to it, and
        // forget it when the connection dies
        res.startSubscription({ onClose: _=> delete subscriptions[subscription_hash(req)] })
        subscriptions[subscription_hash(req)] = res
        console.log('We are subscribing at hash', subscription_hash(req))
    } else {
        res.statusCode = 200
    }

    // Send the current version
    res.sendUpdate({
        version: chat_version(),
        body: JSON.stringify(resources['/chat'])
    })

    // Plain GETs are complete; subscriptions stay open
    if (!req.subscribe)
        res.end()
})
// PUT /chat: append one message, then fan it out to the other subscribers
app.put('/chat', async (req, res) => {
    var patches = await req.patches() // Braidify adds .patches() to request objects

    // Bug: Should return error code (40x?) for invalid request instead of crashing
    assert(patches.length === 1)
    assert(patches[0].range === '[-0:-0]')   // presumably an append-to-end range — TODO confirm
    assert(patches[0].unit === 'json')
    resources['/chat'].push(JSON.parse(patches[0].content))

    // Now send the data to all subscribers
    for (var k in subscriptions) {
        var [peer, url] = JSON.parse(k)
        if (url === req.url // Send only to subscribers of this URL
            && peer !== req.headers.peer) // Skip the peer that sent this PUT
            subscriptions[k].sendUpdate({
                version: chat_version(),
                patches
            })
    }
    res.statusCode = 200
    res.end()
})

// Now serve the HTML and client files
var sendfile = (f) => (req, res) => res.sendFile(require('path').join(__dirname, f))
app.get('/', sendfile('client.html'));
app.get('/braid-http-client.js', sendfile('../../braid-http-client.js'))
// Free the CORS! Middleware that opens up CORS completely, advertises braid
// range-patch support, and short-circuits OPTIONS preflights.
function free_the_cors (req, res, next) {
    console.log('free the cors!', req.method, req.url)

    // Hey... these headers aren't about CORS! Let's move them into the braid
    // libraries:
    res.setHeader('Range-Request-Allow-Methods', 'PATCH, PUT')
    res.setHeader('Range-Request-Allow-Units', 'json')
    res.setHeader("Patches", "OK")
    // ^^ Actually, it looks like we're going to delete these soon.

    // Let any origin talk braid to this server
    var cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "OPTIONS, HEAD, GET, PUT, UNSUBSCRIBE",
        "Access-Control-Allow-Headers": "subscribe, peer, version, parents, merge-type, content-type, patches, cache-control"
    }
    for (var [header, value] of Object.entries(cors_headers))
        res.setHeader(header, value)

    // Preflights are answered immediately; everything else proceeds
    if (req.method === 'OPTIONS') {
        res.writeHead(200)
        res.end()
    } else
        next()
}
// Launch the https server
var server = require('http2').createSecureServer(
    {
        // Self-signed cert/key checked into the repo — demo use only
        cert: require('fs').readFileSync('./certificate'),
        key: require('fs').readFileSync('./private-key'),
        allowHTTP1: true   // Let http/1.1 clients connect too
    },
    app
)
// server.setTimeout(0, x => console.log('Server timeout!', x))
// console.log('Server timeouts:', server.timeout, server.keepAliveTimeout)
server.listen(3009, _=> console.log('listening on port 3009...'))
================================================
FILE: braid-http/index.js
================================================
// This is the root file for require('braid-http').
//
// It combines the client and server files into one file.
var client = require('./braid-http-client'),
server = require('./braid-http-server')
module.exports = {
fetch: client.fetch,
http_client: client.http,
http_server: server
}
================================================
FILE: braid-http/index.mjs
================================================
// This is the root file for es modules:
//
//     import {fetch, http} from 'braid-http'
//
// This file combines the client and server files into one file.
import braid_client from './braid-http-client.js'
import braid_server from './braid-http-server.js'

// Re-export under the public names (index.js is the commonjs twin of this file)
var fetch = braid_client.fetch,
    http_client = braid_client.http,
    http_server = braid_server

// Support both named and default imports
export { fetch, http_client, http_server }
export default { fetch, http_client, http_server }
================================================
FILE: braid-http/package.json
================================================
{
"name": "braid-http",
"version": "0.3.21",
"description": "An implementation of Braid-HTTP for Node.js and Browsers",
"scripts": {
"test": "node test/server.js"
},
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org",
"files": [
"braid-http-client.js",
"braid-http-server.js",
"index.js",
"index.mjs"
],
"main": "./index.js",
"exports": {
"require": "./index.js",
"import": "./index.mjs"
},
"browser": {
"node-web-streams": false,
"node-fetch": false,
"abort-controller": false
},
"dependencies": {
"abort-controller": "^3.0.0",
"node-fetch": "^2.6.1",
"parse-headers": "^2.0.3",
"web-streams-node": "^0.4.0"
}
}
================================================
FILE: braid-http/package.md
================================================
# package.json notes
This package is bundled as both a commonjs and es6-compatible NPM bundle. The
factor that enables this dual packaging is the "exports" key in the package.json
file:
## exports
- `require`: When this package is in a commonjs environment (e.g. default nodejs)
the ./index.js file will be the thing that is 'require'd.
- `import`: When this package is in an es6 environment (e.g. bundler, modern nodejs,
modern browser) the ./index.mjs will be the thing 'import'ed.
## dependencies
- `node-fetch`: When the http-client protocol is used, node-fetch supplies 'fetch'
for a nodejs client
- `node-web-streams`: Although node-fetch is mostly isomorphic, its internal stream
is not the same as a web stream reader; we need it to have the same API.
- `spdy` (no longer listed in package.json): formerly gave us http2.0 connection
multiplexing with a 'natural http module interface'. (http1.1 provides a max of 6 open conns)
## Development Notes
For code that is intended to run in all environments (e.g. browser, node) and
potentially pass through a bundler step, the following guidelines are helpful:
- Use single-value module.exports in files, and named exports in wrappers.
- If using globals, it's also important to use module.exports; for example:
```
function braid_fetch(...) { ... }
if (typeof module !== 'undefined' && module.exports) {
module.exports = braid_fetch
}
```
For a complete list of reasons for the madness, and to learn more about the method
we've used to build this package, see https://redfin.engineering/node-modules-at-war-why-commonjs-and-es-modules-cant-get-along-9617135eeca1
Because we `require` certain libraries that are meant to be used in a nodejs environment only, we also need to provide a hint to bundlers that are targeting a browser environment NOT to load those libraries. This is what the `browser` field in `package.json` is for:
```
"browser": {
"node-web-streams": false,
"node-fetch": false,
"abort-controller": false
}
```
If we don't hint that these libraries should not be loaded in the browser, bundled code that depends on braidjs libraries will fail in the browser.
See also https://github.com/defunctzombie/package-browser-field-spec.
================================================
FILE: braid-http/readme.md
================================================
# NOTE: This project has moved to [braid-http](https://github.com/braid-org/braid-http)
# Braid-HTTP
This polyfill library implements the [Braid-HTTP v04 protocol](https://github.com/braid-org/braid-spec/blob/master/draft-toomim-httpbis-braid-http-04.txt) in Javascript. It gives browsers a `braid_fetch()` drop-in replacement for the `fetch()` API, and gives nodejs an `http` plugin, allowing them to speak Braid in a simple way.
Developed in [braid.org](https://braid.org).
## Installing
Browsers:
```html
```
Node.js:
```shell
npm install braid-http
```
```javascript
// Import with require()
require('braid-http').fetch // A polyfill for require('node-fetch')
require('braid-http').http_client // A polyfill for require('http') clients
require('braid-http').http_server // A polyfill for require('http') servers
// Or as es6 module
import {fetch, http_client, http_server} from 'braid-http'
```
## Using it in Browsers
This library adds a `{subscribe: true}` option to `fetch()`, and lets you
access the result of a subscription with two new fields on the fetch response:
- `response.subscribe( update => ... )`
- `response.subscription`: an iterator that can be used with `for await`
### Example Subscription with Promises
Here is an example of subscribing to a Braid resource using promises:
```javascript
fetch('https://braid.org/chat', {subscribe: true}).then(
res => res.subscribe(
(update) => {
console.log('We got a new update!', update)
// {
// version: ["me"],
// parents: ["mom", "dad"],
// patches: [{unit: "json", range: ".foo", content: "3"}]
// body: "3"
// }
//
// Note that `update` will contain either patches *or* body
}
)
)
```
If you want automatic reconnections, add two error handlers like this:
```javascript
function connect() {
fetch('https://braid.org/chat', {subscribe: true}).then(
res => res.subscribe(
(update) => {
console.log('We got a new update!', update)
// Do something with the update
},
e => setTimeout(connect, 1000)
)
).catch(e => setTimeout(connect, 1000))
}
connect()
```
### Example Subscription with Async/Await
```javascript
async function connect () {
try {
(await fetch('/chat', {subscribe: true})).subscribe(
(update) => {
// We got a new update!
},
() => setTimeout(connect, 1000)
)
} catch (e) {
setTimeout(connect, 1000)
}
}
```
### Example Subscription with `for await`
```javascript
async function connect () {
try {
var subscription_iterator = (await fetch('/chat', {subscribe: true})).subscription
for await (var update of subscription_iterator) {
// Updates might come in the form of patches:
if (update.patches)
chat = apply_patches(update.patches, chat)
// Or complete snapshots:
else
// Beware the server doesn't send these yet.
chat = JSON.parse(update.body)
render_stuff()
}
} catch (e) {
console.log('Reconnecting...')
setTimeout(connect, 4000)
}
}
```
## Using it in Nodejs
### Example Nodejs server with `require('http')`
Braidify adds these fields and methods to requests and responses:
- `req.subscribe`
- `res.startSubscription({onClose: cb})`
- `await req.parseUpdate()`
- `res.sendUpdate()`
Use it like this:
```javascript
var braidify = require('braid-http').http_server
// or:
import {http_server as braidify} from 'braid-http'
require('http').createServer(
(req, res) => {
// Add braid stuff to req and res
braidify(req, res)
// Now use it
if (req.subscribe)
res.startSubscription({ onClose: _=> null })
// startSubscription automatically sets statusCode = 209
else
res.statusCode = 200
// Send the current version
res.sendUpdate({
version: ['greg'],
body: JSON.stringify({greg: 'greg'})
})
}
).listen(9935)
```
### Example Nodejs server with `require('express')`
With `express`, you can simply call `app.use(braidify)` to get braid features
added to every request and response.
```javascript
var braidify = require('braid-http').http_server
// or:
import {http_server as braidify} from 'braid-http'
var app = require('express')()
app.use(braidify) // Add braid stuff to req and res
app.get('/', (req, res) => {
// Now use it
if (req.subscribe)
res.startSubscription({ onClose: _=> null })
// startSubscription automatically sets statusCode = 209
else
res.statusCode = 200
// Send the current version
res.sendUpdate({
version: ['greg'],
parents: ['gr','eg'],
body: JSON.stringify({greg: 'greg'})
})
// Or you can send patches like this:
// res.sendUpdate({
// version: ['greg'],
// parents: ['gr','eg'],
// patches: [{range: '.greg', unit: 'json', content: '"greg"'}]
// })
})
require('http').createServer(app).listen(8583)
```
### Example Nodejs client with `require('http')`
```javascript
// Use this line if necessary for self-signed certs
// process.env["NODE_TLS_REJECT_UNAUTHORIZED"] = 0
var https = require('braid-http').http_client(require('https'))
// or:
// import braid_http from 'braid-http'
// https = braid_http.http_client(require('https'))
https.get(
'https://braid.org/chat',
{subscribe: true},
(res) => {
res.on('update', (update) => {
console.log('well we got one', update)
})
}
)
```
To get auto-reconnections use:
```javascript
function connect () {
https.get(
'https://braid.org/chat',
{subscribe: true},
(res) => {
res.on('update', (update) => {
// {
// version: ["me"],
// parents: ["mom", "dad"],
// patches: [{unit: "json", range: ".foo", content: "3"}]
// body: "3"
// }
// // Update will contain either patches *or* body, but not both
console.log('We got a new update!', update)
})
res.on('end', e => setTimeout(connect, 1000))
res.on('error', e => setTimeout(connect, 1000))
})
}
connect()
```
### Example Nodejs client with `fetch()`
```javascript
var fetch = require('braid-http').fetch
// or:
import {fetch} from 'braid-http'
// process.env["NODE_TLS_REJECT_UNAUTHORIZED"] = 0
fetch('https://localhost:3009/chat',
{subscribe: true}).then(
x => console.log('Got ', x)
)
```
Note: the current version of `node-fetch` doesn't properly throw errors when a
response connection dies, and thus you cannot attach a `.catch()` handler to
automatically reconnect. (See
[issue #980](https://github.com/node-fetch/node-fetch/issues/980) and
[#753](https://github.com/node-fetch/node-fetch/issues/753).) We recommend
using the `http` library (below) for requests on nodejs instead.
================================================
FILE: braid-http/test/client.html
================================================
================================================
FILE: braid-http/test/readme.md
================================================
# To Test Braid-HTTP
Run the server with:
```
node server.js
```
### Test the server alone
Run this at your command-line:
```
$ curl -v -H Subscribe:true http://localhost:9000/json
```
You should see this:
```
* Trying 127.0.0.1:9000...
* Connected to localhost (127.0.0.1) port 9000 (#0)
> GET /json HTTP/1.1
> Host: localhost:9000
> User-Agent: curl/7.79.1
> Accept: */*
> Subscribe:true
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 209 unknown
< Range-Request-Allow-Methods: PATCH, PUT
< Range-Request-Allow-Units: json
< content-type: application/json
< subscribe: true
< cache-control: no-cache, no-transform
< X-Accel-Buffering: no
< Date: Wed, 29 May 2024 13:05:38 GMT
< Connection: keep-alive
< Keep-Alive: timeout=5
< Transfer-Encoding: chunked
<
Version: "test"
Parents: "oldie"
Content-Length: 16
{"this":"stuff"}
Version: "test1"
Parents: "oldie", "goodie"
hash: 42
:status: 115
Content-Length: 1
Content-Range: json [1]
1
Version: "test2"
Content-Length: 1
Content-Range: json [2]
2
Version: "test3"
Patches: 2
Content-Length: 1
Content-Range: json [3]
hash: 43
3
Content-Length: 1
Content-Range: json [4]
4
Version: "another!"
Content-Length: 3
"!"
```
...and the connection should stay open until you hit `C-c`.
### Test the client against the server
Open a browser to:
```
http://localhost:9000/
```
The page will run a series of GET+subscribe and PUT tests, and then turn green
if they succeed, and red if they failed.
If you kill and restart the server, the browser should wait a second,
reconnect and then display a **Reconnection Results** section that looks like
this:
```
Read 1 connection died
Read 3 connection died
Read 2 connection died
Read 1 {"version":"test","parents":["oldie"],"body":"{\"this\":\"stuff\"}"}!
Read 1 {"version":"test1","parents":["oldie","goodie"],"patches":[{"unit":"json","range":"[1]","content":"1"}]}!
Read 1 {"version":"test2","patches":[{"unit":"json","range":"[2]","content":"2"}]}!
Read 1 {"version":"test3","patches":[{"headers":{"content-length":"1","content-range":"json [3]"},"unit":"json","range":"[3]","content":"3"},{"headers":{"content-length":"1","content-range":"json [4]"},"unit":"json","range":"[4]","content":"4"}]}!
Read 3 {"version":"test","parents":["oldie"],"body":"{\"this\":\"stuff\"}"}!
Read 2 {"version":"test","parents":["oldie"],"body":"{\"this\":\"stuff\"}"}!
Read 2 {"version":"test1","parents":["oldie","goodie"],"patches":[{"unit":"json","range":"[1]","content":"1"}]}!
Read 2 {"version":"test2","patches":[{"unit":"json","range":"[2]","content":"2"}]}!
Read 2 {"version":"test3","patches":[{"headers":{"content-length":"1","content-range":"json [3]"},"unit":"json","range":"[3]","content":"3"},{"headers":{"content-length":"1","content-range":"json [4]"},"unit":"json","range":"[4]","content":"4"}]}!
Read 1 {"version":"another!","body":"!"}!
Read 3 {"version":"another!","body":"!"}!
Read 2 {"version":"another!","body":"!"}!
```
### Debugging Advice
If the client tests fail, plug them into https://glittle.org/diff to see
what's wrong.
You can capture a request in unix with `nc -l 9000 > test-request.txt` to listen to
port 9000 while your browser initiates a request, and then capture a response
with `nc localhost 9000 < test-request.txt` to read the request from disk and send
it to a server running on port 9000.
================================================
FILE: braid-http/test/server.js
================================================
// Test server for braid-http. It serves three things:
//   - a Braid subscription (and plain GET) of JSON at /json
//   - a Braid PUT endpoint at /json
//   - the static test client and fixtures
// NOTE: the exact bytes this server emits (including the deliberately
// mixed-case headers below) are pinned by test-responses.txt — do not
// "clean up" the updates it sends.
var braidify = require('../braid-http-server.js')

// Read a file relative to this directory and send it as the entire response body.
var sendfile = (f, req, res) => res.end(require('fs').readFileSync(require('path').join(__dirname, f)))

require('http').createServer(
    (req, res) => {
        // Braidifies our server
        braidify(req, res)

        console.log('Request:', req.url, req.method,
                    req.subscribe ? ('Subscribe: ' + req.subscribe)
                    : 'no subscription')

        // We'll serve Braid at the /json route!
        if (req.url === '/json' && req.method === 'GET') {
            res.setHeader('content-type', 'application/json')
            // res.setHeader('accept-subscribe', 'true')

            // If the client requested a subscription, let's honor it!
            if (req.subscribe)
                res.startSubscription()

            // Send the current version
            res.sendUpdate({
                version: ['test'],
                parents: ['oldie'],
                body: JSON.stringify({this: 'stuff'})
            })

            if (req.subscribe) {
                // Send a patch
                res.sendUpdate({
                    VersiOn: ['test1'],           // Upper/lowercase is ignored
                    ParEnts: ['oldie', 'goodie'],
                    patch: {unit: 'json', range: '[1]', content: '1'},
                    // Extra headers ride along with the update and should be
                    // surfaced to the client as extra_headers
                    hash: '42',
                    ':status': '115'
                })

                // Send a patch as array
                res.sendUpdate({
                    Version: ['test2'],
                    patch: {unit: 'json', range: '[2]', content: '2'}
                })

                // Send two patches as array
                res.sendUpdate({
                    version: ['test3'],
                    patches: [{unit: 'json', range: '[3]', content: '3', hash: '43'},
                              {unit: 'json', range: '[4]', content: '4'}]
                })

                // Simulate an update after the fact, to exercise the client's
                // long-lived subscription handling
                setTimeout(() => res.sendUpdate({version: ['another!'], body: '"!"'}), 200)
            }

            // End the response, if this isn't a subscription
            if (!req.subscribe) {
                res.statusCode = 200
                res.end()
            }
        }

        // We'll accept Braid at the /json PUTs!
        if (req.url === '/json' && req.method === 'PUT') {
            // parseUpdate() collects the request body and parses the Braid
            // patches/body out of it
            req.parseUpdate().then(update => {
                console.log('We got PUT', req.version, 'update', update)
                res.statusCode = 200
                res.end()
            })
        }

        // Static HTML routes here:
        else if (req.url === '/')
            sendfile('client.html', req, res)
        else if (req.url === '/braid-http-client.js')
            sendfile('../braid-http-client.js', req, res)
        else if (req.url === '/test-responses.txt')
            sendfile('test-responses.txt', req, res)
        // NOTE(review): unknown routes fall through without a response and
        // the connection just hangs — acceptable for a test harness.
    }
).listen(9000, () => console.log("Listening on http://localhost:9000..."))
================================================
FILE: braid-http/test/test-request.txt
================================================
GET /json HTTP/1.1
Host: localhost:9000
Connection: keep-alive
Cache-Control: max-age=0
sec-ch-ua: "Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"
peer: zxam86iouzp
sec-ch-ua-mobile: ?0
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36
subscribe: true
sec-ch-ua-platform: "macOS"
Accept: */*
Sec-Fetch-Site: same-origin
Sec-Fetch-Mode: cors
Sec-Fetch-Dest: empty
Referer: http://localhost:9000/
Accept-Encoding: gzip, deflate, br
Accept-Language: en-US,en;q=0.9
================================================
FILE: braid-http/test/test-responses.txt
================================================
Read 1 {"version":["test"],"parents":["oldie"],"body":"{\"this\":\"stuff\"}"}!
Read 1 {"version":["test1"],"parents":["oldie","goodie"],"patches":[{"unit":"json","range":"[1]","content":"1"}],"extra_headers":{"hash":"42",":status":"115"}}!
Read 1 {"version":["test2"],"patches":[{"unit":"json","range":"[2]","content":"2"}]}!
Read 1 {"version":["test3"],"patches":[{"unit":"json","range":"[3]","content":"3","extra_headers":{"hash":"43"}},{"unit":"json","range":"[4]","content":"4"}]}!
Read 2 {"version":["test"],"parents":["oldie"],"body":"{\"this\":\"stuff\"}"}!
Read 2 {"version":["test1"],"parents":["oldie","goodie"],"patches":[{"unit":"json","range":"[1]","content":"1"}],"extra_headers":{"hash":"42",":status":"115"}}!
Read 2 {"version":["test2"],"patches":[{"unit":"json","range":"[2]","content":"2"}]}!
Read 2 {"version":["test3"],"patches":[{"unit":"json","range":"[3]","content":"3","extra_headers":{"hash":"43"}},{"unit":"json","range":"[4]","content":"4"}]}!
Read 3 {"version":["test"],"parents":["oldie"],"body":"{\"this\":\"stuff\"}"}!
Write test 1 returned 200
Write test 2 returned 200
Write test 3 returned 200
Write test 4 returned 200
Read 1 {"version":["another!"],"body":"\"!\""}!
Read 2 {"version":["another!"],"body":"\"!\""}!
Read 3 {"version":["another!"],"body":"\"!\""}!
================================================
FILE: json-patch/apply-patch.js
================================================
// Apply a single Braid range-patch to a JSON value.
//
//   obj     - the value to patch; mutated in place, EXCEPT when the patch
//             replaces the root itself (empty range) or splices a
//             root-level string, in which case the new value is returned
//   range   - a path such as '.a.b[3]', '[1:3]', or '["quoted field"]'
//   content - the replacement value; `undefined` deletes an object field
//
// Returns the patched object (or the replacement value when the root is
// replaced).
//
// Fixes over the previous revision:
//   - `last_field` is now declared (was an implicit global; a crash in
//     strict mode, and a cross-call hazard otherwise)
//   - index/slice checks compare against null/undefined instead of
//     truthiness, so index 0 works: '[0:2]' now splices correctly and
//     '["..."]' quoted fields (including "" and "0") are handled
function apply_patch (obj, range, content) {
    // Descend down a bunch of objects until we get to the final object
    // The final object can be a slice
    // Set the value in the final object

    var path = range,
        new_stuff = content

    // One path segment: `.field`, `[n]`, `[a:b]`, or `["quoted"]`
    var path_segment = /^(\.?([^\.\[]+))|(\[((-?\d+):)?(-?\d+)\])|\[("(\\"|[^"])*")\]/
    var curr_obj = obj,
        last_obj = null,
        last_field = null

    // Handle negative indices, like "[-9]" or "[-0]"
    function de_neg (x) {
        return x[0] === '-'
            ? curr_obj.length - parseInt(x.substr(1), 10)
            : parseInt(x, 10)
    }

    // Now iterate through each segment of the range e.g. [3].a.b[3][9]
    while (true) {
        var match = path_segment.exec(path),
            subpath = match ? match[0] : '',
            field = match && match[2],
            slice_start = match && match[5],
            slice_end = match && match[6],
            quoted_field = match && match[7]

        // The field could be expressed as ["nnn"] instead of .nnn
        if (quoted_field) field = JSON.parse(quoted_field)

        slice_start = slice_start && de_neg(slice_start)
        slice_end = slice_end && de_neg(slice_end)

        // If it's the final item, set it
        if (path.length === subpath.length) {
            if (!subpath) return new_stuff             // Replace the root
            else if (field != null) {                  // Object
                if (new_stuff === undefined)
                    delete curr_obj[field]             // - Delete a field in object
                else
                    curr_obj[field] = new_stuff        // - Set a field in object
            } else if (typeof curr_obj === 'string') { // String
                console.assert(typeof new_stuff === 'string')
                // A bare index [i] means the one-character slice [i:i+1].
                // (Compare with null, not truthiness: 0 is a valid start.)
                if (slice_start == null) {slice_start = slice_end; slice_end = slice_end + 1}
                if (last_obj) {
                    // Strings are immutable, so rebuild and re-assign into
                    // the containing object/array
                    var s = last_obj[last_field]
                    last_obj[last_field] = (s.slice(0, slice_start)
                                            + new_stuff
                                            + s.slice(slice_end))
                } else
                    // Root-level string: cannot mutate; return the new one
                    return obj.slice(0, slice_start) + new_stuff + obj.slice(slice_end)
            } else // Array
                if (slice_start != null)               // - Array splice (0 is valid)
                    [].splice.apply(curr_obj, [slice_start, slice_end - slice_start]
                                    .concat(new_stuff))
                else {                                 // - Array set
                    console.assert(slice_end >= 0, 'Index ' + subpath + ' is too small')
                    console.assert(slice_end <= curr_obj.length - 1,
                                   'Index ' + subpath + ' is too big')
                    curr_obj[slice_end] = new_stuff
                }
            return obj
        }

        // Otherwise, descend down the path
        console.assert(slice_start == null, 'No splices allowed in middle of path')
        last_obj = curr_obj
        last_field = field != null ? field : slice_end
        curr_obj = curr_obj[last_field]
        path = path.substr(subpath.length)
    }
}
module.exports = apply_patch
================================================
FILE: json-patch/package.json
================================================
{
"name": "@braid.org/json-patch",
"version": "1.0.6",
"description": "Patch JSON",
"main": "apply-patch.js",
"scripts": {
"test": "node test.js"
},
"author": "",
"license": "ISC"
}
================================================
FILE: json-patch/readme.md
================================================
# JSON Patch
This library patches JSON objects using the Braid range-patch format.
Using it:
```javascript
var patch = require('@braid.org/json-patch')
var json = {a: "foo", b: [1,2,3]}
// Replace the 2 with "a new string"
patch(json, '.b[1]', "a new string")
console.log(json) // {a: "foo", b: [1, "a new string", 3]}
// Edit that string
patch(json, '.b[1][1:5]', 'n old')
console.log(json) // {a: "foo", b: [1, "an old string", 3]}
```
This library mutates your JSON objects in-place. If you want a copy, then
clone your object first.
================================================
FILE: json-patch/test.js
================================================
var assert = require('assert')
var patch = require('.')

// Apply a patch in place, print the result, and verify it matches.
function check (obj, range, content, expected) {
    patch(obj, range, content)
    console.log(obj)
    assert.deepEqual(obj, expected)
}

var json = {a: "foo", b: [1, 2, 3]}

// Replace 2 with 99
check(json, '.b[1]', 99, {a: "foo", b: [1, 99, 3]})

// Insert a string
check(json, '.b[1:1]', "a new thing", {a: "foo", b: [1, "a new thing", 99, 3]})

// Splice that string
check(json, '.b[1][1:5]', "n old", {a: "foo", b: [1, "an old thing", 99, 3]})

// Test case: Delete a field in an object
check(json, ".a", undefined, {b: [1, "an old thing", 99, 3]})

// Test case: Set a field in a nested object
json = {a: {c: "bar"}, b: [1, 2, 3]}
check(json, ".a.c", "baz", {a: {c: "baz"}, b: [1, 2, 3]})

// Test case: Splice an array with negative index
check(json, ".b[-1:-0]", [4, 5], {a: {c: "baz"}, b: [1, 2, 4, 5]})

// Test case: append stuff to the array
check(json, ".b[-0:-0]", [9, 8], {a: {c: "baz"}, b: [1, 2, 4, 5, 9, 8]})

// Test case: Set a value in a deeply nested object
json = {a: {c: {d: {e: "foo"}}}, b: [1, 2, 3]}
check(json, ".a.c.d.e", "bar", {a: {c: {d: {e: "bar"}}}, b: [1, 2, 3]})

console.log("All tests passed!")
================================================
FILE: kernel/antimatter.js
================================================
module.exports = require.antimatter = (node) => ({
set (args) {
    // Handle an incoming 'set' message for `key`: maintain the
    // acknowledgment bookkeeping for `version` (acks_in_process), then
    // check whether the version has now been acked by every keepalive
    // peer. Forwarding of the version itself happens elsewhere; this
    // method only tracks acks.
    var {key, patches, version, parents, origin} = args
    var resource = node.resource_at(key)

    if (args.is_new) {
        // Next, we want to remember some information for the purposes of
        // acknowledgments, namely, we'll remember how many people we
        // forward this version along to (we'll actually do the forwarding
        // right after this), and we also remember whether or not we are
        // the originators of this version (if we originated the version,
        // then we'll be responsible for sending the "global" ack when the
        // time is right)..
        var origin_is_keepalive = origin && resource.keepalive_peers[origin.id]
        resource.acks_in_process[version] = {
            origin: origin_is_keepalive && origin,
            count: Object.keys(resource.keepalive_peers).length
        }

        if (origin_is_keepalive)
            // If the origin is a keepalive_peer, then since we've already
            // seen it from them, we can decrement count
            resource.acks_in_process[version].count--

        assert(resource.acks_in_process[version].count >= 0,
               node.pid, 'Acks have below zero! Proof:',
               {origin, key, version,
                acks_in_process: resource.acks_in_process[version]})
    }
    else if (resource.acks_in_process[version])
        // Q: In what situation is acks_in_process[version] false?
        //
        // A: Good question; the answer is that in some cases we will
        //    delete acks_in_process for a version if, say, we receive a
        //    global ack for a descendant of this version, or if we
        //    receive a fissure.. in such cases, we simply ignore the
        //    ack process for that version, and rely on a descendant
        //    version getting globally acknowledged.

        // Now if we're not going to add the version, most commonly
        // because we already possess the version, there is another
        // situation that can arise, namely, someone that we forwarded the
        // version to sends it back to us... How could that happen? Well,
        // they may have heard about this version from someone we sent it
        // to, before hearing about it from us (assuming some pretty gross
        // latency).. anyway, if it happens, we can treat it like an ACK
        // for the version, which is why we decrement "count" for
        // acks_in_process for this version; a similar line of code exists
        // inside "node.ack"
        resource.acks_in_process[version].count--

    // Since we may have messed with the ack count, we check it to see if
    // it has gone to 0, and if it has, take the appropriate action (which
    // is probably to send a global ack)
    check_ack_count(node, key, resource, version)
},
ack (args) {
    // Handle an incoming ack for `version` of `key`.
    //
    // seen === 'local': one peer has seen the version — decrement its
    //   pending-ack count and re-check whether it has reached zero.
    // seen === 'global': everyone (transitively) has seen the version —
    //   record it as fully acknowledged and forward the global ack to all
    //   pipes except the one it came from.
    var {key, valid, seen, version, origin} = args
    var resource = node.resource_at(key)

    if (seen === 'local') {
        if (resource.acks_in_process[version]) {
            log('node.ack: Got a local ack! Decrement count to',
                resource.acks_in_process[version].count - 1)

            resource.acks_in_process[version].count--
            check_ack_count(node, key, resource, version)
        }
    } else if (seen === 'global') {
        // Ignore global acks for versions we don't know about
        if (!resource.time_dag[version]) return

        // Ignore global acks for versions inside the unack boundary
        // (those were "blacklisted" when a new peer connected; see the
        // welcome handler) ...
        var ancs = resource.ancestors(resource.unack_boundary)
        if (ancs[version]) return

        // ... and for versions we already consider globally acked
        ancs = resource.ancestors(resource.acked_boundary)
        if (ancs[version]) return

        add_full_ack_leaf(node, resource, version)

        // Propagate the global ack to everyone except its sender
        node.bindings(key).forEach(pipe => {
            if (pipe.send && (pipe.id !== origin.id))
                pipe.send({method: 'ack', key, version, seen: 'global'})
        })
    }
},
fissure ({key, fissure, origin}) {
    // Handle an incoming fissure record (a note that the connection
    // `fissure.conn` between peers `fissure.a` and `fissure.b` broke).
    // New fissures invalidate all in-progress ack counting, get
    // forwarded to our other peers, and — if we are the other end of
    // the break — trigger the matching "anti-fissure" so the pair can
    // eventually be pruned.
    var resource = node.resource_at(key)

    // Fissures are keyed by "a:b:conn"; only process ones we haven't seen
    var fkey = fissure.a + ':' + fissure.b + ':' + fissure.conn
    if (!resource.fissures[fkey]) {
        resource.fissures[fkey] = fissure

        // A topology change invalidates the ack-counting algorithm's
        // assumptions, so drop all in-progress ack state
        resource.acks_in_process = {}

        // First forward this fissure along
        node.bindings(key).forEach(pipe => {
            if (pipe.send && (!origin || (pipe.id !== origin.id)))
                pipe.send({
                    method: 'fissure',
                    key,
                    fissure
                })
        })

        // And if this fissure matches us, then send the anti-fissure for
        // it
        if (fissure.b == node.pid)
            node.fissure({
                key,
                fissure: {
                    a: node.pid,
                    b: fissure.a,
                    conn: fissure.conn,
                    versions: fissure.versions,
                    parents: {},
                    time: fissure.time
                }
            })
    }
},
disconnected ({key, name, versions, parents, time, origin}) {
    // Handle a peer disconnecting from `key`: if we had a live
    // (welcomed, keepalive) connection to them, create a fissure
    // recording which versions were in flight, and broadcast it via
    // node.fissure.
    //
    // Todo:
    //  - rename "name" to "fissure".
    //  - rename "time" to "disconnect_time"

    // if we haven't sent them a welcome (or they are not remote), then no
    // need to create a fissure
    if (!origin.remote_peer|| !node.resource_at(key).keepalive_peers[origin.id]) return

    // now since we're disconnecting, we reset the keepalive_peers flag
    delete node.resource_at(key).keepalive_peers[origin.id]

    assert(key && origin)
    // To do:
    //  - make this work for read-only connections
    //  - make this work for multiple keys (a disconnection should
    //    affect all of its keys)

    var resource = node.resource_at(key),
        fissure

    assert(!(name || versions || parents), 'Surprise!')

    // Generate the fissure
    //
    // NOTE(review): given the assert just above, `name` is expected to be
    // falsy here, so this first branch looks unreachable in normal
    // operation — confirm before removing.
    if (name) {
        // Create fissure from name
        var [a, b, conn] = name.split(/:/)
        fissure = {
            a, b, conn,
            versions,
            parents,
            time
        }
    } else {
        // Create fissure from scratch

        // assert(resource.subscriptions[origin.id],
        //        `This pipe ${origin.id} is not on the resource for ${node.pid}'s ${key}`,
        //        resource.subscriptions)

        assert(origin.id, 'Need id on the origin', origin)
        assert(origin.remote_peer, 'Need a peer on origin', origin)

        // The fissure's versions are everything not yet globally acked
        // (plus the acked boundary itself)
        var versions = {}
        var ack_versions = resource.ancestors(resource.acked_boundary)
        Object.keys(resource.time_dag).forEach(v => {
            if (!ack_versions[v] || resource.acked_boundary[v])
                versions[v] = true
        })

        // Now collect the parents. We start with all fissures...
        var parents = {...resource.fissures}
        // ... and then filter down to just be the leaves of the fissure DAG
        Object.values(resource.fissures).forEach(f => {
            Object.keys(f.parents).forEach(p => delete parents[p])
        })
        Object.keys(parents).forEach(p => parents[p] = true)

        fissure = {
            a: node.pid,
            b: origin.remote_peer,
            conn: origin.connection,
            versions,
            parents,
            time
        }
    }

    node.fissure({key, origin, fissure})
},
welcome (args) {
var {key, versions, fissures, unack_boundary, min_leaves, parents,
origin, versions_to_add, added_versions} = args
var resource = node.resource_at(key)
// Next we process the incoming fissures, and like before, we only
// want to add new ones, and there's also this gen_fissures variable
// which is short for "generated_fissures", and records fissures which
// we created just now as part of a special case where we receive a
// fissure that we were supposedly involved with, but we don't have a
// fissure record for (this can happen when someone tries to connect
// with us, but the connection is broken even before we knew they were
// trying to connect)
var new_fissures = []
var gen_fissures = []
fissures.forEach(f => {
var key = f.a + ':' + f.b + ':' + f.conn
if (!resource.fissures[key]) {
// So we don't have this fissure.. let's add it..
new_fissures.push(f)
resource.fissures[key] = f
// Now let's check for that special case where we don't have
// the fissure, but we're one of the ends of the fissure (note
// that we don't check for f.a == node.pid because that would
// be a fissure created by us -- we're looking for fissures
// not created by us, but that we are the other end of). We
// just add these fissures to gen_fissures for now, and later
// in this function we'll iterate over gen_fissures and
// actually add these fissures to our data structure (as well
// as tell them to our peers)
//
// If we don't do this, then this fissure will never get pruned,
// because it will never find its "other half"
if (f.b == node.pid) gen_fissures.push({
a: node.pid,
b: f.a,
conn: f.conn,
versions: f.versions,
parents: {},
time: f.time
})
}
})
// There is this thing called the unack_boundary, which defines a set
// of nodes (namely everything on the boundary, and any ancestors of
// anything on the boundary), and these nodes should exhibit the
// behavior that even if a global acknowledgment is received for them,
// it should be ignored.
//
// Why should we ignore them? well, this welcome message we've received
// is kindof like an anti-fissure -- it is a new citizen in the network,
// and the whole idea of a "global ack" is that all citizens connected
// directly or transitively to ourselves have seen this version,
// but imagine that there is a "global ack" sitting the our message queue,
// but it was created before this new connection, meaning that it's
// claim has been violated (in particular, this new citizen may not
// have seen the version, and this new citizen may bring in transitive
// access to even more citizens, which also may not have seen the version),
// so rather than trying to figure out who has seen what when a new
// connection is established, we sortof blacklist global acknowledgments
// for all versions in both our, and the new citizens current versions,
// and we wait for a version created after this connection event
// to get globally acknowledged (note that this involves un-globally
// acknowledging things that we had thought were globally acknowledged,
// but not everything -- if a version is globally acknowledged by us,
// and also by the incoming citizen, then we keep that version as
// globally acknowledged)
// This next if statement deals with two cases of the welcome message.
// in one case, the welcome is sent as a response to a get, in which
// case unack_boundary is null (and you can see that we just set it to
// be absolutely all of the versions we currently know about, both in
// our own version set, and the incoming version set, since we already
// added the incoming versions to our set). If it isn't null, then we
// don't need to give it a value here (and this message must be a case
// of propoagating a welcome around the network)
//
// So conceptually, we establish the unack_boundary on the initial
// welcome (and we can't know it before then, because the person
// sending us this welcome doesn't know which versions we have), and
// then once it is established, we hardcode the result into the
// welcome messages that we send to our peers
if (!unack_boundary)
unack_boundary = Object.assign({}, resource.current_version)
// To understand this next bit of code, first know that these
// "boundary" variables are really just trying to be more effecient
// ways of storing sets of versions (which include everything on the
// boundary, as well as all the ancestors of those versions). If we
// were using sets, our code would be doing this:
//
// resource.unack_set = union(resource.unack_set, unack_set)
//
// That is, we want to union our pre-existing unacked stuff with
// the new incoming unacked stuff. But since our implementation
// uses boundaries rather than sets, we get the code that follows
// (you can see that the only modifications being made are to
// resource.unack_boundary, where we delete some stuff, and add
// some stuff, so that it represents the new boundary)
var our_conn_versions = resource.ancestors(resource.unack_boundary)
var new_conn_versions = resource.ancestors(unack_boundary)
Object.keys(resource.unack_boundary).forEach(x => {
if (new_conn_versions[x] && !unack_boundary[x])
delete resource.unack_boundary[x]
})
Object.keys(unack_boundary).forEach(x => {
if (!our_conn_versions[x]) resource.unack_boundary[x] = true
})
// So that was dealing with the unack_boundary stuff... now we want to
// deal with the globally acknowledged stuff. Basically, anything that
// is globally acknowledged by both us, and the incoming citizen, will
// remain globally acknowledged. We'll compute these versions as the
// intersection of ours and their acknowledged set, and then store
// just the boundary of the intersection set and call it "min_leaves"
// (where "min" basically means "intersection" in this case, and used
// to be paired with "max_leaves", which meant "union", and was used
// to represent the unack_boundary above)
//
// As before, min_leaves will be null on the initial welcome,
// and we'll compute it, and then subsequent welcomes will have this
// result included...
if (!min_leaves) {
if (versions.length === 0 && (!parents || Object.keys(parents).length === 0))
min_leaves = {...resource.current_version}
else {
min_leaves = parents ? {...parents} : {}
versions.forEach(v => {
if (!versions_to_add[v.version]) min_leaves[v.version] = true
})
min_leaves = resource.get_leaves(resource.ancestors(min_leaves, true))
}
}
// We are now armed with this "min_leaves" variable,
// either because we computed it, or it was given to us...
// what do we do with it? well, we want to roll-back our
// boundary of globally acknowledged stuff so that it only
// includes stuff within "min_leaves" (that is, we only want
// to keep stuff as globally acknowledged if it was already
// globally acknowledged, and also it is already known to this
// incoming citizen)
//
// As before, we're really doing a set intersection (in this case
// an intersection between min_leaves and our own acked_boundary),
// but the code looks wonkier because all our variables store
// the boundaries of sets, rather than the sets themselves
var min_versions = resource.ancestors(min_leaves)
var ack_versions = resource.ancestors(resource.acked_boundary)
Object.keys(resource.acked_boundary).forEach(x => {
if (!min_versions[x])
delete resource.acked_boundary[x]
})
Object.keys(min_leaves).forEach(x => {
if (ack_versions[x]) resource.acked_boundary[x] = true
})
// This next line of code is pretty drastic.. it says: "If we're
// connecting to someone new, then all our hard work keeping track of
// acknowledgments is now useless, since it relies on an algorithm
// that assumes there will be no patches in the network topology
// whilst the algorithm is being carried out -- and the network
// topology just changed, because now there's this new guy"
//
// Fortunately, once a new version is globally acknowledged within the
// new topology, its acknowledgment will extend to these versions as
// well, because global acknowledgments apply to all ancestors of a
// version, and any new versions will include all existing versions as
// ancestors.
resource.acks_in_process = {}
// Ok, we're pretty much done. We've made all the patches to our own
// data structure (except for the gen_fissures, which will happen
// next), and now we're ready to propagate the information to our
// peers.
assert(unack_boundary && min_leaves && fissures && added_versions)
// In the above, when we added new versions and fissures to ourselves,
// we marked each such instance in added_versions or new_fissures, and
// if we got any new versions or fissures, then we want to tell our
// peers about it (if we didn't, then we don't need to tell anyone,
// since there's nothing new to hear about)
if ((added_versions.length > 0
|| new_fissures.length > 0
|| !resource.weve_been_welcomed)) {
// Now record that we've seen a welcome
resource.weve_been_welcomed = true
// And tell everyone about it!
node.bindings(key).forEach(pipe => {
if (pipe.send && (pipe.id !== origin.id))
pipe.send({method: 'welcome',
key, versions: added_versions, unack_boundary,
min_leaves, fissures: new_fissures})
})
}
// now we finally add the fissures we decided we need to create in
// gen_fissures... we add them after forwarding the welcome so that
// these network messages appear after the welcome (since they may
// rely on information which is in the welcome for other people to
// understand them)
gen_fissures.forEach(f => node.fissure({key, fissure:f}))
}
})
function add_full_ack_leaf(node, resource, version) {
    // "version" has just become fully (globally) acknowledged. Full
    // acknowledgment is inherited by every ancestor, so we can discard the
    // per-version bookkeeping — unack/acked boundary membership and any
    // in-flight ack counters — for the entire ancestry of this version.
    var visited = {}
    var stack = [version]
    while (stack.length) {
        var v = stack.pop()
        if (visited[v]) continue
        visited[v] = true
        delete resource.unack_boundary[v]
        delete resource.acked_boundary[v]
        delete resource.acks_in_process[v]
        Object.keys(resource.time_dag[v]).forEach(parent => stack.push(parent))
    }
    // Record the one new fact: this version now sits on the boundary of the
    // fully-acknowledged set. (Both call sites guard against calling this on
    // a version that is already fully acknowledged — note acks_in_process is
    // always absent for fully-acked versions, since we delete it above.)
    resource.acked_boundary[version] = true
    // A fresh full ack may unlock pruning opportunities that didn't exist
    // before, so this is a reasonable moment to prune (though pruning could
    // equally be deferred to the user's leisure).
    start_prune(node, resource)
}
function check_ack_count(node, key, resource, version) {
    // Called whenever the "count" inside acks_in_process[version] may have
    // changed; if it has reached zero, every peer we forwarded this version
    // to has acked it, and we react below.
    // TODO: could this only take key, instead of key and resource? Or
    // perhaps a resource should know its key?
    assert(!resource.acks_in_process[version]
           || resource.acks_in_process[version].count >= 0,
           'Acks have gone below zero!',
           {key, version,
            acks_in_process: resource.acks_in_process[version]})
    // The entry may have been wiped entirely for this version (e.g. by a
    // fissure), so guard before reading .count.
    var in_process = resource.acks_in_process[version]
    if (!in_process || in_process.count != 0) return
    if (in_process.origin) {
        // We didn't create this version ourselves; "origin" is whoever we
        // first heard it from. Per the ack algorithm, we ack back to them
        // only once everyone we forwarded the version to has acked us.
        var origin_pipe = in_process.origin
        if (origin_pipe.send)
            origin_pipe.send({
                method: 'ack', key, seen: 'local', version
            })
    } else {
        // No origin means we created this version. Since all our peers have
        // acked it (and a fissure would have wiped acks_in_process before any
        // contradicting news could reach us), the whole network must have
        // acked it — the version is now fully (globally) acknowledged.
        add_full_ack_leaf(node, resource, version)
        // add_full_ack_leaf only updates our own data structure, so also
        // broadcast a "global" ack for our peers to propagate onward.
        node.bindings(key).forEach(pipe => {
            if (pipe.send)
                pipe.send({method: 'ack', key, seen: 'global', version})
        })
    }
}
function start_prune (node, resource) {
    // Prunes state we no longer need to keep:
    //   1. matched fissure pairs (a:b and b:a for the same connection),
    //      unless pinned by the parenting rule,
    //   2. fissures expired by age (node.fissure_lifetime) or overflowing
    //      node.max_fissures (oldest evicted first),
    //   3. "bubbles" of prunable versions in the time dag, handed to the
    //      mergeable's prune() for collapsing.
    var unremovable = {}
    if (!resource.fissures)
        console.error('Bad resource', resource)
    // One timestamp shared by the expiry and max_fissures logic below.
    // BUGFIX: previously `now` was only assigned inside the fissure_lifetime
    // branch, so when fissure_lifetime was null the max_fissures comparator
    // read `now` as undefined, poisoning the sort with NaN.
    var now = Date.now()
    // First, let's prune old fissures.
    // Calculate which fissures we have to keep due to the parenting rule:
    // a fissure whose matching reverse fissure hasn't arrived yet pins
    // itself, its mirror key, and all its parent fissures recursively.
    Object.entries(resource.fissures).forEach(x => {
        if (!resource.fissures[x[1].b + ':' + x[1].a + ':' + x[1].conn]) {
            function mark(y) {
                if (!unremovable[y.a + ':' + y.b + ':' + y.conn]) {
                    unremovable[y.a + ':' + y.b + ':' + y.conn] = true
                    unremovable[y.b + ':' + y.a + ':' + y.conn] = true
                    Object.keys(y.parents).forEach(p => {
                        if (resource.fissures[p]) mark(resource.fissures[p])
                    })
                }
            }
            mark(x[1])
        }
    })
    // Now remove matched fissure pairs (or, if pinned, just drop their
    // version sets, which are no longer needed).
    Object.entries(resource.fissures).forEach(x => {
        var other_key = x[1].b + ':' + x[1].a + ':' + x[1].conn
        var other = resource.fissures[other_key]
        if (other) {
            if (unremovable[x[0]]) {
                resource.fissures[x[0]].versions = {}
                resource.fissures[other_key].versions = {}
            } else {
                delete resource.fissures[x[0]]
                delete resource.fissures[other_key]
            }
        }
    })
    // Remove fissures that have expired due to time
    if (node.fissure_lifetime != null) {
        Object.entries(resource.fissures).forEach(([k, f]) => {
            if (f.time == null) f.time = now  // stamp un-timestamped fissures
            if (f.time <= now - node.fissure_lifetime) {
                delete resource.fissures[k]
            }
        })
    }
    // Remove the oldest fissures beyond our max_fissures limit
    if (node.max_fissures != null) {
        let count = Object.keys(resource.fissures).length
        if (count > node.max_fissures) {
            Object.entries(resource.fissures).sort((a, b) => {
                if (a[1].time == null) a[1].time = now
                if (b[1].time == null) b[1].time = now
                return a[1].time - b[1].time
            }).slice(0, count - node.max_fissures).forEach(e => {
                delete resource.fissures[e[0]]
            })
        }
    }
    // Now figure out which versions we want to keep,
    var keep_us = {}
    // including versions referenced by the surviving fissures..
    Object.values(resource.fissures).forEach(f => {
        Object.keys(f.versions).forEach(v => keep_us[v] = true)
    })
    // and versions which are not fully acknowledged, or on the boundary
    var acked = resource.ancestors(resource.acked_boundary)
    Object.keys(resource.time_dag).forEach(x => {
        if (!acked[x] || resource.acked_boundary[x]) keep_us[x] = true
    })
    // ok, now we want to find "bubbles" in the dag,
    // with a "bottom" and "top" version,
    // where any path down from the top will hit the bottom,
    // and any path up from the bottom will hit the top,
    // and also, the bubble should not contain any versions we want to keep
    // (unless it's the bottom)
    //
    // to help us calculate bubbles, first compute children for the time dag
    // (the time dag itself only gives us parents)
    var children = {}
    Object.entries(resource.time_dag).forEach(([v, parents]) => {
        Object.keys(parents).forEach(parent => {
            if (!children[parent]) children[parent] = {}
            children[parent][v] = true
        })
    })
    // now we'll actually compute the bubbles
    var to_bubble = {}       // version -> bubble tag (later: [top, bottom])
    var bubble_tops = {}     // bubble top version -> its tag
    var bubble_bottoms = {}  // tag -> bubble bottom version
    // Tag every version from `bottom` up to (and including) `top` as
    // belonging to the bubble identified by `tag`.
    function mark_bubble(bottom, top, tag) {
        if (!to_bubble[bottom]) {
            to_bubble[bottom] = tag
            if (bottom !== top)
                Object.keys(resource.time_dag[bottom]).forEach(
                    p => mark_bubble(p, top, tag)
                )
        }
    }
    // This begins the O(n^2) operation that we wanna shrink to O(n)
    var done = {}
    function walk(cur) {
        if (!resource.time_dag[cur]) return
        if (done[cur]) return
        done[cur] = true
        if (!to_bubble[cur] || bubble_tops[cur]) {
            var bubble_top = find_one_bubble(cur)
            if (bubble_top) {
                delete to_bubble[cur]
                mark_bubble(cur, bubble_top, bubble_tops[cur] || cur)
                bubble_tops[bubble_top] = bubble_tops[cur] || cur
                bubble_bottoms[bubble_tops[cur] || cur] = bubble_top
            }
        }
        Object.keys(resource.time_dag[cur]).forEach(walk)
    }
    Object.keys(resource.current_version).forEach(walk)
    // This is the end of an O(n^2) algorithm
    to_bubble = Object.fromEntries(Object.entries(to_bubble).map(
        ([v, bub]) => [v, [bub, bubble_bottoms[bub]]]
    ))
    // Search upward from `cur` (the candidate bubble bottom) for a single
    // ancestor that dominates it; returns that top version, or null if the
    // search escapes the dag or hits a version we must keep.
    function find_one_bubble(cur) {
        var seen = {[cur]: true}
        var q = Object.keys(resource.time_dag[cur])
        var expecting = Object.fromEntries(q.map(x => [x, true]))
        while (q.length) {
            cur = q.pop()
            if (!resource.time_dag[cur]) return null
            if (keep_us[cur]) return null
            if (Object.keys(children[cur]).every(c => seen[c])) {
                seen[cur] = true
                delete expecting[cur]
                if (!Object.keys(expecting).length) return cur
                Object.keys(resource.time_dag[cur]).forEach(p => {
                    q.push(p)
                    expecting[p] = true
                })
            }
        }
        return null
    }
    // now hand these bubbles to the mergeable's prune function..
    if (resource.mergeable.prune)
        resource.mergeable.prune(to_bubble)
}
================================================
FILE: kernel/demos/simple/simple-client.html
================================================
How to use
Run `nodemon simple-server network` on the command line
And type in the box above
You can watch the network messages scroll by in your terminal
================================================
FILE: kernel/demos/simple/simple-server.js
================================================
var certificate = `-----BEGIN CERTIFICATE-----
MIIDXTCCAkWgAwIBAgIJANoWGfl3pEeHMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTkwODE2MjAxNTIxWhcNMjAwODE1MjAxNTIxWjBF
MQswCQYDVQQGEwJVUzETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA1bilKJKH1axV0OLLIwg3WxXx6MMsFL3/bv2uX9+Z22uZukJsgqnR2y+6
OCLH8opczH4Now3Od+P0G4kNSn9m+T5W5bvf9bIIDmCG/04uGCvx0L8bgYA5lyMJ
aFdcfCXu1iKvUt1LdZlds2AsBfceYCB6FwsMkUODzZ7OJ6R1aXUHxQ74me/ksoxV
P7Fmv012gRJkYn5gzvrokula2Yxb+z84TP115tALYBBpLhj5WPOXSmyVo0Lf1dGQ
JfbRxvx32pxZiBPwcNre3yzKhRue99tRuPHFCQBZSkXGuT7K9bsNnPwXfAmB2VbQ
bjezmqVGv8KnwyTRWdLaEcV9cxHCnQIDAQABo1AwTjAdBgNVHQ4EFgQUOoDGcBG8
Xm/Jj+WbIYctxhGqD6owHwYDVR0jBBgwFoAUOoDGcBG8Xm/Jj+WbIYctxhGqD6ow
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAaHjdu8Hg34Zzay4djFSo
hRno4m+tiJ4UT3oLTHRGh54JFKQPeLLEY0WbhrBDyuDJrCdyjvmqpuELPPwNRdo0
Ly3fhRIxeaN8px6V0bpdj0ePDqC0ZU5It/9jVlC0OkdG2xwJygw+xNLaHb09l7rj
ZLM+tOKQEBxZCLKqc1FLlS9MIxDKaVdI2JSBDmNl+0XyFwKM6bfI3Mk8STuZXm5A
EtWvDNbLFl6TLyKDeHNRc0LQEa74xE3yhoWO3kb9phL4A1g/I7rW+B2we4N84FfT
v5C5/zn58xabUtMVeGUi/avnVz+C4HY4ZMEIQPIodtsRcZq05RQGW8ipig7QaXnD
gQ==
-----END CERTIFICATE-----
`, private_key = `-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDVuKUokofVrFXQ
4ssjCDdbFfHowywUvf9u/a5f35nba5m6QmyCqdHbL7o4IsfyilzMfg2jDc534/Qb
iQ1Kf2b5Plblu9/1sggOYIb/Ti4YK/HQvxuBgDmXIwloV1x8Je7WIq9S3Ut1mV2z
YCwF9x5gIHoXCwyRQ4PNns4npHVpdQfFDviZ7+SyjFU/sWa/TXaBEmRifmDO+uiS
6VrZjFv7PzhM/XXm0AtgEGkuGPlY85dKbJWjQt/V0ZAl9tHG/HfanFmIE/Bw2t7f
LMqFG57321G48cUJAFlKRca5Psr1uw2c/Bd8CYHZVtBuN7OapUa/wqfDJNFZ0toR
xX1zEcKdAgMBAAECggEAWCxLh0ec3tywsvM+V3+mRt/w49TRtOUGIyZp8IfxlAL6
c0vANNAXElTIgSxoTXoj+wHuYlzp17CmH04Vu6yAMUg01acDKPyAMl5Ek8QPZE2N
AFA36t+Z4u7DjNauA1IrDRFWP9uorCXP8Jc20mc3kvUTKbqXPr8Z+5UO/G/vOMgc
QKXPoz45EbFahTwck4TQowLeKhAF3BU5fn48zuBy055q6babV1z0LDzDIUGcZqHv
4VPMLOUp1KzpwoQd6o3wwBBttJkFqBf7US3nExdq0SkHgwE/lOKgJuSMmgAWgGm5
3iO8F+Ve84206IgmhQOMw3KZjIgWdiCW/dgVbJQsQQKBgQD4/2Wr4NYfdXqotnjT
MZCx5921nFwkMyt7JndCIs49CQ3lMGtlijRtVHGhZKVHUZr4SKKfjbOAiABPCsRL
ZhvVnhlbmUioSgfMM/Y+fkCs3DdzuJE9tVuSdyQFoblY5W1dLeuLTEI3TDTos+V7
jfKsHMqF0gAbCkt7GgVpy5vCRQKBgQDbu0ibskjwF5voMuJmJdvIv0XAi91sRTRJ
RuDrH6NPU+RrVHTHRJMtGRM5zWI4b7N0KTx+J2xaJ6J/FxbfsdThgKb99gB9j3hR
F0CK/quMjAwpezWwatHarK87c//rvmIBVL82xLe3sQKxmwdCUiyhum/4l+GN+WpZ
lfP4HU4weQKBgQD18WaekBVPu31tedb8XB/c6fZ/NTN5+iT/ni374F8vwGq+L8ZU
5F8Ggns+fCgYus1EYpJm4NMlqLANYsgi5Xem12Oaq1wuBfmPxN98OL5vP5FyNyMW
/bS2hgHJokVuPid4+yuGSsu4zQgRted80+eYA1QzPAsoqlGGBVzFc/yktQKBgQDP
RcqHPFV7Tfn+vkk8bEf4BR4KNKWJZXqeCONQSEboJM3axQ9njXN73iR5qRkW/Z99
Wwy6P/wAy1SIqEImf3y9v3tHI1BxIO4xKEr1EqjGarFqS9Rod0tACRc/cPwf6DZQ
5R1+z3AyMiLFYOUnFZcOdGz9RmA5aeZ9XWuHSDWimQKBgGgmRWuGasEEMXdnkLQA
rNg1Di5DFv+KvXwgTo63MxwBs2olQ7jUsFf8khipqpByGazYgGeEa1RxDGpQrdyO
I/5N3d5VcGW4g9obfdexuuKOloyKRS2N0KNhLfEfb+qr4gRACPpyKnj5Jeohliox
bHieUzx8qriZ8KrD3PbjKqap
-----END PRIVATE KEY-----
`
// Create a braid node named 'hub' and serve it over a secure websocket,
// using the self-signed certificate/private key defined above.
// NOTE(review): the key pair is committed to the repo — acceptable only for
// a local demo; never reuse this pattern with a real key.
node = require('../../node')()  // NOTE(review): implicit global `node` — presumably deliberate in this demo script
node.pid = 'hub'
require('../../websocket-server.js')(node, certificate, private_key)
================================================
FILE: kernel/demos/sync9-chat/chat-server.js
================================================
// Chat-server setup: dependencies, web-push configuration, and the static
// file / braid-key tables used by serve_file below.
var fs = require('fs')
var path = require('path')
var ws = require('ws')
require('dotenv').config()
// When we have the npm version, this can be improved
var lib_path = "../../../"
// Bundler doesn't actually return anything, but calling it with require
// generates the braid-bundle.js
require(path.join(lib_path, './util/braid-bundler.js'))
var sqlite = require(path.join(lib_path, './kernel/sqlite-store.js'))
var store = require(path.join(lib_path, './kernel/store.js'))
var braid = require(path.join(lib_path, './kernel/node.js'))
var braid_websocket_server = require(path.join(lib_path, './kernel/websocket-server.js'))
var braid_http_server = require(path.join(lib_path, './kernel/http-server.js'))
var webpush = require("web-push")
// Configure VAPID web-push only when all three env vars are present;
// otherwise sendNotification calls will fail and get logged by their
// .catch handlers.
if (process.env.MAIL_TO
    && process.env.WEB_PUSH_PUBLIC
    && process.env.WEB_PUSH_PRIVATE)
    webpush.setVapidDetails(
        process.env.MAIL_TO, // Needs email address to send from
        process.env.WEB_PUSH_PUBLIC,
        process.env.WEB_PUSH_PRIVATE
    )
var port = 3009
// Static files to serve over HTTP
var known_files = {
    '/braid-bundle.js': {
        path: path.join(lib_path, `/builds/braid-bundle.js`),
        mime: 'text/javascript'
    },
    '/braidchat': {
        path: path.join('.', '/chat.html'),
        mime: 'text/html'
    },
    '/settings': {
        path: path.join('.', '/settings.html'),
        mime: 'text/html'
    },
    '/chat.js': {
        path: path.join('.', '/chat.js'),
        mime: 'text/javascript'
    },
    '/chat.css': {
        path: path.join('.', '/chat.css'),
        mime: 'text/css'
    },
    '/mobile.css': {
        path: path.join('.', '/mobile.css'),
        mime: 'text/css'
    },
    '/favicon.ico': {
        path: path.join('.', '/favicon.ico'),
        mime: 'image/x-icon'
    },
    '/white-airplane.png': {
        path: path.join('.', '/white-airplane.png'),
        mime: 'image/png'
    },
    '/black-airplane.png': {
        path: path.join('.', '/black-airplane.png'),
        mime: 'image/png'
    },
    '/settings.css': {
        path: path.join('.', '/settings.css'),
        mime: 'text/css'
    },
    '/client.js': {
        path: path.join('.', '/client.js'),
        mime: 'text/javascript'
    },
    '/worker.js': {
        path: path.join('.', '/worker.js'),
        mime: 'text/javascript'
    },
    '/icon.png': {
        path: path.join('.', '/icon.png'),
        mime: 'image/png'
    }
}
// Keys that braid knows about, and their default values.
var known_keys = {
    '/usr': {},
    '/chat': []
}
let endpoints = [] //list of devices connected to webpush notifications
// Body of the most recently notified chat message, used by update_messages
// below to avoid sending duplicate push notifications.
let last_sent = {}
// Collect the complete request body as a string.
//
// BUGFIX: the previous implementation did `await req.on('data', ...)`, but
// EventEmitter#on returns the emitter (not a promise), so the await resolved
// immediately and callers could see an empty or partial body. We now wrap the
// stream events in a Promise that resolves on 'end'.
function get_body(req) {
    return new Promise((resolve, reject) => {
        var body = ''
        req.on('data', function (data) {
            body += data
            console.log('Partial body: ' + body)
        })
        req.on('end', () => resolve(body))
        req.on('error', reject)
    })
}
// A simple method to serve one of the known files.
// POST requests drive the push-notification plumbing (/subscribe, /token,
// /message); everything else is either a braid key (delegated to
// braid_callback) or a static file from known_files.
async function serve_file(req, res) {
    if (req.method == 'POST') {
        console.log('POST to: ' + req.url)
        let body = await get_body(req)
        // BUGFIX: malformed JSON used to throw inside this async handler,
        // leaving the request unanswered (unhandled rejection). Reply 400.
        let json_body
        try {
            json_body = JSON.parse(body)
        } catch (e) {
            res.writeHead(400, {'Content-Type': 'text/html'})
            res.end()
            return
        }
        if (req.url === '/subscribe') {
            // Remember this device's subscription (the raw JSON string)
            if (!endpoints.includes(body)) {
                console.log("Adding new endpoint")
                endpoints.push(body)
            }
            var payload = JSON.stringify({ title: 'Test Notification on chat' })
            // Sends a test notification
            webpush
                .sendNotification(json_body, payload)
                .catch(err => console.error(err))
        } else if (req.url === '/token') {
            console.log("Saving token")
            save_token(json_body['token'])
        } else if (req.url === '/message') {
            console.log("New message (sent as post request)")
            let notifications = build_mobile_notifications('user', 'basic notification')
            send_mobile_notifications(notifications)
        }
        res.writeHead(201, {'Content-Type': 'text/html'})
        res.end()
    } else {
        // Braid-managed keys are handled by the braid HTTP server
        if (known_keys.hasOwnProperty(req.url))
            return braid_callback(req, res)
        var req_path = new URL(req.url, `http://${req.headers.host}`)
        var f = known_files[req_path.pathname]
        if (f) {
            // BUGFIX: was `headers = {...}` — an accidental implicit global
            res.writeHead(200, { 'content-type': f.mime })
            fs.createReadStream(f.path).pipe(res)
        } else {
            res.writeHead(404)
            res.end()
        }
    }
}
// Broadcast a "new message" web-push notification to every subscribed device.
var send_push_notifications = () => {
    // endpoints holds raw JSON strings; parse each into a subscription object
    var subscriptions = endpoints.map(e => JSON.parse(e))
    var payload = JSON.stringify({
        title: 'New message on BraidChat',
        click_action: 'https://invisible.college/chat/',
        body: "BraidChat",
        icon: "https://ibb.co/p4wKfsR"
    })
    console.log("Sending message: " + JSON.stringify(payload));
    subscriptions.forEach(subscription => {
        subscription['click_action'] = 'https://invisible.college/chat/'
        console.log("sending webpush to user")
        webpush
            .sendNotification(subscription, payload)
            .catch(err => console.error(err));
    })
}
// Create either an http or https server, depending on the existence of ssl certs
var server =
    (fs.existsSync('certs/private-key') && fs.existsSync('certs/certificate'))
    ? require('https').createServer(
        { key: fs.readFileSync('certs/private-key'),
          cert: fs.readFileSync('certs/certificate') },
        serve_file)
    : require('http').createServer(serve_file)
// Setup the braid sqlite store at a local db
var db = sqlite('db.sqlite')
// Random pid suffix distinguishes server restarts from each other
var node = braid({pid: 'server-' + Math.random().toString(36).slice(2,5)})
node.fissure_lifetime = 1000 * 60 * 60 * 24 // Fissures expire after 24 hours
var braid_callback = braid_http_server(node)
// Once the store has hydrated the node, seed defaults and start listening
store(node, db).then(node => {
    // Unsubscribe on error
    // Maybe not needed
    node.on_errors.push((key, origin) => node.unbind(key, origin))
    // For any of the default keys, if we have no versions for them, set an initial version.
    Object.keys(known_keys)
        .filter(k => Object.keys(node.resource_at(k).current_version).length == 0)
        .forEach(k => node.set(k, known_keys[k]))
    // Subscribe the server to every known key so it stays in sync
    Object.keys(known_keys)
        .forEach(k => node.get(k))
    // Share the HTTP(S) server with the websocket layer
    var wss = new ws.Server({ server })
    braid_websocket_server(node, { port, wss })
    console.log('Keys at startup: ' + JSON.stringify(Object.keys(node.resources)))
    server.listen(port)
    console.log('Listening on port', port)
})
//App notifications
// A second braid node that connects back (as a websocket client) to the
// public chat server, watching /usr and /chat so it can fire push
// notifications when new messages arrive.
var notification_node = require("../../node.js")()
notification_node.websocket_client({url:'wss://invisible.college:3009'})
notification_node.get('/usr', add_users)        // keep the user-name cache fresh
notification_node.get('/chat', update_messages) // notify on new chat messages
// Expo SDK client used for mobile push delivery
var { Expo } = require("expo-server-sdk")
let expo = new Expo()
// Called whenever /chat changes: sends web + mobile push notifications for
// the newest message, de-duplicating by body via last_sent so the same
// message isn't notified twice.
function update_messages(new_val) {
    // BUGFIX: an empty chat list (e.g. the initial default value) used to
    // make `message` undefined, crashing on the property access below.
    if (!new_val || new_val.length === 0) return
    let message = new_val[new_val.length - 1]
    console.log(JSON.stringify(message))
    console.log(message['body'])
    if (last_sent !== message['body']) {
        //web notifications
        send_push_notifications()
        //mobile notifications
        let notifications = build_mobile_notifications(get_name(message), message['body'])
        send_mobile_notifications(notifications)
        last_sent = message['body']
        console.log("Sent message")
    } else
        console.log("Didn't send push notification:" + message['body'])
}
// Cache of the /usr dictionary, refreshed on every update from the server.
let saved_users = {}
// Store a deep snapshot of the user dictionary (JSON round-trip clone).
function add_users(user_dict){
    saved_users = JSON.parse(JSON.stringify(user_dict)) //new json object here
}
// Resolve a message's sender to a display name, falling back to "unknown"
// when the sender isn't in the cache.
function get_name(message){
    let entry = saved_users[message['user']]
    return entry == undefined ? "unknown" : entry['displayname']
}
// Expo push tokens for devices registered via POST /token.
let saved_push_tokens = []
// Register a device token for mobile push, skipping duplicates.
function save_token(token) {
    console.log(token.value, saved_push_tokens)
    console.log(JSON.stringify(token))
    let existing = undefined
    for (const t of saved_push_tokens)
        if (t === token.value) { existing = t; break }
    if (!existing) {
        console.log("new device saved for push notifications")
        saved_push_tokens.push(token.value)
    } else
        console.log("Device was already saved")
}
//creates the mobile notifications. One for every device
// Returns undefined when message is undefined, otherwise an array with one
// Expo notification object per valid registered push token.
// (Removed a dead `index` counter that was incremented but never read.)
var build_mobile_notifications = ( user, message ) => {
    if (message === undefined) {
        console.log("message is undefined")
        return undefined
    }
    console.log("Sending push notification", {message, user},
                "to", saved_push_tokens.length, 'devices.')
    let notifications = []
    for (let push_token of saved_push_tokens) {
        console.log("sending to device:" + push_token)
        // Skip tokens that Expo doesn't recognize as push tokens
        if (!Expo.isExpoPushToken(push_token)) {
            console.error(`Push token ${push_token} is not a valid Expo push token`)
            continue
        }
        notifications.push({
            to: push_token,
            sound: "default",
            title: user,
            body: message,
            data: { message }
        })
    }
    return notifications
}
//Sends the notification list
// Chunks the notifications via the Expo SDK and dispatches each chunk,
// logging receipts and errors.
var send_mobile_notifications = (notifications) => {
    if (!notifications || notifications.length == 0) {
        console.log("no devices linked")
        return
    }
    console.log("sending notifications:" + JSON.stringify(notifications[0]))
    var chunks
    try {
        chunks = expo.chunkPushNotifications(notifications)
    } catch (e) {
        // BUGFIX: previously execution fell through with `chunks` undefined,
        // so the loop below threw a TypeError; bail out instead.
        console.error('Cannot send push notification! Expo error:', e)
        return
    }
    // Fire-and-forget: dispatch chunks sequentially, logging each outcome
    ;(async () => {
        for (let chunk of chunks) {
            try {
                var receipts = await expo.sendPushNotificationsAsync(chunk)
                console.log(receipts)
            } catch (error) {
                console.log("Error: sendPushNotificationsAsync")
                console.error(error)
            }
        }
    })()
}
================================================
FILE: kernel/demos/sync9-chat/chat.css
================================================
/* Chat UI stylesheet: a single-column CSS grid (title / messages / input /
   typing indicator), message list styling, and the animated typing dots. */
@import url('https://fonts.googleapis.com/css2?family=Recursive:wght@300;400&display=swap');
body {
  padding: 0;
  margin: 0;
  width: 100vw;
  height: 100vh;
  box-sizing: border-box;
}
/* Full-viewport grid: fixed header, flexible message area, input, typing row */
.grid-container {
  width: 100%;
  height: 100%;
  display: grid;
  font-family: sans-serif;
  grid-template-columns: auto;
  grid-template-rows: 40px auto 85px 1.5em;
  grid-template-areas:
    "title"
    "messages"
    "input"
    "typing";
}
.grid-container > * {
  padding: 0 10%;
}
/* --- Header / settings --- */
header {
  grid-area: title;
  display: flex;
  box-shadow: 0 2px 1px 0 rgba(0, 0, 0, 0.2);
  z-index: 1;
}
.title {
  margin: 0 5px 0 2px;
  user-select: none;
  align-self: center;
}
#settings-hover-container {
  display: flex;
  flex-direction: row;
  align-self: center;
  margin: 0;
  padding: 0;
  user-select: none;
}
.settings {
  display: inline-block;
  height: 21px;
  width: 21px;
  text-align: center;
  align-self: flex-end;
  color: #666;
  padding: 5px;
}
.settings-expand {
  align-self: center;
  font-family: sans-serif;
  margin: 0 5px;
  color: #666;
}
.settings-input-collapse {
  transform-origin: left;
  max-width: 20ch;
  border: 1.5px solid #999;
  border-radius: 5px;
  padding: 2px 3px 1px 4px;
  height: 1.5em;
  margin-left: 0;
}
#stats {
  font-size: 10px;
  font-family: sans-serif, sans, helvetica, arial;
  color: #444;
  text-align: right;
  position: fixed;
  right: 50px;
  top: 3px;
}
/* --- Message list --- */
#react-messages {
  grid-area: messages;
  overflow-y: scroll;
  overflow-x: hidden;
}
.messageBox {
  display: flex;
  flex-direction: column;
  width: 100%;
}
/* Placeholder shown when there are no messages yet */
.messageBox:empty::after {
  content: "It's too quiet in here...";
  font-style: italic;
  color: #777;
  font-size: 1.1875em;
  padding: 4px 10% 4px 5px;
  width: 100%;
  text-align: center;
}
.messageBox .msg {
  width: 100%;
  white-space: pre-wrap;
  display: inline-block;
  padding: 4px 10% 4px 5px;
}
/* Consecutive messages from the same user collapse (no repeated header) */
.messageBox .msg.msg-collapse {
  padding-bottom: 2px;
  padding-top: 0;
}
.messageBox .msg .user-id {
  color: #356;
  font-weight: 600;
}
.messageBox .msg .timestamp {
  color: #666;
  font-size: 12px;
  margin-left: 5px;
}
/* "Live" (still-being-typed) messages render ghosted */
.msg.live {
  opacity: 50%;
  user-select: none;
}
/* --- Input row --- */
.input {
  grid-area: input;
  display: flex;
  margin-top: -1px;
  margin-bottom: 2px;
  z-index: 1;
}
#send-box {
  height: auto;
  flex-grow: 1;
  resize: none;
  font-size: 16px;
  font-family: sans-serif;
  padding: 5px;
  border-radius: 4px;
}
#send-msg {
  display: inline-block;
  width: 45px;
  height: 45px;
  border-radius: 100%;
  line-height: 45px;
  text-align: center;
  align-self: center;
  margin: 6px;
  cursor: pointer;
  font-family: sans-serif;
  color: white;
  background-color: #48d;
  border: 4px solid #7ac;
  user-select: none;
}
/* Typing Indicators */
#typing {
  grid-area: typing;
  user-select: none;
}
#typing.hidden {
  display: none;
}
#typing-text {
  line-height: 1.2em;
  color: #666;
  user-select: none;
}
/* Bouncing-dots animation (alternating, so the dots rise and fall) */
@keyframes typing-dots {
  from {
    transform: translateY(0px) scale(1);
    opacity: 40%;
  }
  to {
    transform: translateY(-5px) scale(1.3);
    opacity: 80%;
  }
  /*to: {
    transform: translateY(0px) scale(1);
    opacity: 40%;
  }*/
}
.typing-anim span {
  background-color: black;
  display: inline-block;
  width: 4px;
  height: 4px;
  vertical-align: middle;
  border-radius: 2px;
  animation-name: typing-dots;
  animation-iteration-count: infinite;
  animation-duration: 0.6s;
  animation-direction: alternate;
  animation-timing-function: cubic-bezier(1,-0.06,.87,1.13);
}
/* Stagger the second and third dots for a wave effect */
.typing-anim span:nth-child(2) {
  animation-delay: 0.2s;
}
.typing-anim span:nth-child(3) {
  animation-delay: 0.4s;
}
================================================
FILE: kernel/demos/sync9-chat/chat.html
================================================
Braid Chat
Braid Chat
Username:
================================================
FILE: kernel/demos/sync9-chat/chat.js
================================================
// Create a node
// Persistent per-browser identity, kept in localStorage across reloads
var browser_id = localStorage.browser_id || localStorage.browserId || 'B-' + Math.random().toString(36).slice(2)
var escaped_id = JSON.stringify(browser_id)
// Feature toggles: leader-tab connection sharing, and connecting to the
// public invisible.college server instead of the page's own host
var use_leadertab = false
var use_invisible_server = false
localStorage.browser_id = browser_id
var node
if (!use_leadertab)
    node = require('node.js')({
        pid: (localStorage.username &&
              localStorage.username + '-' + Math.random().toString(36).slice(2,6))
    })
// NOTE(review): deliberate implicit globals (no var/let) — presumably read
// by the bundled braid libraries for debug logging; confirm before changing.
print_network = true;
g_show_protocol_errors = true;
// Pick ws/wss or http/https to match the page's protocol and the
// ?protocol= query parameter
var params = new URLSearchParams(window.location.search);
var protocol = (params.get("protocol") === 'http' ? 'http' : 'ws') + (window.location.protocol === 'https:' ? 's' : '')
var braid_url = `${protocol}://${window.location.host}/`
console.log('protocol is ' + protocol)
if (use_invisible_server)
    braid_url = 'wss://invisible.college:3009/'
if (!use_leadertab)
    var socket = require(protocol == 'https' ? 'http-client-old.js' : 'websocket-client.js')({node, url: braid_url})
// UI Code
let create_listeners = function () {
if (use_leadertab)
node = require('leadertab-shell.js')(braid_url)
node.fissure_lifetime = 1000 * 60 * 60 * 24 // Fissures expire after 24 hours
node.default('/chat', [])
node.default('/usr', {})
// Local copy of variables
let users = {}
let messages = []
// How many milliseconds each keypress flags us as typing for
var typing_timeout = 30000
// How often to send live typing updates.
var live_type_update_freq = 50
// Subscribe for updates to a resource
node.get('/chat', update_messages)
node.get('/usr', update_users)
window.addEventListener('beforeunload', function () {
set_not_typing()
node.forget('/chat', update_messages)
node.forget('/usr', update_users)
node.close && node.close()
})
//// ----- Messagebox rendering and interactability -----
var message_box = document.getElementById("react-messages")
function render_username(user_id) {
return (user_id && users[user_id]) ? users[user_id].displayname : "Anonymous"
}
function format_header (msg) {
let timestamp = "Live"
if (msg.time) {
now = new Date()
msg_date = new Date(msg.time)
timestamp = now.getDate() === msg_date.getDate()
? msg_date.toLocaleTimeString()
: msg_date.toLocaleDateString()
}
let username = render_username(msg.user)
return [React.createElement("span", {className: "user-id", key:"username"}, username),
React.createElement("span", {className: "timestamp", key: "time"}, timestamp)]
}
function format_message(msg, i, msgs, extra_classes) {
let collapse = i && (msgs[i-1].user == msg.user) && (msg.time - msgs[i-1].time < 1200000)
// Parse the message
let body = React.createElement("div", {className: "msg-body", key: "text"}, msg.body)
let class_list = (extra_classes || []).concat(collapse ? ["msg"] : ["msg", "msg-collapse"]).join(' ')
if (collapse) {
return React.createElement('div', {className: class_list, key: i}, body)
} else {
let rendered_header = format_header(msg)
return React.createElement('div', {className: class_list, key: i},
[React.createElement("div", {className: "msg-header", key: "head"}, rendered_header),
body])
}
}
var typing_text_element = document.getElementById("typing-text")
var typing_box = document.getElementById("typing")
function draw_typing_indicator(names) {
var n = names.length
typing_box.classList.toggle("hidden", n == 0)
let typing_names
switch (n) {
case 0:
return
case 1:
typing_names = names[0]
break
case 2:
typing_names = `${names[0]} and ${names[1]}`
break
case 3:
case 4:
case 5:
names[n-1] = 'and ' + names[n-1]
typing_names = names.join(", ")
break
default:
typing_names = "Several people"
}
typing_text_element.textContent = `${typing_names} ${(n > 1) ? "are" : "is"} typing...`
}
function update_users (new_users) {
users = new_users
if (!users.hasOwnProperty(browser_id)) {
set_username(generate_username())
return
}
name_box.value = users[browser_id].displayname
update_messages(messages)
}
// Re-render the full message list with React. Appends "live typing" preview
// messages for other users, keeps the view pinned to the bottom when the
// user was already at the bottom, and refreshes the typing indicator.
function update_messages(new_val) {
// Check scrolling: decide BEFORE rendering whether we should auto-scroll.
// Uses the OLD `messages` count to find the current last on-screen message.
let should_scroll = true
let n_messages = messages.length
if (n_messages) {
// NOTE(review): assumes the DOM currently has one ".msg" element per old
// message — verify this holds when live-typing previews are rendered.
let furthest_scroll = document.getElementsByClassName("msg")[n_messages - 1].getBoundingClientRect().top
let box_bottom = message_box.getBoundingClientRect().bottom
// If the last message is off the screen, we shouldn't scroll
should_scroll = box_bottom > furthest_scroll
}
let message_elements = new_val.map(format_message)
// Append a "live" preview bubble for every OTHER user currently typing
// (their draft text is published in users[id].typing).
var live_classes = ["live"]
Object.entries(users).forEach(user => {
if (user[1].typing && user[0] != browser_id) {
let msg = {user: user[0], body: user[1].typing}
message_elements.push(format_message(msg, null, null, live_classes))
}
})
let message_list = React.createElement('div', {className: "messageBox", key: "messages"}, message_elements)
ReactDOM.render(
message_list,
message_box,
// After React commits, scroll to the bottom if appropriate.
() => {
if (should_scroll)
message_box.scrollTop = message_box.scrollHeight - message_box.clientHeight
}
)
messages = new_val
// Update the typing indicator with the display names of everyone else typing
let whos_typing = Object.entries(users)
.filter(user => user[1].typing && user[0] != browser_id)
.map(user => user[1].displayname)
draw_typing_indicator(whos_typing)
}
//// ---- Input field handlers ----
function reset_text(){
    // Restore the page grid to its single-line-input layout (phones get a
    // taller header row) and pin the message view to the bottom.
    const grid_container = document.getElementById("grid-container")
    const header_size = screen.width < 800 ? '100' : 40
    grid_container.style.gridTemplateRows = `${header_size}px auto 85px 1.5em`
    const message_view = document.getElementById("react-messages")
    message_view.scrollTop = message_view.scrollHeight
}
// Enable sending of messages
// The chat input textarea; shared by submit() and the typing handlers below.
let sendbox = document.getElementById("send-box")
function submit() {
    // Send the current draft as a chat message; no-op on an empty box.
    if (sendbox.value.length === 0)
        return
    // Package the message as a single-element JSON list and append it to
    // the end of the /chat list ([-0:-0] means "insert at the end").
    const message_body = JSON.stringify([{
        user: browser_id,
        time: new Date().getTime(),
        body: sendbox.value
    }])
    node.setPatch('/chat', `[-0:-0] = ${message_body}`)
    reset_text()
    sendbox.value = ""
    // We are no longer typing once the message is sent.
    set_not_typing()
}
// AFK timer handle armed by set_typing() and local "currently typing" flag.
let typing_timeout_id
let typing = false
// Periodically broadcast the current draft text while typing.
// (live_type_update_freq is defined earlier in this file — not visible here.)
setInterval(update_typing, live_type_update_freq)
function set_typing(text) {
    // Mark the local user as typing and (re)arm the AFK timer: if no further
    // keystroke arrives within `typing_timeout` ms we revert to not-typing.
    // NOTE(review): the `text` parameter is unused — confirm callers.
    typing = true
    if (typing_timeout_id !== undefined)
        clearTimeout(typing_timeout_id)
    typing_timeout_id = setTimeout(set_not_typing, typing_timeout)
}
function set_not_typing () {
    // Clear our typing state both locally and in the shared /usr document.
    if (!Object.prototype.hasOwnProperty.call(users, browser_id))
        return
    const me = users[browser_id]
    // Only broadcast when the shared state still says we are typing.
    if (me.typing)
        node.setPatch('/usr', `[${escaped_id}].typing = false`)
    me.typing = false
    typing = false
}
function update_typing() {
    // Periodic tick: publish the current draft text as our live-typing
    // preview, but only when it changed since the last broadcast.
    if (!Object.prototype.hasOwnProperty.call(users, browser_id))
        return
    const published = users[browser_id].typing  // last value we broadcast
    const draft = sendbox.value
    // Broadcast only while the local typing flag is set (not timed out)
    // and the textbox actually changed since the last tick.
    if (typing && published != draft) {
        node.setPatch('/usr', `[${escaped_id}].typing = ${JSON.stringify(draft)}`)
        users[browser_id].typing = draft
    }
}
// Clicking the send button submits the draft.
document.getElementById("send-msg").addEventListener("click", submit)
// Enter (keyCode 13) without Shift sends; Shift+Enter inserts a newline.
sendbox.addEventListener("keydown", e => {
if (e.keyCode == 13 && !e.shiftKey) {
e.preventDefault()
submit()
}
})
// Any edit toggles the typing state: non-empty draft => typing.
sendbox.addEventListener("input", e => {
if (sendbox.value.length > 0)
set_typing()
else
set_not_typing()
})
// Username Changing
let name_box = document.getElementById("username-change")
name_box.onchange = e => {
e.preventDefault()
// Strip all non-word characters from the entered name before saving.
let new_name = name_box.value.replace(/\W/g, '')
// Change username
name_box.value = new_name
set_username(new_name)
// The mobile app injects an Expo push token into this hidden field.
let expo_token = document.getElementById("expo-token")
if (expo_token.value !== "")
console.log("Mobile device found with expoToken:" + expo_token.value)
else
console.log("Not using app")
}
function generate_username () {
    // Produce a random "Name123"-style username for first-time visitors.
    const first_names = ["Bob", "Alice", "Joe", "Fred", "Mary", "Linda", "Mike", "Greg", "Raf"]
    const pick = arr => arr[Math.floor(Math.random() * arr.length)]
    const suffix = Math.floor(Math.random() * 1000)  // 0..999
    return `${pick(first_names)}${suffix}`
}
function set_username (name) {
    // Persist the chosen name locally and into the shared /usr document.
    localStorage.username = name
    const quoted = JSON.stringify(name)
    // Update our record in place when it exists; otherwise create it.
    let patch
    if (users.hasOwnProperty(browser_id))
        patch = `[${escaped_id}].displayname = ${quoted}`
    else
        patch = `[${escaped_id}] = {"displayname": ${quoted}}`
    node.setPatch('/usr', patch)
}
}
// Run create_listeners() once the DOM is ready: immediately if the document
// already finished loading (doScroll is a legacy-IE readiness probe),
// otherwise on DOMContentLoaded.
if (document.readyState === "complete" ||
(document.readyState !== "loading" && !document.documentElement.doScroll))
create_listeners()
else
document.addEventListener("DOMContentLoaded", create_listeners)
// Update statistics every N seconds
function update_stats () {
    // Render sync-debugging stats for the /usr resource into the #stats
    // element: acked version counts, fissure counts, and obsolete
    // fissure-referenced ("fizzed") versions.
    var resource = node.resource_at('/usr')
    var versions = node.versions('/usr')
    // Compute how many versions are fully acknowledged
    var acked = 0
    versions.forEach(v => { if (!resource.acks_in_process[v]) acked++ })
    // And count the fissures
    var fissures = node.fissures('/usr')
    var unmatched_fissures = node.unmatched_fissures('/usr')
    // Collect every version referenced by a fissure, then count those no
    // longer present in the time DAG (obsolete).
    var fizzed_vers = new Set([])
    fissures.forEach(f => (f.versions || []).forEach(v => fizzed_vers.add(v)))
    var obsoletes = 0
    // BUG FIX: the loop variable was undeclared (`for (v of ...)`), creating
    // an implicit global and a ReferenceError in strict-mode/module code.
    for (const v of fizzed_vers)
        if (!resource.time_dag[v])
            obsoletes++
    document.getElementById('stats').innerHTML =
        `Acked Versions: ${acked}/${versions.length} `
        + `Unmatched Fissures: ${unmatched_fissures.length}/${fissures.length}`
        + (obsoletes ? ` Obsolete Fizzed Versions: ${obsoletes}` : '')
}
// Refresh the stats after every inbound message. The setTimeout defers the
// redraw until AFTER the node has finished processing the message.
node.ons.push(() => setTimeout(update_stats)) // In a settimeout so it runs
update_stats() // after, not before processing
// the message
================================================
FILE: kernel/demos/sync9-chat/client.js
================================================
// Public VAPID key identifying this app to the browser's push service;
// passed as applicationServerKey when subscribing (presumably paired with a
// private key held by the server — confirm against server.js).
var public_vapid_key =
"BB2ikt9eLJydNI-1LpnaRYiogis3ydcUEw6O615fhaHsOsRRHcMZUfVSTNqun6HVb44M6PdfviDJkMWsdTO7XcM"
async function update_web_slider() {
    // Toggle handler for the web-notifications switch on the settings page:
    // checked => subscribe to push, unchecked => unsubscribe.
    console.log("update_web_slider")
    const enabled = document.getElementById("web_slider").checked
    if (enabled) {
        await subscribe()
    } else {
        await unsubscribe()
    }
}
async function send_notification() {
    // POST a hard-coded test message to the server's /message endpoint,
    // which fans it out as a push notification.
    const payload = {
        message: {
            user: "User",
            text:"Message"
        }
    }
    await fetch("/message", {
        method: "POST",
        body: JSON.stringify(payload),
        headers: { "content-type": "application/json" }
    });
}
// Subscribes this browser to push notifications and registers the
// subscription with the server.
async function subscribe() {
    const subscription_str = await get_subscription_string()
    console.log("Sending Push..." + subscription_str)
    await fetch("/subscribe", {
        method: "POST",
        body: subscription_str,
        headers: { "content-type": "application/json" }
    })
    console.log("Push Sent...")
}
// Returns this browser's push subscription, serialized as JSON for the server.
async function get_subscription_string () {
    console.log("Registering service worker...")
    // The worker must be scoped under /chat/ so it covers the chat page.
    const registration = await navigator.serviceWorker.register("worker.js", {
        scope: "/chat/"
    })
    console.log("Service Worker Registered...")
    console.log("Registering Push...")
    const subscription = await registration.pushManager.subscribe({
        userVisibleOnly: true,
        applicationServerKey: url_base64_to_uint8_array(public_vapid_key)
    })
    console.log("Push Registered...")
    return JSON.stringify(subscription)
}
// Sends the server this browser's subscription token and asks for it to be
// removed from the notification list.
async function unsubscribe() {
    console.log("remove()")
    const subscription_str = await get_subscription_string()
    console.log("Sending Push for removal...")
    await fetch("/chat/unsubscribe", {
        method: "POST",
        body: subscription_str,
        headers: { "content-type": "application/json" }
    })
    console.log("Push Sent to remove user from list...")
}
function url_base64_to_uint8_array(base64_string) {
    // Decode a base64url string (the VAPID key wire format) into raw bytes.
    // Pad to a multiple of 4 and map the URL-safe alphabet back to standard
    // base64 before decoding.
    const pad_len = (4 - base64_string.length % 4) % 4
    const base64 = (base64_string + "=".repeat(pad_len))
        .replace(/\-/g, "+")
        .replace(/_/g, "/")
    const raw = window.atob(base64)
    const bytes = new Uint8Array(raw.length)
    for (let i = 0; i < raw.length; ++i)
        bytes[i] = raw.charCodeAt(i)
    return bytes
}
// Grow the input area (and the grid row that holds it) to fit the draft text.
function input_size () {
    let textarea = document.getElementById("send-box")
    // Phones get a taller header row.
    let header_size = '40'
    var ta_line_height = 45 // px per line of input text
    if (screen.width < 800) {
        header_size = '100'
        ta_line_height = 45
    }
    let grid_container = document.getElementById("grid-container")
    grid_container.style.gridTemplateRows = `${header_size}px auto 85px 1.5em`
    var ta_height = textarea.scrollHeight // Get the scroll height of the textarea
    // BUG FIX: style.height must carry a CSS unit; assigning a bare number
    // is an invalid value and is silently ignored by the browser.
    textarea.style.height = `${ta_height}px`
    var number_of_lines = Math.floor(ta_height/ta_line_height)
    // Widen the input grid row as the text wraps onto more lines.
    if (number_of_lines == 1)
        grid_container.style.gridTemplateRows = `${header_size}px auto 85px 1.5em`
    else if (number_of_lines == 2)
        grid_container.style.gridTemplateRows = `${header_size}px auto 125px 1.5em`
    else if (number_of_lines == 3)
        grid_container.style.gridTemplateRows = `${header_size}px auto 175px 1.5em`
    else if (number_of_lines >= 4)
        grid_container.style.gridTemplateRows = `${header_size}px auto 220px 1.5em`
    // Keep the newest message in view.
    var message_view = document.getElementById("react-messages")
    message_view.scrollTop = message_view.scrollHeight
}
// Safari on mobile draws its toolbar over the page bottom, so shrink the body.
function screen_size () {
    if (screen.width >= 800)
        return
    const ua = navigator.userAgent.toLowerCase()
    // Chrome's UA string also contains "safari", so require safari WITHOUT
    // chrome to detect actual mobile Safari.
    const is_real_safari = ua.indexOf('safari') !== -1 && ua.indexOf('chrome') === -1
    if (is_real_safari) {
        console.log("safari mobile")
        document.body.style.height = '90vh'
    }
}
================================================
FILE: kernel/demos/sync9-chat/mobile.css
================================================
/* Base (desktop) styling for the send button, input box, and settings icon. */
#send-msg img{
width:65px;
height:65px;
}
#send-msg {
position:absolute;
right:13%;
background-color:rgb(255,255,255,0);
border-radius: 0%;
border: 0px solid #fff;
margin-left:-100px;
margin-top:-20px;
}
#send-box {
padding-right:110px;
padding-left:5px;
}
#settings-icon {
position:absolute;
right:5px;
/* top:5px; */
width:30px;
height:30px;
}
/* css for phones */
@media only screen and (max-device-width: 640px) {
/* Page layout: header / messages / input / typing-indicator rows. */
.grid-container {
width: 100%;
height: 100%;
display: grid;
font-family: sans-serif;
grid-template-columns: auto;
grid-template-rows: 100px auto 85px 1.5em;
grid-template-areas:
"title"
"messages"
"input"
"typing";
}
.grid-container > * {
padding: 0 5%;
}
/* Header size */
header {
font-size: 40px;
padding:30px;
height:100px;
}
input {
width:200px;
padding:15px;
font-size:27px;
font-family: "Times New Roman", Times, serif;
font-family: sans-serif;
}
#settings-hover-container {
margin-left:20px;
}
#settings-icon {
float:right;
right:14px;
width:45px;
height:45px;
}
/* Messages */
.messageBox .msg .user-id {
font-weight: 600;
font-size:35px;
}
.messageBox .msg .timestamp {
font-size: 16px;
margin-left: 10px;
}
.messageBox .msg {
font-size: 32px;
margin-left: 5px;
}
.messageBox {
margin-bottom:15px;
}
/* Input and send */
#send-box {
font-family: "Times New Roman", Times, serif;
font-family: sans-serif;
height: auto;
flex-grow: 3;
font-size: 45px;
/* padding: 20px; */
padding-right:120px;
padding-left:15px;
line-height:45px;
padding-top:17.5px;
padding-bottom:0px;
margin-bottom:0;
}
/* Send button overlaps the right edge of the input box. */
#send-msg {
position:absolute;
right:10%;
display: inline-block;
background-color:rgb(255,255,255,0);
border-radius: 0%;
border: 0px solid #fff;
text-align: center;
align-self: center;
margin: 6px;
cursor: pointer;
user-select: none;
margin-top:-15px;
/* margin-left:-450px; */
}
#send-msg img{
/* for the white airplane */
width:70px;
height:65px;
/*for the black airplane*/
/* width:85px;
height:85px;
margin-left:13px;
margin-top:-3px; */
}
}
================================================
FILE: kernel/demos/sync9-chat/package.json
================================================
{
"name": "sync9-chat",
"version": "0.0.1",
"description": "",
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org",
"main": "server.js",
"dependencies": {
"better-sqlite3": "^5.4.3",
"dotenv": "^8.2.0",
"expo-server-sdk": "^3.5.1",
"express": "^4.17.1",
"idb": "^5.0.4",
"web-push": "^3.4.4"
}
}
================================================
FILE: kernel/demos/sync9-chat/settings.css
================================================
/* Settings page: centered content with a standard CSS toggle switch. */
body {
font-family: sans-serif;
padding:10%;
text-align:center;
}
#home-icon {
display:inline;
width:30px;
height:30px;
}
/* The switch container; the real checkbox inside it is hidden. */
.switch {
position: relative;
display: inline-block;
width: 30px;
height: 17px;
}
.switch input {
opacity: 0;
width: 0;
height: 0;
}
/* The track of the toggle. */
.slider {
position: absolute;
cursor: pointer;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: #ccc;
-webkit-transition: .4s;
transition: .4s;
}
/* The knob of the toggle. */
.slider:before {
position: absolute;
content: "";
height: 13px;
width: 13px;
left: 2px;
bottom: 2px;
background-color: white;
-webkit-transition: .4s;
transition: .4s;
}
input:checked + .slider {
background-color: #2196F3;
}
input:focus + .slider {
box-shadow: 0 0 1px #2196F3;
}
/* Slide the knob to the right when checked. */
input:checked + .slider:before {
-webkit-transform: translateX(13px);
-ms-transform: translateX(13px);
transform: translateX(13px);
}
/* Rounded sliders */
.slider.round {
border-radius: 17px;
}
.slider.round:before {
border-radius: 50%;
}
================================================
FILE: kernel/demos/sync9-chat/settings.html
================================================
BraidChat Settings
BraidChat Settings
Web Notifications:
Test mobile:
================================================
FILE: kernel/demos/sync9-chat/worker.js
================================================
// Service worker: shows incoming push messages as browser notifications.
console.log("Service Worker Loaded...");
self.addEventListener("push", e => {
    // Push payloads are JSON; only `title` is used here.
    const data = e.data.json();
    console.log("Push Received..."); // fixed typo: was "Recieved"
    self.registration.showNotification(data.title, {
        body: "Notified by braid",
        icon: "/icon.png"
    });
});
================================================
FILE: kernel/demos/wiki/wiki-client.html
================================================
================================================
FILE: kernel/demos/wiki/wiki-server.js
================================================
const port = 3007;
// Registers the braid bundler (side-effect import).
require('../../../util/braid-bundler.js')
var fs = require('fs')
// Serve a prebuilt client bundle and the wiki page from memory.
var bundle = fs.readFileSync('../../../builds/braid-bundle.js')
var wiki_client = fs.readFileSync('wiki-client.html')
var cb = (req, res) => {
res.writeHead(200)
res.end(req.url == '/braid-bundle.js' ? bundle : wiki_client)
}
// Use HTTPS when cert files are present, plain HTTP otherwise.
var server = (fs.existsSync('certs/private-key') && fs.existsSync('certs/certificate')) ?
require('https').createServer({
key: fs.readFileSync('certs/private-key'),
cert: fs.readFileSync('certs/certificate')
}, cb) :
require('http').createServer(cb)
server.listen(port)
// Share the same HTTP(S) server with the websocket endpoint.
var wss = new (require('ws').Server)({server})
var node = require('../../node.js')()
var store = require('../../sqlite-store.js')('db.sqlite')
require('../../store.js')(node, store).then(node => {
node.fissure_lifetime = 1000*60*60*8 // 8 hours
// On protocol errors, drop the offending peer's binding for that key.
node.on_errors.push((key, origin) => node.unbind(key, origin))
var ws = require('../../websocket-server.js')(node, {wss})
console.log('keys at startup: ' + JSON.stringify(Object.keys(node.resources)))
console.log('serving on port: ' + port)
})
================================================
FILE: kernel/errors.js
================================================
function report (method, error) {
    // Optionally log the protocol error (controlled by the global
    // show_protocol_errors flag), then always throw it to the caller.
    if (show_protocol_errors)
        console.log(`PROTOCOL ERROR for ${method}: ${error}`)
    throw error
}
module.exports = require.errors = (node) => ({
get (args) {
var {key, subscribe, version, parents, origin} = args
//var key = args.key, subscribe = args.subscribe, parents = args.parents
if (!key || typeof(key) !== 'string')
report('get', 'invalid key' + JSON.stringify(key))
log('get:', node.pid, key)
var resource = node.resource_at(key)
if (subscribe && subscribe.keep_alive
&& resource.keepalive_peers[origin.id])
report('get', 'we already welcomed them')
if (version && typeof(version) != 'string')
report('get', 'invalid version: ' + JSON.stringify(version))
if (parents && (typeof(parents) != 'object'
|| Object.entries(parents).some(([k, v]) => v !== true)))
report('get', 'invalid parents: ' + JSON.stringify(parents))
},
set (args) {
var {key, version, parents, patches, origin} = args
if (!key || typeof(key) !== 'string')
throw report('set', 'invalid key: ' + JSON.stringify(key))
var resource = node.resource_at(key)
// If you're trying to join a persistent consistent group, then
// you probably don't want to send any SETs before you actually
// join and know what the current version is:
if (origin && u.has_keep_alive(origin, key)
&& !resource.keepalive_peers[origin.id])
report('set', 'we did not welcome them yet')
if (!patches || !Array.isArray(patches)
|| patches.some(x => typeof(x) != 'string'))
report('set', 'invalid patches: ' + JSON.stringify(patches))
if (!version || typeof(version) != 'string')
report('set', 'invalid version: ' + JSON.stringify(version))
if (parents && (typeof(parents) != 'object'
|| Object.entries(parents).some(([k, v]) => v !== true)))
report('set', 'invalid parents: ' + JSON.stringify(parents))
},
welcome (args) {
var {key, versions, fissures, unack_boundary, min_leaves, parents, origin} = args
// Sanity-check the input
{
if (!key || typeof(key) != 'string')
report('welcome', 'invalid key: ' + JSON.stringify(key))
var resource = node.resource_at(key)
if (!resource.keepalive_peers[origin.id])
report('welcome', 'we did not welcome them yet')
if (!Array.isArray(versions) || !versions.every(v => {
if (v.version && typeof(v.version) != 'string') return false
if (!v.parents || typeof(v.parents) != 'object'
|| Object.entries(v.parents).some(([k, v]) => v !== true)) return false
if (!Array.isArray(v.patches)
|| v.patches.some(x => typeof(x) != 'string')) return false
if (v.hint) {
if (!v.hint.sort_keys) return false
if (typeof(v.hint.sort_keys) != 'object') return false
if (!Object.entries(v.hint.sort_keys).every(([index, key]) => (''+index).match(/^\d+$/) && typeof(key) == 'string')) return false
}
return true
})) {
report('welcome', 'invalid versions: ' + JSON.stringify(versions))
}
if (!Array.isArray(fissures) || !fissures.every(fissure => {
if (!fissure || typeof(fissure) != 'object') return false
if (typeof(fissure.a) != 'string') return false
if (typeof(fissure.b) != 'string') return false
if (typeof(fissure.conn) != 'string') return false
if (!fissure.versions || typeof(fissure.versions) != 'object'
|| !Object.entries(fissure.versions).every(([k, v]) => v === true)) return false
if (!fissure.parents || typeof(fissure.parents) != 'object'
|| !Object.entries(fissure.parents).every(([k, v]) => v === true)) return false
if (typeof(fissure.time) != 'number') return false
return true
})) {
report('welcome', 'invalid fissures: ' + JSON.stringify(fissures))
}
if (unack_boundary && (typeof(unack_boundary) != 'object'
|| !Object.entries(unack_boundary).every(
([k, v]) => v === true)))
report('welcome', 'invalid unack_boundary: '+JSON.stringify(unack_boundary))
if (min_leaves && (typeof(min_leaves) != 'object'
|| !Object.entries(min_leaves).every(
([k, v]) => v === true)))
report('welcome', 'invalid min_leaves: ' + JSON.stringify(min_leaves))
if (parents && (typeof(parents) != 'object'
|| !Object.entries(parents).every(
([k, v]) => v === true)))
report('welcome', 'invalid parents: ' + JSON.stringify(parents))
}
},
forget (args) {
if (!key || typeof(key) != 'string')
report('forget', 'invalid key: ' + JSON.stringify(key))
if (!node.incoming_subscriptions.has(key, origin.id))
report('forget', `pipe "${origin.id}" did not get the key "${key}" yet`)
},
ack (args) {
var {key, valid, seen, version, origin} = args
// guard against invalid messages
if (typeof(key) !== 'string')
report('ack', 'invalid key: ' + JSON.stringify(key))
var resource = node.resource_at(key)
if (!resource.keepalive_peers[origin.id])
report('ack', 'we did not welcome them yet')
if (typeof(valid) !== 'undefined')
report('ack', 'support for valid flag not yet implemented')
if (seen !== 'local' && seen !== 'global')
report('ack', 'invalid seen: ' + JSON.stringify(seen))
if (typeof(version) !== 'string')
report('ack', 'invalid version: ' + JSON.stringify(version))
},
fissure ({key, fissure, origin}) {
if (typeof(key) !== 'string')
return report('fissure', 'invalid key: ' + JSON.stringify(key))
var resource = node.resource_at(key)
if ((!fissure || typeof(fissure) !== 'object') ||
(!fissure.a || typeof(fissure.a) !== 'string') ||
(!fissure.b || typeof(fissure.b) !== 'string') ||
(!fissure.conn || typeof(fissure.conn) !== 'string') ||
(!fissure.versions || typeof(fissure.versions) !== 'object'
|| !Object.entries(fissure.versions).every(([k, v]) => v === true)) ||
(!fissure.parents || typeof(fissure.parents) !== 'object'
|| !Object.entries(fissure.parents).every(([k, v]) => v === true)) ||
(typeof(fissure.time) !== 'number'))
{
report('fissure', 'invalid fissure: ' + JSON.stringify(fissure))
}
}
})
================================================
FILE: kernel/http-client.js
================================================
// This file is still being used with the sync9-chat demo, but Mike will
// refactor it soon.
var u = require('utilities.js');
// Binds a node to a url, allowing the node to send GETS and SETS to that url
module.exports = require['http-client-old'] = function add_http_client({node, url, prefix}) {
url = url || 'https://localhost:80/'
prefix = prefix || '/*'
var enabled = true;
// Used to abort in-flight fetches when the pipe is disabled.
const controller = new AbortController();
// Make a fake pipe object
// The real ones check acks and synchronization and such
let pipe = {
id: u.random_id(),
send: send,
// Inbound path: log (except pings) and dispatch to the node handler
// named by the message's method.
recv: function(args) {
if (args.method != "ping" && args.method != "pong") {
nlogf('H1', 'remote', '=->', 'local', args);
}
args.origin = pipe;
node[args.method](args);
},
//connection: "http"
};
node.bind(prefix, pipe)
function send(args) {
    // Outbound dispatch for the fake pipe: GETs and SETs become HTTP
    // requests; every other method is dropped (logged with a -|> arrow).
    const method = args.method
    if (method === 'get')
        send_get(args)
    else if (method === 'set')
        send_set(args)
    let symbol = '-|>'
    if (method === 'get' || method === 'set')
        symbol = '-=>'
    if (method === 'error')
        symbol = '-!>'
    if (method != "ping" && method != "pong") {
        nlogf('H1', 'local ', symbol, 'remote', args);
    }
}
// Read sets from a persistent stream: incrementally parses a braid HTTP
// response body into {version, parents, patches} messages, invoking
// `callback` per complete message and `finished` when the stream closes.
function sets_from_stream(stream, callback, finished) {
// Set up a reader
let reader = stream.getReader()
let decoder = new TextDecoder('utf-8')
// Parser state, shared with the nested parse_headers/parse_patches:
// unconsumed text, the current message's headers (false until parsed),
// and the patches accumulated so far for that message.
let buffer = '';
let headers = false;
let patches = [];
reader.read().then(function read ({value, done}) {
if (done) {
// subscription was closed
if (buffer.trim().length)
console.debug("Connection was closed. Remaining data in buffer:", buffer);
else
console.debug("Connection was closed. Buffer was empty.")
finished();
return;
}
const chunkStr = value ? decoder.decode(value) : "";
// Remove newlines at the beginning, maybe unnecessary
buffer = (buffer + chunkStr).trimStart();
if (value)
console.debug(`Got a chunk of length ${chunkStr.length}. Current buffer:`);
else
// If there's no new chunk then we must have had some data left over after a successful parse
console.debug("Reading on unchanged buffer:")
console.debug(buffer);
// If we haven't parsed headers yet, try to parse headers.
if (!headers) {
console.debug("Trying to parse headers...")
const parsedH = parse_headers();
if (parsedH) {
headers = parsedH.headers;
// Take the parsed headers out of the buffer
buffer = buffer.substring(parsedH.consumeLength);
console.debug("Success. Headers:", headers)
} else {
console.debug("Failed to parse headers. We probably don't have enough.")
}
}
if (headers)
console.debug("Trying to parse patches...")
// Try to parse patches. parse_patches returns boolean
if (headers && parse_patches()) {
console.debug("Success. Patches:", patches)
// We have a complete message ...
let msg = {
version: headers.version ? JSON.parse(headers.version) : null,
patches: patches ? patches.slice() : null,
parents: headers.parents ? {} : null
};
// Parents header is a comma-separated list of JSON strings.
if (headers.parents)
headers.parents.split(", ").forEach(x => msg.parents[JSON.parse(x)] = true)
console.debug("Assembled complete message: ", msg);
// Deliver asynchronously so parsing isn't re-entered by the callback.
setTimeout(callback, 0, msg);
headers = false;
patches = [];
// We've gotten a SET, but actually there might be more still in the buffer.
// We have to keep reading messages until we fail, and only then can we look for the next chunk.
console.debug("Restarting in current buffer...")
return read({value: false, done: false});
} else {
if (headers)
console.debug("Couldn't parse patches. We probably don't have enough.")
console.debug("Waiting for next chunk to continue reading")
return reader.read().then(read).catch(console.error);
}
}).catch(console.error);
// Parse one header block (terminated by a blank line) from `buffer`.
// Returns {headers, consumeLength} or false if the block is incomplete.
function parse_headers() {
// This string could contain a whole response.
// So first let's isolate to just the headers.
const end_of_headers = buffer.indexOf('\n\n');
if (end_of_headers == -1)
return false;
const stuff_to_parse = buffer.substring(0, end_of_headers)
// Now let's grab everything from these headers
var headers = {},
regex = /([\w-]+): (.*)/g,
temp
while (temp = regex.exec(stuff_to_parse))
headers[temp[1].toLowerCase()] = temp[2]
// TODO: Parse key-value pair headers and list headers.
return {headers: headers, consumeLength: end_of_headers + 2}
}
// Consume `headers.patches` pseudoheader-framed patches from `buffer`.
// Returns true when the full set has been parsed, false (or undefined —
// also falsy) when more data is needed.
function parse_patches() {
if (headers['content-length']) {
console.debug("Got an absolute body")
// This message has "body"
const length = headers['content-length'];
// NOTE(review): `h` is not defined anywhere in this scope — this line
// throws a ReferenceError if a content-length message ever arrives,
// and the comparison direction also looks suspect. The branch then
// falls through without consuming the body or returning true.
if (h.length + length < buffer.length)
return false;
// ...
// This behavior is not in the initial http1 spec, so we don't have to worry about it
}
if (headers.patches) {
// Parse patches until we run out of patches to parse or get all of them
while (patches.length < headers.patches) {
buffer = buffer.trimStart();
const parsePatchHeaders = parse_headers();
if (!parsePatchHeaders) {
console.debug("Failed to parse patch headers!")
return false;
}
const patchHeaders = parsePatchHeaders.headers;
const headerLength = parsePatchHeaders.consumeLength;
// assume we have content-length...
const length = parseInt(patchHeaders['content-length']);
// Does our current buffer contain enough data that we have the entire patch?
if (buffer.length < headerLength + length) {
console.debug("Buffer is too small to contain the rest of the patch...")
return false;
}
// Assume that content-range is of the form 'json .index'
const r = patchHeaders['content-range']
const patchRange = r.startsWith("json ") ? r.substring(5) : r;
const patchValue = buffer.substring(headerLength, headerLength + length);
// We've got our patch!
patches.push(`${patchRange} = ${patchValue}`);
buffer = buffer.substring(headerLength + length);
console.debug(`Successfully parsed a patch. We now have ${patches.length}/${headers.patches}`);
}
console.debug("Parsed all patches.")
return true;
}
}
}
function send_get (msg) {
    // Issue a (possibly subscribing) GET for msg.key and feed each SET
    // parsed from the streamed response back into the pipe. Retries with
    // exponential backoff (capped at 100s) on network failure.
    var h = {"x-client-id": node.pid};
    if (msg.version) h.version = JSON.stringify(msg.version)
    if (msg.parents) h.parents = Object.keys(msg.parents).map(JSON.stringify).join(', ')
    if (msg.subscribe) {
        // keep-alive isn't supported over plain HTTP; force it off.
        if (msg.subscribe.keep_alive)
            msg.subscribe.keep_alive = false;
        h.subscribe = Object.entries(msg.subscribe)
            .map(a => `${a[0].replace("_", "-")}=${a[1]}`)
            .join(";");
    }
    const sendUrl = new URL(msg.key, url);
    function trySend(waitTime) {
        console.log(`Fetching ${sendUrl}`);
        fetch(sendUrl, {method: 'GET',
                        mode: 'cors',
                        headers: new Headers(h),
                        signal: controller.signal})
            .then(function (res) {
                if (!res.ok) {
                    console.error("Fetch failed!", res)
                    return
                }
                // BUG FIX: these callbacks were written as `callback = ...`
                // and `finished = ...`, assigning implicit globals (a
                // ReferenceError in strict mode). Pass the arrows directly.
                sets_from_stream(res.body,
                    setMessage => {
                        // Insert the method and key into this
                        setMessage.method = "set";
                        setMessage.key = msg.key;
                        // TODO: Don't have to do this.
                        node.resource_at(msg.key).weve_been_welcomed = true;
                        pipe.recv(setMessage);
                    },
                    () => {
                        // Maybe close the fetch?? idk
                        console.warn(`Subscription to ${msg.key} ended by remote host`);
                    }
                );
            })
            .catch(function (err) {
                console.error("Fetch GET failed: ", err)
                // Exponential backoff
                setTimeout(() => trySend(Math.min(waitTime * 5, 100000)), waitTime)
            })
    }
    trySend(100);
}
function send_set (msg) {
    // Translate a SET message into an HTTP PUT, encoding patches in the
    // pseudoheader block format. Retries with exponential backoff (capped
    // at 100s) on network failure.
    const h = {
        'content-type': 'application/json',
        'merge-type': 'sync9',
        "x-client-id": node.pid
    }
    if (msg.version)
        h.version = JSON.stringify(msg.version)
    if (msg.parents)
        h.parents = Object.keys(msg.parents).map(JSON.stringify).join(', ')
    if (msg.subscribe)
        h.subscribe = Object.entries(msg.subscribe)
            .map(([k, v]) => `${k.replace("_", "-")}=${v}`)
            .join(";")
    let body = msg.patch;
    if (msg.patches) {
        // Frame each patch as "content-length/content-range" pseudoheaders
        // followed by a blank line and the value.
        const blocks = []
        for (const patch of msg.patches) {
            // We should use the sync9 patch parser
            const m = patch.match(/(.*?)\s*=\s*(.*)/); // (...) = (...)
            blocks.push(`content-length: ${m[2].length}\ncontent-range: json ${m[1]}\n\n${m[2]}\n`)
        }
        body = blocks.join("\n");
        h.patches = msg.patches.length;
    }
    const sendUrl = new URL(msg.key, url);
    function trySend(waitTime) {
        fetch(sendUrl, {method: 'PUT', body: body, mode: 'cors', headers: new Headers(h)})
            .then(res => res.text().then(text =>
                console.debug(`Received SET response: status ${res.status}, body "${text}"`)))
            .catch(err => {
                console.error("Fetch SET failed: ", err);
                // Exponential backoff
                setTimeout(() => trySend(Math.min(waitTime * 5, 100000)), waitTime)
            });
    }
    trySend(20);
}
return {
pipe,
enabled() {return enabled},
enable() {nlog('ENABLING PIPE', pipe.id); enabled = true; }, // connect()
disable() {nlog('DISABLING PIPE',pipe.id); enabled = false; controller.abort()}, // disconnect()
toggle() {if (enabled) {disable()} else enable()}
}
}
================================================
FILE: kernel/http-server.js
================================================
// This file is still being used with the sync9-chat demo, but Mike will
// refactor it soon.
// Example braid-peer as a web server
//const fs = require('fs');
const assert = require('assert');
//const pipe = require('../pipe.js');
const parseHeaders = require('parse-headers');
var u = require('../util/utilities.js');
module.exports = function add_http_server(node) {
// Write an array of patches into the pseudoheader format.
const openPipes = {};
function writePatches(patches) {
    // Serialize patches into the pseudoheader wire format:
    //   patches: n
    //
    //   content-length: 14          (patch #1)
    //   content-range: json .range  (or) json [indices]
    //
    //   ["json object"]
    //   ... repeated for each patch
    const sections = patches.map(patch => {
        // Split "range = value". (Should be rewritten to use sync9's parser.)
        const m = patch.match(/(.*?)\s*=\s*(.*)/)
        assert(m.length == 3)
        const [, range, change] = m
        return `\ncontent-length: ${change.length}\ncontent-range: json ${range}\n\n${change}\n`
    })
    return `patches: ${patches.length}\n` + sections.join("")
}
// This function reads n patches in pseudoheader format from a ReadableStream
// and then fires a callback when they're finished
// Might be nice to use a promise here
function readPatches(n, stream, cb) {
let patches = [];
// Accumulates unconsumed stream text across 'data' events.
let curPatch = "";
if (n == 0)
return cb(patches);
stream.on('data', function parse (chunk) {
// Otherwise we'll have extra newline at the start. I'm not sure if this would mess up parse-headers.
curPatch = (curPatch + chunk).trimStart();
// Find out if we have an entire patch.
// This means: first, we look for a double newline.
const headerLength = curPatch.indexOf("\n\n");
if (headerLength == -1) return;
// Now that we have all the headers, we have to parse them and look for content-length
// TODO: Support Transfer-Encoding: Chunked (maybe?)
const headers = parseHeaders(curPatch.substring(0, headerLength));
assert(headers['content-length']);
const length = parseInt(headers['content-length']);
// Does our current buffer contain enough data that we have the entire patch?
if (curPatch.length < headerLength + 2 + length) return;
// Assume that content-range is of the form 'json .index'
const patchRange = headers['content-range'].startsWith("json ") ?
headers['content-range'].substring(5) :
headers['content-range'];
const patchValue = curPatch.substring(headerLength + 2, headerLength + 2 + length);
// We've got our patch!
patches.push(`${patchRange} = ${patchValue}`);
// Drop the consumed patch from the buffer before continuing.
curPatch = curPatch.substring(headerLength + 2 + length);
if (patches.length == n) {
stream.pause();
cb(patches);
} else
// Try parsing for another message in the current buffer
parse("");
});
stream.on('end', () => {
// If the stream ends before we get everything, then return what we did receive
if (patches.length != n) {
console.warn(`Got an incomplete PUT: ${patches.length}/${n} patches were received`);
cb(patches);
}
})
}
// Construct a (fake) pipe object that allows writing data into a persistent stream
function responsePipe(res, id) {
// Construct pipe
const pipe = {
id: id,
send: sendVersions,
disconnect: disconnect
//connection: "http", // These are supposed to be unique ids of some sort :)
};
// Plain HTTP only carries versions; acks/fissures etc. are dropped.
const allowedMethods = ["set", "welcome"]
// The node will call this method with JSON messages
function sendVersions (args) {
let symbol = allowedMethods.includes(args.method) ? '-=>' : '-|>';
if (args.method === 'error')
symbol = '-!>'
if (args.method != "ping" && args.method != "pong") {
nlogf('H1', 'server', symbol, id.slice(0,6).padEnd(6), args);
}
// The protocol doesn't support things like acks and fissures
if (!allowedMethods.includes(args.method)) {
return;
}
// Extract the three relevant fields from JSON message:
// a welcome carries many versions, a set carries exactly one.
let versions = [];
if (args.method == "welcome") {
versions = args.versions.map(f => ({
version: f.version,
parents: f.parents,
patches: f.patches
}))
} else if (args.method == "set") {
versions = [{
version: args.version,
parents: args.parents,
patches: args.patches
}]
}
// Stream each version as a header block plus pseudoheader-framed patches.
for (let version of versions) {
if (version.version)
res.write(`Version: ${JSON.stringify(version.version)}\n`)
if (Object.keys(version.parents).length)
res.write(`Parents: ${Object.keys(version.parents).map(JSON.stringify).join(", ")}\n`)
res.write("Merge-Type: sync9\n")
res.write("Content-Type: application/json\n")
res.write(writePatches(version.patches)) // adds its own newline
res.write("\n")
}
}
function disconnect () {res.end(); }
return pipe;
}
// The entry point of the server.
// Listen for requests
function handleHttpResponse(req, res) {
    // Apply hardcoded access control headers
    // The cors() method will return true if the request is an OPTIONS request
    // (It'll also respond 200 and end the stream)
    if (cors(req, res))
        return;
    // There should be a better way to do this.
    // Initially, this would take a message, create a pipe, and recv the message
    // But it turns out that in many cases you actually want to set some data on the node
    // before it receives the message but after the pipe is created
    const create_pipe = (id) => {
        if (openPipes[id]) {
            console.error("ClientID collision!");
            return;
        }
        let pipe = responsePipe(res, id);
        openPipes[id] = {key: req.url, origin: pipe};
        // When the client disappears, unsubscribe it from the node
        res.on('close', () => {
            console.log(`Connection closed on ${req.url}`);
            assert(openPipes[id]);
            node.forget(openPipes[id]);
            delete openPipes[id];
        });
    };
    // Deliver a message to the node, attaching the pipe as origin if one exists
    const recv = (id, msg) => {
        if (msg.method != "ping" && msg.method != "pong") {
            nlogf('H1', id.slice(0,6).padEnd(6), '=->', 'server', msg);
        }
        if (openPipes[id])
            msg.origin = openPipes[id].origin;
        return node[msg.method](msg);
    }
    // Copy headers that have the same value in HTTP as Braid
    let msg = {
        key: req.url
    }
    // Copy headers that need minor modifications but no additional conditionals
    if (req.headers.version)
        msg.version = JSON.parse(req.headers.version)
    if (req.headers.parents) {
        msg.parents = {};
        req.headers.parents.split(", ").forEach(x => msg.parents[JSON.parse(x)] = true)
    }
    // If we end up having more methods supported, maybe make this a switch
    if (req.method == "GET") {
        res.setHeader('cache-control', 'no-cache, no-transform');
        if (!req.headers.hasOwnProperty("subscribe")) {
            // Respond over plain http
            res.setHeader('content-type', 'text/json');
            res.statusCode = 200;
            // If the origin is just an id, then there will be no callback or subscription.
            msg.origin = {id: 'null-pipe'};
            // And the node will just return the value of the resource at the specified version
            res.end(JSON.stringify(node.get(msg)));
            return;
        }
        // Set some headers needed to indicate a subscription.
        res.statusCode = 209;
        res.setHeader("subscribe", req.headers.subscribe)
        res.setHeader('content-type', 'text/braid-patches');
        // res.setHeader('connection', 'Keep-Alive');
        // Parse the subscribe header. Options are:
        //   keep-alive=true   # this can actually be specified as just keep-alive, but we can support that later
        //   keep-alive=false
        //   keep-alive=number
        // NOTE(review): assumes the subscribe header always contains a
        // keep-alive=... clause; a bare "subscribe" header would throw here.
        let subStr = req.headers.subscribe.match(/keep-alive=(\w+)/)[1];
        let sub = false;
        if (subStr == "true")
            sub = true;
        else if (subStr != "false") // It's a number
            sub = parseInt(subStr);
        msg.subscribe = {"keep-alive": sub};
        msg.method = "get"
        // Receive the request
        const clientID = `${req.headers['x-client-id'] || u.random_id()}=>${msg.key}`;
        create_pipe(clientID);
        // recv will call get(), which will return the value of the resource if successful and undefined otherwise
        let result = recv(clientID, msg);
        if (result == undefined) {
            // Bug fix: this used to be `res.end(500)`, which passes the number
            // 500 as a body chunk (a TypeError in Node). Signal the failure
            // through the status code and end the response instead.
            res.statusCode = 500;
            res.end();
        }
    }
    else if (req.method == "PUT") {
        // We only support these headers right now...
        assert(req.headers["content-type"] == "application/json")
        assert(req.headers["merge-type"] == "sync9")
        let status = 200;
        if (!node.resources[msg.key])
            // If we don't have the resource, it'll be created.
            // We actually need to add a way to prevent clients from creating braid resources with the same names
            // as file resources, which would make them unreadable.
            // I think we should instead make the server explicitly bind itself to some paths.
            status = 201;
        res.statusCode = status;
        msg.method = "set"
        // Parse patches
        // Try to read patches from the request body
        // req.headers.patches is the number of patches expected
        readPatches(req.headers.patches, req, (patches) => {
            // When finished, create a pipe.
            msg.patches = patches;
            res.setHeader("patches", "OK");
            const clientID = `${req.headers['x-client-id'] || u.random_id()}=>${msg.key}`;
            // recv will call node.set, which will return `version` if successful and undefined otherwise
            // TODO: Maybe return an error code from get/set?
            let result = recv(clientID, msg);
            if (result == undefined)
                res.statusCode = 500;
            res.end();
        })
    }
}
// Apply blanket CORS headers; answer (and swallow) OPTIONS preflights.
// Returns true when the request was an OPTIONS preflight and has been ended.
function cors(req, res) {
    res.setHeader("Access-Control-Allow-Origin", "*");
    res.setHeader("Access-Control-Allow-Methods", "OPTIONS, HEAD, GET, PUT");
    res.setHeader("Access-Control-Allow-Headers", "subscribe, x-client-id, version, parents, merge-type, content-type");
    // Anything other than a preflight continues through normal handling
    if (req.method !== 'OPTIONS')
        return false;
    res.writeHead(200);
    res.end();
    return true;
}
// On Ctrl-C, cleanly forget every open HTTP subscription before exiting,
// so the node records the disconnects instead of just losing dead sockets.
process.on('SIGINT', function() {
    if (Object.keys(openPipes).length) {
        console.log("\nForgetting H1 connections:");
        Object.values(openPipes).forEach(sub => {
            console.log(` pipe ${sub.origin.id} on resource${sub.key}`);
            // sub is {key, origin}, which matches node.forget({key, origin})
            node.forget(sub);
            sub.origin.disconnect();
        });
        console.log("Closing process");
    }
    process.exit();
});
return handleHttpResponse;
}
================================================
FILE: kernel/leadertab-shell.js
================================================
var util = require('utilities.js');
var store = require('store.js');
// Tab lifecycle states: exactly one tab at a time should reach LEADER.
const states = {
    // Don't process incoming commands and don't send outgoing ones
    DISABLED: 0,
    // The leader exists and it is not us.
    // We should send any activity to the leader.
    CLIENT: 1,
    // There is no leader.
    // We should try to become the leader, and save anything we get until then.
    // We should also broadcast anything we do before then.
    ELECTING: 2,
    // We have become the leader, but we aren't ready to send things to the server yet.
    // We should get the connection ready, and save anything we get until then.
    ELECTED: 3,
    // We are the leader.
    // We should apply incoming commands and broadcast new state.
    LEADER: 4
};
// Message types exchanged between tabs over the BroadcastChannel.
const signal_types = {
    // The leader has submitted their letter of resignation.
    // The leader is not going to handle events during the election.
    // This means we have to cache incoming events.
    LEADER_UNLOADING: "leader-unloading",
    // The election is starting.
    START_ELECTION: "start-election",
    // Any client can send a PING to the leader
    PING: "ping",
    // Only the leader responds to a ping, and it responds with a pong.
    PONG: "pong",
    // A command sent by a client to the leader.
    COMMAND: "command",
    // The leader has received new state from the remote peer.
    STATE: "state"
};
const channel_name = "braid-leadertab";
// We can basically make this as low as we want.
// Since the leader tab has a websocket open (if alive), it can instantly respond to our ping
// and it doesn't use timers.
const ping_timeout = 200;
const db_name = "braid-db";
// This table stores the state of the braid
const db_network_store = "braid-network";
// This table is just a mutex
const db_election_store = "election";
// Every subscribed key gets a localstorage entry of the form prefix_key
// This var is the prefix used (with the separator attached)
const ls_sub_prefix = "braidsub" + "_";
module.exports = require["leadertab-shell"] = function(url) {
    // Our leaderId, probably not actually needed.
    const id = util.random_id();
    // Timeout handle for leader activity
    let leader_alive_id;
    // The channel over which we will broadcast state and commands
    const channel = new BroadcastChannel(channel_name);
    // Buffer for commands received during leader initialization
    let command_queue = [];
    // Until we're sure who the leader is, we want to buffer things.
    let state = states.ELECTING;
    // Try to open the DB, creating both object stores on first run / upgrade.
    // NOTE(review): `idb` is assumed to be the global from the idb library — confirm.
    const dbPromise = idb.openDB(db_name, 4, { upgrade(db) {
        if (!db.objectStoreNames.contains(db_network_store))
            db.createObjectStore(db_network_store);
        if (!db.objectStoreNames.contains(db_election_store))
            db.createObjectStore(db_election_store);
    }})
// Try to become the leader ASAP.
// Bug fix: this was `dbPromise.then(becomeLeader())`, which *invoked*
// becomeLeader immediately and passed its returned promise to .then()
// (where it was ignored). Pass a function so it runs after the DB opens.
dbPromise.then(() => becomeLeader());
// Stuff for the leader (only populated once this tab wins an election)
let node;
let socket;
// The pipe.id for each registered subscription callback
// This is a local variable because when the connection is migrated, subscriptions
// will be recreated with new IDs.
// The pipe created in websocket-client.js is capable of managing subscriptions,
// but to use it we'd have to store the pipe in the db.
// TODO: Storing the pipe in the db might actually be good
let remote_get_handlers = {};
// Defaults registered via braidShell.default; applied to the node on election
let local_defaults = {};
/**
 * Route an incoming message to various handlers
 */
channel.addEventListener('message', (event) => {
    // A disabled tab ignores the channel entirely
    if (state === states.DISABLED)
        return;
    const msg = event.data;
    if (msg.type === signal_types.COMMAND) {
        // Communication about braid objects: only handled if we might lead
        if (state !== states.CLIENT)
            handleCommand(msg);
    } else if (msg.type === signal_types.STATE) {
        recvState(msg);
    } else if (msg.type === signal_types.PING) {
        // Leader-alive verification: only a (soon-to-be) leader answers
        if (state === states.LEADER || state === states.ELECTED)
            channel.postMessage({type: signal_types.PONG})
    } else if (msg.type === signal_types.PONG) {
        clearTimeout(leader_alive_id);
    } else if (msg.type === signal_types.LEADER_UNLOADING) {
        // Leader changing: everyone re-enters the electing state
        state = states.ELECTING;
    } else if (msg.type === signal_types.START_ELECTION) {
        if (state === states.ELECTING)
            becomeLeader();
    } else {
        console.warn("Unknown signal type in message", msg);
    }
})
/**
 * When the leader tab is closed, it will inform other clients and start an election
 */
async function startElection(local_eligible) {
    // Tell everyone the leader seat is being vacated
    channel.postMessage({type: signal_types.LEADER_UNLOADING});
    if (local_eligible)
        state = states.ELECTING;
    // Release the leader mutex in the db
    const db = await dbPromise;
    try {
        await db.delete(db_election_store, "leader");
    } catch (err) {
        console.error("Failed to delete leader. \nThis is most likely because someone else managed to do it first.");
        console.error(err);
    }
    // Kick off the election, and run for office ourselves if eligible
    channel.postMessage({type: signal_types.START_ELECTION});
    if (local_eligible)
        becomeLeader();
}
// Shut this tab down cleanly: trigger an election if we were leading,
// drop any open socket, and stop processing channel traffic.
function resign() {
    const was_leading = state === states.LEADER || state === states.ELECTED;
    if (was_leading) {
        // TODO: Is there a way to make sure the browser doesn't shut down the JS thread
        // before we've had a chance to call for an election?
        startElection();
    }
    // The only case in which we'll have a socket and not be the leader
    // is if we were the leader and we were impeached for inactivity
    if (socket)
        socket.disable();
    state = states.DISABLED;
}
/**
 * Using the electionstore as a mutex, attempt to set ourselves as the leader.
 * On success, prepare the leader responsibilities.
 * On failure, make ourselves a client.
 */
async function becomeLeader() {
    console.log("Trying to become leader...")
    // Try to set ourselves as the leader
    try {
        const db = await dbPromise;
        const tx = db.transaction(db_election_store, "readwrite");
        // add() rejects if the "leader" key already exists, so this
        // transaction is an atomic test-and-set across tabs.
        await Promise.all([
            tx.store.add(id, "leader"),
            tx.done
        ]);
    } catch (e) {
        // A ConstraintError/AbortError just means someone beat us to it;
        // anything else is a genuine failure worth logging.
        if (e.name !== 'ConstraintError' && e.name !== 'AbortError')
            console.error(e);
        // So we're a client.
        state = states.CLIENT;
        console.log("We're a client.")
        // We can also forget the command queue.
        command_queue.length = 0;
        // Finally, check the leader for activity.
        pingLeader();
        return;
    }
    console.log("We became the leader.")
    // If we get here, then we successfully added our id to the store, making us the leader.
    state = states.ELECTED;
    // Create a node
    node = braidShell.node = require("braid.js")();
    // Fast forward the node using the db
    await store(node, {
        async get(key) {
            return (await dbPromise).get(db_network_store, key);
        },
        async set(key, data) {
            return (await dbPromise).put(db_network_store, data, key);
        },
        async del(key) {
            return (await dbPromise).delete(db_network_store, key);
        },
        async list_keys() {
            return (await dbPromise).getAllKeys(db_network_store);
        }
    });
    // Apply any defaults that were registered before we became the leader
    Object.entries(local_defaults)
        .map(([key, value]) => node.default(key, value))
    // Connect the node to the network
    socket = require(url.startsWith("http") ? 'http-client.js' : 'websocket-client.js')({node, url});
    socket.addEventListener("connect", () => {
        // Resend GETs that we might have lost while migrating
        Object.keys(localStorage)
            .filter(k => k.startsWith(ls_sub_prefix))
            .forEach(storage_key => {
                let braid_key = storage_key.substring(ls_sub_prefix.length);
                // see https://stackoverflow.com/q/12862624
                if ((+localStorage.getItem(storage_key)) > 0)
                    remote_get_handlers[braid_key] = subscribe(braid_key)
            })
        // Now we're done, so we can start leading.
        state = states.LEADER;
        // Do anything that we might have queued up during the election.
        while (command_queue.length)
            handleCommand(command_queue.shift());
    });
    socket.enable();
}
/**
 * Create a subscription to a remote key, and send the results over the broacast channel.
 */
function subscribe(key) {
    if (remote_get_handlers.hasOwnProperty(key))
        throw `Attempted double-subscription of ${key}`
    // Fires whenever the node produces a new value for `key`
    const on_version = (val) => {
        const outMessage = {type: signal_types.STATE, key, val};
        // Send it to everyone else...
        channel.postMessage(outMessage);
        // ...and receive it ourselves
        recvState(outMessage);
    };
    node.get(key, on_version);
    // node.get attaches a pipe to the callback; its id identifies this sub
    return on_version.pipe.id;
}
/**
 * Apply commands sent over the broadcast channel to the node.
 */
function handleCommand(command) {
    // During the election, we don't know who will end up as the leader.
    // If it could be us, we want to enqueue messages, and process or discard them later.
    if (state === states.ELECTING || state === states.ELECTED) {
        command_queue.push(command);
        return;
    }
    // Have the node receive the command
    switch (command.method) {
        case "get": {
            let ls_sub_key = ls_sub_prefix + command.key
            // Localstorage returns null for unknown properties
            // and +null == 0
            let sub_count = +localStorage.getItem(ls_sub_key);
            // Post-increment: the comparison sees the old count, but the
            // value stored below is the incremented one.
            if (sub_count++ === 0)
                remote_get_handlers[command.key] = subscribe(command.key);
            else
                // Already subscribed upstream: just replay the current value
                channel.postMessage({
                    type: signal_types.STATE,
                    key: command.key,
                    val: node.resource_at(command.key).mergeable.read()
                })
            localStorage.setItem(ls_sub_key, sub_count);
            break;
        }
        case "set":
            node.setPatch(command.key, command.patch);
            break;
        case "forget": {
            // This is going to look very similar to the "get" code
            let ls_sub_key = ls_sub_prefix + command.key
            let sub_count = +localStorage.getItem(ls_sub_key);
            if (sub_count <= 0)
                throw `Can't unsub from ${command.key} because we aren't subscribed to it`
            let id = remote_get_handlers[command.key];
            // If this was the last sub, send the forget upstream
            if (--sub_count === 0) {
                node.forget(command.key, {pipe: {id}});
                delete remote_get_handlers[command.key];
            }
            localStorage.setItem(ls_sub_key, sub_count)
            break;
        }
        default:
            console.warn("Can't handle message", command);
    }
}
/**
 * Send a command when requested by the local frontend.
 */
function send(message) {
    message.type = signal_types.COMMAND;
    const maybe_client = state === states.CLIENT || state === states.ELECTING;
    // Broadcast unless we're definitely the leader
    if (maybe_client)
        channel.postMessage(message);
    // Handle locally unless we're definitely a client
    if (state !== states.CLIENT)
        handleCommand(message);
}
/**
 * Inform the frontend of new state
 */
function recvState(message) {
    // Only deliver to callbacks registered for this exact key
    if (!local_get_handlers.hasOwnProperty(message.key))
        return;
    for (const f of local_get_handlers[message.key])
        f(message.val);
}
/**
 * Ping the leader to make sure it's alive
 */
function pingLeader(time) {
    const can_ping = state === states.CLIENT || state === states.ELECTING;
    // Only visible client/electing tabs bother checking on the leader
    if (!can_ping || document.visibilityState !== "visible")
        return;
    clearTimeout(leader_alive_id);
    channel.postMessage({type: signal_types.PING});
    // If no PONG arrives in time, start the election with us as a candidate
    leader_alive_id = setTimeout(() => startElection(true), time || ping_timeout);
}
// Re-check leader liveness whenever this tab becomes visible again
document.addEventListener("visibilitychange", () => pingLeader(), false);
// Bind the shell methods
let braidShell = {};
// Per-key lists of callbacks registered by the local frontend
let local_get_handlers = {};
braidShell.ping = pingLeader;
// It is the responsibility of the programmer to call close() before the page unloads!
braidShell.close = resign;
// Allow the frontend to get the state
braidShell.getState = () => state;
// Subscribe the local frontend to a key; cb fires with each new value.
braidShell.get = (key, cb) => {
    // TODO
    if (!cb)
        throw "callback is required when using leadertab"
    // Tag the callback so braidShell.forget can locate it later
    cb.id = util.random_id();
    const existing = local_get_handlers[key];
    if (existing) {
        existing.push(cb);
    } else {
        // First local subscriber: register the list and ask for the key
        local_get_handlers[key] = [cb];
        send({method: "get", key: key})
    }
};
// Replace the whole value: expressed as a single root `=` patch
braidShell.set = (key, value) => {
    const replace_all = `= ${JSON.stringify(value)}`;
    send({method: "set", key, patch: [replace_all]});
};
// Forward pre-made patches as-is
braidShell.setPatch = (key, patch) => {
    send({method: "set", key, patch});
};
// Remove a frontend callback; when the last one goes, unsubscribe the key.
braidShell.forget = (key, cb) => {
    let handlers = local_get_handlers[key];
    // Bug fix: forgetting a key nobody subscribed to used to throw a
    // TypeError on the undefined handler list; treat it as a no-op.
    if (!handlers)
        return;
    let index = handlers.findIndex(e => e.id === cb.id);
    if (index == -1)
        return;
    handlers.splice(index, 1);
    // Last local subscriber gone: tell the leader to drop the subscription
    if (handlers.length == 0)
        send({method: "forget", key});
};
// Register a default value for a key: remembered for future nodes, and
// applied immediately when this tab currently holds (or is taking) leadership.
braidShell.default = (key, val) => {
    local_defaults[key] = val;
    const leading = state === states.LEADER || state === states.ELECTED;
    if (leading && node)
        node.default(key, val);
}
return braidShell;
};
================================================
FILE: kernel/llww.js
================================================
module.exports = require.llww = (resource) => {
resource.value = undefined
return {
add_version (version, parents, patches) {
patches.forEach(patch => apply_patch(patch, resource))
},
read (version) {
assert(!version)
return resource.value
},
generate_braid (versions) {
if (resource.value === undefined)
return []
assert(!versions || is_current_version(versions, resource))
return [{
patches: [` = ${JSON.stringify(resource.value)}`]
}]
}
}
}
// True iff `versions` names exactly the resource's current frontier:
// same number of keys, and every key maps to `true` in current_version.
var is_current_version = (versions, resource) => {
    var ours = Object.keys(resource.current_version)
    var theirs = Object.keys(versions)
    if (theirs.length !== ours.length)
        return false
    return theirs.every(v => resource.current_version[v] === true)
}
var parse_patch = require('../../../util/utilities.js').parse_patch
// Apply a single parsed patch to resource.value.
// A patch with a non-empty path mutates the addressed field in place;
// an empty path replaces the whole value.
function apply_patch (patch, resource) {
    // Todo: Handle slices
    var parse = parse_patch(patch)
    console.log('applying', {parse, to: resource.value})
    if (parse.path.length > 0) {
        // Walk down to the parent of the addressed field...
        var target = resource.value
        for (var i = 0; i < parse.path.length - 1; i++)
            // Bug fix: this read `target[p]`, where `p` was never defined
            // (a ReferenceError, or an accidental global from elsewhere).
            target = target[parse.path[i]]
        // ...then assign the final path segment.
        // Bug fix: this wrote `target[parse.patch.length]`; `parse.patch`
        // does not exist — the intent is to index by the last path element.
        target[parse.path[parse.path.length - 1]] = parse.value
    }
    else
        resource.value = parse.value
}
================================================
FILE: kernel/node.js
================================================
u = require('../util/utilities.js')
module.exports = require.node = function create_node(node_data = {}) {
var node = {}
// (Re)initialize the node from a plain-data snapshot; also called once below.
node.init = (node_data) => {
    node.pid = node_data.pid || u.random_id()
    node.resources = node_data.resources || {}
    // Rehydrate each stored resource into a live resource object
    for (var key of Object.keys(node.resources)) {
        node.resources[key] = create_resource(node.resources[key])
    }
    // null explicitly opts out of the default; undefined falls through to it
    if (node_data.fissure_lifetime !== null)
        node.fissure_lifetime = node_data.fissure_lifetime
    if (node.fissure_lifetime === undefined)
        node.fissure_lifetime = 1000 * 60 * 60 * 8 // Default to 8 hours
    node.max_fissures = node_data.max_fissures
    node.defaults = Object.assign(u.dict(), node.defaults || {})
    node.default_patterns = node.default_patterns || []
    // Event listeners and error listeners
    node.ons = []
    node.on_errors = []
    node.incoming_subscriptions = u.one_to_many() // Maps `key' to `pipes' subscribed to our key
    node.antimatter = require('./antimatter')(node)
    node.protocol_errors = require('./errors' )(node)
}
node.init(node_data)
// Look up (lazily creating) the resource for a key. Keys must be strings.
node.resource_at = (key) => {
    if (typeof key !== 'string')
        throw (JSON.stringify(key) + ' is not a key!')
    var resource = node.resources[key]
    if (!resource) {
        resource = create_resource()
        node.resources[key] = resource
    }
    return resource
}
// Sentinel origin used for gets that carry no callback/subscription
var default_pipe = {id: 'null-pipe'}
// Can be called as:
//  - get(key)
//  - get(key, cb)
//  - get({key, origin, ...})
node.get = (...args) => {
    var key, version, parents, subscribe, origin
    // First rewrite the arguments if called as get(key) or get(key, cb)
    if (typeof args[0] === 'string') {
        key = args[0]
        var cb = args[1]
        // Wrap the callback in a synthetic pipe whose send() filters for
        // data-bearing messages and invokes cb with the merged value.
        origin = (cb
            ? {id: u.random_id(), send(args) {
                // We have new data with every 'set' or 'welcome' message
                if ((args.method === 'set' || args.method === 'welcome')
                    && (node.resource_at(key).weve_been_welcomed
                        // But we only wanna return once we have
                        // applied any relevant default. We know
                        // the default has been applied because
                        // there will be at least one version.
                        && !(default_val_for(key)
                            && !node.current_version(key)))) {
                    // Let's also ensure this doesn't run until
                    // (weve_been_welcomed || zero get handlers are registered)
                    // And if there is a .default out there, then
                    // make sure the state has at least one version
                    // before calling.
                    cb(node.resource_at(key).mergeable.read())}}}
            : default_pipe)
        // Expose the pipe on the callback so forget(key, cb) can find it
        if (cb) cb.pipe = origin
    }
    else {
        // Else each parameter is passed explicitly
        ({key, version, parents, subscribe, origin} = args[0])
    }
    // Set defaults
    if (!version)
        // We might default keep_alive to false in a future version
        subscribe = subscribe || {keep_alive: true}
    if (!origin)
        origin = {id: u.random_id()}
    // Define handy variables
    var resource = node.resource_at(key)
    // Handle errors
    try {
        node.protocol_errors.get({...args, key, subscribe, version, parents, origin})
    }
    catch (errors) { return errors }
    node.ons.forEach(on => on('get', {key, version, parents, subscribe, origin}))
    // Now record this subscription to the bus
    node.incoming_subscriptions.add(key, origin.id, origin)
    // ...and bind the origin pipe to future sets
    node.bind(key, origin)
    // If this is the first subscription, fire the .on_get handlers
    if (node.incoming_subscriptions.count(key) === 1) {
        log('node.get:', node.pid, 'firing .on_get for',
            node.bindings(key).length, 'pipes!')
        // This one is getting called afterward
        node.bindings(key).forEach(pipe => {
            // Resume from the newest fissure we recorded against this peer
            var best_t = -Infinity
            var best_parents = null
            Object.values(node.resource_at(key).fissures).forEach(f => {
                if (f.a == node.pid && f.b == pipe.remote_peer && f.time > best_t) {
                    best_t = f.time
                    best_parents = f.versions
                }
            })
            pipe.send && pipe.send({
                method:'get', key, version, parents: best_parents, subscribe
            })
        })
    }
    // Now if the person connecting with us wants to be a citizen, they'll
    // set "pid", and we'll want to send them a "get" as well so that we
    // can learn about their updates -- of course, when they get that get,
    // we don't want an echo war of gets begetting gets, so when someone
    // sends the initial get, they set "initial" to true, but we respond
    // with a get with initial not set to true
    // Ok, now if we're going to be sending this person updates, we should
    // start by catching them up to our current state, which we'll do by
    // sending a "welcome". "generate_braid" calculates the versions
    // comprising this welcome (we need to calculate them because we store
    // the versions inside a space dag, and we need to pull them out...
    // note that it wouldn't work to just keep the versions around on the
    // side, because we also prune the space dag, meaning that the
    // versions generated here may be different than the version we
    // originally received, though hopefully no versions already known to
    // this incoming peer will have been modified, or if they have been,
    // hopefully those versions are deep enough in the incoming peer's
    // version dag that they are not the direct parents of any new edits
    // made by them... we strive to enforce this fact with the pruning
    // algorithm)
    var welcome_msg = node.create_welcome_message(key, parents)
    // Remember this subscription from origin so that we can fissure if
    // our connection to origin breaks
    if (u.has_keep_alive(origin, key))
        resource.keepalive_peers[origin.id] = {
            id: origin.id,
            connection: origin.connection,
            remote_peer: origin.remote_peer
        }
    // G: ok, here we actually send out the welcome
    origin.send && origin.send(welcome_msg)
    return resource.mergeable.read(version)
}
// Build the 'welcome' message that catches a new subscriber up:
// the braid of versions since `parents`, plus all known fissures.
node.create_welcome_message = (key, parents) => {
    var resource = node.resource_at(key)
    // Pull the versions comprising this welcome out of the space dag
    var versions = resource.mergeable.generate_braid(parents)
    // G: oh yes, we also send them all of our fissures, so they can know
    // to keep those versions alive
    var fissures = Object.values(resource.fissures)
    // here we are setting "parents" equal to the leaves (aka "frontier")
    // of all ancestors of parents
    //
    // Mike asks: Why not just have parents? I notice it triggers a
    // desync bug in one of the websocket trials when I remove this line.
    var frontier = {}
    if (parents && Object.keys(parents).length)
        frontier = resource.get_leaves(resource.ancestors(parents, true))
    return {method: 'welcome', key, versions, fissures, parents: frontier}
}
// Incoming protocol error: just notify the registered error listeners.
node.error = ({key, type, in_response_to, origin}) => {
    for (var f of node.on_errors)
        f(key, origin)
}
// Can be called as:
//  - set(key, val)                      // Set key to val
//  - set(key, null, '= "foo"')          // Patch with a patch
//  - set(key, null, ['= "foo"', ...])   // Patch with multiple patches
//  - set({key, patches, origin, ...})
// Returns the version id on success, an errors object on protocol error,
// or undefined when a parent is missing.
node.set = (...args) => {
    var key, patches, version, parents, origin
    // First rewrite the arguments if called as set(key, ...)
    if (typeof args[0] === 'string') {
        key = args[0]
        patches = args[2]
        if (typeof patches === 'string')
            patches = [patches]
        if (!patches)
            patches = ['= ' + JSON.stringify(args[1])]
    }
    else {
        // Else each parameter is passed explicitly
        ({key, patches, version, parents, origin} = args[0])
    }
    var resource = node.resource_at(key)
    // Set defaults
    if (!version) version = u.random_id()
    if (!parents) parents = {...resource.current_version}
    // Catch protocol errors
    try {
        node.protocol_errors.set({...args, key, version, parents, patches, origin})
    }
    catch (errors) { return errors }
    log('set:', {key, version, parents, patches, origin})
    // Reject sets whose parents we haven't seen yet.
    // Bug fix: the loop variable was `for (p in parents)`, which leaked
    // an implicit global `p`; declared with `var` now.
    for (var p in parents) {
        if (!resource.time_dag[p]) {
            // Todo: make this work with origin == null
            origin && origin.send && origin.send({
                method: 'error',
                key,
                type: 'cannot merge: missing parents',
                in_response_to: {
                    method: 'set',
                    key, patches, version, parents
                }
            })
            node.on_errors.forEach(f => f(key, origin))
            return
        }
    }
    node.ons.forEach(on => on('set', {key, patches, version, parents, origin}))
    // Cool, someone is giving us a new version to add to our
    // datastructure. it might seem like we would just go ahead and add
    // it, but instead we only add it under certain conditions, namely one
    // of the following must be true:
    //
    // !origin : in this case there is no origin, meaning the version was
    // created locally, so we definitely want to add it.
    //
    // !resource.time_dag[version] : in this case the version must have come
    // from someone else (or !origin would be true), but we don't have
    // the version ourselves (otherwise it would be inside our time_dag),
    // so we want to add this new version we haven't seen before.
    var is_new = !origin                        // Was created locally
        || !resource.time_dag[version]          // Or we don't have it yet
    if (is_new) {
        // G: so we're going to go ahead and add this version to our
        // datastructure, step 1 is to call "add_version" on the resource..
        resource.add_version(version, parents, patches)
        // G: and now for the forwarding of the version to all our peers,
        // (unless we received this "set" from one of our peers,
        // in which case we don't want to send it back to them)
        log('set: broadcasting to',
            node.bindings(key)
                .filter(p => p.send && (!origin || p.id !== origin.id))
                .map (p => p.id),
            'pipes from', origin && origin.id)
        node.bindings(key).forEach(pipe => {
            if (pipe.send && (!origin || (pipe.id !== origin.id))) {
                log('set: sending now from', node.pid, pipe.type)
                pipe.send({method: 'set',
                           key, patches, version, parents})
            }
        })
    }
    node.antimatter.set({
        ...args,
        key, patches, version, parents, origin, is_new
    })
    return version
}
node.set_patch = node.setPatch = (key, patch) => node.set({key, patches: [patch]})
// Todo:
//  - Rename min_leaves and unack_boundary to unack_from and unack_to
node.welcome = (args) => {
    var {key, versions, fissures, unack_boundary, min_leaves, parents, origin} = args
    // Note: `versions` is actually array of set messages.
    // Catch protocol errors
    try {
        node.protocol_errors.welcome(args)
    }
    catch (errors) { return errors }
    var resource = node.resource_at(key)
    // let people know about the welcome
    node.ons.forEach(
        on => on('welcome', {key, versions, fissures, unack_boundary, min_leaves, origin})
    )
    // Some of the incoming versions we may already have. So one might
    // ask, why don't we just filter the versions according to which ones
    // we already have? why this versions_to_add nonsense? The issue is
    // that there may be versions which we don't have, but that we don't
    // want to add either, presumably because we pruned them, and this
    // code seeks to filter out such versions. The basic strategy is that
    // for each incoming version, if we already have that version, not
    // only do we want to not add it, but we don't want to add any
    // incoming ancestors of that version either (because we must already
    // have them, or else we did have them, and pruned them)
    var versions_to_add = {}
    versions.forEach(v => versions_to_add[v.version] = v.parents)
    versions.forEach(v => {
        // For each incoming version...
        // ... if we have this version already:
        if (resource.time_dag[v.version]) {
            // Then remove it and its ancestors from our "stuff to add"
            remove_ancestors(v.version)
            function remove_ancestors (v) {
                if (versions_to_add[v]) {
                    Object.keys(versions_to_add[v]).forEach(remove_ancestors)
                    delete versions_to_add[v]
                }
            }
        }
    })
    // Now versions_to_add will only contain truthy values for versions
    // which we really do want to add (they are new to us, and they are
    // not repeats of some version we had in the past, but pruned away)
    var added_versions = []
    for (var v of versions) {
        if (versions_to_add[v.version]) {
            // Every parent must already exist in our dag, or the welcome
            // cannot be merged at all
            if (!Object.keys(v.parents).every(p => resource.time_dag[p]))
                return send_error()
            resource.add_version(v.version, v.parents, v.patches, v.hint)
            added_versions.push(v)
        }
    }
    // Report an unmergeable welcome back to its sender
    function send_error() {
        origin.send && origin.send({
            method: 'error',
            key,
            type: 'cannot merge: missing necessary versions',
            in_response_to: {
                method: 'welcome',
                key, versions, fissures, unack_boundary, min_leaves
            }
        })
        node.on_errors.forEach(f => f(key, origin))
    }
    // Let's also check to make sure we have the min_leaves and unack_boundary,
    // if they are specified..
    if (((min_leaves && Object.keys(min_leaves).some(k => !resource.time_dag[k]))
         || (unack_boundary && Object.keys(unack_boundary).some(k => !resource.time_dag[k]))))
        return send_error()
    node.antimatter.welcome({...args, versions_to_add, added_versions})
    // Now that we processed the welcome, set defaults if we have one
    var default_val = default_val_for(key)
    if (default_val && !node.current_version(key)) {
        node.set({key, patches: [` = ${JSON.stringify(default_val)}`], version: 'default_version', parents: {}})
    }
}
// Can be called as:
//  - forget(key, cb), with the same cb passed to get(key, cb)
//  - forget({key, origin})
node.forget = (...args) => {
    var key, origin, cb
    if (typeof(args[0]) === 'string') {
        key = args[0]
        cb = args[1]
        // get(key, cb) stored the synthetic pipe on the callback
        origin = cb.pipe
    } else {
        ({key, origin} = args[0])
    }
    log(`forget: ${node.pid}, ${key}->${origin.id}`)
    // Catch protocol errors
    try {
        node.protocol_errors.forget({...args, key, origin})
    }
    catch (errors) { return errors }
    node.ons.forEach(on => on('forget', {key, origin}))
    var resource = node.resource_at(key)
    // Drop all bookkeeping for this subscriber
    delete resource.keepalive_peers[origin.id]
    node.unbind(key, origin)
    node.incoming_subscriptions.delete(key, origin.id)
    // todo: what are the correct conditions to send the forget?
    // for now, we just support the hub-spoke model, where only clients
    // send forget.
    // here is what the todo said before:
    //   TODO: if this is the last subscription, send forget to all gets_out
    //   origin.send({method: 'forget', key})
    if (cb && node.incoming_subscriptions.count(key) == 0) {
        node.bindings(key).forEach(pipe => {
            pipe.send && pipe.send({
                method:'forget', key, origin
            })
        })
    }
}
// Process an incoming ack message: validate it, fire hooks, then let
// the antimatter algorithm advance its acknowledgement boundaries.
node.ack = (args) => {
    var {key, valid, seen, version, origin} = args
    // Reject malformed messages; the error object is returned to the caller
    try {
        node.protocol_errors.ack(args)
    } catch (errors) {
        return errors
    }
    // Fire the event hooks, then hand off to antimatter
    node.ons.forEach(on => on('ack', {key, valid, seen, version, origin}))
    log('node.ack: Acking!!!!', {key, seen, version, origin})
    node.antimatter.ack(args)
}
// Process an incoming fissure message: validate, fire hooks, forward
// to the antimatter algorithm.
node.fissure = ({key, fissure, origin}) => {
    // Validation first; malformed messages are returned as errors
    try {
        node.protocol_errors.fissure({key, fissure, origin})
    } catch (errors) {
        return errors
    }
    // Tell the hooks, then let antimatter record the fissure
    node.ons.forEach(on => on('fissure', {key, fissure, origin}))
    node.antimatter.fissure({key, fissure, origin})
}
// Handle a peer disconnection for a key.  Unbinds the pipe (if bound)
// and lets the antimatter algorithm create fissures as needed.
node.disconnected = ({key, name, versions, parents, time, origin}) => {
    // Todo:
    //   - rename "name" to "fissure".
    //   - rename "time" to "disconnect_time"
    // Default the disconnect time to the present moment
    time = time || Date.now()
    node.ons.forEach(on => on('disconnected', {key, name, versions, parents, time, origin}))
    // Unbind the origin, but only if it is actually bound
    var is_bound = node.bindings(key).some(p => p.id == origin.id)
    if (is_bound)
        node.unbind(key, origin)
    node.antimatter.disconnected({key, name, versions, parents, time, origin})
}
// Deleting a key is not implemented yet; this is a placeholder.
node.delete = () => {
    // NOT IMPLEMENTED: idea: use "undefined" to represent deletion
    // update: we now have a {type: "deleted"} thing (like {type: "location"}),
    // may be useful for this
}
// The current version(s) of a key, joined with '-', or null if none yet
node.current_version = (key) => {
    var leaves = Object.keys(node.resource_at(key).current_version)
    return leaves.join('-') || null
}
// Every version id in the key's time dag
node.versions = (key) => Object.keys(node.resource_at(key).time_dag)
// All fissures for a key, with `versions` and `parents` reformatted
// from {id: true} maps into arrays
node.fissures = (key) =>
    Object.values(node.resource_at(key).fissures).map(fiss => {
        return {...fiss,
                parents: Object.keys(fiss.parents),
                versions: Object.keys(fiss.versions)}
    })
// Return every fissure that does not yet have its matching counterpart
// (a fissure on the same connection with `a` and `b` swapped), with
// `parents` and `versions` reformatted as arrays.
node.unmatched_fissures = (key) => {
    var result = []
    var fissures = node.resource_at(key).fissures
    outer_loop:
    // Bugfix: the loop variables used to be implicit globals
    for (var fiss_key in fissures) {
        for (var other_key in fissures) {
            if ( fissures[fiss_key].conn === fissures[other_key].conn
                 && fissures[fiss_key].a === fissures[other_key].b
                 && fissures[fiss_key].b === fissures[other_key].a)
                continue outer_loop
        }
        var fiss = fissures[fiss_key]
        result.push({...fiss,
                     // Reformat `versions` and `parents` as arrays
                     parents: Object.keys(fiss.parents),
                     versions: Object.keys(fiss.versions)})
    }
    return result
}
// Register a default value for a key.  A key ending in '*' registers a
// default for an entire key prefix; wildcard defaults are stored as
// functions of the full key.
node.default = (key, val) => {
    var is_wildcard = key[key.length-1] === '*'
    if (is_wildcard) {
        // Wildcard vals must be functions; wrap plain values
        var fn = (typeof val === 'function') ? val : (() => val)
        node.default_patterns[key.substr(0, key.length-1)] = fn
    } else
        node.defaults[key] = val
}
// Look up the default value registered for `key`, if any.
// Exact-match defaults take precedence over wildcard patterns; wildcard
// patterns match by key prefix and compute their value from the full key.
function default_val_for (key) {
    if (key in node.defaults)
        return node.defaults[key]
    // Bugfix: `pattern` used to be an implicit global
    for (var pattern in node.default_patterns)
        if (pattern === key.substr(0, pattern.length))
            return node.default_patterns[pattern](key)
}
node._default_val_for = default_val_for;
// Create (or re-hydrate) the in-memory state for one key.  Accepts a
// partially-populated resource (e.g. one loaded from disk) and fills in
// any missing fields and methods.
function create_resource(resource = {}) {
    // The version history
    if (!resource.time_dag) resource.time_dag = {}
    if (!resource.current_version) resource.current_version = {}
    if (!resource.version_cache) resource.version_cache = {}
    // Record a new version in the time dag.  No-ops if the version
    // already exists, or if a parentless version arrives once history
    // already exists (only the very first version may lack parents).
    resource.add_version = (version, parents, patches, hint) => {
        if (resource.time_dag[version])
            return
        if (!Object.keys(parents).length
            && Object.keys(resource.time_dag).length)
            return
        resource.time_dag[version] = {...parents}
        // TODO: Store hint in the version_cache; not sort_keys
        var sort_keys = (hint && hint.sort_keys) || undefined
        // Deep-copy into the cache so later mutation can't corrupt it
        resource.version_cache[version] = JSON.parse(JSON.stringify({
            version, parents, patches, sort_keys
        }))
        // The new version supersedes its parents among the current leaves
        Object.keys(parents).forEach(k => {
            if (resource.current_version[k])
                delete resource.current_version[k]
        })
        resource.current_version[version] = true
        resource.mergeable.add_version(version, parents, patches, hint)
    }
    // Return the set of all ancestors (including the given versions
    // themselves) as a {version: true} map.  Asserts if a version is
    // missing, unless ignore_nonexistent is set.
    resource.ancestors = (versions, ignore_nonexistent) => {
        var result = {}
        // console.log('ancestors:', versions)
        function recurse (version) {
            if (result[version]) return
            if (!resource.time_dag[version]) {
                if (ignore_nonexistent) return
                assert(false, 'The version '+version+' no existo')
            }
            result[version] = true
            Object.keys(resource.time_dag[version]).forEach(recurse)
        }
        Object.keys(versions).forEach(recurse)
        return result
    }
    // Of the given versions, return only those that are not a parent of
    // any other version in the set (i.e. the leaves).
    resource.get_leaves = (versions) => {
        var leaves = {...versions}
        Object.keys(versions).forEach(v => {
            Object.keys(resource.time_dag[v]).forEach(p => delete leaves[p])
        })
        return leaves
    }
    // A data structure that can merge simultaneous operations
    if (!resource.merge_type) resource.merge_type = 'sync9'
    resource.mergeable = require(
        `../${resource.merge_type}/${resource.merge_type}.js`
    )(resource)
    // Peers that we have sent a welcome message to
    if (!resource.keepalive_peers) resource.keepalive_peers = {}
    // Have we been welcomed yet? (Has the data loaded?)
    if (!resource.weve_been_welcomed) resource.weve_been_welcomed = false
    // Disconnections that have occurred in the network without a forget()
    if (!resource.fissures) resource.fissures = {}
    // Acknowledgement data
    if (!resource.acked_boundary) resource.acked_boundary = {}
    if (!resource.unack_boundary) resource.unack_boundary = {}
    if (!resource.acks_in_process) resource.acks_in_process = {}
    return resource
}
node.create_resource = create_resource
// ===============================================
//
// Bindings:
//
// Attaching pipes to events
//
function pattern_matcher () {
// The pipes attached to each key, maps e.g. 'get /point/3' to '/30'
var handlers = u.one_to_many()
var wildcard_handlers = [] // An array of {prefix, funk}
var matcher = {
// A set of timers, for keys to send forgets on
bind (key, pipe, allow_wildcards) {
allow_wildcards = true // temporarily
if (allow_wildcards && key[key.length-1] === '*')
wildcard_handlers.push({prefix: key, pipe: pipe})
else
handlers.add(key, pipe.id, pipe)
// Now check if the method is a get and there's a gotton
// key in this space, and if so call the handler.
},
unbind (key, pipe, allow_wildcards) {
allow_wildcards = true // temporarily
if (allow_wildcards && key[key.length-1] === '*')
// Delete wildcard connection
for (var i=0; i require('./websocket-client.js')({
...args,
node: node,
create_websocket: () => new (require('ws'))(args.url)
})
return node
}
================================================
FILE: kernel/package.json
================================================
{
"name": "braid-bus",
"version": "0.0.1",
"description": "",
"scripts": {
"test": "node test/tests.js",
"prepublish": "node ../util/braid-bundler.js"
},
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org",
"files": [
"*"
],
"main": "bus.js",
"exports": {
"require": "./bus.js",
"import": "./bus.mjs"
},
"dependencies": {
"better-sqlite3": "^5.4.3",
"parse-headers": "^2.0.4",
"ws": "^7.3.1"
}
}
================================================
FILE: kernel/pipe.js
================================================
// A pipe is a network connection that can get disconnected and reconnected.
//
// A pipe can send and receive. The user supplies a `send_function` that:
//
// • will be called from pipe.send(), and
// • will return a result to pipe.recv().
//
// When a pipe disconnects, it will automatically send out fissures. When it
// re-connects, it will automatically re-establish connections.
//
// Todo:
// • Describe the connect process and connect() function
//
module.exports = require.pipe = function create_pipe({node, id, send, connect, disconnect, type}) {
assert(node && send && connect, {node,send,connect})
id = id || u.random_id()
var ping_time = 50000
var death_time = 40000
var ping_timer = null
function on_pong() {
if (typeof(g_is_wiki_tester) != 'undefined') { return }
clearTimeout(ping_timer)
ping_timer = setTimeout(() => {
send.call(pipe, {method: 'ping'})
ping_timer = setTimeout(() => disconnect.call(this), death_time)
}, ping_time)
}
// The Pipe Object!
var pipe = {
// A pipe holds some state:
id: id,
type: type, // Only used for debugging
connection: null,
connecting: false,
remote_peer: null,
most_recent_remote_peer: null,
subscribed_keys: u.dict(),
//remote: true,
// It can Send and Receive messages
send (args) {
var we_welcomed = args.key && node.resource_at(args.key).keepalive_peers[this.id]
assert(args.method !== 'hello')
// Record new keys
if (args.method === 'get') {
assert(!this.connection
|| !this.subscribed_keys[args.key]
|| !this.subscribed_keys[args.key].we_requested,
'Duplicate get 1:', args,
{connection: this.connection,
subscription: this.subscribed_keys[args.key]})
assert(args.key, node.resource_at(args.key).mergeable)
// Initialize subscribed_keys
this.subscribed_keys[args.key] =
this.subscribed_keys[args.key] || {}
// Remember that we requested this subscription
this.subscribed_keys[args.key].we_requested = args.subscribe
// If this is the first message, let's try to connect the pipe.
if ( this.connecting) return
if (!this.connection) {
this.connecting = true
// Run the programmer's connect function
connect.call(this)
// Don't run the send code below, since we'll send this
// get when the connection completes
return
}
}
else if (args.method === 'forget') {
// Record forgotten keys
delete this.subscribed_keys[args.key].we_requested
node.unbind(args.key, this)
}
else if (args.method === 'welcome' && !args.unack_boundary) {
// If we haven't welcomed them yet, ignore this message
}
else if (!we_welcomed) {
// Oh shit, I think this is a bug. Cause if they welcomed us,
// we wanna send them shit too... but maybe we need to start
// by welcoming them.
log('gooooo away', we_welcomed)
return
}
// Clean out the origin... because we don't use that.
delete args.origin
// And now send the message
if (this.connection)
send.call(this, args)
else
log('FAILED to send, because pipe not yet connected..')
},
recv (args) {
var we_welcomed = args.key && node.resource_at(args.key).keepalive_peers[this.id]
// ping/pong system
if (args.method === 'ping') {
send.call(this, {method: 'pong'})
return
} else if (args.method === 'pong') {
on_pong()
return
}
// The hello method is only for pipes
if (args.method === 'hello') {
this.connection = (this.connection < args.connection
? this.connection : args.connection)
this.most_recent_remote_peer = this.remote_peer = args.my_name_is
// hello messages don't do anything else (they are just for
// the pipe)
return
}
if (args.method === 'welcome'
&& !we_welcomed
/*&& !this.subscribed_keys[args.key].we_requested*/) {
// Then we need to welcome them too
let parents = {...args.parents}
args.versions.forEach(v => parents[v.version] = true)
this.send(node.create_welcome_message(args.key, parents))
// Now we store a subset of this pipe in a place that will
// eventually be saved to disk. When a node comes up after a
// crash, it'll need to create and send fissures for everyone
// it's welcomed. So right here we store the info necessary
// to fissure.
let resource = node.resource_at(args.key)
resource.keepalive_peers[this.id] = {id: this.id,
connection: this.connection,
remote_peer: this.remote_peer}
}
// Remember new subscriptions from them
if (args.method === 'get') {
// assert(!(this.subscribed_keys[args.key]
// && this.subscribed_keys[args.key].they_requested),
// 'Duplicate get 2:', args,
// {subscription: this.subscribed_keys[args.key]})
// Initialize subscribed_keys
this.subscribed_keys[args.key] =
this.subscribed_keys[args.key] || {}
// Record their subscription
this.subscribed_keys[args.key].they_requested = args.subscribe
}
args.origin = this
node[args.method](args)
if (args.method === 'get')
log('pipe.recv: New remote!', this.id,
'Now we have',
node.bindings(args.key).filter(pipe => pipe.remote).length)
},
// It can Connect and Disconnect
connected () {
// console.log('pipe.connect:', this.id, this.connection || '')
if (this.connection) {
log('pipe.connect:', this.id, 'already exists! abort!')
return
}
this.connecting = false
// Create a new connection ID
this.connection = u.random_id()
// Initiate connection with peer
log('sending hello..')
send.call(this, {method: 'hello',
connection: this.connection,
my_name_is: node.pid})
// Send gets for all the subscribed keys again
for (k in this.subscribed_keys) {
// This one is getting called earlier.
//
// The send() function wants to make sure this isn't a
// duplicate request, so let's delete the old one now so
// that we can recreate it.
var subscribe = this.subscribed_keys[k].we_requested
delete this.subscribed_keys[k].we_requested
var best_t = -Infinity
var best_parents = null
Object.values(node.resource_at(k).fissures).forEach(f => {
if (f.a == node.pid && f.b == this.most_recent_remote_peer && f.time > best_t) {
best_t = f.time
best_parents = f.versions
}
})
this.send({
key: k,
subscribe: subscribe,
method: 'get',
parents: best_parents
})
}
on_pong()
},
disconnected () {
clearTimeout(ping_timer)
for (var k in this.subscribed_keys) {
if (u.has_keep_alive(this, k))
// Tell the node. It'll make fissures.
node.disconnected({key:k, origin: this})
// Drop all subscriptions not marked keep_alive
var s = this.subscribed_keys[k]
if (!(s.we_requested && s.we_requested.keep_alive ))
delete s.we_requested
if (!(s.they_requested && s.they_requested.keep_alive))
delete s.they_requested
// If both are gone, remove the whole subscription
if (!(s.we_requested || s.they_requested))
delete this.subscribed_keys[k]
}
this.connecting = false
this.connection = null
this.remote_peer = null
},
printy_stuff (key) {
return {id: this.id,
w: !!node.resource_at(key).keepalive_peers[this.id],
k_a: u.has_keep_alive(this, key),
peer: this.remote_peer,
c: !!this.connection
}
}
}
return pipe
}
================================================
FILE: kernel/readme.md
================================================
# A prototype Braid Kernel
An abstraction for distributed state.
## Status
We've built some cool algorithms in here, but it isn't cleaned up for release
yet. Mike is working on it!
## Running the code
If you have nodejs installed, then set it up with:
```
npm install
```
### Chat demo
You can run the chat server with:
```
cd demos/sync9-chat
node chat-server.js
```
Then open a web browser to `http://localhost:3009/braidchat` (for a websocket connection) or `.../braidchat?protocol=http` for a backwards-compatible http/1.1 connection.
### Wiki demo
You can run the wiki server with:
```
node demos/wiki/wiki-server.js
```
And then open `http://localhost:3009/`.
### Seeing the guts
For any command, you can tell it to print out all network traffic in a table
by adding the command-line argument `--network` to it, like this:
```
node chat-server.js --network
```
Then you'll see something like this:
```
ws: server --> C-j2lm GET {"key":"/usr","parents":null,"subscribe":{"keep_alive":true}}
ws: server --> C-j2lm WELCOME {"key":"/usr","versions":[{"version":null,"parents":{},"changes":[" = {\"B-0bnyC1mdA9\":\"FirefoxHTTP\"}"]}
ws: C-j2lm --> server WELCOME {"key":"/chat","versions":[],"fissures":[],"parents":null}
ws: C-j2lm --> server WELCOME {"key":"/usr","versions":[],"fissures":[],"parents":null}
ws: C-j2lm --> server SET {"key":"/usr","patches":["[\"B-0bnyC1mdA9\"] = \"FrefoxHTTP\""],"version":"bz2gyet9cv6","parents":{"66mn2f0vco8":true}}
```
## Running tests:
```
npm test
```
If you want to see what it's doing, print out the network traffic with:
```
npm test network
```
What if one of the trials crashes? To debug it, re-run that particular trial
with:
```
npm test solo 68
```
This will re-run trial 68, and print out debugging info so you can find the
problem and fix it.
You can also configure parameters to test at the top of `test/tests.js`.
================================================
FILE: kernel/sqlite-store.js
================================================
// create_sqlite_store(filename, tablename)
//   filename:  path of the sqlite database file
//   tablename: name of the key/value table to create/use (default: 'store')
// Returns a simple key/value store {get, set, del, list_keys} backed by
// better-sqlite3 with prepared statements.
// NOTE(review): an earlier comment here described an `options` object
// that was "passed down to store.js", but this function takes a plain
// tablename string and passes nothing down.
module.exports = require['sqlite-store'] = function create_sqlite_store(filename, tablename) {
    var db = new (require('better-sqlite3'))(filename)
    if (!tablename)
        tablename = 'store'
    // WAL mode for better concurrent read/write performance
    db.pragma('journal_mode = WAL')
    db.prepare(`create table if not exists ${tablename} (key text primary key, val text)`).run()
    // Prepare each statement once, up front
    const GET_STATEMENT = db.prepare(`select * from ${tablename} where key = ?`)
    const SET_STATEMENT = db.prepare(`replace into ${tablename} (key, val) values (?, ?)`)
    const DEL_STATEMENT = db.prepare(`delete from ${tablename} where key = ?`)
    const LIST_STATEMENT = db.prepare(`select key from ${tablename}`);
    return {
        // Returns the stored string, or undefined if absent
        get(key) {
            var row = GET_STATEMENT.get([key])
            return row && row.val
        },
        // Insert-or-replace
        set(key, data) {
            SET_STATEMENT.run([key, data])
        },
        del(key) {
            DEL_STATEMENT.run([key])
        },
        // All keys currently in the table
        list_keys() {
            return LIST_STATEMENT.all().map(x => x.key);
        }
    }
}
================================================
FILE: kernel/store.js
================================================
// options = {
// compress_if_inactive_time: 4000 // <-- default, means it will compress 4 seconds after the last edit, as long as no other edits happen
// compress_after_this_many: 10000 // <-- default, means it will compress if there are 10000 uncompressed edits
// }
// db = {
// get(key, cb)
// set(key, val, cb)
// del(key, cb)
// list_keys(cb)
//}
module.exports = require.store = function create_store(node, db, options) {
if (!options) options = {}
if (options.compress_if_inactive_time == null) options.compress_if_inactive_time = 4000
if (options.compress_after_this_many == null) options.compress_after_this_many = 10000
var inactive_timers = {}
var nexts = {}
let pid = db.get('pid');
node.pid = pid || node.pid;
// Set the node's PID, and then play back the db into the node
db.set('pid', node.pid)
fastforward()
// When something happens in the node, record it, and reset the the inactivity timer
node.ons.push((method, arg) => {
var key = arg.key
add(key, { method, arg })
var n = nexts[key]
if (typeof (g_debug_WS_messages) != 'undefined') {
if (n[1] >= options.compress_after_this_many)
g_debug_WS_messages.push(() => compress(key))
} else {
clearTimeout(inactive_timers[key])
// If we've had enough messages, compress right away
// Otherwise, compress in a few seconds
inactive_timers[key] = setTimeout(() => compress(key),
n[1] >= options.compress_after_this_many ? 0 : options.compress_if_inactive_time)
}
})
// Ensure the node knows that it's totally disconnected at startup.
Object.entries(node.resources).forEach(([key, r]) =>
Object.values(r.keepalive_peers).forEach(pipe => {
node.disconnected({ key, origin: pipe })
})
)
return Promise.all(Object.keys(nexts).map(compress)).then(_ => node);
function fastforward() {
// console.log("Fast-forwarding braid state using db...")
// For all ab:... keys
let keys = db.list_keys();
keys.filter(k => k.match(/^ab:/)).map((k) => {
let ab = db.get(k);
// Get the part after ab
// Sorry this isn't more informative, I do not understand the db format
let key = k.slice(3)
var i = find_open_index(ab, key, (val) => {
// Pass the stored braid messages to the node
let msg = JSON.parse(val)
if (!msg.method) {
node.resources[key] = node.create_resource(msg)
Object.values(node.resources[key].keepalive_peers).forEach(pipe => {
pipe.remote = true
node.bind(key, pipe)
node.incoming_subscriptions.add(key, pipe.id, pipe)
})
}
else node[msg.method](msg.arg)
})
// Set nexts once we've found the open index
nexts[key] = [ab, i];
})
}
function add(key, x) {
var n = nexts[key]
if (!n) {
db.set(`ab:${key}`, 'a');
n = nexts[key] = ['a', 0]
}
// Try to set the key as the next element in the sequence
try {
db.set(`${n[0]}:${n[1]++}:${key}`, JSON.stringify(x))
} catch (err) {
console.error(err);
console.error(`Failed to set key ${n[0]}:${n[1]++}:${key} to value`);
console.dir(x, { depth: 5 });
}
}
function compress(key) {
var n = nexts[key]
if (!n) return
var ab = (n[0] == 'a') ? 'b' : 'a'
let i = find_open_index(ab, key, (_, ii) =>
// Count up and delete
db.del(`${ab}:${ii}:${key}`)
)
// At the top
nexts[key] = [ab, 0]
add(key, node.resource_at(key));
db.set(`ab:${key}`, ab)
for (let ii = n[1] - 1; ii >= 0; ii--)
db.del(`${n[0]}:${ii}:${key}`)
}
function find_open_index(ab, key, intermediate) {
let i = 0;
let val;
while (val = db.get(`${ab}:${i}:${key}`)) {
// Do something with the lower values of i
intermediate && intermediate(val, i++);
}
return i;
}
}
================================================
FILE: kernel/test/tests.js
================================================
require('../../sync9/sync9.js')
require('../../util/utilities.js')
//show_debug = true
// Simulation parameters
var n_peers = 3
var n_steps_per_trial = 100
var n_trials = 100
var rand = null                          // deterministic RNG, seeded per trial
var random_seed_base = '000_hi_010bcdefg'
show_protocol_errors = true              // intentionally global (read elsewhere)
solo_trial = null                        // set to a trial number to run only that trial
// `npm test solo N` re-runs trial N with extra debugging output
if (!is_browser && process.argv.length >= 4 && process.argv[2] === 'solo') {
    solo_trial = parseInt(process.argv[3])
    // show_debug = true
    print_network = true
}
// show_debug = true
// print_network = true
// Shared simulation state, handed to the network drivers
var sim = {
    n_peers,
    n_steps_per_trial,
    n_trials,
    rand,
    step,
    add_peer,
    peers_dict: {},
    peers: []
}
// Visualization: real in the browser, a no-op stub under node
sim.vis = is_browser
    ? require('../demos/visualization/visualization.js')(
        {rand: Math.create_rand(''), ...sim}
    )
    : {add_frame() {}}
var vis = sim.vis
// Register a node with the simulation: track it in both the ordered
// peer list and the pid lookup table, and give it a unique alphabet.
function add_peer (node, peer_number) {
    sim.peers.push(node)
    sim.peers_dict[node.pid] = node
    make_alphabet(node, peer_number)
}
// Give each simulated peer its own distinct character set to type, so
// edits in the merged string are attributable to a specific peer.
function make_alphabet (node, peer_number) {
    var base_alphabets = [
        'abcdefghijklmnopqrstuvwxyz',
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
        '⬅︎⬇︎⬆︎',
        ''
    ]
    var letters = base_alphabets[peer_number] || ''
    // Plus 26 unicode characters unique to this peer number
    letters += Array.from(
        {length: 26},
        (_, i) => String.fromCharCode(12032 + 1000*peer_number + i)
    ).join('')
    node.letters = letters
    node.letters_i = 0
    // console.log('Node', peer_number, 'letters:', node.letters)
}
// Deep-copy a node (JSON round-trip drops its functions) and annotate
// the copy with which peers it currently has live or pending
// connections to, keyed by the remote pid.
function save_node_copy(node) {
    var snapshot = JSON.parse(JSON.stringify(node))
    snapshot.connected_to = {}
    for (var pipe of node.bindings('my_key')) {
        var to = pipe.id.split('-')[1]
        if (pipe.connecting || pipe.connection)
            snapshot.connected_to[to] = true
    }
    return snapshot
}
var num_edits = 0
// One step of the simulation: either perform a random action (edit or
// network toggle) or deliver one pending network message, then record a
// visualization frame.
function step(frame_num) {
    // Randomly choose whether to do an action vs. process the network
    if (rand() < 0.1) {
        // Do an action
        if (rand() < 0.9) {
            // Edit text
            var i = Math.floor(rand() * n_peers)
            var peer = sim.peers[i]
            // ..but only if we have at least one version already, which
            // is really to make sure we've received "root" already (but
            // we can't check for "root" since it may get pruned away)
            if (peer.resources['my_key'] &&
                Object.keys(peer.resources['my_key'].time_dag).length) {
                if (peer.letters_i >= peer.letters.length)
                    peer.letters_i = 0
                var e = create_random_edit(peer.resources['my_key'],
                                           peer.letters[peer.letters_i++])
                num_edits++
                peer.set({key: 'my_key',
                          patches: e.patches, version: e.version, parents: e.parents})
            }
            // `e` is hoisted, so this is safely undefined when no edit happened
            log(' editing', frame_num, peer.pid, e ? e.patches : '')
        } else {
            // Disconnect or reconnect
            log(' toggling network', frame_num)
            network.toggle_pipe()
        }
    } else {
        // Receive incoming network message
        if (network.receive_message) {
            log(' receiving message', frame_num)
            var i = Math.floor(rand() * n_peers)
            var peer = sim.peers[i]
            network.receive_message(peer)
        }
    }
    vis.add_frame({
        frame_num,
        peers: sim.peers.map(x => save_node_copy(x))
    })
}
// Generate a random splice edit against a resource's current text:
// choose a start position, a (biased-small) deletion length, and an
// insertion of repeated characters from `letters`.  Version ids are
// letters[0] plus a per-resource counter, so they are deterministic.
function create_random_edit(resource, letters) {
    letters = letters || 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    var str = resource.mergeable.read()
    var start = Math.floor(rand() * (str.length + 1))
    var del = Math.floor(rand() * rand() * (str.length - start + 1))
    // Insert at least one character whenever nothing is deleted
    var ins = letters[Math.floor(rand() * letters.length)].repeat(Math.floor(rand() * 4) + (del == 0 ? 1 : 0))
    // A random version id used to be generated here and immediately
    // overwritten below.  We still consume one rand() call so that the
    // deterministic random stream (and thus trial replayability) is
    // unchanged, but we no longer assign the dead value.
    rand()
    resource.next_version_id = (resource.next_version_id || 0) + 1
    var version = letters[0] + resource.next_version_id
    var patches = [`[${start}:${start + del}] = ` + JSON.stringify(ins)]
    return {
        version,
        parents : Object.assign({}, resource.current_version),
        patches
    }
}
// Reset simulation state, set up the network, subscribe every peer to
// 'my_key', and have one peer create the initial root version.
function setup_test () {
    sim.peers = []
    sim.peers_dict = {}
    if (is_browser)
        sim.rand = rand = Math.create_rand('')
    network.setup()
    // Start sending get() messages over the pipes!
    sim.peers.forEach(node => node.get({
        key: 'my_key',
        subscribe: {keep_alive: true},
        origin: {id: 'fake' + rand().toString(36).slice(2,6)}
    }))
    // Create initial root version
    {
        // There are two modes of operations.  The differentiator is that in
        // one mode, you can prune down to a single version, and in the other,
        // you can only prune down to (in the worst case) the number of
        // versions there are peers that have ever been a part of the system.
        // (But often less than that.)
        // In the first mode, you must dictate that all peers don't add
        // anything unless they've already received a version from someone
        // else, and you then need a special peer that creates the first
        // version.
        // But you can add something to a field of nothing.  There used to be
        // a root node that was always there, but now you're allowed to have a
        // version with parents where the parents is the empty set, and all
        // the algorithms are fine with that.
        // So now when we create a new timedag, a special peer will create the
        // first version and send it to everyone else.  And that's what we do
        // in the tests code right now.  And we do that so that we can prune
        // down to one node, and that tells us that the tests are working, at
        // the end of the tests.  It knows that everything should have exactly
        // one version, that's the same thing, for all peers.
        let p = sim.peers[0]
        p.set({key: 'my_key', version: 'root', parents: {}, patches: ['=""']})
        vis.add_frame({
            peers: sim.peers.map(x => save_node_copy(x))
        })
    }
}
// Check end-of-trial invariants: every peer has the resource, all
// values converged, each time dag pruned to a single version, and no
// leftover fissures.  Throws (and prints diagnostics) on failure.
function evaluate_trial (trial_num) {
    log('Ok!! Now winding things up.')
    // Make sure the resource exists on each peer
    sim.peers.forEach(x => {
        if (!x.resources.my_key) {
            console.log('missing my_key for ' + x.pid)
            total_success = false
            throw 'bad'
        }
    })
    // Do all peers have the same resulting value?
    var first_peer_val = sim.peers[0].resources.my_key.mergeable.read()
    var same_values = sim.peers.every(
        p => u.deep_equals(p.resources.my_key.mergeable.read(), first_peer_val)
    )
    // Are all time dags pruned down to a single version?
    var multiple_versions = sim.peers.some(
        p => Object.keys(p.resources.my_key.time_dag).length > 1
    )
    // Are all fissures cleaned up?
    var fissures_exist = sim.peers.some(
        p => Object.keys(p.resources.my_key.fissures).length > 0
    )
    // Were there any problems?
    total_success = same_values && !multiple_versions && !fissures_exist
    // If so, print them out
    if (show_debug || !total_success) {
        console.log('TOTAL', total_success ? 'SUCCESS' : 'FAILURE')
        sim.peers.forEach(
            n => console.log(n.pid+':', JSON.stringify(n.resources.my_key.mergeable.read()))
        )
        var results = {same_values, multiple_versions, fissures_exist}
        // Bugfix: `k` used to be an implicit global
        for (var k in results)
            console.log(k+':', results[k])
        console.log('trial_num:', trial_num)
        if (!total_success) throw 'stop'
    }
}
// Synchronous version of the simulator
// - Fast and deterministic.  For testing the core algorithm.
function run_trials () {
    // Either run just the requested trial, or every trial in sequence
    if (solo_trial) {
        run_trial(solo_trial)
        return
    }
    for (var trial = 0; trial < n_trials; trial++) {
        console.log('Running trial', network.name, trial)
        run_trial(trial)
    }
}
// Run one synchronous trial: seed the RNG so it's reproducible, set up
// the network, run every step, then drain and evaluate.
function run_trial (trial_num) {
    rand = sim.rand = Math.create_rand(random_seed_base + ':' + trial_num)
    setup_test()
    // Now do all the stuff
    for (var step_num = 0; step_num < n_steps_per_trial; step_num++) {
        log('looping', step_num)
        step(step_num)
    }
    network.wrapup()
    evaluate_trial(trial_num)
    if (network.die) network.die()
}
// Async version of the simulator
// - For testing actual network activity
run_trials.async = (cb) => {
    if (solo_trial) {
        run_trial.async(solo_trial, cb)
        return
    }
    // Run trials one after another, yielding to the event loop between them
    var trial = -1
    var next_trial = () => {
        trial++
        console.log('Running trial', network.name, trial)
        if (trial === n_trials)
            setImmediate(cb)
        else
            setImmediate(() => run_trial.async(trial, next_trial))
    }
    setTimeout(next_trial, 10)
}
// Run one trial asynchronously, one step per event-loop tick, then
// drain the network, evaluate, and tear down before calling cb.
run_trial.async = (trial_num, cb) => {
    rand = sim.rand = Math.create_rand(random_seed_base + ':' + trial_num)
    setup_test()
    var t = -1
    var run_step = () => {
        t++
        if (t !== n_steps_per_trial) {
            log(' step', t)
            step(t)
            setTimeout(run_step, 0)
            return
        }
        // All steps done: wind down and check the invariants
        network.wrapup(() => {
            evaluate_trial(trial_num)
            if (network.die)
                network.die(() => setImmediate(cb))
            else
                setImmediate(cb)
        })
    }
    run_step()
}
// The network backends to test against
var networks = [
    './virtual-p2p.js',
    './websocket-test.js'
]
var network
if (is_browser) {
    // In the browser, run the virtual network under the visualization loop
    network = require('./virtual-p2p.js')(sim)
    setup_test()
    vis.loop()
} else
    // Under node, run each backend: synchronous backends run to
    // completion inline; async ones exit the process when finished
    networks.forEach( n => {
        network = require(n)(sim)
        console.log('Running', n.substr(2), 'trials!')
        if (network.sync)
            run_trials()
        else
            run_trials.async(() => {
                console.log('Done with all trials!')
                process.exit()
            })
    })
================================================
FILE: kernel/test/virtual-p2p.js
================================================
// Tests using a virtual network
// Each peer gets an in-memory `incoming` queue; a "pipe" between two
// peers just deep-copies messages into the other peer's queue, so
// delivery order can be randomized deterministically by the simulator.
module.exports = require['virtual-p2p'] = (sim) => (
    {
        name: 'virtual',
        sync: true,
        setup () {
            for (var i = 0; i < sim.n_peers; i++) {
                // Make a peer node
                var node = require('../node.js')()
                node.pid = 'P' + (i + 1)  // Give it an ID
                node.incoming = []        // Give it an incoming message queue
                sim.add_peer(node, i)     // Give it an alphabet
            }
            // sim.peers.forEach(p => sim.peers_dict[p.pid] = p)
            // Create pipes that connect peers
            this.pipes = {}
            var create_vpipe = (from, to) => {
                var pipes = this.pipes
                var pipe = pipes[from.pid + '-' + to.pid] = require('../pipe.js')({
                    node: from,
                    id: from.pid + '-' + to.pid,
                    // The send function: deep-copy the message into the
                    // recipient's queue, to be delivered later by the sim
                    send (args) {
                        if (!this.connection) {
                            log('sim-pipe.send: starting connection cause it was null on ', this)
                            this.connected()
                        }
                        // console.log('>> ', this.id, args)
                        assert(from.pid !== to.pid)
                        args = JSON.parse(JSON.stringify(args))
                        to.incoming.push([from.pid,
                                          () => {
                                              // Deliver through the reverse pipe's recv
                                              pipes[to.pid + '-' + from.pid].recv(
                                                  JSON.parse(JSON.stringify(args)))
                                          },
                                          'msg_id:' + sim.rand().toString(36).slice(2),
                                          args.method, JSON.parse(JSON.stringify(args))])
                    },
                    // The connect functions
                    connect () { this.connected() },
                    disconnect () { this.disconnected() }
                })
                from.bind('my_key', pipe)
            }
            // Create pipes for all the peers
            for (var p1 = 0; p1 < sim.n_peers; p1++)
                for (var p2 = p1 + 1; p2 < sim.n_peers; p2++) {
                    let peer1 = sim.peers[p1],
                        peer2 = sim.peers[p2]
                    // Virtual Pipe for A -> B
                    create_vpipe(peer1, peer2)
                    // Virtual Pipe for B -> A
                    create_vpipe(peer2, peer1)
                }
        },
        // Drain all pending messages (and inject one late "joiner" edit)
        // until the network is quiet.  `notes` is an intentional global
        // read by the visualization.
        wrapup (cb) {
            var sent_joiner = false
            // Connect all the pipes together
            for (var pipe in this.pipes) {
                this.pipes[pipe].connected()
                notes = ['connecting ' + this.pipes[pipe]]
                sim.vis.add_frame({
                    t: -1,
                    peers: sim.peers.map(x => JSON.parse(JSON.stringify(x)))
                })
            }
            // Now let all the remaining incoming messages get processed
            do {
                sim.peers.forEach(p => {
                    while (p.incoming.length > 0) {
                        notes = []
                        // Process the message.
                        p.incoming.shift()[1]()
                        // That might have added messages to another peer's queue.
                        sim.vis.add_frame({
                            peer_notes: {[p.pid]: notes},
                            peers: sim.peers.map(x => JSON.parse(JSON.stringify(x)))
                        })
                    }
                })
                var more_messages_exist = sim.peers.some(p => p.incoming.length > 0)
                // Once everything's clear, make a joiner
                if (!more_messages_exist && !sent_joiner) {
                    var i = Math.floor(sim.rand() * sim.n_peers)
                    var p = sim.peers[i]
                    log('creating joiner')
                    notes = ['creating joiner']
                    // Create it!
                    // NOTE(review): positional call — looks like it expects
                    // set(key, patches?, ...); confirm against node.set
                    p.set('my_key', null, [])
                    sent_joiner = true
                    sim.vis.add_frame({
                        peer_notes: {[p.pid]: notes},
                        peers: sim.peers.map(x => JSON.parse(JSON.stringify(x)))
                    })
                    // That'll make messages exist again
                    more_messages_exist = true
                }
            } while (more_messages_exist)
            if (cb) cb()
        },
        // Deliver one random pending message to `peer`, choosing a random
        // sender but preserving per-sender message order
        receive_message (peer) {
            var inbox = peer.incoming
            if (inbox.length > 0) {
                var possible_peers = {}
                inbox.forEach(x => possible_peers[x[0]] = true)
                possible_peers = Object.keys(possible_peers)
                var chosen_peer = possible_peers[
                    Math.floor(sim.rand() * possible_peers.length)]
                // splice returns an array of removed items
                var msg = inbox.splice(inbox.findIndex(x => x[0] == chosen_peer),
                                       1)
                msg[0][1]()
            }
        },
        // Randomly disconnect (dropping in-flight messages) or reconnect
        // one pipe pair
        toggle_pipe () {
            var pipe_keys = Object.keys(this.pipes),
                random_index = Math.floor(sim.rand() * pipe_keys.length),
                random_pipe = this.pipes[pipe_keys[random_index]],
                [pid, other_pid] = pipe_keys[random_index].split('-'),
                peer = sim.peers_dict[pid],
                other_pipe = this.pipes[other_pid + '-' + pid],
                other_peer = sim.peers_dict[other_pid]
            // Toggle the pipe!  (Both directions must agree on state.)
            assert(!!random_pipe.connection === !!other_pipe.connection,
                   random_pipe.connection, other_pipe.connection)
            if (random_pipe.connection) {
                random_pipe.disconnected()
                other_pipe.disconnected()
                // Drop any messages still in flight between the two peers
                peer.incoming = peer.incoming.filter(x => x[0] !== other_pid)
                other_peer.incoming = other_peer.incoming.filter(x => x[0] !== pid)
            } else {
                random_pipe.connected()
                other_pipe.connected()
            }
        }
    }
)
================================================
FILE: kernel/test/websocket-test.js
================================================
// Tests for the braid-websocket protocol
// Builds a star topology: one real 'ws' hub server plus N-1 websocket
// clients, all driven by the simulator `sim`.
module.exports = require['websocket-test'] = (sim) => (
    {
        name: 'websocket',
        // Messages travel over real sockets, so delivery is asynchronous
        sync: false,
        // Self-signed TLS material for the test server.  Test-only and
        // long expired — never use outside these tests.
        // NOTE(review): websocket-server.js takes (node, options); passing
        // certificate/private_key as positional args in setup() below
        // appears to be ignored — confirm.
        certificate: `-----BEGIN CERTIFICATE-----
MIIDXTCCAkWgAwIBAgIJANoWGfl3pEeHMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTkwODE2MjAxNTIxWhcNMjAwODE1MjAxNTIxWjBF
MQswCQYDVQQGEwJVUzETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA1bilKJKH1axV0OLLIwg3WxXx6MMsFL3/bv2uX9+Z22uZukJsgqnR2y+6
OCLH8opczH4Now3Od+P0G4kNSn9m+T5W5bvf9bIIDmCG/04uGCvx0L8bgYA5lyMJ
aFdcfCXu1iKvUt1LdZlds2AsBfceYCB6FwsMkUODzZ7OJ6R1aXUHxQ74me/ksoxV
P7Fmv012gRJkYn5gzvrokula2Yxb+z84TP115tALYBBpLhj5WPOXSmyVo0Lf1dGQ
JfbRxvx32pxZiBPwcNre3yzKhRue99tRuPHFCQBZSkXGuT7K9bsNnPwXfAmB2VbQ
bjezmqVGv8KnwyTRWdLaEcV9cxHCnQIDAQABo1AwTjAdBgNVHQ4EFgQUOoDGcBG8
Xm/Jj+WbIYctxhGqD6owHwYDVR0jBBgwFoAUOoDGcBG8Xm/Jj+WbIYctxhGqD6ow
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAaHjdu8Hg34Zzay4djFSo
hRno4m+tiJ4UT3oLTHRGh54JFKQPeLLEY0WbhrBDyuDJrCdyjvmqpuELPPwNRdo0
Ly3fhRIxeaN8px6V0bpdj0ePDqC0ZU5It/9jVlC0OkdG2xwJygw+xNLaHb09l7rj
ZLM+tOKQEBxZCLKqc1FLlS9MIxDKaVdI2JSBDmNl+0XyFwKM6bfI3Mk8STuZXm5A
EtWvDNbLFl6TLyKDeHNRc0LQEa74xE3yhoWO3kb9phL4A1g/I7rW+B2we4N84FfT
v5C5/zn58xabUtMVeGUi/avnVz+C4HY4ZMEIQPIodtsRcZq05RQGW8ipig7QaXnD
gQ==
-----END CERTIFICATE-----
`,
        private_key: `-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDVuKUokofVrFXQ
4ssjCDdbFfHowywUvf9u/a5f35nba5m6QmyCqdHbL7o4IsfyilzMfg2jDc534/Qb
iQ1Kf2b5Plblu9/1sggOYIb/Ti4YK/HQvxuBgDmXIwloV1x8Je7WIq9S3Ut1mV2z
YCwF9x5gIHoXCwyRQ4PNns4npHVpdQfFDviZ7+SyjFU/sWa/TXaBEmRifmDO+uiS
6VrZjFv7PzhM/XXm0AtgEGkuGPlY85dKbJWjQt/V0ZAl9tHG/HfanFmIE/Bw2t7f
LMqFG57321G48cUJAFlKRca5Psr1uw2c/Bd8CYHZVtBuN7OapUa/wqfDJNFZ0toR
xX1zEcKdAgMBAAECggEAWCxLh0ec3tywsvM+V3+mRt/w49TRtOUGIyZp8IfxlAL6
c0vANNAXElTIgSxoTXoj+wHuYlzp17CmH04Vu6yAMUg01acDKPyAMl5Ek8QPZE2N
AFA36t+Z4u7DjNauA1IrDRFWP9uorCXP8Jc20mc3kvUTKbqXPr8Z+5UO/G/vOMgc
QKXPoz45EbFahTwck4TQowLeKhAF3BU5fn48zuBy055q6babV1z0LDzDIUGcZqHv
4VPMLOUp1KzpwoQd6o3wwBBttJkFqBf7US3nExdq0SkHgwE/lOKgJuSMmgAWgGm5
3iO8F+Ve84206IgmhQOMw3KZjIgWdiCW/dgVbJQsQQKBgQD4/2Wr4NYfdXqotnjT
MZCx5921nFwkMyt7JndCIs49CQ3lMGtlijRtVHGhZKVHUZr4SKKfjbOAiABPCsRL
ZhvVnhlbmUioSgfMM/Y+fkCs3DdzuJE9tVuSdyQFoblY5W1dLeuLTEI3TDTos+V7
jfKsHMqF0gAbCkt7GgVpy5vCRQKBgQDbu0ibskjwF5voMuJmJdvIv0XAi91sRTRJ
RuDrH6NPU+RrVHTHRJMtGRM5zWI4b7N0KTx+J2xaJ6J/FxbfsdThgKb99gB9j3hR
F0CK/quMjAwpezWwatHarK87c//rvmIBVL82xLe3sQKxmwdCUiyhum/4l+GN+WpZ
lfP4HU4weQKBgQD18WaekBVPu31tedb8XB/c6fZ/NTN5+iT/ni374F8vwGq+L8ZU
5F8Ggns+fCgYus1EYpJm4NMlqLANYsgi5Xem12Oaq1wuBfmPxN98OL5vP5FyNyMW
/bS2hgHJokVuPid4+yuGSsu4zQgRted80+eYA1QzPAsoqlGGBVzFc/yktQKBgQDP
RcqHPFV7Tfn+vkk8bEf4BR4KNKWJZXqeCONQSEboJM3axQ9njXN73iR5qRkW/Z99
Wwy6P/wAy1SIqEImf3y9v3tHI1BxIO4xKEr1EqjGarFqS9Rod0tACRc/cPwf6DZQ
5R1+z3AyMiLFYOUnFZcOdGz9RmA5aeZ9XWuHSDWimQKBgGgmRWuGasEEMXdnkLQA
rNg1Di5DFv+KvXwgTo63MxwBs2olQ7jUsFf8khipqpByGazYgGeEa1RxDGpQrdyO
I/5N3d5VcGW4g9obfdexuuKOloyKRS2N0KNhLfEfb+qr4gRACPpyKnj5Jeohliox
bHieUzx8qriZ8KrD3PbjKqap
-----END PRIVATE KEY-----
`,
        // Create the hub node, its websocket server, and one websocket
        // client pipe per remaining peer
        setup () {
            // Make the hub
            var hub = require('../node.js')()
            hub.pid = 'hub'
            sim.add_peer(hub, 0)
            this.server = require('../websocket-server.js')(
                hub,
                this.certificate,
                this.private_key
            )
            // Make the clients
            var clients = []
            for (var i = 1; i < sim.n_peers; i++) {
                var client = require('../node.js')()
                client.pid = 'C' + i
                sim.add_peer(client, i)
                clients.push(client)
            }
            // Create pipes that connect peers to the hub
            this.client_pipes = {}
            for (var i = 0; i < clients.length; i++)
                this.client_pipes[clients[i].pid] = require('../websocket-client.js')({
                    node: clients[i],
                    url: 'ws://localhost:3007/',
                    prefix: '*'
                })
            // Expose the 'ws' implementation as the global WebSocket that
            // websocket-client.js's default socket factory expects
            WebSocket = require('ws')
        },
        // Re-enable every pipe, author one "joiner" version, and give the
        // real sockets wall-clock time to settle before calling cb
        wrapup (cb) {
            nlog('Wrapping up!')
            // Connect all the pipes together
            for (var pipe in this.client_pipes)
                if (!this.client_pipes[pipe].enabled())
                    this.client_pipes[pipe].enable()
            // Make a joiner after a delay
            setTimeout(make_joiner, 100)
            // And be done after another one
            setTimeout(cb, 200)
            function make_joiner () {
                // Pick a random peer to author the joiner version
                var i = Math.floor(sim.rand() * sim.n_peers)
                var p = sim.peers[i]
                notes = ['creating joiner']
                // Create it!
                p.set('my_key', null, [])
                sim.vis.add_frame({
                    peer_notes: {[p.pid]: notes},
                    peers: sim.peers.map(x => JSON.parse(JSON.stringify(x)))
                })
            }
        },
        // Tear everything down: disable each client pipe, then close the
        // hub's websocket server (cb fires once it has closed)
        die (cb) {
            // Disable the clients
            for (var k in this.client_pipes)
                this.client_pipes[k].disable()
            // Kill the server
            this.server.dead = true
            this.server.close(cb)
        },
        // Randomly enable or disable one client's pipe to simulate flaky
        // connectivity
        toggle_pipe () {
            var pipes = Object.keys(this.client_pipes)
            var rand_pipe = this.client_pipes[
                pipes[Math.floor(sim.rand() * pipes.length)]]
            nlog('toggling', rand_pipe.pipe.id, 'to',
                rand_pipe.enabled() ? 'disabled':'enabled')
            if (rand_pipe.enabled())
                rand_pipe.disable()
            else
                rand_pipe.enable()
        }
    }
)
================================================
FILE: kernel/test/wiki-perf.html
================================================
================================================
FILE: kernel/test/wiki-tester.js
================================================
// Load shared helpers (log/nlog/assert/Math.randomSeed etc.) as globals
require('../../util/utilities.js')
// The single wiki page that the server and every client edit in this test
var page_key = '/foo'
g_current_server = null
// Queue of thunks standing in for in-flight websocket traffic.  Nothing
// is delivered until debug_WS_process_messages() runs, so the tester
// fully controls message ordering.
g_debug_WS_messages = []
// Messages deferred a whole round (e.g. reconnect attempts)
g_debug_WS_messages_delayed = []
// Drain the live queue (thunks it pushes are drained too), then promote
// the delayed queue to live for the next round
debug_WS_process_messages = function () {
    while (g_debug_WS_messages.length) {
        g_debug_WS_messages.shift()()
    }
    g_debug_WS_messages = g_debug_WS_messages_delayed
    g_debug_WS_messages_delayed = []
}
// Fake in-process websocket *server*.  At most one exists at a time,
// published on debug_WSS.the_one so fake client sockets (debug_WS) can
// find it and attach.  Implements just enough of ws.Server for
// websocket-server.js: on('connection') and close().
debug_WSS = function () {
    var server = {
        on_conns: [],   // 'connection' handlers registered via on()
        ws_array: [],   // fake client sockets currently attached
        on(event_type, func) {
            if (event_type != 'connection') throw 'bad'
            this.on_conns.push(func)
        },
        close() {
            // Queue an onclose notification for every attached client,
            // then unpublish ourselves so new connections fail
            this.ws_array.forEach(function (ws) {
                g_debug_WS_messages.push(function () {
                    ws.onclose && ws.onclose()
                })
            })
            debug_WSS.the_one = null
        }
    }
    return debug_WSS.the_one = server
}
// Fake in-process websocket *client*.  Mirrors just enough of the
// browser/ws WebSocket API (send/close/terminate + onopen/onmessage/
// onclose callbacks) for websocket-client.js.  All traffic is deferred
// through g_debug_WS_messages, so the tester controls delivery order.
debug_WS = function (id) {
    // console.log(`C-${id} ATTEMPTING CONNECTING TO SERVER`)
    var self = {
        id,
        // Server-side handlers registered for this socket's events
        on_messages: [],
        on_closes: [],
        is_open: true,
        // Client -> server: queue the message for each server handler
        send(msg) {
            // var m = JSON.parse(msg)
            // console.log(`C-${self.id} SEND: ` + m.method + ' ' + (m.seen || ''))
            // if (m.versions) console.log('versions: ', m.versions)
            // if (m.patches) console.log('version: ', m.version, m.parents, m.patches)
            // console.log(`C-${self.id} SEND: ` + JSON.stringify(JSON.parse(msg), null, ' '))
            this.on_messages.forEach(f =>
                g_debug_WS_messages.push(() => {
                    // console.log(`S RECV from:C-${self.id} : ` + m.method + ' ' + (m.seen || ''))
                    // if (m.versions) console.log('versions: ', m.versions)
                    // if (m.patches) console.log('version: ', m.version, m.parents, m.patches)
                    // if (!self.is_open) console.log('NOT OPEN!')
                    // console.log(`S RECV from:C-${self.id} : ` + JSON.stringify(JSON.parse(msg), null, ' '))
                    f(msg)
                }))
        },
        // Hard-close this socket: fire our own onclose plus the server's
        // close handlers (all deferred), and detach from the server
        terminate() {
            if (!self.is_open) throw 'closing closed socket'
            self.is_open = false
            // console.log(`CLOSING C-${self.id}`)
            g_debug_WS_messages.push(() =>
                this.onclose && this.onclose())
            this.on_closes.forEach(f =>
                g_debug_WS_messages.push(() => f()))
            this.on_closes = []
            this.on_messages = []
            if (debug_WSS.the_one)
                debug_WSS.the_one.ws_array.splice(debug_WSS.the_one.ws_array.indexOf(self), 1)
        }
    }
    self.close = self.terminate
    // The connection attempt is itself deferred: it only succeeds if a
    // server exists at the moment the queued thunk actually runs
    g_debug_WS_messages.push(() => {
        if (debug_WSS.the_one) {
            debug_WSS.the_one.ws_array.push(self)
            debug_WSS.the_one.on_conns.forEach(f => {
                // console.log(`C-${self.id} CONNECTING TO SERVER`)
                // Hand the server a connection object wired back to us
                f({
                    on(event_type, func) {
                        if (event_type == 'message') self.on_messages.push(func)
                        else if (event_type == 'close') self.on_closes.push(func)
                    },
                    // Server -> client: deferred delivery to self.onmessage
                    send(msg) {
                        // var m = JSON.parse(msg)
                        // console.log(`S SEND to:C-${self.id} : ` + m.method + ' ' + (m.seen || ''))
                        // if (m.versions) console.log('versions: ', m.versions)
                        // if (m.patches) console.log('version: ', m.version, m.parents, m.patches)
                        // console.log(`S SEND to:C-${self.id} : ` + JSON.stringify(JSON.parse(msg), null, ' '))
                        g_debug_WS_messages.push(() => {
                            // console.log(`C-${self.id} RECV: ` + m.method + ' ' + (m.seen || ''))
                            // if (m.versions) console.log('versions: ', m.versions)
                            // if (m.patches) console.log('version: ', m.version, m.parents, m.patches)
                            // if (!self.is_open) console.log('NOT OPEN!')
                            // console.log(`C-${self.id} RECV: ` + JSON.stringify(JSON.parse(msg), null, ' '))
                            self.onmessage({data: msg})
                        })
                    }
                }, {socket: {remoteAddress: 'fake-ip-address'}})
            })
            self.onopen && self.onopen()
        } else {
            // No server up right now: behave like a failed connection
            self.onclose && self.onclose()
        }
    })
    return self
}
// String-diff helpers used to turn text edits into braid patches
var ds = require('../../util/diff.js')
var performance = require('perf_hooks').performance
// Tiny manual profiler: wrap a region in begin(key)/end(key) (or toggle
// with mark(key)), then print() emits "key  mean  total  count" rows.
g_profile = {
    // key -> {count, time, begin?}; `begin` holds the pending start time
    keys: {},
    begin(key) {
        if (!this.keys[key]) this.keys[key] = {count: 0, time: 0}
        if (this.keys[key].begin != null) throw 'unbalanced begin! key: ' + key
        this.keys[key].begin = performance.now()
    },
    end(key) {
        if (!this.keys[key]) throw 'unbalanced end! key: ' + key
        // Bug fix: an end() with no matching begin() used to compute
        // `now - undefined` and silently poison the total with NaN;
        // fail loudly instead, mirroring begin()'s balance check.
        if (this.keys[key].begin == null) throw 'unbalanced end! key: ' + key
        this.keys[key].time += performance.now() - this.keys[key].begin
        delete this.keys[key].begin
        this.keys[key].count++
    },
    mark(key) {
        // First call (or first after an end) starts the timer; next stops it
        if (!this.keys[key] || this.keys[key].begin == null) this.begin(key)
        else this.end(key)
    },
    print() {
        // Tab-separated: key, mean time per call, total time, call count
        Object.entries(this.keys).forEach(([k, v]) => {
            console.log(`${k}\t${v.time / v.count}\t${v.time}\t${v.count}`)
        })
    }
}
// Pruning knobs — presumably consulted by run_experiment's consistency
// checks or the store; usage not visible in this chunk, confirm below
g_prune_counter = 0
g_prune_period = 0
// Fuzzing driver: run N randomized experiments with distinct seeds,
// tracking the earliest failure (fewest trials before divergence) and the
// slowest run.  A failing run's action log is written to actions.json so
// it can be replayed deterministically via run_experiment_from_actions().
async function main() {
    // var a = '' + require('fs').readFileSync('actions.json')
    // a = JSON.parse(a)
    // run_experiment_from_actions(a)
    // return
    g_profile.begin('whole thing')
    // Fewest trials needed to hit a failure, and the seed that caused it
    var best_t = Infinity
    var best_seed = null
    // Exponential moving average of per-run wall time, for ETA estimates
    var exp_time_est = 1
    var longest = 0
    var longest_seed = null
    var N = 2000
    var ST = performance.now()
    var times = []
    for (var i = 0; i < N; i++) {
        let sttt = performance.now()
        var seed = '__acb_def_fff_fF246__:' + i
        // N = 1
        // seed = '__abb__29:4'
        console.log('seed: ' + seed)
        var st = performance.now()
        var r = await run_experiment(seed)
        times.push(performance.now() - sttt)
        // Keep the action log of whichever failure happened soonest
        if (!r.ok && r.t < best_t) {
            best_t = r.t
            best_seed = seed
            require('fs').writeFileSync('actions.json', JSON.stringify(r.actions, null, ' '))
        }
        var t = performance.now() - st
        if (t > longest) {
            longest = t
            longest_seed = seed
        }
        // Smooth the per-run estimate and project remaining time
        exp_time_est = 0.9 * exp_time_est + 0.1 * t
        console.log(`exp_time_est = ${exp_time_est}, t=${t}`)
        console.log(`total time est = ${(exp_time_est * (N - i - 1))/1000/60}min`)
    }
    console.log('best_t = ' + best_t)
    console.log('best_seed = ' + best_seed)
    console.log('longest = ' + longest)
    console.log('longest_seed = ' + longest_seed)
    console.log('time(sec) = ' + (performance.now() - ST)/1000)
    g_profile.end('whole thing')
    g_profile.print()
    // console.log('times: ' + JSON.stringify(times))
}
// Run one randomized experiment: 30 trials in which we randomly start or
// stop the server, create/open/close/kill clients, or make concurrent
// edits, then drain all fake-socket traffic and check that every open
// client converged to the server's text.  Each random decision is logged
// (with RNG state) to `actions` so a failure can be replayed exactly.
// Returns {ok, t?, actions}: ok=false plus the failing trial index `t`.
async function run_experiment(rand_seed) {
    Math.randomSeed(rand_seed)
    // Reset the fake network and server singleton between experiments
    g_debug_WS_messages = []
    g_debug_WS_messages_delayed = []
    debug_WSS.the_one = null
    var trials = 30
    var db = create_db()
    var server = null
    var clients = []
    var log_stuff = false
    var actions = []
    for (var t = 0; t < trials; t++) {
        // Deterministic clock: "now" is just the trial number
        Date.now = () => t
        var st = performance.now()
        try {
            log_stuff && console.log('----------------------------- trial ' + t)
            if (!server && Math.random() < 0.4) {
                log_stuff && console.log('> starting server')
                actions.push({action: 'starting server', rand: Math.random.get_state()})
                server = await create_server(db)
            } else if (server && Math.random() < 0.3) {
                log_stuff && console.log('> closing server')
                actions.push({action: 'closing server', rand: Math.random.get_state()})
                server.close()
                server = null
            } else {
                if (clients.length == 0 || (clients.length < 5 && Math.random() < 0.2)) {
                    log_stuff && console.log('> creating client')
                    actions.push({action: 'creating client', rand: Math.random.get_state()})
                    clients.push(create_client())
                } else {
                    // Pick an existing client and do something to it
                    let ci = Math.floor(Math.random() * clients.length)
                    let c = clients[ci]
                    if (!c.is_open && Math.random() < 0.3) {
                        log_stuff && console.log('> re-opening client')
                        actions.push({action: 're-opening client', id: c.id, rand: Math.random.get_state()})
                        c.open()
                    } else if (c.is_open && Math.random() < 0.4) {
                        if (Math.random() < 0.5) {
                            log_stuff && console.log('> closing client (temporarily)')
                            actions.push({action: 'closing client (temporarily)', id: c.id, rand: Math.random.get_state()})
                            c.close(false, false)
                        } else {
                            // Kill for good, optionally announcing departure
                            var send_forget = Math.random() < 0.333
                            var send_deletes = send_forget || Math.random() < 0.5
                            log_stuff && console.log('> killing client' + (send_deletes ? ', sending deletes' : '') + (send_forget ? ', sending forget' : ''))
                            actions.push({action: 'killing client', send_forget, send_deletes, id: c.id, rand: Math.random.get_state()})
                            c.close(send_deletes, send_forget)
                            clients.splice(ci, 1)
                        }
                    } else if (c.is_open) {
                        // Concurrent editing: the chosen client edits, and
                        // each other client joins in with probability 0.2
                        var inner_actions = []
                        for (let cii = 0; cii < clients.length; cii++) {
                            if (cii == ci || Math.random() < 0.2) {
                                let c = clients[cii]
                                let text = c.get()
                                // Random splice; guarantee a non-empty edit
                                // when the deletion length is zero
                                let start = Math.floor(Math.random() * (text.length + 1))
                                let len = Math.floor(Math.random() * (text.length - start + 1))
                                let ins = String.fromCharCode(65 + Math.floor(Math.random() * 26)).repeat(Math.floor(Math.random() * 4) + (len == 0 ? 1 : 0))
                                log_stuff && console.log(`> C-${c.id} changing text ` + JSON.stringify(text) + `.splice(${start}, ${len}, ${JSON.stringify(ins)})`)
                                inner_actions.push({start, len, ins, id: c.id, rand: Math.random.get_state()})
                                c.set(start, len, ins)
                            }
                        }
                        actions.push({action: 'editing', inner_actions, rand: Math.random.get_state()})
                    } else {
                        log_stuff && console.log('> doing nothing..')
                        actions.push({action: 'doing nothing..'})
                    }
                }
            }
            // Deliver all queued fake-websocket traffic
            debug_WS_process_messages()
            log_stuff && console.log(`server: ${server ? `"${server.get()}"` : 'down'}`)
            log_stuff && clients.forEach(c => console.log(`${c.id} client ${c.is_open ? ':' : 'X'} "${c.get()}"`))
            if (true) {
                // console.log('SERVER: ' + (server ? server.get_more() : 'down'))
                // clients.forEach(c => console.log(`CLIENT ${c.id} = ${c.get_more()}`))
                // console.log('SERVER: ' + (g_current_server ? g_current_server.get_null() : 'not started'))
                // clients.forEach(c => console.log(`CLIENT ${c.id} = ${c.get_null()}`))
            }
            // Invariant checks: convergence and cursor garbage collection
            if (server && clients.some(c => c.is_open)) {
                let text = server.get()
                if (clients.some(c => c.is_open && c.get() != text)) {
                    console.log('NOT THE SAME!')
                    return {ok: false, t, actions}
                }
                // work here
                let o = server.node.resource_at(page_key).mergeable.read()
                if (!o || !o.cursors || Object.keys(o.cursors).length > clients.length) {
                    console.log('TOO MANY CURSORS!')
                    return {ok: false, t, actions}
                }
            }
        } catch (e) {
            console.log('EXCEPTION', e)
            return {ok: false, t, actions}
        }
        //actions.push({time: performance.now() - st})
    }
    return {ok: true, actions}
}
// Deterministically replay an action log recorded by run_experiment()
// (typically loaded from actions.json), restoring the RNG state before
// each action so the replay bit-for-bit reproduces the original run.
// Same invariant checks and return shape as run_experiment().
async function run_experiment_from_actions(actions) {
    // Seed value is irrelevant here; we only need set_state() installed
    Math.randomSeed('just needed to make set_state available')
    g_debug_WS_messages = []
    g_debug_WS_messages_delayed = []
    debug_WSS.the_one = null
    var db = create_db()
    var server = null
    var clients = []
    var log_stuff = true
    var t = 0
    for (var a of actions) {
        // Deterministic clock, matching the recording run
        Date.now = () => t
        // console.log('a.action = ' + a.action)
        try {
            log_stuff && console.log('----------------------------- trial ' + t)
            if (a.action == 'starting server') {
                log_stuff && console.log('> starting server')
                Math.random.set_state(a.rand)
                server = await create_server(db)
            } else if (a.action == 'closing server') {
                log_stuff && console.log('> closing server')
                Math.random.set_state(a.rand)
                server.close()
                server = null
            } else {
                if (a.action == 'creating client') {
                    log_stuff && console.log('> creating client')
                    Math.random.set_state(a.rand)
                    clients.push(create_client())
                } else {
                    if (a.action == 're-opening client') {
                        log_stuff && console.log('> re-opening client')
                        var c = clients.find(c => c.id == a.id)
                        Math.random.set_state(a.rand)
                        c.open()
                    } else if (a.action == 'closing client (temporarily)') {
                        log_stuff && console.log('> closing client (temporarily)')
                        var c = clients.find(c => c.id == a.id)
                        Math.random.set_state(a.rand)
                        c.close(false)
                    } else if (a.action == 'killing client') {
                        log_stuff && console.log('> killing client' + (a.send_deletes ? ', sending deletes' : '') + (a.send_forget ? ', sending forget' : ''))
                        var c = clients.find(c => c.id == a.id)
                        Math.random.set_state(a.rand)
                        c.close(a.send_deletes, a.send_forget)
                        clients.splice(clients.findIndex(c => c.id == a.id), 1)
                    } else if (a.action == 'editing') {
                        // Replay each recorded edit against its client
                        for (let inner_a of a.inner_actions) {
                            let start = inner_a.start
                            let len = inner_a.len
                            let ins = inner_a.ins
                            let c = clients.find(c => c.id == inner_a.id)
                            let text = c.get()
                            log_stuff && console.log(`> C-${c.id} changing text ` + JSON.stringify(text) + `.splice(${start}, ${len}, ${JSON.stringify(ins)})`)
                            Math.random.set_state(inner_a.rand)
                            c.set(start, len, ins)
                        }
                        Math.random.set_state(a.rand)
                    } else if (a.action == 'doing nothing..') {
                        log_stuff && console.log('> doing nothing..')
                    } else throw 'bad'
                }
            }
            // Deliver all queued fake-websocket traffic
            debug_WS_process_messages()
            log_stuff && console.log(`server: ${server ? `"${server.get()}"` : 'down'}`)
            log_stuff && clients.forEach(c => console.log(`${c.id} client ${c.is_open ? ':' : 'X'} "${c.get()}"`))
            // Diagnostics dump; swap in whichever inspector is needed
            if (true) {
                console.log('time dags:')
                var show = (s) => console.log(JSON.stringify(s.time_dag, null, ' '))
                if (g_current_server) show(g_current_server.node.resource_at(page_key))
                clients.forEach(c => show(c.node.resource_at(page_key)))
                // console.log('version_cache:')
                // var show = (s) => console.log(JSON.stringify(s.version_cache, null, ' '))
                // if (g_current_server) show(g_current_server.node.resource_at(page_key))
                // clients.forEach(c => show(c.node.resource_at(page_key)))
                // console.log('incoming_subscriptions:')
                // var show = (s) => console.log(s.incoming_subscriptions.toString())
                // if (g_current_server) show(g_current_server.node)
                // clients.forEach(c => show(c.node))
                // console.log('space dags:')
                // var show = (s) => console.log(JSON.stringify(s.space_dag, null, ' '))
                // if (g_current_server) show(g_current_server.node.resource_at(page_key))
                // clients.forEach(c => show(c.node.resource_at(page_key)))
                // console.log('read:')
                // function show2(s) {
                //     console.log(JSON.stringify(s.mergeable && s.mergeable.read(), null, ' '))
                // }
                // if (g_current_server) show2(g_current_server.node.resource_at(page_key))
                // clients.forEach(c => show2(c.node.resource_at(page_key)))
                // console.log('fiss:')
                // function show3(s) {
                //     console.log(JSON.stringify(s.fissures, null, ' '))
                // }
                // if (g_current_server) show3(g_current_server.node.resource_at(page_key))
                // clients.forEach(c => show3(c.node.resource_at(page_key)))
                // console.log('fissures:')
                // function show2(s) { console.log(JSON.stringify(s.fissures, null, ' ')) }
                // if (g_current_server) show2(g_current_server.node.resource_at(page_key))
                // clients.forEach(c => show2(c.node.resource_at(page_key)))
                // console.log('full versions:')
                // function show(s) { console.log(JSON.stringify(s, null, ' ')) }
                // if (g_current_server) show(g_current_server.node.resource_at(page_key))
                // clients.forEach(c => show(c.node.resource_at(page_key)))
                // console.log('SERVER: ', (g_current_server ? g_current_server.node.resource_at(page_key).mergeable.read() : 'not started'))
                // console.log('SERVER: ' + (g_current_server ? g_current_server.get_time() : 'not started'))
                // clients.forEach(c => console.log(`CLIENT ${c.id} = ${c.get_time()}`))
                // clients.forEach(c => console.log(`CLIENT ${c.id} = ${c.get_more()}`))
                // console.log('null versions:')
                // console.log('SERVER: ', (g_current_server ? g_current_server.get_null() : 'not started'))
                //clients.forEach(c => console.log(`CLIENT ${c.id} = ${c.get_null()}`))
                // if (g_current_server)
                //     console.log('SERVER: ' + JSON.stringify(g_current_server.node.resource_at(page_key), null, ' '))
                // console.log('fissures:')
                // console.log('SERVER: ', (g_current_server ? g_current_server.node.resource_at(page_key).fissures : 'not started'))
            }
            // Invariant checks: convergence and cursor garbage collection
            if (server && clients.some(c => c.is_open)) {
                let text = server.get()
                if (clients.some(c => c.is_open && c.get() != text)) {
                    console.log('NOT THE SAME!')
                    return {ok: false, t}
                }
                // work here
                let o = server.node.resource_at(page_key).mergeable.read()
                if (!o || !o.cursors || Object.keys(o.cursors).length > clients.length) {
                    console.log('TOO MANY CURSORS!')
                    return {ok: false, t, actions}
                }
            }
        } catch (e) {
            console.log('EXCEPTION', e)
            return {ok: false, t}
        }
        t++
    }
    return {ok: true}
}
// Kick off the fuzzing run
main()
// In-memory key/value store standing in for the server's database.
// Exposed as the implicit global g_db for console debugging.
function create_db() {
    return g_db = {
        data: {},
        get(key) { return this.data[key] },
        set(key, val) { this.data[key] = val },
        del(key) { delete this.data[key] },
        list_keys() { return Object.keys(this.data) }
    }
}
// Build the wiki server: a braid node persisted in `db` and exposed via a
// fake websocket server (debug_WSS) that debug_WS clients connect to.
// Published as the implicit global g_current_server for debugging.
async function create_server(db) {
    // Aggressive compression settings so the pruning paths get exercised
    db.compress_if_inactive_time = 1000 * 1000
    db.compress_after_this_many = 10
    var node = require('../node.js')()
    //node.fissure_lifetime = 1 // 4
    await require('../store.js')(node, db)
    // On protocol error, just drop the offending subscription
    node.on_errors.push((key, origin) => {
        node.unbind(key, origin)
    })
    var wss = require('../websocket-server.js')(node, {wss: new debug_WSS()})
    return g_current_server = {
        node,
        // Current wiki-page text (undefined until the page has content)
        get() {
            var o = node.resource_at(page_key).mergeable.read()
            return o && o.text
        },
        close() {
            // Mark dead first so socket-close callbacks are ignored
            wss.dead = true
            wss.close()
        }
    }
}
// Build a wiki client: a braid node reaching the server over a fake
// websocket, plus a tiny editor model (text + own cursor) on top.
// Returns {id, node, is_open, get, set, close, open}.
function create_client() {
    var node = require('../node.js')()
    node.default(page_key, {cursors: {}, text: ''})
    var ws_client = require('../websocket-client.js')({node, create_websocket: () => {
        return new debug_WS(node.pid)
    }})
    // How many fake Date.now() ticks a peer's cursor may go unrefreshed
    // before other peers garbage-collect it
    var cursor_lifetime = 1 // 10000
    var ready = false
    var text = ''
    var selectionStart = 0
    var selectionEnd = 0
    // Diff `from` -> `to` and send the result as splice patches on .text
    function send_diff(from, to) {
        var v = node.set(page_key, null, ds.diff_convert_to_my_format(ds.diff_main(from, to)).map(x =>
            `.text[${x[0]}:${x[0] + x[1]}] = ${JSON.stringify(x[2])}`
        ))
    }
    // Publish our cursor position, timestamped for expiry
    function send_cursor_update(start, end) {
        node.set(page_key, null, [`.cursors[${JSON.stringify(node.pid)}] = ${JSON.stringify({start: {type: 'location', path: `.text[${start}]`}, end: {type: 'location', path: `.text[${end}]`}, time: Date.now()})}`])
    }
    // Subscription callback: mirror the latest text and our own cursor
    var cb = x => {
        ready = true
        text = x.text
        if (x.cursors[node.pid]) {
            selectionStart = x.cursors[node.pid].start
            selectionEnd = x.cursors[node.pid].end
        }
    }
    node.get(page_key, cb)
    // Cursor garbage collection: on welcome/fissure events, delete the
    // cursors of departed peers (unmatched fissure halves) and of peers
    // whose cursor timestamp has expired
    node.ons.push((method, arg) => {
        if (method != 'welcome' && method != 'fissure') return
        if (arg.key != page_key) return
        var fs = {}
        if (method == 'welcome') {
            for (let f of arg.fissures)
                fs[`${f.a}:${f.b}:${f.conn}`] = f
        } else {
            let f = arg.fissure
            fs[`${f.a}:${f.b}:${f.conn}`] = f
        }
        var rest = () => {
            var o = node.resource_at(page_key).mergeable.read()
            if (!o || !o.cursors) return
            Object.assign(fs, node.resource_at(page_key).fissures)
            var delete_us = {}
            // A fissure with no mirror-image half means peer f.b is gone
            Object.values(fs).forEach(f => {
                if (!fs[`${f.b}:${f.a}:${f.conn}`]) {
                    if (o.cursors[f.b]) delete_us[f.b] = true
                }
            })
            var now = Date.now()
            // Expire stale cursors (never our own)
            Object.entries(o.cursors).forEach(([k, v]) => {
                if (k != node.pid && v.time <= now - cursor_lifetime) delete_us[k] = true
            })
            var patches = Object.keys(delete_us).map(k => `delete .cursors[${JSON.stringify(k)}]`)
            if (patches.length) node.set(page_key, null, patches)
        }
        // Defer through the fake network queue when running under the tester
        if (g_debug_WS_messages) g_debug_WS_messages.push(rest)
        else setTimeout(rest, 0)
    })
    // On protocol error, wipe local state and re-request the key fresh
    node.on_errors.push((key, origin) => {
        // console.log('CLIENT ON ERROR')
        text = ''
        selectionStart = 0
        selectionEnd = 0
        delete node.resources[key]
        node.unbind(key, origin)
        var subscribe = ws_client.pipe.subscribed_keys[key].we_requested
        delete ws_client.pipe.subscribed_keys[key].we_requested
        ws_client.pipe.send({
            key,
            subscribe,
            method: 'get'
        })
    })
    var self
    return self = {
        id: node.pid,
        node,
        is_open: true,
        get: () => {
            return text
        },
        // Splice (x, del, ins) into the local text, diff it to the network,
        // and move our cursor to the end of the insertion
        set: (x, del, ins) => {
            if (!ready) return
            var new_text = text.slice(0, x) + ins + text.slice(x + del)
            send_diff(text, new_text)
            if (x + ins.length <= new_text.length)
                send_cursor_update(x + ins.length, x + ins.length)
            else
                send_cursor_update(new_text.length, new_text.length)
        },
        // Close the connection; optionally retract our cursor and/or
        // unsubscribe (forget) before going away
        close: (send_deletes, send_forget) => {
            if (ready && send_deletes) node.set(page_key, null, [`delete .cursors[${JSON.stringify(node.pid)}]`])
            if (send_forget) node.forget(page_key, cb)
            ws_client.disable()
            self.is_open = false
        },
        open: () => {
            ws_client.enable()
            self.is_open = true
        }
    }
}
================================================
FILE: kernel/websocket-client.js
================================================
// Example braid-peer as a web browser client
module.exports = require['websocket-client'] = function add_websocket_client({node, url, prefix, create_websocket}) {
url = url || 'ws://localhost:3007/'
prefix = prefix || '/*'
var client_creds = null
var enabled = true
var sock
create_websocket = create_websocket || function () {
return new WebSocket(url + '.braid-websocket')
}
var reconnect_timeout = null
var listeners = {};
var addEventListener = (type, cb) => {
if (!(type in listeners)) {
listeners[type] = [];
}
listeners[type].push(cb);
}
var dispatchEvent = (event) => {
if (!(event.type in listeners)) {
return true;
}
var stack = listeners[event.type].slice();
for (var i = 0, l = stack.length; i < l; i++) {
stack[i].call(this, event);
}
return !event.defaultPrevented;
}
var connect = () => {
clearTimeout(reconnect_timeout)
if (!enabled) { return }
sock = create_websocket()
sock.onopen = () => {
if (onclose_called_already) { return }
pipe.connected()
dispatchEvent({type: "connect"})
}
sock.onmessage = message => {
if (onclose_called_already) { return }
var text = message.data;
var msg = JSON.parse(text);
if (msg.method != "ping" && msg.method != "pong") {
nlogf('WS', 'remote', '-->', 'local ', msg);
}
pipe.recv(msg)
}
var onclose_called_already = false
var local_sock = sock
sock.onclose = (a) => {
if (onclose_called_already) { return }
onclose_called_already = true
if (local_sock != sock) { return }
pipe.disconnected()
if (enabled) {
if (typeof(g_debug_WS_messages_delayed) != 'undefined')
g_debug_WS_messages_delayed.push(connect)
else reconnect_timeout = setTimeout(connect, 5000)
}
dispatchEvent({type: "disconnect"});
}
sock.onerror = () => {}
}
var disconnect = () => {
sock.close()
sock.onclose()
}
var pipe = require('./pipe.js')({
id: node.pid,
type: 'ws-client',
node,
connect,
disconnect,
send: (msg) => {
let text = JSON.stringify(msg);
if (msg.method != "ping" && msg.method != "pong") {
nlogf('WS', 'local ', '-->', 'remote', msg);
}
sock.send(text);
}
})
node.bind(prefix, pipe)
return {
pipe,
addEventListener,
enabled() {return enabled},
enable() {nlog('ENABLING PIPE', pipe.id);enabled = true; connect()},
disable() {nlog('DISABLING PIPE',pipe.id);enabled = false; disconnect()},
toggle() {if (enabled) {disable()} else enable()}
}
}
================================================
FILE: kernel/websocket-server.js
================================================
// Example braid-peer as a web server
// options = {
//   port: // default is 3007
//   wss:  // default is null, will create a 'ws' module WebSocket.Server with the given port
// }
// Each incoming websocket connection gets its own braid pipe bound to
// `node`.  Returns the underlying WebSocket.Server.
module.exports = require['websocket-server'] = function add_websocket_server(node, options) {
    if (!options) options = {}
    var s = options.wss || new (require('ws')).Server({port: options.port || 3007})
    s.on('connection', function(conn, req) {
        var pipe = require('./pipe.js')({node, connect, disconnect, send})
        // Best-effort display name for logs: known remote pid, else the
        // handshake's my_name_is field, else a placeholder
        const peer_name = (m) => (pipe.remote_peer || (m || {}).my_name_is || 'C-?').toString();
        const ip = req.socket.remoteAddress;
        // console.log(`New connection from ${ip}`)
        conn.on('message', (text) => {
            var msg = JSON.parse(text);
            // Keep heartbeat chatter out of the logs
            if (msg.method != "ping" && msg.method != "pong") {
                nlogf('WS', peer_name(msg).slice(0,6).padEnd(6), '-->', 'server', msg);
            }
            pipe.recv(msg)
        })
        conn.on('close', () => {
            log('ws: socket closed ', s.dead ? '<>' : '')
            // During full-server shutdown, skip per-pipe teardown
            if (s.dead) return
            pipe.disconnected()
        })
        pipe.connected()
        function connect () {
            // we're connected already, nothing to do
            log('ws-serve: connected')
            // pipe.connected() <-- this is called just above
        }
        function disconnect () {
            conn.terminate()
        }
        function send (msg) {
            let text = JSON.stringify(msg);
            if (msg.method != "ping" && msg.method != "pong") {
                nlogf('WS', 'server', '-->', peer_name().slice(0,6).padEnd(6), msg);
            }
            conn.send(text);
        }
    })
    return s
}
================================================
FILE: readme.md
================================================
# The Braidjs Monorepo
By versioning our code together, it becomes easier to interoperate.
- Each top-level folder is a project. Add yours!
- Now you can make breaking changes (like a protocol change), without
actually *breaking* anything—upgrade all the relevant code, across
multiple projects, at once!
This is not my code. This is *our* code.
### Projects
Add yours today!
- `antimatter`: [An implementation of the Antimatter Algorithm](https://github.com/braid-org/braidjs/tree/master/antimatter)
- `antimatter_wiki`: [An example Wiki using Antimatter](https://github.com/braid-org/braidjs/tree/master/antimatter_wiki)
- `braid-http`: [A reference implementation of the Braid Protocol](https://github.com/braid-org/braidjs/tree/master/braid-http)
- `json-patch`: [Applies a Range-Patch to JSON](https://github.com/braid-org/braidjs/tree/master/json-patch)
- `kernel`: [A prototype Braid Kernel](https://github.com/braid-org/braidjs/tree/master/kernel)
- `simpleton`: [A very simple and fast CRDT sync for light clients](https://github.com/braid-org/braidjs/tree/master/simpleton)
- `sync9`: [A CRDT that supports pruning history](https://github.com/braid-org/braidjs/tree/master/sync9)
- `util`: [A set of common utilities](https://github.com/braid-org/braidjs/tree/master/util)
Read more about Braid at https://braid.org!
### FAQ
Q. Wait... can a single repo support multiple NPM packages?
- A. Yep! Just create a `package.json` in your project's root folder, and
then run `npm publish` from it.
================================================
FILE: simple_d_ton/index.js
================================================
// Version stamp, printed on startup to verify which build is running
console.log("v13")
let { Doc, Branch, OpLog } = require("diamond-types-node")
let braidify = require("braid-http").http_server
let fs = require("fs")
// Bookkeeping for PUT handling — presumably used to chain/serialize
// concurrent PUTs; usage is further down the file, confirm there
let waiting_puts = 0
let prev_put_p = null
async function simple_d_ton(req, res, options = {}) {
options = {
db_folder: null, // Default db_folder
key: req.url.split('?')[0], // Default key
...options // Override with all options passed in
}
let resource = await get_resource(options.key, options.db_folder)
braidify(req, res)
let peer = req.headers["peer"]
res.my_peer = peer
let desired_type = options.type ?? req.headers.accept?.split(',')[0]
res.setHeader("Access-Control-Allow-Origin", "*")
res.setHeader("Access-Control-Allow-Methods", "*")
res.setHeader("Access-Control-Allow-Headers", "*")
res.setHeader("Access-Control-Expose-Headers", "*")
function my_end(statusCode, x) {
res.statusCode = statusCode
res.end(x ?? '')
}
if (req.method == "OPTIONS") return my_end(200)
if (req.method == "DELETE") {
await resource.delete_me()
return my_end(200)
}
if ((req.method == "GET" || req.method == "HEAD") && (desired_type != "text/html") && req.subscribe) {
res.setHeader("Content-Type", desired_type + '; charset=utf-8')
res.setHeader("Editable", "true")
if (req.headers["merge-type"] != "dt") {
res.setHeader("Merge-Type", "simpleton")
if (req.method == "HEAD") return my_end(200)
res.startSubscription({
onClose: (_) => resource.simpleton_clients.delete(res),
})
let version = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
let x = { version }
if (!req.parents && !req.version) {
x.parents = []
x.body = resource.doc.get()
res.sendVersion(x)
} else {
x.parents = req.version ? req.version : req.parents
res.my_last_seen_version = x.parents
// only send them a version from these parents if we have these parents (otherwise we'll assume these parents are more recent, probably versions they created but haven't sent us yet, and we'll send them appropriate rebased updates when they send us these versions)
let local_version = OpLog_remote_to_local(resource.doc, x.parents)
if (local_version) {
x.patches = get_xf_patches(resource.doc, local_version)
res.sendVersion(x)
}
}
res.my_last_sent_version = version
resource.simpleton_clients.add(res)
} else {
res.setHeader("Merge-Type", "dt")
if (req.method == "HEAD") return my_end(200)
res.startSubscription({ onClose: (_) => resource.clients.delete(res) })
let updates = null
if (resource.need_defrag) {
console.log(`doing defrag..`)
resource.need_defrag = false
resource.doc = defrag_dt(resource.doc)
}
if (!req.parents && !req.version) {
res.sendVersion({
version: ["root"],
parents: [],
body: "",
})
updates = OpLog_get_patches(resource.doc.toBytes(), resource.doc.getOpsSince([]))
} else {
// Then start the subscription from the Parents in request
let parents = Object.fromEntries((req.parents ? req.parents : req.version).map((x) => [x, true]))
let local_version = []
let [agents, versions, parentss] = parseDT([...resource.doc.toBytes()])
for (let i = 0; i < versions.length; i++) {
if (parents[versions[i].join("-")]) local_version.push(i)
}
local_version = new Uint32Array(local_version)
updates = OpLog_get_patches(resource.doc.getPatchSince(local_version), resource.doc.getOpsSince(local_version))
}
for (let u of updates) {
u.version = decode_version(u.version)
u.version[1] += u.end - u.start - 1
u.version = u.version.join("-")
res.sendVersion({
version: [u.version],
parents: u.parents,
patches: [{ unit: u.unit, range: u.range, content: u.content }],
})
}
// Output at least *some* data, or else chrome gets confused and
// thinks the connection failed. This isn't strictly necessary,
// but it makes fewer scary errors get printed out in the JS
// console.
if (updates.length === 0) res.write("\r\n")
resource.clients.add(res)
}
return
}
if (req.method == "GET" || req.method == "HEAD") {
res.setHeader("Content-Type", desired_type + '; charset=utf-8')
res.setHeader("Accept-Subscribe", "true")
let doc = null
if (req.version || req.parents) {
let frontier = {}
req.version?.forEach((x) => (frontier[x] = true))
req.parents?.forEach((x) => (frontier[x] = true))
let local_version = []
let [agents, versions, parentss] = parseDT([...resource.doc.toBytes()])
for (let i = 0; i < versions.length; i++) {
if (frontier[versions[i].join("-")]) {
local_version.push(i)
}
}
local_version = new Uint32Array(local_version)
let after_versions = {}
let [_, after_versions_array, __] = parseDT([...resource.doc.getPatchSince(local_version)])
for (let v of after_versions_array) after_versions[v.join("-")] = true
let new_doc = new Doc()
let op_runs = resource.doc.getOpsSince([])
let i = 0
op_runs.forEach((op_run) => {
let parents = parentss[i].map((x) => x.join("-"))
let start = op_run.start
let end = start + 1
let content = op_run.content?.[0]
let len = op_run.end - op_run.start
let base_i = i
for (let j = 1; j <= len; j++) {
let I = base_i + j
if (
j == len ||
parentss[I].length != 1 ||
parentss[I][0][0] != versions[I - 1][0] ||
parentss[I][0][1] != versions[I - 1][1] ||
versions[I][0] != versions[I - 1][0] ||
versions[I][1] != versions[I - 1][1] + 1
) {
for (; i < I; i++) {
let version = versions[i].join("-")
if (!after_versions[version]) {
new_doc.mergeBytes(
OpLog_create_bytes(
version,
parentss[i].map((x) => x.join("-")),
content ? start + (i - base_i) : start,
content?.[0]
)
)
}
if (op_run.content) content = content.slice(1)
}
content = ""
}
if (op_run.content) content += op_run.content[j]
}
})
doc = new_doc
} else doc = resource.doc
const buffer = Buffer.from(doc.get(), "utf8")
res.setHeader("Content-Length", buffer.length)
res.setHeader(
"Version",
doc
.getRemoteVersion()
.map((x) => encode_version(...x))
.map((x) => JSON.stringify(x))
.join(", ")
)
if (req.method == "HEAD") return my_end(200)
return my_end(200, buffer)
}
if (req.method == "PUT" || req.method == "POST" || req.method == "PATCH") {
if (waiting_puts >= 100) {
console.log(`The server is busy.`)
return my_end(503, "The server is busy.")
}
waiting_puts++
console.log(`waiting_puts(after++) = ${waiting_puts}`)
let my_prev_put_p = prev_put_p
let done_my_turn = null
prev_put_p = new Promise(
(done) =>
(done_my_turn = (statusCode, x) => {
waiting_puts--
console.log(`waiting_puts(after--) = ${waiting_puts}`)
my_end(statusCode, x)
done()
})
)
let patches = await req.patches()
await my_prev_put_p
if (patches[0]?.unit === 'everything') {
patches[0].unit = 'text'
patches[0].range = `[0:${count_code_points(resource.doc.get())}]`
}
let og_patches = patches
patches = patches.map((p) => ({
...p,
range: p.range.match(/\d+/g).map((x) => parseInt(x)),
...(p.content ? {content: [...p.content]} : {}),
}))
let change_count = patches.reduce((a, b) => a + b.content.length + (b.range[1] - b.range[0]), 0)
let og_v = req.version[0] || `${Math.random().toString(36).slice(2, 7)}-${change_count - 1}`
// reduce the version sequence by the number of char-edits
let v = decode_version(og_v)
v = encode_version(v[0], v[1] + 1 - change_count)
let parents = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
let og_parents = req.parents || parents
let ps = og_parents
if (!ps.length) ps = ["root"]
let v_before = resource.doc.getLocalVersion()
let bytes = []
let offset = 0
for (let p of patches) {
// delete
for (let i = p.range[0]; i < p.range[1]; i++) {
bytes.push(OpLog_create_bytes(v, ps, p.range[1] - 1 + offset, null))
offset--
ps = [v]
v = decode_version(v)
v = encode_version(v[0], v[1] + 1)
}
// insert
for (let i = 0; i < p.content?.length ?? 0; i++) {
let c = p.content[i]
bytes.push(OpLog_create_bytes(v, ps, p.range[1] + offset, c))
offset++
ps = [v]
v = decode_version(v)
v = encode_version(v[0], v[1] + 1)
}
}
try {
for (let b of bytes) resource.doc.mergeBytes(b)
} catch (e) {
console.log(`EEE= ${e}:${e.stack}`)
// we couldn't apply the version, presumably because we're missing its parents.
// we want to send a 4XX error, so the client will resend this request later,
// hopefully after we've received the necessary parents.
// here are some 4XX error code options..
//
// - 425 Too Early
// - pros: our message is too early
// - cons: associated with some "Early-Data" http thing, which we're not using
// - 400 Bad Request
// - pros: pretty generic
// - cons: implies client shouldn't resend as-is
// - 409 Conflict
// - pros: doesn't imply modifications needed
// - cons: the message is not conflicting with anything
// - 412 Precondition Failed
// - pros: kindof true.. the precondition of having another version has failed..
// - cons: not strictly true, as this code is associated with http's If-Unmodified-Since stuff
// - 422 Unprocessable Content
// - pros: it's true
// - cons: implies client shouldn't resend as-is (at least, it says that here: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/422)
// - 428 Precondition Required
// - pros: the name sounds right
// - cons: typically implies that the request was missing an http conditional field like If-Match. that is to say, it implies that the request is missing a precondition, not that the server is missing a precondition
return done_my_turn(425, "The server is missing the parents of this version.")
}
resource.need_defrag = true
let v_after = resource.doc.getLocalVersion()
if (JSON.stringify(v_before) === JSON.stringify(v_after)) {
console.log(`we got a version we already had: ${v_before}`)
return done_my_turn(200)
}
if (req.headers["merge-type"] != "dt") {
patches = get_xf_patches(resource.doc, v_before)
console.log(JSON.stringify({ patches }))
let version = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
for (let client of resource.simpleton_clients) {
if (client.my_peer == peer) {
client.my_last_seen_version = [og_v]
}
function set_timeout(time_override) {
if (client.my_timeout) clearTimeout(client.my_timeout)
client.my_timeout = setTimeout(() => {
let version = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
let x = { version }
x.parents = client.my_last_seen_version
console.log("rebasing after timeout.. ")
console.log(" client.my_unused_version_count = " + client.my_unused_version_count)
x.patches = get_xf_patches(resource.doc, OpLog_remote_to_local(resource.doc, client.my_last_seen_version))
console.log(`sending from rebase: ${JSON.stringify(x)}`)
client.sendVersion(x)
client.my_last_sent_version = x.version
delete client.my_timeout
}, time_override ?? Math.min(3000, 23 * Math.pow(1.5, client.my_unused_version_count - 1)))
}
if (client.my_timeout) {
if (client.my_peer == peer) {
if (!v_eq(client.my_last_sent_version, og_parents)) {
// note: we don't add to client.my_unused_version_count,
// because we're already in a timeout;
// we'll just extend it here..
set_timeout()
} else {
// hm.. it appears we got a correctly parented version,
// which suggests that maybe we can stop the timeout early
set_timeout(0)
}
}
continue
}
let x = { version }
if (client.my_peer == peer) {
if (!v_eq(client.my_last_sent_version, og_parents)) {
client.my_unused_version_count = (client.my_unused_version_count ?? 0) + 1
set_timeout()
continue
} else {
delete client.my_unused_version_count
}
x.parents = req.version
if (!v_eq(version, req.version)) {
console.log("rebasing..")
x.patches = get_xf_patches(resource.doc, OpLog_remote_to_local(resource.doc, [og_v]))
} else {
// this client already has this version,
// so let's pretend to send it back, but not
console.log(`not reflecting back to simpleton`)
client.my_last_sent_version = x.version
continue
}
} else {
x.parents = parents
x.patches = patches
}
console.log(`sending: ${JSON.stringify(x)}`)
client.sendVersion(x)
client.my_last_sent_version = x.version
}
} else {
if (resource.simpleton_clients.size) {
patches = get_xf_patches(resource.doc, v_before)
let x = { version: [og_v], parents, patches }
console.log(`sending: ${JSON.stringify(x)}`)
for (let client of resource.simpleton_clients) {
if (client.my_timeout) continue
client.sendVersion(x)
client.my_last_sent_version = x.version
}
}
}
let x = {
version: [og_v],
parents: og_parents,
patches: og_patches,
}
for (let client of resource.clients) {
if (client.my_peer != peer) client.sendVersion(x)
}
await resource.db_delta(resource.doc.getPatchSince(v_before))
options.put_cb?.(options.key, resource.doc.get())
return done_my_turn(200)
}
throw new Error("unknown")
}
// Fetch (or lazily construct) the in-memory resource object for `key`.
// Resources are cached on the function object itself, so repeated calls
// with the same key return the same object.
async function get_resource(key, db_folder) {
    if (!get_resource.cache) get_resource.cache = {}
    let cache = get_resource.cache
    let existing = cache[key]
    if (existing) return existing

    let resource = {
        clients: new Set(),           // dt-merge subscribers
        simpleton_clients: new Set(), // simpleton-merge subscribers
        doc: new Doc("server"),
    }

    // When a db_folder is given, replay the persisted deltas into the doc,
    // and get back hooks for appending new deltas / deleting the db files.
    let persistence = db_folder
        ? await file_sync(
              db_folder,
              encodeURIComponent(key),
              (bytes) => resource.doc.mergeBytes(bytes),
              () => resource.doc.toBytes()
          )
        : { change: () => {}, delete_me: () => {} }

    resource.db_delta = persistence.change
    resource.doc = defrag_dt(resource.doc)
    resource.need_defrag = false
    resource.delete_me = () => {
        persistence.delete_me()
        delete cache[key]
    }
    cache[key] = resource
    return resource
}
// Persist a resource's delta stream to disk as a sequence of numbered files
// (`<filename_base>.<n>` inside db_folder). Each file begins with a full
// snapshot chunk followed by delta chunks; every chunk is prefixed with a
// UInt32LE byte length. When a file grows past ~10x its snapshot size,
// a fresh file is started with a new snapshot and the old file is removed.
//
// On startup, the newest readable file is replayed through process_delta;
// older (or unreadable) files are deleted.
//
//   db_folder:     directory to store files in (created if missing)
//   filename_base: filename prefix (should already be URI-encoded)
//   process_delta: (bytes) => void, called once per persisted chunk on load
//   get_init:      () => bytes, produces a full snapshot for a new file
//
// Returns { change(bytes), delete_me() }.
async function file_sync(db_folder, filename_base, process_delta, get_init) {
    let currentNumber = 0
    let currentSize = 0
    let threshold = 0

    // Ensure the existence of db_folder
    try {
        await fs.promises.access(db_folder)
    } catch (err) {
        if (err.code === 'ENOENT') {
            await fs.promises.mkdir(db_folder, { recursive: true })
        } else {
            throw err
        }
    }

    // Read this resource's existing files and sort by their number suffix.
    async function get_sorted_files() {
        let re = new RegExp("^" + filename_base.replace(/[^a-zA-Z0-9]/g, "\\$&") + "\\.\\d+$")
        return (await fs.promises.readdir(db_folder))
            .filter((a) => re.test(a))
            .sort((a, b) => parseInt(a.match(/\d+$/)[0]) - parseInt(b.match(/\d+$/)[0]))
            .map((a) => `${db_folder}/${a}`)
    }
    const files = await get_sorted_files()

    // Try to process files starting from the highest number; once one loads
    // successfully, all older files are redundant and get deleted.
    let done = false
    for (let i = files.length - 1; i >= 0; i--) {
        if (done) {
            await fs.promises.unlink(files[i])
            continue
        }
        try {
            const filename = files[i]
            console.log(`trying to process file: ${filename}`)
            const data = await fs.promises.readFile(filename)
            let cursor = 0
            let isFirstChunk = true
            while (cursor < data.length) {
                const chunkSize = data.readUInt32LE(cursor)
                cursor += 4
                const chunk = data.slice(cursor, cursor + chunkSize)
                cursor += chunkSize
                if (isFirstChunk) {
                    isFirstChunk = false
                    // First chunk is the snapshot; compaction triggers at 10x its size.
                    threshold = chunkSize * 10
                }
                process_delta(chunk)
            }
            currentSize = data.length
            currentNumber = parseInt(filename.match(/\d+$/)[0])
            done = true
        } catch (error) {
            // Corrupt/unreadable file: discard it and fall back to the next-oldest.
            console.error(`Error processing file: ${files[i]}`)
            await fs.promises.unlink(files[i])
        }
    }

    return {
        // Append a length-prefixed delta, or start a new snapshot file if
        // we've crossed the compaction threshold.
        change: async (bytes) => {
            currentSize += bytes.length + 4 // we account for the extra 4 bytes for uint32
            const filename = `${db_folder}/${filename_base}.${currentNumber}`
            if (currentSize < threshold) {
                console.log(`appending to db..`)
                let buffer = Buffer.allocUnsafe(4)
                buffer.writeUInt32LE(bytes.length, 0)
                await fs.promises.appendFile(filename, buffer)
                await fs.promises.appendFile(filename, bytes)
                console.log("wrote to : " + filename)
            } else {
                try {
                    console.log(`starting new db..`)
                    currentNumber++
                    const init = get_init()
                    const buffer = Buffer.allocUnsafe(4)
                    buffer.writeUInt32LE(init.length, 0)
                    const newFilename = `${db_folder}/${filename_base}.${currentNumber}`
                    await fs.promises.writeFile(newFilename, buffer)
                    await fs.promises.appendFile(newFilename, init)
                    console.log("wrote to : " + newFilename)
                    currentSize = 4 + init.length
                    threshold = currentSize * 10
                    try {
                        await fs.promises.unlink(filename)
                    } catch (e) {} // old file may not exist; best-effort cleanup
                } catch (e) {
                    console.log(`e = ${e.stack}`)
                }
            }
        },
        // Delete every db file belonging to this resource.
        delete_me: async () => {
            await Promise.all(
                (await get_sorted_files()).map(async (file) => {
                    try {
                        await fs.promises.unlink(file)
                        console.log(`Deleted file: ${file}`)
                    } catch (err) {
                        console.error(`Error deleting file: ${file}`)
                        throw err
                    }
                })
            )
        },
    }
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// Rebuild a diamond-types Doc from its serialized bytes, compacting any
// internal fragmentation accumulated from many small merges.
function defrag_dt(doc) {
    const compacted = new Doc("server")
    compacted.mergeBytes(doc.toBytes())
    return compacted
}
// Convert a serialized diamond-types oplog (`bytes`) plus its op runs
// (`op_runs`, as returned by doc.getOpsSince) into a list of braid-style
// patch objects: { version, parents, unit, range, content, start, end }.
// Runs are split whenever the next op is not a direct continuation
// (same agent, consecutive seq, parented on its predecessor).
function OpLog_get_patches(bytes, op_runs) {
    // console.log(`op_runs = `, op_runs);
    let [agents, versions, parentss] = parseDT([...bytes])
    // console.log(JSON.stringify({agents, versions, parentss}, null, 4))
    let i = 0
    let patches = []
    op_runs.forEach((op_run) => {
        let version = versions[i].join("-")
        let parents = parentss[i].map((x) => x.join("-"))
        let start = op_run.start
        let end = start + 1
        // normalize content to an array of code points so we can index per-op
        if (op_run.content) op_run.content = [...op_run.content]
        let content = op_run.content?.[0]
        let len = op_run.end - op_run.start
        for (let j = 1; j <= len; j++) {
            let I = i + j
            // emit a patch at the end of the run, or whenever op I is not a
            // simple continuation of op I-1 by the same agent
            if (
                j == len ||
                parentss[I].length != 1 ||
                parentss[I][0][0] != versions[I - 1][0] ||
                parentss[I][0][1] != versions[I - 1][1] ||
                versions[I][0] != versions[I - 1][0] ||
                versions[I][1] != versions[I - 1][1] + 1
            ) {
                patches.push({
                    version,
                    parents,
                    unit: "text",
                    // inserts use a zero-width range; deletes span [start:end]
                    range: content ? `[${start}:${start}]` : `[${start}:${end}]`,
                    content: content ?? "",
                    start,
                    end,
                })
                if (j == len) break
                // start accumulating the next patch of this run
                version = versions[I].join("-")
                parents = parentss[I].map((x) => x.join("-"))
                start = op_run.start + j
                content = ""
            }
            end++
            if (op_run.content) content += op_run.content[j]
        }
        i += len
    })
    return patches
}
// Minimal parser for the diamond-types "DMNDTYPS" binary format.
// Returns [agents, versions, parentss]:
//   agents:   list of agent-name strings
//   versions: one [agent, seq] pair per op, in file order
//   parentss: per op, its list of parent versions ([["root"]] for root ops)
// Chunk ids mirror what OpLog_create_bytes emits (3 = agent names,
// 21 = version runs, 23 = parents); all other chunks are skipped.
// NOTE: consumes (destructively shifts from) the passed-in byte_array.
function parseDT(byte_array) {
    if (new TextDecoder().decode(new Uint8Array(byte_array.splice(0, 8))) !== "DMNDTYPS") throw new Error("dt parse error, expected DMNDTYPS")
    if (byte_array.shift() != 0) throw new Error("dt parse error, expected version 0")
    let agents = []
    let versions = []
    let parentss = []
    while (byte_array.length) {
        let id = byte_array.shift()
        let len = read_varint(byte_array)
        if (id == 1) {
            // fileinfo container: fall through and parse its children
        } else if (id == 3) {
            // agent-name table: a run of length-prefixed strings
            let goal = byte_array.length - len
            while (byte_array.length > goal) {
                agents.push(read_string(byte_array))
            }
        } else if (id == 20) {
            // patches container: fall through and parse its children
        } else if (id == 21) {
            // version runs: run-length encoded (agent, seq) assignments
            let seqs = {}
            let goal = byte_array.length - len
            while (byte_array.length > goal) {
                let part0 = read_varint(byte_array)
                let has_jump = part0 & 1
                let agent_i = (part0 >> 1) - 1
                let run_length = read_varint(byte_array)
                let jump = 0
                if (has_jump) {
                    // zig-zag-style signed offset from this agent's running seq
                    let part2 = read_varint(byte_array)
                    jump = part2 >> 1
                    if (part2 & 1) jump *= -1
                }
                let base = (seqs[agent_i] || 0) + jump
                for (let i = 0; i < run_length; i++) {
                    versions.push([agents[agent_i], base + i])
                }
                seqs[agent_i] = base + run_length
            }
        } else if (id == 23) {
            // parents: one explicit entry per run; the rest of each run is
            // implicitly parented on its immediate predecessor
            let count = 0
            let goal = byte_array.length - len
            while (byte_array.length > goal) {
                let run_len = read_varint(byte_array)
                let parents = []
                let has_more = 1
                while (has_more) {
                    let x = read_varint(byte_array)
                    let is_foreign = 0x1 & x
                    has_more = 0x2 & x
                    let num = x >> 2
                    if (x == 1) {
                        parents.push(["root"])
                    } else if (!is_foreign) {
                        // local reference: index relative to the current op
                        parents.push(versions[count - num])
                    } else {
                        // foreign reference: explicit (agent, seq)
                        parents.push([agents[num - 1], read_varint(byte_array)])
                    }
                }
                parentss.push(parents)
                count++
                for (let i = 0; i < run_len - 1; i++) {
                    parentss.push([versions[count - 1]])
                    count++
                }
            }
        } else {
            // unknown/unneeded chunk: skip its payload
            byte_array.splice(0, len)
        }
    }
    // length-prefixed UTF-8 string
    function read_string(byte_array) {
        return new TextDecoder().decode(new Uint8Array(byte_array.splice(0, read_varint(byte_array))))
    }
    // LEB128-style varint: 7 bits per byte, high bit = continuation
    function read_varint(byte_array) {
        let result = 0
        let shift = 0
        while (true) {
            if (byte_array.length === 0) throw new Error("byte array does not contain varint")
            let byte_val = byte_array.shift()
            result |= (byte_val & 0x7f) << shift
            if ((byte_val & 0x80) == 0) return result
            shift += 7
        }
    }
    return [agents, versions, parentss]
}
// Serialize a single op (insert or delete of one character) into a
// standalone diamond-types byte blob suitable for Doc.mergeBytes.
//   version: "agent-seq" string identifying this op
//   parents: array of "agent-seq" strings, or ["root"] for a root op
//   pos:     code-point position of the edit
//   ins:     the inserted character, or null/undefined for a delete
// Returns a plain array of byte values.
function OpLog_create_bytes(version, parents, pos, ins) {
    // console.log(`args = ${JSON.stringify({ version, parents, pos, ins }, null, 4)}`)
    // LEB128-style varint: 7 bits per byte, high bit = continuation
    function write_varint(bytes, value) {
        while (value >= 0x80) {
            bytes.push((value & 0x7f) | 0x80)
            value >>= 7
        }
        bytes.push(value)
    }
    // length-prefixed UTF-8 string
    function write_string(byte_array, str) {
        let str_bytes = new TextEncoder().encode(str)
        write_varint(byte_array, str_bytes.length)
        byte_array.push(...str_bytes)
    }
    version = decode_version(version)
    parents = parents.map(decode_version)
    let bytes = []
    // magic bytes + format version
    bytes = bytes.concat(Array.from(new TextEncoder().encode("DMNDTYPS")))
    bytes.push(0)
    // fileinfo chunk (id 1) wrapping the agent-name table (id 3)
    let file_info = []
    let agent_names = []
    let agents = new Set()
    agents.add(version[0])
    for (let p of parents) if (p.length > 1) agents.add(p[0]) // skip ["root"]
    agents = [...agents]
    // console.log(JSON.stringify({ agents, parents }, null, 4));
    let agent_to_i = {}
    for (let [i, agent] of agents.entries()) {
        agent_to_i[agent] = i
        write_string(agent_names, agent)
    }
    file_info.push(3)
    write_varint(file_info, agent_names.length)
    file_info.push(...agent_names)
    bytes.push(1)
    write_varint(bytes, file_info.length)
    bytes.push(...file_info)
    // branch chunk (id 10) holding the frontier (id 12); empty for root ops
    let branch = []
    if (parents[0].length > 1) {
        let frontier = []
        for (let [i, [agent, seq]] of parents.entries()) {
            let has_more = i < parents.length - 1
            let mapped = agent_to_i[agent]
            let n = ((mapped + 1) << 1) | (has_more ? 1 : 0)
            write_varint(frontier, n)
            write_varint(frontier, seq)
        }
        branch.push(12)
        write_varint(branch, frontier.length)
        branch.push(...frontier)
    }
    bytes.push(10)
    write_varint(bytes, branch.length)
    bytes.push(...branch)
    // patches chunk (id 20); for inserts, first an inserted-content chunk (id 24)
    let patches = []
    if (ins) {
        let inserted_content_bytes = []
        inserted_content_bytes.push(0) // ins (not del, which is 1)
        inserted_content_bytes.push(13) // "content" enum (rather than compressed)
        let encoder = new TextEncoder()
        let utf8Bytes = encoder.encode(ins)
        inserted_content_bytes.push(1 + utf8Bytes.length) // length of content chunk
        inserted_content_bytes.push(4) // "plain text" enum
        for (let b of utf8Bytes) inserted_content_bytes.push(b) // actual text
        inserted_content_bytes.push(25) // "known" enum
        inserted_content_bytes.push(1) // length of "known" chunk
        inserted_content_bytes.push(3) // content of length 1, and we "know" it
        patches.push(24)
        write_varint(patches, inserted_content_bytes.length)
        patches.push(...inserted_content_bytes)
    }
    // write in the version (chunk id 21)
    let version_bytes = []
    let [agent, seq] = version
    let agent_i = agent_to_i[agent]
    let jump = seq
    write_varint(version_bytes, ((agent_i + 1) << 1) | (jump != 0 ? 1 : 0))
    write_varint(version_bytes, 1)
    if (jump) write_varint(version_bytes, jump << 1)
    patches.push(21)
    write_varint(patches, version_bytes.length)
    patches.push(...version_bytes)
    // write in "op" bytes (chunk id 22; some encoding of position)
    let op_bytes = []
    write_varint(op_bytes, (pos << 4) | (pos ? 2 : 0) | (ins ? 0 : 4))
    patches.push(22)
    write_varint(patches, op_bytes.length)
    patches.push(...op_bytes)
    // write in parents (chunk id 23)
    let parents_bytes = []
    write_varint(parents_bytes, 1)
    if (parents[0].length > 1) {
        for (let [i, [agent, seq]] of parents.entries()) {
            let has_more = i < parents.length - 1
            let agent_i = agent_to_i[agent]
            write_varint(parents_bytes, ((agent_i + 1) << 2) | (has_more ? 2 : 0) | 1)
            write_varint(parents_bytes, seq)
        }
    } else write_varint(parents_bytes, 1)
    patches.push(23)
    write_varint(patches, parents_bytes.length)
    patches.push(...parents_bytes)
    // write in patches
    bytes.push(20)
    write_varint(bytes, patches.length)
    bytes.push(...patches)
    // console.log(bytes);
    return bytes
}
// Map a remote frontier (array of "agent-seq" strings) onto the doc's
// local op indices. Returns a Uint32Array of local indices when every
// frontier version was found, otherwise false (i.e. we don't have all
// of those versions yet).
function OpLog_remote_to_local(doc, frontier) {
    const wanted = new Set(frontier)
    const local_version = []
    const [agents, versions, parentss] = parseDT([...doc.toBytes()])
    for (let i = 0; i < versions.length; i++) {
        const remote = doc.localToRemoteVersion([i])[0].join("-")
        if (wanted.has(remote)) local_version.push(i)
    }
    return frontier.length == local_version.length && new Uint32Array(local_version)
}
// Join an (agent, sequence-number) pair into the "agent-seq" string form.
function encode_version(agent, seq) {
    return `${agent}-${seq}`
}
// Split an "agent-seq" string back into [agent, seq] with seq as a number.
// A bare token with no "-" (e.g. "root") is returned as a one-element array.
function decode_version(v) {
    const parts = v.split("-")
    if (parts.length > 1) parts[1] = parseInt(parts[1])
    return parts
}
// Element-wise equality of two version arrays.
function v_eq(v1, v2) {
    if (v1.length != v2.length) return false
    for (let i = 0; i < v1.length; i++) {
        if (v1[i] != v2[i]) return false
    }
    return true
}
// Collect the transformed ops since local version `v` as braid text patches
// (inserts become zero-width ranges, deletes become empty-content ranges),
// then compose them into absolute-coordinate patches.
function get_xf_patches(doc, v) {
    let patches = []
    for (let xf of doc.xfSince(v)) {
        let is_insert = xf.kind == "Ins"
        patches.push({
            unit: "text",
            range: is_insert ? `[${xf.start}:${xf.start}]` : `[${xf.start}:${xf.end}]`,
            content: is_insert ? xf.content : "",
        })
    }
    return relative_to_absolute_patches(patches)
}
// Compose a sequence of patches whose ranges are relative (each patch is in
// the coordinates of the document *after* the preceding patches applied)
// into an equivalent list of patches in the coordinates of the original
// document, sorted by position.
// Implementation: an AVL tree of segments, where each node is either a run
// of untouched original text (content == null, size = run length) or a
// replacement (content = inserted text, del = original chars replaced).
// All positions and sizes are measured in code points.
function relative_to_absolute_patches(patches) {
    let avl = create_avl_tree((node) => {
        // keep left_size (sum of sizes in the left subtree) correct across rotations
        let parent = node.parent
        if (parent.left == node) {
            parent.left_size -= node.left_size + node.size
        } else {
            node.left_size += parent.left_size + parent.size
        }
    })
    avl.root.size = Infinity // root initially represents the entire document
    avl.root.left_size = 0
    // change a node's size and propagate the delta up through ancestors' left_size
    function resize(node, new_size) {
        if (node.size == new_size) return
        let delta = new_size - node.size
        node.size = new_size
        while (node.parent) {
            if (node.parent.left == node) node.parent.left_size += delta
            node = node.parent
        }
    }
    for (let p of patches) {
        let [start, end] = p.range.match(/\d+/g).map((x) => 1 * x)
        let del = end - start
        // descend to the segment containing `start` (boundary positions prefer
        // replacement nodes over untouched runs)
        let node = avl.root
        while (true) {
            if (start < node.left_size || (node.left && node.content == null && start == node.left_size)) {
                node = node.left
            } else if (start > node.left_size + node.size || (node.content == null && start == node.left_size + node.size)) {
                start -= node.left_size + node.size
                node = node.right
            } else {
                start -= node.left_size
                break
            }
        }
        // how far this patch extends past the end of `node`
        let remaining = start + del - node.size
        if (remaining < 0) {
            // patch is fully contained within this one segment
            if (node.content == null) {
                // split the untouched run, inserting a replacement node before it
                if (start > 0) {
                    let x = { size: 0, left_size: 0 }
                    avl.add(node, "left", x)
                    resize(x, start)
                }
                let x = { size: 0, left_size: 0, content: p.content, del }
                avl.add(node, "left", x)
                resize(x, count_code_points(x.content))
                resize(node, node.size - (start + del))
            } else {
                // edit within an existing replacement's content
                node.content = node.content.slice(0, codePoints_to_index(node.content, start)) + p.content + node.content.slice(codePoints_to_index(node.content, start + del))
                resize(node, count_code_points(node.content))
            }
        } else {
            // patch spans into following segments: consume fully-covered ones
            let next
            let middle_del = 0
            while (remaining >= (next = avl.next(node)).size) {
                remaining -= next.size
                middle_del += next.del ?? next.size
                resize(next, 0)
                avl.del(next)
            }
            if (node.content == null) {
                if (next.content == null) {
                    if (start == 0) {
                        // turn the whole starting run into a replacement
                        node.content = p.content
                        node.del = node.size + middle_del + remaining
                        resize(node, count_code_points(node.content))
                    } else {
                        // keep the run's prefix; insert a replacement after it
                        let x = {
                            size: 0,
                            left_size: 0,
                            content: p.content,
                            del: node.size - start + middle_del + remaining,
                        }
                        resize(node, start)
                        avl.add(node, "right", x)
                        resize(x, count_code_points(x.content))
                    }
                    resize(next, next.size - remaining)
                } else {
                    // merge into the following replacement node
                    next.del += node.size - start + middle_del
                    next.content = p.content + next.content.slice(codePoints_to_index(next.content, remaining))
                    resize(node, start)
                    if (node.size == 0) avl.del(node)
                    resize(next, count_code_points(next.content))
                }
            } else {
                if (next.content == null) {
                    // extend this replacement over the consumed middle
                    node.del += middle_del + remaining
                    node.content = node.content.slice(0, codePoints_to_index(node.content, start)) + p.content
                    resize(node, count_code_points(node.content))
                    resize(next, next.size - remaining)
                } else {
                    // merge this replacement, the consumed middle, and the next replacement
                    node.del += middle_del + next.del
                    node.content = node.content.slice(0, codePoints_to_index(node.content, start)) + p.content + next.content.slice(codePoints_to_index(next.content, remaining))
                    resize(node, count_code_points(node.content))
                    resize(next, 0)
                    avl.del(next)
                }
            }
        }
    }
    // walk the tree in order, emitting one absolute patch per replacement node
    let new_patches = []
    let offset = 0
    let node = avl.root
    while (node.left) node = node.left
    while (node) {
        if (node.content == null) {
            offset += node.size
        } else {
            new_patches.push({
                unit: patches[0].unit,
                range: `[${offset}:${offset + node.del}]`,
                content: node.content,
            })
            offset += node.del
        }
        node = avl.next(node)
    }
    return new_patches
}
// A small self-balancing (AVL) binary tree used by relative_to_absolute_patches.
// Nodes are caller-supplied objects; the tree maintains .parent/.left/.right
// links and .height. `on_rotate(node)` is called before each rotation so the
// caller can fix up its own augmented per-node data (here: left_size).
function create_avl_tree(on_rotate) {
    let self = { root: { height: 1 } }
    // recompute a node's height from its children
    self.calc_height = (node) => {
        node.height = 1 + Math.max(node.left?.height ?? 0, node.right?.height ?? 0)
    }
    // replace `child` with `new_child` in child's parent (or as the root)
    self.rechild = (child, new_child) => {
        if (child.parent) {
            if (child.parent.left == child) {
                child.parent.left = new_child
            } else {
                child.parent.right = new_child
            }
        } else {
            self.root = new_child
        }
        if (new_child) new_child.parent = child.parent
    }
    // rotate `node` up over its parent (works in both directions)
    self.rotate = (node) => {
        on_rotate(node)
        let parent = node.parent
        let left = parent.right == node ? "left" : "right"
        let right = parent.right == node ? "right" : "left"
        parent[right] = node[left]
        if (parent[right]) parent[right].parent = parent
        self.calc_height(parent)
        self.rechild(parent, node)
        parent.parent = node
        node[left] = parent
    }
    // restore the AVL balance invariant from `node` up to the root
    self.fix_avl = (node) => {
        self.calc_height(node)
        let diff = (node.right?.height ?? 0) - (node.left?.height ?? 0)
        if (Math.abs(diff) >= 2) {
            if (diff > 0) {
                // right-heavy: pre-rotate for the right-left case, then rotate right child up
                if ((node.right.left?.height ?? 0) > (node.right.right?.height ?? 0)) self.rotate(node.right.left)
                self.rotate((node = node.right))
            } else {
                // left-heavy: mirror image
                if ((node.left.right?.height ?? 0) > (node.left.left?.height ?? 0)) self.rotate(node.left.right)
                self.rotate((node = node.left))
            }
            self.fix_avl(node)
        } else if (node.parent) self.fix_avl(node.parent)
    }
    // insert `add_me` immediately before (side == "left") or after
    // (side == "right") `node` in the tree's in-order sequence
    self.add = (node, side, add_me) => {
        let other_side = side == "left" ? "right" : "left"
        add_me.height = 1
        if (node[side]) {
            node = node[side]
            while (node[other_side]) node = node[other_side]
            node[other_side] = add_me
        } else {
            node[side] = add_me
        }
        add_me.parent = node
        self.fix_avl(node)
    }
    // remove `node` from the tree
    self.del = (node) => {
        if (node.left && node.right) {
            // two children: splice the in-order successor into node's place
            let cursor = node.right
            while (cursor.left) cursor = cursor.left
            cursor.left = node.left
            // breaks abstraction: fixes up the caller's left_size augmentation directly
            cursor.left_size = node.left_size
            let y = cursor
            while (y.parent != node) {
                y = y.parent
                y.left_size -= cursor.size
            }
            node.left.parent = cursor
            if (cursor == node.right) {
                self.rechild(node, cursor)
                self.fix_avl(cursor)
            } else {
                let x = cursor.parent
                self.rechild(cursor, cursor.right)
                cursor.right = node.right
                node.right.parent = cursor
                self.rechild(node, cursor)
                self.fix_avl(x)
            }
        } else {
            // zero or one child: replace node with that child (or nothing)
            self.rechild(node, node.left || node.right || null)
            if (node.parent) self.fix_avl(node.parent)
        }
    }
    // in-order successor, or undefined when `node` is the last node
    self.next = (node) => {
        if (node.right) {
            node = node.right
            while (node.left) node = node.left
            return node
        } else {
            while (node.parent && node.parent.right == node) node = node.parent
            return node.parent
        }
    }
    return self
}
// Count Unicode code points in a JS string (surrogate pairs count as one).
function count_code_points(str) {
    let total = 0;
    let i = 0;
    while (i < str.length) {
        // a high surrogate leads a 2-unit pair; skip its trailing unit
        const unit = str.charCodeAt(i);
        i += unit >= 0xD800 && unit <= 0xDBFF ? 2 : 1;
        total++;
    }
    return total;
}
// Convert a JS string index (UTF-16 units) into a code-point offset.
function index_to_codePoints(str, index) {
    let c = 0
    for (let i = 0; i < index && i < str.length; c++) {
        const unit = str.charCodeAt(i)
        i += unit >= 0xd800 && unit <= 0xdbff ? 2 : 1
    }
    return c
}
// Convert a code-point offset into a JS string index (UTF-16 units).
function codePoints_to_index(str, codePoints) {
    let i = 0
    for (let c = 0; c < codePoints && i < str.length; c++) {
        const unit = str.charCodeAt(i)
        i += unit >= 0xd800 && unit <= 0xdbff ? 2 : 1
    }
    return i
}
// CommonJS export: the braid request handler defined above.
module.exports = { simple_d_ton }
================================================
FILE: simple_d_ton/package.json
================================================
{
"name": "simple_d_ton",
"version": "0.0.24",
"description": "Serve diamond-types and simpleton requests.",
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org",
"main": "./index.js",
"dependencies": {
"diamond-types-node": "^1.0.2",
"braid-http": "^0.3.14"
}
}
================================================
FILE: simpleton/client.js
================================================
// requires braid-http@0.3.14
//
// url: simpleton resource endpoint
//
// apply_remote_update: ({patches, state}) => {...}
// this is for incoming changes;
// one of these will be non-null,
// and can be applied to the current state.
//
// generate_local_diff_update: (prev_state) => {...}
// this is to generate outgoing changes,
// and if there are changes, returns { patches, state }
//
// content_type: overrides the Accept and Content-Type headers
//
// returns { changed: async () => {...} }
//   call this whenever local state may have changed;
//   it invokes generate_local_diff_update and sends any resulting patches.
//
// Keep local state synchronized with the simpleton resource at `url`.
// Subscribes for remote updates and pushes local diffs via PUT.
// See the header comment above for the option descriptions.
function simpleton_client(url, { apply_remote_update, generate_local_diff_update, content_type }) {
    var peer = Math.random().toString(36).substr(2) // random id for this client
    var current_version = []
    var prev_state = ""
    var char_counter = -1 // running count of char-edits, used to mint version seqs
    var outstanding_changes = 0
    var max_outstanding_changes = 10
    // subscribe for remote updates (auto-reconnecting wrapper)
    braid_fetch_wrapper(url, {
        headers: { "Merge-Type": "simpleton",
                   ...(content_type ? {Accept: content_type} : {}) },
        subscribe: true,
        retry: true,
        parents: () => current_version.length ? current_version : null,
        peer
    }).then(res =>
        res.subscribe(update => {
            // Only accept the update if its parents == our current version
            update.parents.sort()
            if (current_version.length === update.parents.length
                && current_version.every((v, i) => v === update.parents[i])) {
                current_version = update.version.sort()
                update.state = update.body
                if (update.patches) {
                    for (let p of update.patches) p.range = p.range.match(/\d+/g).map((x) => 1 * x)
                    update.patches.sort((a, b) => a.range[0] - b.range[0])
                    // convert from code-points to js-indices
                    let c = 0
                    let i = 0
                    for (let p of update.patches) {
                        while (c < p.range[0]) {
                            i += get_char_size(prev_state, i)
                            c++
                        }
                        p.range[0] = i
                        while (c < p.range[1]) {
                            i += get_char_size(prev_state, i)
                            c++
                        }
                        p.range[1] = i
                    }
                }
                prev_state = apply_remote_update(update)
            }
        })
    )
    return {
        changed: async () => {
            // simple backpressure: stop generating while too many PUTs are in flight
            if (outstanding_changes >= max_outstanding_changes) return
            while (true) {
                var update = generate_local_diff_update(prev_state)
                if (!update) return // Stop if there wasn't a change!
                var {patches, state} = update
                // convert from js-indices to code-points
                let c = 0
                let i = 0
                for (let p of patches) {
                    while (i < p.range[0]) {
                        i += get_char_size(prev_state, i)
                        c++
                    }
                    p.range[0] = c
                    while (i < p.range[1]) {
                        i += get_char_size(prev_state, i)
                        c++
                    }
                    p.range[1] = c
                    char_counter += p.range[1] - p.range[0]
                    char_counter += count_code_points(p.content)
                    p.unit = "text"
                    p.range = `[${p.range[0]}:${p.range[1]}]`
                }
                // mint a version whose seq encodes the cumulative char-edit count
                var version = [peer + "-" + char_counter]
                var parents = current_version
                current_version = version
                prev_state = state
                outstanding_changes++
                await braid_fetch_wrapper(url, {
                    headers: { "Merge-Type": "simpleton",
                               ...(content_type ? {"Content-Type": content_type} : {}) },
                    method: "PUT",
                    retry: true,
                    version, parents, patches,
                    peer
                })
                outstanding_changes--
            }
        }
    }
}
// Number of UTF-16 code units occupied by the code point starting at
// index i: 2 when it begins with a high surrogate, otherwise 1.
function get_char_size(s, i) {
    const unit = s.charCodeAt(i)
    if (unit >= 0xd800 && unit <= 0xdbff) return 2
    return 1
}
// Count Unicode code points in a JS string; a surrogate pair counts as one.
function count_code_points(str) {
    let n = 0
    for (let i = 0; i < str.length; n++) {
        // skip the low-surrogate half of a pair
        const unit = str.charCodeAt(i)
        i += unit >= 0xd800 && unit <= 0xdbff ? 2 : 1
    }
    return n
}
// Wrapper around braid_fetch that retries forever with exponential backoff
// (10ms doubling, capped at 3s).
// - subscribe mode: reconnects on any error, re-evaluating params.parents()
//   each time so reconnects resume from the latest version; returns
//   { subscribe(handler) } where the handler survives reconnects.
// - request mode: resolves only once a request returns status 200.
// Fix: throw Error objects (not bare strings) so stack traces and
// `e.stack` logging work.
async function braid_fetch_wrapper(url, params) {
    if (!params.retry) throw new Error("braid_fetch_wrapper requires params.retry")
    var waitTime = 10
    if (params.subscribe) {
        var subscribe_handler = null
        connect()
        async function connect() {
            try {
                // re-read parents() so each reconnect picks up the current version
                var c = await braid_fetch(url, { ...params, parents: params.parents?.() })
                c.subscribe((...args) => subscribe_handler?.(...args), on_error)
                waitTime = 10 // reset backoff after a successful connect
            } catch (e) {
                on_error(e)
            }
        }
        function on_error(e) {
            console.log('eee = ' + (e?.stack ?? e))
            setTimeout(connect, waitTime)
            waitTime = Math.min(waitTime * 2, 3000)
        }
        return {subscribe: handler => { subscribe_handler = handler }}
    } else {
        // resolves only on success; errors (including non-200) trigger a retry
        return new Promise((done) => {
            send()
            async function send() {
                try {
                    var res = await braid_fetch(url, params)
                    if (res.status !== 200) throw new Error("status not 200: " + res.status)
                    done(res)
                } catch (e) {
                    setTimeout(send, waitTime)
                    waitTime = Math.min(waitTime * 2, 3000)
                }
            }
        })
    }
}
================================================
FILE: simpleton/demo.js
================================================
// Demo HTTPS/HTTP2 server: serves the simpleton handler plus a "//time"
// endpoint reporting server time and sampled CPU load.
console.log("v9")
// Best-effort demo: log unexpected errors instead of crashing.
process.on("uncaughtException", (e) => console.log(e.stack))
process.on("unhandledRejection", (e) => console.log(e.stack))
// NOTE(review): requires "./simpleton_lib.js", but the sibling file in this
// directory is server.js — confirm the path matches the deployed layout.
let simpleton_handle = require("./simpleton_lib.js").handle
var port = 61870
// Updated once a second below; exposed by the "//time" endpoint.
let cpu_usage = 0
if (true) {
    // NOTE(review): installs a dependency at startup via execSync — handy
    // for a demo box, but a surprising side effect elsewhere.
    require("child_process").execSync(`npm install os-utils`, {
        stdio: "inherit",
    })
    var os = require("os-utils")
    os.cpuUsage((x) => (cpu_usage = x))
    setInterval(() => {
        os.cpuUsage((x) => (cpu_usage = x))
    }, 1000)
}
const server = require("http2").createSecureServer(
    {
        key: require("fs").readFileSync("./privkey.pem"),
        cert: require("fs").readFileSync("./fullchain.pem"),
        allowHTTP1: true, // also accept plain HTTP/1.1 clients
    },
    async (req, res) => {
        // "//time" is polled frequently — keep it out of the logs.
        let silent = req.url == "//time"
        if (!silent)
            console.log(
                // NOTE(review): Node lowercases incoming header names, so
                // only the "version" lookup can match; the "Version" lookup
                // appears dead — confirm before removing.
                `${req.method} ${req.url} v:${
                    req.headers["Version"] || req.headers["version"] || ""
                }`
            )
        // Wide-open CORS for the demo client.
        res.setHeader("Access-Control-Allow-Origin", "*")
        res.setHeader("Access-Control-Allow-Methods", "*")
        res.setHeader("Access-Control-Allow-Headers", "*")
        res.statusCode = 200
        if (!silent) console.log("req.headers: " + JSON.stringify(req.headers))
        if (req.method == "OPTIONS") {
            return res.end("ok")
        }
        if (req.method == "GET" && req.url == `//time`) {
            res.setHeader("Content-Type", "application/json")
            return res.end(JSON.stringify({ time: Date.now(), cpu_usage }))
        }
        // NOTE(review): called as (url, req, res); server.js's handle takes
        // (req, res, options) — verify simpleton_lib.js's signature.
        return simpleton_handle(req.url, req, res)
    }
)
server.listen(port, () => {
    console.log(`server started on port ${port}`)
})
================================================
FILE: simpleton/index.js
================================================
// CommonJS entry point for the simpleton package: re-export the client
// constructor and the server request handler.
const { create_simpleton_client } = require('./client')
const { handle } = require('./server')

module.exports = { create_simpleton_client, handle }
================================================
FILE: simpleton/index.mjs
================================================
// ESM entry point: re-export the simpleton client and server APIs from
// their CommonJS implementations, as both named and default exports.
import client from './client.js'
import server from './server.js'

const { create_simpleton_client } = client
const { handle } = server

export { create_simpleton_client, handle }
export default { create_simpleton_client, handle }
================================================
FILE: simpleton/package.json
================================================
{
"name": "simpleton_braid",
"version": "0.2.2",
"description": "An implementation of the simpleton protocol for Node.js and Browsers",
"author": "Braid Working Group",
"repository": "braid-org/braidjs",
"homepage": "https://braid.org",
"files": [
"client.js",
"server.js",
"index.js",
"index.mjs"
],
"main": "./index.js",
"exports": {
"require": "./index.js",
"import": "./index.mjs"
},
"dependencies": {
"diamond-types-node": "^1.0.2",
"braid-http": "^0.3.3"
}
}
================================================
FILE: simpleton/server.js
================================================
console.log("simpleton.js: v163")
// NOTE(review): only Doc is used in this file; Branch and OpLog appear
// unused — confirm before removing them from the destructuring.
let { Doc, Branch, OpLog } = require("diamond-types-node")
let braidify = require("braid-http").http_server
let fs = require("fs")
// Global PUT back-pressure: count of queued PUTs, and a promise chain
// that serializes the order in which PUTs are applied.
let waiting_puts = 0
let prev_put_p = null
// Handle one HTTP request for a simpleton-merged text resource.
//
// req, res:          Node request/response (http, https, or http2).
// options.db_folder: directory for on-disk persistence; null = memory only.
// options.key:       resource key; defaults to the URL path (query stripped).
//
// Methods served: OPTIONS (CORS preflight), DELETE, GET/HEAD (plain
// snapshot or braid subscription), and PUT/POST/PATCH (apply patches,
// then rebroadcast rebased updates to subscribers).
async function handle(req, res, options = {}) {
    options = {
        db_folder: null, // Default db_folder
        key: req.url.split('?')[0], // Default key
        ...options // Override with all options passed in
    }
    let start_time = Date.now()
    let resource = await get_resource(options.key, options.db_folder)
    braidify(req, res)
    let peer = req.headers["peer"]
    res.my_peer = peer
    res.setHeader("Access-Control-Allow-Origin", "*")
    res.setHeader("Access-Control-Allow-Methods", "*")
    res.setHeader("Access-Control-Allow-Headers", "*")
    function my_end(statusCode, x) {
        res.statusCode = statusCode
        res.end(x)
    }
    if (req.method == "OPTIONS") return my_end(200)
    if (req.method == "DELETE") {
        await resource.delete_me()
        return my_end(200, "")
    }
    if ((req.method == "GET" || req.method == "HEAD") && req.subscribe) {
        res.setHeader("Content-Type", "text/plain")
        res.setHeader("Editable", "true")
        res.setHeader("Merge-Type", "simpleton")
        if (req.method == "HEAD") return my_end(200)
        res.startSubscription({
            onClose: (_) => resource.clients.delete(res),
        })
        let version = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
        let x = { version }
        if (!req.parents && !req.version) {
            // Fresh subscriber: send the full current state.
            x.parents = []
            x.body = resource.doc.get()
            res.sendVersion(x)
        } else {
            x.parents = req.version ? req.version : req.parents
            res.my_last_seen_version = x.parents
            // only send them a version from these parents if we have these parents (otherwise we'll assume these parents are more recent, probably versions they created but haven't sent us yet, and we'll send them appropriate rebased updates when they send us these versions)
            let local_version = OpLog_remote_to_local(resource.doc, x.parents)
            if (local_version) {
                x.patches = get_xf_patches(resource.doc, local_version)
                res.sendVersion(x)
            }
        }
        res.my_last_sent_version = version
        resource.clients.add(res)
        return
    }
    if ((req.method == "GET" || req.method == "HEAD") && !req.subscribe) {
        // Plain snapshot GET.
        res.setHeader("Accept-Subscribe", "true")
        let doc = resource.doc
        const buffer = Buffer.from(doc.get(), "utf8")
        res.setHeader("Content-Type", "text/plain")
        res.setHeader("Content-Length", buffer.length)
        res.setHeader(
            "Version",
            doc
                .getRemoteVersion()
                .map((x) => encode_version(...x))
                .map((x) => JSON.stringify(x))
                .join(", ")
        )
        if (req.method == "HEAD") return my_end(200)
        return my_end(200, buffer)
    }
    if (req.method == "PUT" || req.method == "POST" || req.method == "PATCH") {
        let wait_time = 0
        // Back-pressure: refuse new writes past 100 queued PUTs.
        if (waiting_puts >= 100) {
            console.log(`The server is busy.`)
            return my_end(503, "The server is busy.")
        }
        waiting_puts++
        console.log(`waiting_puts(after++) = ${waiting_puts}`)
        // PUTs are applied strictly one at a time: chain this request's
        // completion promise behind the previous PUT's.
        let my_prev_put_p = prev_put_p
        let done_my_turn = null
        prev_put_p = new Promise(
            (done) =>
                (done_my_turn = (statusCode, x) => {
                    waiting_puts--
                    console.log(`waiting_puts(after--) = ${waiting_puts}`)
                    // NOTE(review): when x is a string primitive these two
                    // assignments are silent no-ops (sloppy mode) — the
                    // timing data never reaches the client. Confirm intent.
                    x.wait_time = wait_time
                    x.server_time_taken = Date.now() - start_time
                    my_end(statusCode, x)
                    done()
                })
        )
        let patches = await req.patches()
        await my_prev_put_p
        wait_time = Date.now() - start_time
        start_time = Date.now()
        // Parse "[start:end]" ranges into numeric [start, end] pairs.
        patches = patches.map((p) => ({
            ...p,
            range: p.range.match(/\d+/g).map((x) => parseInt(x)),
        }))
        let og_v = req.version[0]
        // reduce the version sequence by the number of char-edits
        let v = decode_version(og_v)
        v = encode_version(v[0], v[1] + 1 - patches.reduce((a, b) => a + b.content.length + (b.range[1] - b.range[0]), 0))
        let ps = req.parents
        if (!ps?.length) ps = ["root"]
        let v_before = resource.doc.getLocalVersion()
        let parents = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
        // Expand each patch into one single-character DT op at a time,
        // each op parented on the previous one.
        let bytes = []
        let offset = 0
        for (let p of patches) {
            // delete
            for (let i = p.range[0]; i < p.range[1]; i++) {
                bytes.push(OpLog_create_bytes(v, ps, p.range[1] - 1 + offset, null))
                offset--
                ps = [v]
                v = decode_version(v)
                v = encode_version(v[0], v[1] + 1)
            }
            // insert
            // BUGFIX: was `i < p.content?.length ?? 0`, which parses as
            // `(i < p.content?.length) ?? 0`, making the `?? 0` dead code;
            // parenthesize so the fallback applies to the length.
            for (let i = 0; i < (p.content?.length ?? 0); i++) {
                let c = p.content[i]
                bytes.push(OpLog_create_bytes(v, ps, p.range[1] + offset, c))
                offset++
                ps = [v]
                v = decode_version(v)
                v = encode_version(v[0], v[1] + 1)
            }
        }
        try {
            for (let b of bytes) resource.doc.mergeBytes(b)
        } catch (e) {
            console.log(`EEE= ${e}:${e.stack}`)
            // we couldn't apply the version, presumably because we're missing its parents.
            // we want to send a 4XX error, so the client will resend this request later,
            // hopefully after we've received the necessary parents.
            // here are some 4XX error code options..
            //
            // - 425 Too Early
            //     - pros: our message is too early
            //     - cons: associated with some "Early-Data" http thing, which we're not using
            // - 400 Bad Request
            //     - pros: pretty generic
            //     - cons: implies client shouldn't resend as-is
            // - 409 Conflict
            //     - pros: doesn't imply modifications needed
            //     - cons: the message is not conflicting with anything
            // - 412 Precondition Failed
            //     - pros: kindof true.. the precondition of having another version has failed..
            //     - cons: not strictly true, as this code is associated with http's If-Unmodified-Since stuff
            // - 422 Unprocessable Content
            //     - pros: it's true
            //     - cons: implies client shouldn't resend as-is (at least, it says that here: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/422)
            // - 428 Precondition Required
            //     - pros: the name sounds right
            //     - cons: typically implies that the request was missing an http conditional field like If-Match. that is to say, it implies that the request is missing a precondition, not that the server is missing a precondition
            return done_my_turn(425, "The server is missing the parents of this version.")
        }
        resource.need_defrag = true
        let v_after = resource.doc.getLocalVersion()
        if (JSON.stringify(v_before) === JSON.stringify(v_after)) {
            console.log(`we got a version we already had: ${v_before}`)
            return done_my_turn(200, "")
        }
        // Persist the delta, then compute rebased patches for broadcast.
        await resource.db_delta(resource.doc.getPatchSince(v_before))
        patches = get_xf_patches(resource.doc, v_before)
        console.log(JSON.stringify({ patches }))
        let version = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
        for (let client of resource.clients) {
            if (client.my_peer == peer) {
                client.my_last_seen_version = [og_v]
            }
            // (Re)arm this client's rebase timer; the delay grows with the
            // number of its versions we couldn't use directly.
            function set_timeout(time_override) {
                if (client.my_timeout) clearTimeout(client.my_timeout)
                client.my_timeout = setTimeout(() => {
                    let version = resource.doc.getRemoteVersion().map((x) => encode_version(...x))
                    let x = { version }
                    x.parents = client.my_last_seen_version
                    console.log("rebasing after timeout.. ")
                    console.log(" client.my_unused_version_count = " + client.my_unused_version_count)
                    x.patches = get_xf_patches(resource.doc, OpLog_remote_to_local(resource.doc, client.my_last_seen_version))
                    console.log(`sending from rebase: ${JSON.stringify(x)}`)
                    client.sendVersion(x)
                    client.my_last_sent_version = x.version
                    delete client.my_timeout
                }, time_override ?? Math.min(3000, 23 * Math.pow(1.5, client.my_unused_version_count - 1)))
            }
            if (client.my_timeout) {
                // BUGFIX: this condition previously read `client.my_peer ==
                // m.peer`, but no `m` exists in this scope, so it threw a
                // ReferenceError whenever any client had a pending rebase
                // timeout. The sender's peer id is `peer` (as used above).
                if (client.my_peer == peer) {
                    if (!v_eq(client.my_last_sent_version, req.parents)) {
                        // note: we don't add to client.my_unused_version_count,
                        // because we're already in a timeout;
                        // we'll just extend it here..
                        set_timeout()
                    } else {
                        // hm.. it appears we got a correctly parented version,
                        // which suggests that maybe we can stop the timeout early
                        set_timeout(0)
                    }
                }
                continue
            }
            let x = { version }
            if (client.my_peer == peer) {
                if (!v_eq(client.my_last_sent_version, req.parents)) {
                    client.my_unused_version_count = (client.my_unused_version_count ?? 0) + 1
                    set_timeout()
                    continue
                } else {
                    delete client.my_unused_version_count
                }
                x.parents = req.version
                if (!v_eq(version, req.version)) {
                    console.log("rebasing..")
                    x.patches = get_xf_patches(resource.doc, OpLog_remote_to_local(resource.doc, [og_v]))
                } else {
                    // this client already has this version,
                    // so let's pretend to send it back, but not
                    console.log(`not reflecting back to simpleton`)
                    client.my_last_sent_version = x.version
                    continue
                }
            } else {
                x.parents = parents
                x.patches = patches
            }
            console.log(`sending: ${JSON.stringify(x)}`)
            client.sendVersion(x)
            client.my_last_sent_version = x.version
        }
        return done_my_turn(200, "")
    }
    throw new Error("unknown")
}
// Memoized per-key resource: a diamond-types Doc plus its subscriber set
// and on-disk persistence hooks. Subsequent calls with the same key
// return the cached object.
async function get_resource(key, db_folder) {
    const cache = get_resource.cache || (get_resource.cache = {})
    if (cache[key]) return cache[key]

    const resource = {
        clients: new Set(),
        simpleton_clients: new Set(),
        doc: new Doc("server"),
    }

    // With a db_folder we replay existing deltas from disk and append new
    // ones; without it, persistence is a no-op.
    const { change, delete_me } = db_folder
        ? await file_sync(
              db_folder,
              encodeURIComponent(key),
              (bytes) => resource.doc.mergeBytes(bytes),
              () => resource.doc.toBytes()
          )
        : { change: () => { }, delete_me: () => { } }
    resource.db_delta = change

    // Compact the freshly-loaded doc.
    resource.doc = defrag_dt(resource.doc)
    resource.need_defrag = false
    resource.delete_me = () => {
        delete_me()
        delete cache[key]
    }
    return (cache[key] = resource)
}
// Persist a stream of byte deltas for one resource as length-prefixed
// chunks in numbered files "<filename_base>.<n>" under db_folder.
//
// On startup, replays the newest readable file through process_delta,
// deleting newer-numbered leftovers and any corrupt files. Returns:
//   change(bytes): append one delta; once the file grows past ~10x the
//                  first chunk's size, start a fresh file seeded with
//                  get_init() and remove the old one.
//   delete_me():   remove every file belonging to this resource.
async function file_sync(db_folder, filename_base, process_delta, get_init) {
    let currentNumber = 0
    let currentSize = 0
    let threshold = 0
    // Ensure the existence of db_folder
    try {
        await fs.promises.access(db_folder);
    } catch (err) {
        if (err.code === 'ENOENT') {
            await fs.promises.mkdir(db_folder, { recursive: true });
        } else {
            throw err;
        }
    }
    // Read existing files and sort by numbers.
    async function get_sorted_files() {
        // Escape non-alphanumerics so the base name matches literally,
        // then require a ".<digits>" suffix.
        let re = new RegExp("^" + filename_base.replace(/[^a-zA-Z0-9]/g, "\\$&") + "\\.\\d+$")
        return (await fs.promises.readdir(db_folder))
            .filter((a) => re.test(a))
            .sort((a, b) => parseInt(a.match(/\d+$/)[0]) - parseInt(b.match(/\d+$/)[0]))
            .map((a) => `${db_folder}/${a}`)
    }
    const files = await get_sorted_files()
    // Try to process files starting from the highest number.
    let done = false
    for (let i = files.length - 1; i >= 0; i--) {
        if (done) {
            // Older file we no longer need — clean it up.
            await fs.promises.unlink(files[i])
            continue
        }
        try {
            const filename = files[i]
            // BUGFIX: this log previously printed the literal text
            // "$(unknown)" instead of interpolating the filename.
            console.log(`trying to process file: ${filename}`)
            const data = await fs.promises.readFile(filename)
            let cursor = 0
            let isFirstChunk = true
            while (cursor < data.length) {
                // Each chunk: little-endian uint32 length, then payload.
                const chunkSize = data.readUInt32LE(cursor)
                cursor += 4
                const chunk = data.slice(cursor, cursor + chunkSize)
                cursor += chunkSize
                if (isFirstChunk) {
                    isFirstChunk = false
                    // Roll to a new file once we exceed 10x the snapshot size.
                    threshold = chunkSize * 10
                }
                process_delta(chunk)
            }
            currentSize = data.length
            currentNumber = parseInt(filename.match(/\d+$/)[0])
            done = true
        } catch (error) {
            // Unreadable/corrupt: drop it and fall back to the next-oldest.
            console.error(`Error processing file: ${files[i]}`)
            await fs.promises.unlink(files[i])
        }
    }
    return {
        change: async (bytes) => {
            currentSize += bytes.length + 4 // we account for the extra 4 bytes for uint32
            const filename = `${db_folder}/${filename_base}.${currentNumber}`
            if (currentSize < threshold) {
                console.log(`appending to db..`)
                let buffer = Buffer.allocUnsafe(4)
                buffer.writeUInt32LE(bytes.length, 0)
                await fs.promises.appendFile(filename, buffer)
                await fs.promises.appendFile(filename, bytes)
                console.log("wrote to : " + filename)
            } else {
                try {
                    // Start a fresh file seeded with a full snapshot; the
                    // delta itself is subsumed by get_init()'s state.
                    console.log(`starting new db..`)
                    currentNumber++
                    const init = get_init()
                    const buffer = Buffer.allocUnsafe(4)
                    buffer.writeUInt32LE(init.length, 0)
                    const newFilename = `${db_folder}/${filename_base}.${currentNumber}`
                    await fs.promises.writeFile(newFilename, buffer)
                    await fs.promises.appendFile(newFilename, init)
                    console.log("wrote to : " + newFilename)
                    currentSize = 4 + init.length
                    threshold = currentSize * 10
                    try {
                        // Old file may not exist (e.g. very first write).
                        await fs.promises.unlink(filename)
                    } catch (e) { }
                } catch (e) {
                    console.log(`e = ${e.stack}`)
                }
            }
        },
        delete_me: async () => {
            await Promise.all(
                (
                    await get_sorted_files()
                ).map((file) => {
                    return new Promise((resolve, reject) => {
                        fs.unlink(file, (err) => {
                            if (err) {
                                console.error(`Error deleting file: ${file}`)
                                reject(err)
                            } else {
                                console.log(`Deleted file: ${file}`)
                                resolve()
                            }
                        })
                    })
                })
            )
        },
    }
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// Produce a compacted copy of a diamond-types doc by round-tripping it
// through its byte encoding.
function defrag_dt(doc) {
    const compacted = new Doc("server")
    compacted.mergeBytes(doc.toBytes())
    return compacted
}
// Parse a diamond-types byte dump into version-graph metadata.
// DESTRUCTIVE: consumes byte_array via shift/splice.
//
// Returns [agents, versions, parentss]: agents is the agent-name table,
// versions[i] = [agent, seq] for local version index i, and parentss[i]
// lists version i's parents (["root"] for none). Chunk-id meanings below
// are inferred from this code and the DT format — confirm against the
// diamond-types source if extending.
function parseDT(byte_array) {
    // File header: "DMNDTYPS" magic followed by format version 0.
    if (new TextDecoder().decode(new Uint8Array(byte_array.splice(0, 8))) !== "DMNDTYPS") throw new Error("dt parse error, expected DMNDTYPS")
    if (byte_array.shift() != 0) throw new Error("dt parse error, expected version 0")
    let agents = []
    let versions = []
    let parentss = []
    // Chunk stream: each chunk is an id byte plus a varint payload length.
    while (byte_array.length) {
        let id = byte_array.shift()
        let len = read_varint(byte_array)
        if (id == 1) {
            // container chunk: its children are parsed by this same loop
        } else if (id == 3) {
            // agent names: a run of length-prefixed strings
            let goal = byte_array.length - len
            while (byte_array.length > goal) {
                agents.push(read_string(byte_array))
            }
        } else if (id == 20) {
            // container chunk for the op data below
        } else if (id == 21) {
            // version runs: run-length-encoded (agent, seq) assignments
            let seqs = {}
            let goal = byte_array.length - len
            while (byte_array.length > goal) {
                let part0 = read_varint(byte_array)
                let has_jump = part0 & 1
                let agent_i = (part0 >> 1) - 1
                let run_length = read_varint(byte_array)
                let jump = 0
                if (has_jump) {
                    // signed delta from the agent's running seq counter
                    let part2 = read_varint(byte_array)
                    jump = part2 >> 1
                    if (part2 & 1) jump *= -1
                }
                let base = (seqs[agent_i] || 0) + jump
                for (let i = 0; i < run_length; i++) {
                    versions.push([agents[agent_i], base + i])
                }
                seqs[agent_i] = base + run_length
            }
        } else if (id == 23) {
            // parents: one entry per run of consecutive versions
            let count = 0
            let goal = byte_array.length - len
            while (byte_array.length > goal) {
                let run_len = read_varint(byte_array)
                let parents = []
                let has_more = 1
                while (has_more) {
                    let x = read_varint(byte_array)
                    let is_foreign = 0x1 & x
                    has_more = 0x2 & x
                    let num = x >> 2
                    if (x == 1) {
                        // the distinguished "root" (no parent)
                        parents.push(["root"])
                    } else if (!is_foreign) {
                        // local back-reference: `num` versions before here
                        parents.push(versions[count - num])
                    } else {
                        // foreign reference: explicit (agent, seq) pair
                        parents.push([agents[num - 1], read_varint(byte_array)])
                    }
                }
                parentss.push(parents)
                count++
                // Within a run, each later version's parent is its predecessor.
                for (let i = 0; i < run_len - 1; i++) {
                    parentss.push([versions[count - 1]])
                    count++
                }
            }
        } else {
            // unknown/unneeded chunk: skip its payload
            byte_array.splice(0, len)
        }
    }
    // Read a varint-length-prefixed UTF-8 string.
    function read_string(byte_array) {
        return new TextDecoder().decode(new Uint8Array(byte_array.splice(0, read_varint(byte_array))))
    }
    // Little-endian base-128 varint (7 data bits per byte, msb = continue).
    function read_varint(byte_array) {
        let result = 0
        let shift = 0
        while (true) {
            if (byte_array.length === 0)
                throw new Error("byte array does not contain varint")
            let byte_val = byte_array.shift()
            result |= (byte_val & 0x7f) << shift
            if ((byte_val & 0x80) == 0) return result
            shift += 7
        }
    }
    return [agents, versions, parentss]
}
// Serialize ONE single-character operation — an insert of `ins` at `pos`,
// or (when ins is null) a delete at `pos` — as a standalone diamond-types
// byte blob suitable for Doc.mergeBytes.
//
// version: "agent-seq" string identifying the new op.
// parents: array of "agent-seq" strings, or ["root"] for no parents.
// pos:     edit position in the document.
// ins:     inserted character, or null/undefined for a delete.
// Returns a plain array of byte values.
function OpLog_create_bytes(version, parents, pos, ins) {
    // Little-endian base-128 varint writer (msb = continuation bit).
    function write_varint(bytes, value) {
        while (value >= 0x80) {
            bytes.push((value & 0x7f) | 0x80)
            value >>= 7
        }
        bytes.push(value)
    }
    // varint length prefix + UTF-8 bytes
    function write_string(byte_array, str) {
        let str_bytes = new TextEncoder().encode(str)
        write_varint(byte_array, str_bytes.length)
        byte_array.push(...str_bytes)
    }
    version = decode_version(version)
    parents = parents.map(decode_version)
    // File header: "DMNDTYPS" magic + format version 0.
    let bytes = []
    bytes = bytes.concat(Array.from(new TextEncoder().encode("DMNDTYPS")))
    bytes.push(0)
    // Chunk 1 (file info) wrapping chunk 3 (agent-name table).
    let file_info = []
    let agent_names = []
    let agents = new Set()
    agents.add(version[0])
    // parents of length 1 are the ["root"] sentinel — no agent to record.
    for (let p of parents) if (p.length > 1) agents.add(p[0])
    agents = [...agents]
    // console.log(JSON.stringify({ agents, parents }, null, 4));
    let agent_to_i = {}
    for (let [i, agent] of agents.entries()) {
        agent_to_i[agent] = i
        write_string(agent_names, agent)
    }
    file_info.push(3)
    write_varint(file_info, agent_names.length)
    file_info.push(...agent_names)
    bytes.push(1)
    write_varint(bytes, file_info.length)
    bytes.push(...file_info)
    // Chunk 10 (branch) wrapping chunk 12 (frontier); empty when the only
    // parent is "root".
    let branch = []
    if (parents[0]?.length > 1) {
        let frontier = []
        for (let [i, [agent, seq]] of parents.entries()) {
            // low bit flags "more entries follow"; agent index is 1-based
            let has_more = i < parents.length - 1
            let mapped = agent_to_i[agent]
            let n = ((mapped + 1) << 1) | (has_more ? 1 : 0)
            write_varint(frontier, n)
            write_varint(frontier, seq)
        }
        branch.push(12)
        write_varint(branch, frontier.length)
        branch.push(...frontier)
    }
    bytes.push(10)
    write_varint(bytes, branch.length)
    bytes.push(...branch)
    // Chunk 20 (patches), containing: content (24), version (21),
    // op position (22), and parents (23).
    let patches = []
    if (ins) {
        let inserted_content_bytes = []
        inserted_content_bytes.push(0) // ins (not del, which is 1)
        inserted_content_bytes.push(13) // "content" enum (rather than compressed)
        let encoder = new TextEncoder()
        let utf8Bytes = encoder.encode(ins)
        inserted_content_bytes.push(1 + utf8Bytes.length) // length of content chunk
        inserted_content_bytes.push(4) // "plain text" enum
        for (let b of utf8Bytes) inserted_content_bytes.push(b) // actual text
        inserted_content_bytes.push(25) // "known" enum
        inserted_content_bytes.push(1) // length of "known" chunk
        inserted_content_bytes.push(3) // content of length 1, and we "know" it
        patches.push(24)
        write_varint(patches, inserted_content_bytes.length)
        patches.push(...inserted_content_bytes)
    }
    // write in the version
    let version_bytes = []
    let [agent, seq] = version
    let agent_i = agent_to_i[agent]
    let jump = seq
    write_varint(version_bytes, ((agent_i + 1) << 1) | (jump != 0 ? 1 : 0))
    write_varint(version_bytes, 1) // run length of 1 (single op)
    if (jump) write_varint(version_bytes, jump << 1)
    patches.push(21)
    write_varint(patches, version_bytes.length)
    patches.push(...version_bytes)
    // write in "op" bytes (some encoding of position)
    let op_bytes = []
    write_varint(op_bytes, (pos << 4) | (pos ? 2 : 0) | (ins ? 0 : 4))
    patches.push(22)
    write_varint(patches, op_bytes.length)
    patches.push(...op_bytes)
    // write in parents
    let parents_bytes = []
    write_varint(parents_bytes, 1) // run length of 1
    if (parents[0]?.length > 1) {
        for (let [i, [agent, seq]] of parents.entries()) {
            // bit0 = foreign (explicit agent+seq), bit1 = more follow
            let has_more = i < parents.length - 1
            let agent_i = agent_to_i[agent]
            write_varint(
                parents_bytes,
                ((agent_i + 1) << 2) | (has_more ? 2 : 0) | 1
            )
            write_varint(parents_bytes, seq)
        }
    } else write_varint(parents_bytes, 1) // the "root" parent sentinel
    patches.push(23)
    write_varint(patches, parents_bytes.length)
    patches.push(...parents_bytes)
    // write in patches
    bytes.push(20)
    write_varint(bytes, patches.length)
    bytes.push(...patches)
    // console.log(bytes);
    return bytes
}
// Map a remote frontier (array of "agent-seq" strings) to local version
// indices in doc. Returns a Uint32Array of local indices when EVERY
// frontier entry was found; otherwise returns false (some parent is
// unknown to us — callers treat that as "don't have these parents yet").
function OpLog_remote_to_local(doc, frontier) {
    let map = Object.fromEntries(frontier.map((x) => [x, true]))
    let local_version = []
    // parseDT is used only to learn how many local versions exist.
    let [agents, versions, parentss] = parseDT([...doc.toBytes()])
    for (let i = 0; i < versions.length; i++) {
        if (map[doc.localToRemoteVersion([i])[0].join("-")]) {
            local_version.push(i)
        }
    }
    return (
        frontier.length == local_version.length &&
        new Uint32Array(local_version)
    )
}
// Render an (agent, seq) pair in the "agent-seq" wire format.
function encode_version(agent, seq) {
    return `${agent}-${seq}`
}
// Parse an "agent-seq" string into [agent, seq:number]. A dashless token
// such as "root" comes back as a one-element array, untouched.
function decode_version(v) {
    const parts = v.split("-")
    if (parts.length > 1) parts[1] = parseInt(parts[1])
    return parts
}
// True iff two version arrays have the same length and loosely-equal
// elements at every position.
function v_eq(v1, v2) {
    if (v1.length != v2.length) return false
    for (let i = 0; i < v1.length; i++) {
        if (v1[i] != v2[i]) return false
    }
    return true
}
// Translate diamond-types' "transformed ops since local version v" into
// simpleton patch objects, then collapse them into a list of absolute,
// non-overlapping patches against the state at v.
function get_xf_patches(doc, v) {
    let patches = []
    for (let xf of doc.xfSince(v)) {
        const is_insert = xf.kind == "Ins"
        patches.push({
            unit: "text",
            range: is_insert
                ? `[${xf.start}:${xf.start}]`
                : `[${xf.start}:${xf.end}]`,
            content: is_insert ? xf.content : "",
        })
    }
    return relative_to_absolute_patches(patches)
}
// Compose a list of patches — each expressed against the document state
// produced by applying the previous ones — into an equivalent list of
// absolute, in-order, non-overlapping patches against the ORIGINAL state.
//
// Implementation: an order-statistics AVL tree over document spans.
// Untouched spans are nodes with content == null and a `size`; edited
// spans carry replacement `content` plus `del` (how many original
// characters they replace). Each node's `left_size` caches the total
// size of its left subtree so positions resolve in O(log n).
function relative_to_absolute_patches(patches) {
    let avl = create_avl_tree((node) => {
        // Rotation hook: keep left_size consistent as parent/child swap.
        let parent = node.parent
        if (parent.left == node) {
            parent.left_size -= node.left_size + node.size
        } else {
            node.left_size += parent.left_size + parent.size
        }
    })
    // Start with a single infinite untouched span covering the document.
    avl.root.size = Infinity
    avl.root.left_size = 0
    // Set node.size, propagating the delta into ancestors' left_size.
    function resize(node, new_size) {
        if (node.size == new_size) return
        let delta = new_size - node.size
        node.size = new_size
        while (node.parent) {
            if (node.parent.left == node) node.parent.left_size += delta
            node = node.parent
        }
    }
    for (let p of patches) {
        let [start, end] = p.range.match(/\d+/g).map((x) => 1 * x)
        let del = end - start
        // Descend to the span containing `start` (current-document
        // coordinates), making `start` relative to that span. Edited
        // spans are preferred at boundaries so adjacent edits merge.
        let node = avl.root
        while (true) {
            if (
                start < node.left_size ||
                (node.left && node.content == null && start == node.left_size)
            ) {
                node = node.left
            } else if (
                start > node.left_size + node.size ||
                (node.content == null && start == node.left_size + node.size)
            ) {
                start -= node.left_size + node.size
                node = node.right
            } else {
                start -= node.left_size
                break
            }
        }
        // remaining < 0: the patch ends inside this span.
        let remaining = start + del - node.size
        if (remaining < 0) {
            if (node.content == null) {
                // Split the untouched span around a new edited node.
                if (start > 0) {
                    let x = { size: 0, left_size: 0 }
                    avl.add(node, "left", x)
                    resize(x, start)
                }
                let x = { size: 0, left_size: 0, content: p.content, del }
                avl.add(node, "left", x)
                resize(x, x.content.length)
                resize(node, node.size - (start + del))
            } else {
                // Edit inside an already-edited span: splice its content.
                node.content =
                    node.content.slice(0, start) +
                    p.content +
                    node.content.slice(start + del)
                resize(node, node.content.length)
            }
        } else {
            // The patch spills past this span: swallow whole middle spans,
            // then merge with the partially-covered `next` span.
            let next
            let middle_del = 0
            while (remaining >= (next = avl.next(node)).size) {
                remaining -= next.size
                middle_del += next.del ?? next.size
                resize(next, 0)
                avl.del(next)
            }
            if (node.content == null) {
                if (next.content == null) {
                    if (start == 0) {
                        node.content = p.content
                        node.del = node.size + middle_del + remaining
                        resize(node, node.content.length)
                    } else {
                        let x = {
                            size: 0,
                            left_size: 0,
                            content: p.content,
                            del: node.size - start + middle_del + remaining,
                        }
                        resize(node, start)
                        avl.add(node, "right", x)
                        resize(x, x.content.length)
                    }
                    resize(next, next.size - remaining)
                } else {
                    next.del += node.size - start + middle_del
                    next.content = p.content + next.content.slice(remaining)
                    resize(node, start)
                    if (node.size == 0) avl.del(node)
                    resize(next, next.content.length)
                }
            } else {
                if (next.content == null) {
                    node.del += middle_del + remaining
                    node.content = node.content.slice(0, start) + p.content
                    resize(node, node.content.length)
                    resize(next, next.size - remaining)
                } else {
                    // Both edited: merge next into node and drop next.
                    node.del += middle_del + next.del
                    node.content =
                        node.content.slice(0, start) +
                        p.content +
                        next.content.slice(remaining)
                    resize(node, node.content.length)
                    resize(next, 0)
                    avl.del(next)
                }
            }
        }
    }
    // In-order walk: untouched spans advance the offset, edited spans
    // become absolute patches at that offset.
    let new_patches = []
    let offset = 0
    let node = avl.root
    while (node.left) node = node.left
    while (node) {
        if (node.content == null) {
            offset += node.size
        } else {
            new_patches.push({
                unit: patches[0].unit,
                range: `[${offset}:${offset + node.del}]`,
                content: node.content,
            })
            offset += node.del
        }
        node = avl.next(node)
    }
    return new_patches
}
// Minimal AVL tree with parent pointers. Nodes are plain objects owned
// by the caller; the tree manages height, balance, and links only.
// on_rotate(node) fires before each rotation so the caller can repair
// auxiliary per-node data (left_size, in this file's usage).
function create_avl_tree(on_rotate) {
    let self = { root: { height: 1 } }
    // Recompute a node's height from its children.
    self.calc_height = (node) => {
        node.height =
            1 + Math.max(node.left?.height ?? 0, node.right?.height ?? 0)
    }
    // Replace `child` with `new_child` in child's parent (or at the root).
    self.rechild = (child, new_child) => {
        if (child.parent) {
            if (child.parent.left == child) {
                child.parent.left = new_child
            } else {
                child.parent.right = new_child
            }
        } else {
            self.root = new_child
        }
        if (new_child) new_child.parent = child.parent
    }
    // Rotate `node` up over its parent (direction inferred from which
    // side node is on).
    self.rotate = (node) => {
        on_rotate(node)
        let parent = node.parent
        let left = parent.right == node ? "left" : "right"
        let right = parent.right == node ? "right" : "left"
        parent[right] = node[left]
        if (parent[right]) parent[right].parent = parent
        self.calc_height(parent)
        self.rechild(parent, node)
        parent.parent = node
        node[left] = parent
    }
    // Restore the AVL invariant from `node` up to the root, rotating
    // (with a double-rotation when the inner grandchild is taller).
    self.fix_avl = (node) => {
        self.calc_height(node)
        let diff = (node.right?.height ?? 0) - (node.left?.height ?? 0)
        if (Math.abs(diff) >= 2) {
            if (diff > 0) {
                if (
                    (node.right.left?.height ?? 0) >
                    (node.right.right?.height ?? 0)
                )
                    self.rotate(node.right.left)
                self.rotate((node = node.right))
            } else {
                if (
                    (node.left.right?.height ?? 0) >
                    (node.left.left?.height ?? 0)
                )
                    self.rotate(node.left.right)
                self.rotate((node = node.left))
            }
            self.fix_avl(node)
        } else if (node.parent) self.fix_avl(node.parent)
    }
    // Insert add_me immediately before ("left") or after ("right") `node`
    // in in-order terms, then rebalance.
    self.add = (node, side, add_me) => {
        let other_side = side == "left" ? "right" : "left"
        add_me.height = 1
        if (node[side]) {
            node = node[side]
            while (node[other_side]) node = node[other_side]
            node[other_side] = add_me
        } else {
            node[side] = add_me
        }
        add_me.parent = node
        self.fix_avl(node)
    }
    // Remove `node`. With two children, splice in its in-order successor.
    self.del = (node) => {
        if (node.left && node.right) {
            let cursor = node.right
            while (cursor.left) cursor = cursor.left
            cursor.left = node.left
            // breaks abstraction
            cursor.left_size = node.left_size
            let y = cursor
            while (y.parent != node) {
                y = y.parent
                y.left_size -= cursor.size
            }
            node.left.parent = cursor
            if (cursor == node.right) {
                self.rechild(node, cursor)
                self.fix_avl(cursor)
            } else {
                let x = cursor.parent
                self.rechild(cursor, cursor.right)
                cursor.right = node.right
                node.right.parent = cursor
                self.rechild(node, cursor)
                self.fix_avl(x)
            }
        } else {
            // Zero or one child: splice the child (or null) into place.
            self.rechild(node, node.left || node.right || null)
            if (node.parent) self.fix_avl(node.parent)
        }
    }
    // In-order successor, or undefined/null at the rightmost node.
    self.next = (node) => {
        if (node.right) {
            node = node.right
            while (node.left) node = node.left
            return node
        } else {
            while (node.parent && node.parent.right == node) node = node.parent
            return node.parent
        }
    }
    return self
}
// Public API: the HTTP request handler.
module.exports = { handle }
================================================
FILE: sync9/old-vis/visualization.html
================================================
================================================
FILE: sync9/old-vis/visualization.js
================================================
module.exports = require.visualization = function create_vis(sim) {
var tau = Math.PI*2
var debug_frames = []
var add_frame = (f) => debug_frames && debug_frames.push(f)
var vis = {loop, add_frame}
var a = document.createElement('div')
a.style.display = 'grid'
a.style['grid-template-rows'] = '1fr 20px'
a.style.width = '100%'
a.style.height = '100%'
document.body.append(a)
var c = document.createElement('canvas')
c.width = 1000 * devicePixelRatio
c.height = (window.innerHeight - 20) * devicePixelRatio
c.style.width = (c.width / devicePixelRatio) + 'px'
c.style.height = (c.height / devicePixelRatio) + 'px'
var g = c.getContext('2d')
a.append(c)
// var top_part = document.createElement('div')
// a.append(top_part)
var slider = document.createElement('input')
slider.style.width = '50%'
slider.setAttribute('type', 'range')
slider.setAttribute('min', '0')
slider.setAttribute('max', debug_frames.length - 1)
slider.setAttribute('value', debug_frames.length - 1)
slider.oninput = () => {
is_on = false
draw_frame(1*slider.value, 0)
}
a.append(slider)
c.addEventListener('mousedown', () => {
is_on = !is_on
})
var loop_count = 0
var loop_inbetween_count = 0
var is_on = true
function loop() {
if (is_on) {
if (loop_inbetween_count == 0) {
try {
step(loop_count)
} catch (e) {
console.log('e:', e)
console.log('error on loop_count = ' + loop_count)
throw 'stop'
}
loop_count++
}
if (debug_frames.length > 1)
draw_frame(debug_frames.length - 2, loop_inbetween_count / 10)
if (debug_frames.length > 300) debug_frames = debug_frames.slice(100)
slider.setAttribute('max', debug_frames.length - 2)
slider.value = debug_frames.length - 2
loop_inbetween_count = (loop_inbetween_count + 1) % 1
}
setTimeout(loop, 30)
}
function draw_frame(di, percent) {
if (di == null) di = debug_frames.length - 1
var d = debug_frames[di]
g.clearRect(0, 0, c.width, c.height)
draw_network(c, g, debug_frames, di, percent, 0, 0, 800, 800, 300)
sim.peers.forEach((p, i) => {
p = d.peers[i]
var x = 800
var y = 20 + 450*i
var r = 10
if (p.resources.my_key) {
draw_fissure_dag(c, g, debug_frames, di, i, x, y, 100, 300, r)
draw_time_dag(c, g, debug_frames, di, i, x + 100, y, 300, 300, r)
var v = p.resources.my_key.space_dag
var S = null
if (v && v.t == 'val') v = space_dag_get(v.S, 0)
if (v && v.t == 'lit') v = v.S
if (typeof(v) == 'string') S = create_space_dag_node(null, v)
if (v && v.t == 'str') S = v.S
if (S) draw_space_dag(p, g, S, x + 400, y)
}
})
draw_text(c, g, 'f# = ' + d.frame_num + ' + ' + percent, 0, 0, 'grey', 'left', 'top')
// top_part.innerHTML = ''
// top_part.style.display = 'grid'
// top_part.style['grid-template-columns'] = '1fr 1fr 1fr'
// sim.peers.forEach((p, i) => {
// p = d.peers[i]
// var dd = document.createElement('textarea')
// dd.value = '= ' + (p.keys.my_key ? JSON.stringify(sync9_read(p.keys.my_key.s9)) : 'n/a') + '\n\n' + JSON.stringify(p, null, ' ')
// top_part.append(dd)
// })
}
function draw_text(c, g, text, x, y, color, x_align, y_align, font) {
g.font = font || '15px Arial'
if (color) g.fillStyle = color
g.textAlign = x_align || 'left'
g.textBaseline = y_align || 'middle'
g.fillText(text, x, y)
}
// Draw the peer network for animation frame `fi`, with `percent`
// progress toward the next frame. Peers sit on a circle of radius `r`
// centered in the (x, y, w, h) box; connections are grey chords; each
// in-flight protocol message is drawn along its chord at a position
// interpolated between the frame it entered the receiver's `incoming`
// queue and the frame it left it.
function draw_network(c, g, frames, fi, percent, x, y, w, h, r) {
    var peers = frames[fi].peers
    // bounding box and the circle the peers sit on
    g.beginPath()
    g.lineWidth = 0.5
    g.strokeStyle = 'red'
    g.rect(x, y, w, h)
    g.stroke()
    g.beginPath()
    g.arc(x + w/2, y + h/2, r, 0, tau)
    g.stroke()
    var plank = w/30  // base size unit for message glyphs
    for (var i = 0; i < peers.length; i++) {
        for (var ii = i + 1; ii < peers.length; ii++) {
            var a = tau / peers.length * i    // angle of peer i on the circle
            var aa = tau / peers.length * ii  // angle of peer ii
            var p = peers[i]
            var other_p = peers[ii]
            // draw the link if either side believes it is connected
            var connected = Object.keys(p.connected_to).some(pid => pid == other_p.pid) || Object.keys(other_p.connected_to).some(pid => pid == p.pid)
            if (connected) {
                g.beginPath()
                g.strokeStyle = 'darkgrey'
                g.lineWidth = w/30
                g.moveTo(x + w/2 + Math.cos(a)*r, y + h/2 + Math.sin(a)*r)
                g.lineTo(x + w/2 + Math.cos(aa)*r, y + h/2 + Math.sin(aa)*r)
                g.stroke()
            }
            // Draw one message `m` from peer ii's queue position toward
            // peer i. Here `a` is the SENDER's angle and `aa` the
            // RECEIVER's (note the swapped arguments at the call sites
            // below). Message tuples appear to be
            // [sender_pid, ?, msg_id, type, payload] — inferred from the
            // indexing; confirm against the simulator's message format.
            function func(i, ii, m, a, aa) {
                if (m[0] != peers[ii].pid) return  // not from this neighbor
                // Scan backward/forward for the frame span during which this
                // message id sits in peer i's incoming queue, so we can place
                // the glyph proportionally along the chord.
                var before_frame = fi
                while ((before_frame >= 0) && frames[before_frame].peers[i].incoming.some(mm => mm[2] == m[2])) before_frame--
                var after_frame = fi
                while ((after_frame < frames.length) && frames[after_frame].peers[i].incoming.some(mm => mm[2] == m[2])) after_frame++
                var p1 = [x + w/2 + Math.cos(a)*r, y + h/2 + Math.sin(a)*r]
                var p2 = [x + w/2 + Math.cos(aa)*r, y + h/2 + Math.sin(aa)*r]
                var f = lerp(before_frame, 0, after_frame, 1, fi + percent)
                var pos = lerp(0, p1, 1, p2, f)
                if (m[3] == 'hello') {
                    // 'H' label rotated to lie along the chord, plus a
                    // thin arrowhead (toward receiver) and a thicker one
                    // (tail) drawn from chevron strokes
                    g.save()
                    g.translate(pos[0], pos[1])
                    g.rotate(Math.atan2(p2[1] - p1[1], p2[0] - p1[0]) + tau/4)
                    draw_text(c, g, 'H', 0, 0, 'white', 'center', 'middle')
                    g.restore()
                    g.beginPath()
                    var rot_by = tau/2 - (23.5 * tau/360)
                    var forward = norm(sub(p2, pos))
                    var t0 = add(pos, mul(forward, w/30*8/10))
                    var len = (w/30 / 2) / Math.sin(23.5 * tau/360)
                    var t1 = add(t0, mul(rot(forward, rot_by), len))
                    var t2 = add(t0, mul(rot(forward, -rot_by), len))
                    g.moveTo(t1[0], t1[1])
                    g.lineTo(t0[0], t0[1])
                    g.lineTo(t2[0], t2[1])
                    g.lineWidth = 1
                    g.strokeStyle = 'white'
                    g.stroke()
                    g.beginPath()
                    var rot_by = tau/8  // redeclared on purpose: tail chevron angle
                    var t0 = add(pos, mul(forward, -w/30 * 0.45))
                    var len = (w/30 / 2) / Math.sin(tau/8)
                    var t1 = add(t0, mul(rot(forward, rot_by), len))
                    var t2 = add(t0, mul(rot(forward, -rot_by), len))
                    g.moveTo(t1[0], t1[1])
                    g.lineTo(t0[0], t0[1])
                    g.lineTo(t2[0], t2[1])
                    g.lineWidth = 2
                    g.strokeStyle = 'white'
                    g.stroke()
                } else if (m[3] == 'get') {
                    // identical glyph to 'hello' but labeled 'G'
                    g.save()
                    g.translate(pos[0], pos[1])
                    g.rotate(Math.atan2(p2[1] - p1[1], p2[0] - p1[0]) + tau/4)
                    draw_text(c, g, 'G', 0, 0, 'white', 'center', 'middle')
                    g.restore()
                    g.beginPath()
                    var rot_by = tau/2 - (23.5 * tau/360)
                    var forward = norm(sub(p2, pos))
                    var t0 = add(pos, mul(forward, w/30*8/10))
                    var len = (w/30 / 2) / Math.sin(23.5 * tau/360)
                    var t1 = add(t0, mul(rot(forward, rot_by), len))
                    var t2 = add(t0, mul(rot(forward, -rot_by), len))
                    g.moveTo(t1[0], t1[1])
                    g.lineTo(t0[0], t0[1])
                    g.lineTo(t2[0], t2[1])
                    g.lineWidth = 1
                    g.strokeStyle = 'white'
                    g.stroke()
                    g.beginPath()
                    var rot_by = tau/8
                    var t0 = add(pos, mul(forward, -w/30 * 0.45))
                    var len = (w/30 / 2) / Math.sin(tau/8)
                    var t1 = add(t0, mul(rot(forward, rot_by), len))
                    var t2 = add(t0, mul(rot(forward, -rot_by), len))
                    g.moveTo(t1[0], t1[1])
                    g.lineTo(t0[0], t0[1])
                    g.lineTo(t2[0], t2[1])
                    g.lineWidth = 2
                    g.strokeStyle = 'white'
                    g.stroke()
                } else if (m[3] == 'welcome') {
                    // ring of 5 dots; lightblue when the welcome carries an
                    // unack_boundary
                    var rr = plank*0.5
                    for (var a = 0; a < 5; a++) {
                        g.beginPath()
                        g.arc(pos[0] + Math.cos(tau/5*a)*rr, pos[1] + Math.sin(tau/5*a)*rr, plank * 0.35, 0, tau)
                        g.fillStyle = m[4].unack_boundary ? 'lightblue' : 'white'
                        g.fill()
                        g.beginPath()
                        g.arc(pos[0] + Math.cos(tau/5*a)*rr, pos[1] + Math.sin(tau/5*a)*rr, plank * 0.35, 0, tau)
                        g.lineWidth = 1
                        g.strokeStyle = 'blue'
                        g.stroke()
                    }
                } else if (m[3] == 'set') {
                    // white disc labeled with the version id
                    g.beginPath()
                    g.arc(pos[0], pos[1], plank * 0.7, 0, tau)
                    g.fillStyle = 'white'
                    g.fill()
                    var my_text = m[4].version
                    draw_text(c, g, my_text, pos[0], pos[1], 'blue', 'center', 'middle')
                    g.beginPath()
                    g.arc(pos[0], pos[1], plank * 0.7, 0, tau)
                    g.lineWidth = 1
                    g.strokeStyle = 'blue'
                    g.stroke()
                } else if (m[3] == 'ack') {
                    g.beginPath()
                    g.arc(pos[0], pos[1], plank * 0.7, 0, tau)
                    g.fillStyle = (m[4].seen == 'local') ? 'lightblue' : 'blue'
                    g.fill()
                    var my_text = m[4].version
                    // NOTE(review): this reads m[4][2] while the fill above
                    // reads m[4].seen — looks inconsistent; confirm which
                    // field marks a "local" ack.
                    draw_text(c, g, my_text, pos[0], pos[1], (m[4][2] == 'local') ? 'blue' : 'white', 'center', 'middle')
                    g.beginPath()
                    g.arc(pos[0], pos[1], plank * 0.7, 0, tau)
                    g.lineWidth = 1
                    g.strokeStyle = 'blue'
                    g.stroke()
                } else if (m[3] == 'fissure') {
                    // black disc with a colored half-arc; color and radius are
                    // derived deterministically from the fissure's conn id via
                    // Math.create_rand (a project-provided seeded RNG)
                    var fis = m[4].fissure
                    var rand = Math.create_rand(fis.conn)
                    var color = '#' + rand().toString(16).slice(2, 8)
                    var rr = 10 * (1 + rand())
                    g.beginPath()
                    g.arc(pos[0], pos[1], plank * 0.7, 0, tau)
                    g.fillStyle = 'black'
                    g.fill()
                    g.beginPath()
                    // left half-arc for side a, right half-arc for side b
                    if (fis.a < fis.b) {
                        g.arc(pos[0], pos[1], rr, tau/4, tau*3/4)
                    } else {
                        g.arc(pos[0], pos[1], rr, tau*3/4, tau/4)
                    }
                    g.strokeStyle = color
                    g.lineWidth = 2
                    g.stroke()
                } else {
                    throw 'unknown message type: ' + m[3]
                }
            }
            // angles are swapped so messages travel sender -> receiver
            peers[i].incoming.forEach(m => func(i, ii, m, aa, a))
            peers[ii].incoming.forEach(m => func(ii, i, m, a, aa))
        }
    }
    // peer dots: blue while messages are pending, green when idle
    peers.forEach((p, i) => {
        var a = tau / peers.length * i
        g.beginPath()
        g.fillStyle = p.incoming.length > 0 ? 'blue' : 'green'
        var pos = [
            x + w/2 + Math.cos(a)*r,
            y + h/2 + Math.sin(a)*r
        ]
        g.arc(pos[0], pos[1], w/30, 0, tau)
        g.fill()
    })
}
// Draw peer pi's fissure DAG for frame fi inside the (x, y, w, h) box.
// Fissures sharing a connection id collapse into one display node;
// nodes are layered by longest parent-chain depth and spread evenly
// within their layer. Colors/radii come deterministically from the
// conn id via Math.create_rand (a project-provided seeded RNG).
function draw_fissure_dag(c, g, frames, fi, pi, x, y, w, h, r) {
    var peers = frames[fi].peers
    var peer = peers[pi].resources.my_key
    if (!peer) return
    var fs = {}
    // group fissures by connection id, recording which sides (a/b)
    // exist and the parent links between display nodes
    Object.values(peer.fissures).forEach(f => {
        var ff = fs[f.conn]
        if (!ff) {
            var rand = Math.create_rand(f.conn)
            ff = fs[f.conn] = {
                id: f.conn,
                color: '#' + rand().toString(16).slice(2, 8),
                radius: r * (1 + rand()),
                parents: {}
            }
        }
        if (f.a < f.b) ff.has_side_a = true
        if (f.b < f.a) ff.has_side_b = true
        Object.keys(f.parents).forEach(p => {
            // work here
            if (!peer.fissures[p]) {
                //debugger
                // parent fissure is missing from this peer: flag the
                // node (rendered red below) instead of crashing
                ff.has_issue = true
                return
            }
            ff.parents[peer.fissures[p].conn] = true
        })
    })
    // layer = longest path up to a parentless fissure (memoized;
    // note layer 0 is falsy so root layers get recomputed — harmless)
    function get_layer(k) {
        if (fs[k].layer) return fs[k].layer
        return fs[k].layer = Object.keys(fs[k].parents).reduce((x, p) => {
            return Math.max(x, get_layer(p) + 1)
        }, 0)
    }
    Object.keys(fs).forEach(get_layer)
    var layer_members = {}
    var num_layers = 0
    Object.values(fs).forEach(f => {
        layer_members[f.layer] = layer_members[f.layer] || []
        layer_members[f.layer].push(f.id)
        if (f.layer >= num_layers) num_layers = f.layer + 1
    })
    // stable horizontal ordering within each layer
    Object.values(layer_members).forEach(layer => {
        layer.sort().forEach((k, i) => {
            fs[k].layer_i = i
        })
    })
    function get_node_pos(f) {
        var layer_count = layer_members[f.layer].length
        return [
            lerp(0, x + r, layer_count, x + w - r, f.layer_i + 0.5),
            y + r + (f.layer * r*4)
        ]
    }
    // draw edges first so nodes paint on top of them
    Object.values(fs).forEach(f => {
        var a = get_node_pos(f)
        g.beginPath()
        Object.keys(f.parents).map(x => fs[x]).forEach(p => {
            var b = get_node_pos(p)
            g.moveTo(a[0], a[1])
            g.lineTo(b[0], b[1])
        })
        g.lineWidth = 3
        g.strokeStyle = 'lightblue'
        g.stroke()
    })
    Object.values(fs).forEach(f => {
        var node_pos = get_node_pos(f)
        var rand = Math.create_rand(f.id)
        var color = '#' + rand().toString(16).slice(2, 8)
        var rr = r * (1 + rand())
        g.beginPath()
        g.arc(node_pos[0], node_pos[1], rr, 0, tau)
        g.fillStyle = f.has_issue ? 'red' : 'white'
        g.fill()
        g.beginPath()
        // left half-arc marks side a, right half-arc marks side b
        if (f.has_side_a) {
            g.arc(node_pos[0], node_pos[1], rr, tau/4, tau*3/4)
        }
        if (f.has_side_b) {
            g.arc(node_pos[0], node_pos[1], rr, tau*3/4, tau/4)
        }
        g.strokeStyle = color
        g.lineWidth = 2
        g.stroke()
    })
}
// Draw peer pi's time DAG (version history) for frame fi. Versions are
// layered by depth and connected to their parents; fully-acknowledged
// versions fill solid blue, versions with acks in process get an
// animated partial "water level" fill, the unack boundary gets a white
// center dot, and fissure version-sets get colored half-arcs.
function draw_time_dag(c, g, frames, fi, pi, x, y, w, h, r) {
    var peers = frames[fi].peers
    var resource = peers[pi].resources.my_key
    if (!resource) return
    var s9 = resource.mergeable  // unused below — possibly left over; confirm
    g.lineWidth = 3
    var vs = {}
    // layer = longest parent chain above v (memoized; also creates the
    // vs entry for v)
    function get_layer(v) {
        if (!vs[v]) vs[v] = {vid: v}
        if (vs[v].layer) return vs[v].layer
        return vs[v].layer = Object.keys(resource.time_dag[v]).reduce((x, p) => {
            return Math.max(x, get_layer(p) + 1)
        }, 0)
    }
    Object.keys(resource.time_dag).forEach(get_layer)
    var layer_members = {}
    var num_layers = 0
    Object.values(vs).forEach(v => {
        layer_members[v.layer] = layer_members[v.layer] || []
        layer_members[v.layer].push(v.vid)
        if (v.layer >= num_layers) num_layers = v.layer + 1
    })
    // stable horizontal ordering within each layer
    Object.values(layer_members).forEach(layer => {
        layer.sort().forEach((v, i) => {
            vs[v].layer_i = i
        })
    })
    function get_node_pos(v) {
        var layer_count = layer_members[v.layer].length
        return [
            lerp(0, x + r, layer_count + 1, x + w - r, v.layer_i + 1),
            y + r + (v.layer * r*3)
        ]
    }
    // edges to parents, drawn under the nodes
    Object.entries(vs).forEach(e => {
        var a_pos = get_node_pos(e[1])
        g.beginPath()
        Object.keys(resource.time_dag[e[0]]).forEach(p => {
            g.moveTo(a_pos[0], a_pos[1])
            var b_pos = get_node_pos(vs[p])
            g.lineTo(b_pos[0], b_pos[1])
        })
        g.strokeStyle = 'lightblue'
        g.stroke()
    })
    // everything at or below the acked boundary is fully acked
    var fully_acked = {}
    function mark_fully_acked_rec(v) {
        if (!fully_acked[v]) {
            fully_acked[v] = true
            Object.keys(resource.time_dag[v]).forEach(mark_fully_acked_rec)
        }
    }
    Object.keys(resource.acked_boundary).forEach(mark_fully_acked_rec)
    Object.entries(vs).forEach(e => {
        var node_pos = get_node_pos(e[1])
        g.beginPath()
        g.arc(node_pos[0], node_pos[1], r, 0, tau)
        g.fillStyle = 'white'
        g.fill()
        if (resource.acks_in_process[e[0]]) {
            var current_count = Math.max(0, resource.acks_in_process[e[0]].count)
            var max_count = 0
            var search_i = fi
            // scan back to the frame where this ack started to learn the
            // starting count; best-effort — frames may not reach that far
            try {
                let x = null
                while (x = frames[search_i].peers[pi].resources.my_key.acks_in_process[e[0]]) {
                    max_count = x.count
                    search_i--
                }
            } catch (e) {}
            var percent_done = (max_count - current_count) / max_count
            if (percent_done > 0) {
                // fill the circle bottom-up: bottom half, then a circular
                // segment whose chord sits at the current fill level
                g.beginPath()
                g.arc(node_pos[0], node_pos[1], r, 0, tau/2, true)
                if (percent_done == 1) {
                    g.arc(node_pos[0], node_pos[1], r, tau/2, 0, true)
                } else if (percent_done < 0.5) {
                    var x = lerp(0, r, 0.5, 0, percent_done)
                    var C = (r*r - x*x) / (2*x)
                    var angle = Math.atan2(r, C)
                    g.arc(node_pos[0], node_pos[1] + C, C + x, tau*3/4 - angle, tau*3/4 + angle)
                } else if (percent_done > 0.5) {
                    var x = lerp(0.5, 0, 1, r, percent_done)
                    var C = (r*r - x*x) / (2*x)
                    var angle = Math.atan2(r, C)
                    g.arc(node_pos[0], node_pos[1] - C, C + x, tau/4 - angle, tau/4 + angle)
                } else {
                    // NOTE(review): reached only when percent_done == 0.5,
                    // where C and x are still undefined (NaN arc) — confirm
                    // whether this branch was meant to draw a half-fill.
                    g.arc(node_pos[0], node_pos[1] + C, C + x, 0, tau)
                }
                g.fillStyle = 'lightblue'
                g.fill()
            }
        }
        g.beginPath()
        g.arc(node_pos[0], node_pos[1], r, 0, tau)
        if (fully_acked[e[0]]) {
            g.fillStyle = 'blue'
            g.fill()
        } else {
            g.strokeStyle = 'blue'
            g.stroke()
        }
        draw_text(c, g, e[0].slice(0, 3), node_pos[0] + r, node_pos[1] + r, 'grey', 'left', 'top')
    })
    // white center dot marks the unack boundary
    Object.keys(resource.unack_boundary).forEach(v => {
        g.beginPath()
        g.fillStyle = 'white'
        var node_pos = get_node_pos(vs[v])
        g.arc(node_pos[0], node_pos[1], r * 0.5, 0, Math.PI*2)
        g.fill()
    })
    // fissure version-sets: colored half-arcs (side a vs side b)
    Object.values(resource.fissures).forEach(f => {
        Object.keys(f.versions).forEach(v => {
            if (!resource.time_dag[v]) return
            g.beginPath()
            var rand = Math.create_rand(f.conn)
            g.strokeStyle = '#' + rand().toString(16).slice(2, 8)
            var node_pos = get_node_pos(vs[v])
            //var rr = r * 1.45
            var rr = r * (1 + rand())
            g.lineWidth = 2
            if (f.a < f.b) {
                // work here
                g.arc(node_pos[0], node_pos[1], rr, tau/4, tau*3/4)
                // g.moveTo(node_pos[0] - rr, node_pos[1] - rr)
                // g.lineTo(node_pos[0] + rr, node_pos[1] - rr)
                // g.lineTo(node_pos[0] + rr, node_pos[1] + rr)
                // g.lineTo(node_pos[0] - rr, node_pos[1] + rr)
            } else {
                g.arc(node_pos[0], node_pos[1], rr, tau/4, tau*3/4, true)
                // var rrr = Math.sqrt(2) * rr
                // g.moveTo(node_pos[0] - rrr, node_pos[1])
                // g.lineTo(node_pos[0], node_pos[1] - rrr)
                // g.lineTo(node_pos[0] + rrr, node_pos[1])
                // g.lineTo(node_pos[0], node_pos[1] + rrr)
                // g.closePath()
            }
            g.stroke()
        })
    })
}
function draw_space_dag(c, g, S, x, y) {
function helper(node, y, px, py) {
g.beginPath()
g.moveTo(x, y)
g.lineTo(px, py)
g.lineWidth = 1
g.strokeStyle = 'lightblue'
g.stroke()
var begin_x
var end_x
draw_text(c, g, node.vid ? node.vid.slice(0, 3) : '', x, y + 25, 'grey', 'left', 'middle')
var my_text = node.elems + (node.end_cap ? '*' : '')
draw_text(c, g, my_text, x, y, Object.keys(node.deleted_by).length > 0 ? 'red' : 'blue', 'left', 'middle', '20px Arial')
var width = g.measureText(my_text).width
x += width
var px = x
x += 10
for (var n of node.nexts) helper(n, y + 40, px, y)
if (node.next) helper(node.next, y, px, y)
}
if (typeof(S) == 'string') helper(sync9_create_space_dag_node('lit', S))
else helper(S, y, x, y)
}
// Linear interpolation: maps t from the span [t0, t1] onto [v0, v1].
// When v0/v1 are arrays (points/vectors), interpolates per component.
// Values of t outside [t0, t1] extrapolate.
function lerp(t0, v0, t1, v1, t) {
    var scalar = function (a0, b0, a1, b1, at) {
        return b0 + (at - a0) * (b1 - b0) / (a1 - a0)
    }
    if (typeof(v0) != 'object') return scalar(t0, v0, t1, v1, t)
    return v0.map(function (component, i) {
        return scalar(t0, component, t1, v1[i], t)
    })
}
// Rotate 2-vector `a` by angle `r` radians (standard rotation matrix).
function rot(a, r) {
    var c = Math.cos(r)
    var s = Math.sin(r)
    return [a[0] * c - a[1] * s,
            a[0] * s + a[1] * c]
}
// Small N-dimensional vector helpers used by the drawing code above.
function mul(v, s) { return v.map(component => component * s) }             // scale by s
function sum(v) { return v.reduce((total, component) => total + component, 0) } // component sum
function lenSq(v) { return sum(v.map(component => component * component)) } // squared length
function len(v) { return Math.sqrt(lenSq(v)) }                              // euclidean length
function norm(v) { return mul(v, 1 / len(v)) }                              // unit vector
function add(a, b) { return a.map((component, i) => component + b[i]) }     // a + b
function sub(a, b) { return a.map((component, i) => component - b[i]) }     // a - b
return vis
}
================================================
FILE: sync9/sync9.js
================================================
// Adapted from https://github.com/dglittle/cdn/blob/gh-pages/sync9_047.html
module.exports = require.sync9 = function create (resource) {
if (!resource.space_dag) resource.space_dag = null
return {
read (version) {
return read(resource, version)
},
add_version (version, parents, patches, hint) {
return add_version(resource, version, parents, patches,
hint && hint.sort_keys)
},
generate_braid (versions) {
var ancestors = (versions && Object.keys(versions).length
? resource.ancestors(versions, true)
: {})
var versions = generate_braid(resource, x => ancestors[x])
// Hey Greg: Why are we cloning versions here? -Mike
versions = JSON.parse(JSON.stringify(versions))
versions.forEach(x => {
// we want to put some of this stuff in a "hint" field,
// as per the protocol
if (x.sort_keys) {
x.hint = {sort_keys: x.sort_keys}
delete x.sort_keys
}
})
return versions
},
prune (bubbles) {
return prune(resource, bubbles)
}
}
}
// Produce the list of set-messages (the "braid") for every version
// that is not an ancestor per `is_anc`. Messages are memoized in
// resource.version_cache; cache misses are reconstructed by diffing
// each version against its ancestors in the space dag.
function generate_braid(resource, is_anc) {
    if (Object.keys(resource.time_dag).length === 0)
        return []
    return Object.entries(resource.version_cache).filter(
        x => !is_anc(x[0])
    ).map(
        ([version, set_message]) => {
            // fill (and memoize) the cache entry on demand
            return resource.version_cache[version]
                = set_message || generate_set_message(version)
        })
    // Rebuild the set-message for `version` from the space dag.
    function generate_set_message(version) {
        // a root version (no parents) is just the whole value it set
        if (!Object.keys(resource.time_dag[version]).length) {
            return {
                version,
                parents: {},
                patches: [` = ${JSON.stringify(read_raw(resource, v => v == version))}`]
            }
        }
        var is_lit = x => !x || typeof(x) !== 'object' || x.t === 'lit'
        var get_lit = x => (x && typeof(x) === 'object' && x.t === 'lit') ? x.S : x
        // strict ancestors of `version` (itself excluded)
        var ancs = resource.ancestors({[version]: true})
        delete ancs[version]
        var is_anc = x => ancs[x]
        var path = []
        var patches = []
        var sort_keys = {}
        recurse(resource.space_dag)
        // Walk the space dag emitting one patch per splice that
        // `version` introduced; `path` accumulates the JSON-path prefix.
        function recurse(x) {
            if (is_lit(x)) {
            } else if (x.t === 'val') {
                space_dag_generate_braid(x.S, resource, version, is_anc).forEach(s => {
                    if (s[2].length) {
                        patches.push(`${path.join('')} = ${JSON.stringify(s[2][0])}`)
                        if (s[3]) sort_keys[patches.length - 1] = s[3]
                    }
                })
                traverse_space_dag(x.S, is_anc, node => {
                    node.elems.forEach(recurse)
                })
            } else if (x.t === 'arr') {
                // splices on the array itself, then recurse into elements
                space_dag_generate_braid(x.S, resource, version, is_anc).forEach(s => {
                    patches.push(`${path.join('')}[${s[0]}:${s[0] + s[1]}] = ${JSON.stringify(s[2])}`)
                    if (s[3]) sort_keys[patches.length - 1] = s[3]
                })
                var i = 0
                traverse_space_dag(x.S, is_anc, node => {
                    node.elems.forEach(e => {
                        path.push(`[${i++}]`)
                        recurse(e)
                        path.pop()
                    })
                })
            } else if (x.t === 'obj') {
                Object.entries(x.S).forEach(e => {
                    path.push('[' + JSON.stringify(e[0]) + ']')
                    recurse(e[1])
                    path.pop()
                })
            } else if (x.t === 'str') {
                space_dag_generate_braid(x.S, resource, version, is_anc).forEach(s => {
                    patches.push(`${path.join('')}[${s[0]}:${s[0] + s[1]}] = ${JSON.stringify(s[2])}`)
                    if (s[3]) sort_keys[patches.length - 1] = s[3]
                })
            }
        }
        return {
            version,
            parents: Object.assign({}, resource.time_dag[version]),
            patches,
            sort_keys
        }
    }
}
// Compute the splices that `version` contributed to sequence S, as
// [offset, del_count, insertion, sort_key, kind] tuples where kind is
// 'i' (insert), 'd' (delete) or 'r' (replace). Offsets are measured in
// the coordinate space visible to the version's ancestors (`is_anc`).
function space_dag_generate_braid(S, resource, version, is_anc) {
    var splices = []
    // record an insertion, merging into the previous splice when contiguous
    function add_ins(offset, ins, sort_key, end_cap) {
        if (typeof(ins) !== 'string')
            ins = ins.map(x => read_raw(x, () => false))
        if (splices.length > 0) {
            var prev = splices[splices.length - 1]
            if (prev[0] + prev[1] === offset && !end_cap && (prev[4] === 'i' || (prev[4] === 'r' && prev[1] === 0))) {
                prev[2] = prev[2].concat(ins)
                return
            }
        }
        splices.push([offset, 0, ins, sort_key, end_cap ? 'r' : 'i'])
    }
    // record a deletion, merging contiguous deletes
    function add_del(offset, del, ins) {
        if (splices.length > 0) {
            var prev = splices[splices.length - 1]
            if (prev[0] + prev[1] === offset && prev[4] !== 'i') {
                prev[1] += del
                return
            }
        }
        splices.push([offset, del, ins, null, 'd'])
    }
    var offset = 0
    // Walk the chain: nodes created by `version` are insertions, nodes
    // it deleted are deletions. `offset` only advances over content the
    // ancestors can see (present and not deleted by an ancestor).
    function helper(node, _version, end_cap) {
        if (_version === version) {
            add_ins(offset, node.elems.slice(0), node.sort_key, end_cap)
        } else if (node.deleted_by[version] && node.elems.length > 0) {
            add_del(offset, node.elems.length, node.elems.slice(0, 0))
        }
        if ((!_version || is_anc(_version)) && !Object.keys(node.deleted_by).some(is_anc)) {
            offset += node.elems.length
        }
        node.nexts.forEach(next => helper(next, next.version, node.end_cap))
        if (node.next) helper(node.next, _version)
    }
    helper(S, null)
    splices.forEach(s => {
        // if we have replaces with 0 deletes,
        // make them have at least 1 delete..
        // this can happen when there are multiple replaces of the same text,
        // and our code above will associate those deletes with only one of them
        if (s[4] === 'r' && s[1] === 0) s[1] = 1
    })
    return splices
}
// Collapse version "bubbles" (ranges of versions every peer has seen)
// into single versions, then simplify the space dag — folding fully
// literal subtrees back into plain {t:'lit'} values where possible.
// `to_bubble` maps version -> [bubble_bottom, bubble_top].
//
// Fix: the two for-of loops in see_annotations previously assigned to
// an undeclared variable `y`, creating an implicit global (and a
// ReferenceError under strict mode / ES modules); they now declare it.
function prune(resource, to_bubble) {
    assert(resource.time_dag, 'No time dag on ' + JSON.stringify(resource))
    var is_lit = x => !x || typeof(x) != 'object' || x.t == 'lit'
    var get_lit = x => (x && typeof(x) == 'object' && x.t == 'lit') ? x.S : x
    // First pass: collect ids of 'location' annotation objects still
    // reachable from the value, so stale annotations can be dropped.
    var seen_annotations = {}
    see_annotations(resource.space_dag)
    function see_annotations(x, is_lit_override) {
        if (is_lit_override || is_lit(x)) {
            if (!is_lit_override && x && typeof(x) == 'object' && x.t == 'lit') x = x.S
            if (Array.isArray(x)) for (var y of x) see_annotations(y, true)
            else if (x && typeof(x) == 'object') {
                if (x.type == 'location') seen_annotations[x.id] = true
                else for (var y of Object.values(x)) see_annotations(y, true)
            }
        } else if (x.t == 'val') {
            traverse_space_dag(x.S, () => true, node => {
                node.elems.forEach(x => see_annotations(x))
            }, true)
        } else if (x.t == 'arr') {
            traverse_space_dag(x.S, () => true, node => {
                node.elems.forEach(x => see_annotations(x))
            }, true)
        } else if (x.t == 'obj') {
            Object.values(x.S).forEach(x => see_annotations(x))
        }
    }
    // Second pass: prune each sequence and collapse subtrees that no
    // longer need CRDT structure back into literals.
    function recurse(x) {
        if (is_lit(x)) return x
        if (x.t == 'val') {
            space_dag_prune(x.S, to_bubble)
            traverse_space_dag(x.S, () => true, node => {
                node.elems = node.elems.slice(0, 1).map(recurse)
            }, true)
            if (x.S.nexts.length == 0 && !x.S.next && x.S.elems.length == 1 && is_lit(x.S.elems[0])) return x.S.elems[0]
            return x
        }
        if (x.t == 'arr') {
            space_dag_prune(x.S, to_bubble, seen_annotations)
            traverse_space_dag(x.S, () => true, node => {
                node.elems = node.elems.map(recurse)
            }, true)
            if (x.S.nexts.length == 0 && !x.S.next && x.S.elems.every(is_lit) && !Object.keys(x.S.deleted_by).length && !x.S.annotations) return {t: 'lit', S: x.S.elems.map(get_lit)}
            return x
        }
        if (x.t == 'obj') {
            Object.entries(x.S).forEach(e => {
                var y = x.S[e[0]] = recurse(e[1])
                // fully-literal 'deleted' members vanish from the object
                if (is_lit(y) && y && typeof(y) == 'object' && y.S.type == 'deleted')
                    delete x.S[e[0]]
            })
            if (Object.values(x.S).every(is_lit)) {
                var o = {}
                Object.entries(x.S).forEach(e => o[e[0]] = get_lit(e[1]))
                return {t: 'lit', S: o}
            }
            return x
        }
        if (x.t == 'str') {
            space_dag_prune(x.S, to_bubble, seen_annotations)
            if (x.S.nexts.length == 0 && !x.S.next && !Object.keys(x.S.deleted_by).length && !x.S.annotations) return x.S.elems
            return x
        }
    }
    resource.space_dag = recurse(resource.space_dag)
    // Rewrite the time dag: each bubble's top keeps the bottom's
    // parents; interior versions disappear along with their cache.
    Object.entries(to_bubble).forEach(([version, bubble]) => {
        if (version === bubble[1])
            resource.time_dag[bubble[0]] = resource.time_dag[bubble[1]]
        if (version !== bubble[0]) {
            delete resource.time_dag[version]
            delete resource.version_cache[version]
        } else resource.version_cache[version] = null
    })
    // Now we check to see if we can collapse the spacedag down to a literal.
    //
    // Todo: Should this code be looking so intimately at antimatter data,
    // like the acked_boundary and fissures? Shouldn't that part be computed
    // in the antimatter section? Maybe it should just pass the result of
    // this computation into the prune() function as a paramter?
    //
    // (This code also assumes there is a God (a single first version adder))
    var leaves = Object.keys(resource.current_version)
    var acked_boundary = Object.keys(resource.acked_boundary)
    var fiss = Object.keys(resource.fissures)
    if (leaves.length === 1 && acked_boundary.length === 1
        && leaves[0] === acked_boundary[0] && fiss.length === 0
        && !Object.keys(seen_annotations).length) {
        resource.time_dag = { [leaves[0]]: {} }
        var val = read_raw(resource)
        resource.space_dag = (val && typeof(val) === 'object'
                              ? {t: 'lit', S: val}
                              : val)
    }
}
// Prune a single sequence CRDT: retarget node versions and deleted_by
// entries to their bubble representatives, drop annotations whose ids
// are no longer referenced, then linearize chains whose branch points
// have collapsed and merge adjacent compatible nodes.
//
// Fix: the annotations for-of loop previously assigned to an
// undeclared variable `k`, creating an implicit global (and a
// ReferenceError under strict mode / ES modules); it now declares it.
function space_dag_prune(S, to_bubble, seen_annotations) {
    traverse_space_dag(S, () => true, node => {
        // retarget the node's version to the bubble bottom, keeping the
        // old version as a sort_key so ordering stays stable
        if (to_bubble[node.version] && to_bubble[node.version][0] != node.version) {
            if (!node.sort_key) node.sort_key = node.version
            node.version = to_bubble[node.version][0]
        }
        for (var x of Object.keys(node.deleted_by)) {
            if (to_bubble[x]) {
                delete node.deleted_by[x]
                node.deleted_by[to_bubble[x][0]] = true
            }
        }
        if (node.annotations) {
            for (var k of Object.keys(node.annotations))
                if (!seen_annotations[k]) delete node.annotations[k]
            if (!Object.keys(node.annotations).length) delete node.annotations
        }
    }, true)
    // append `next` to the end of node's .next chain
    function set_nnnext(node, next) {
        while (node.next) node = node.next
        node.next = next
    }
    do_line(S, S.version)
    // Walk one chain, inlining branch lists whose versions collapsed
    // into `version`, emptying nodes it deleted, and merging adjacent
    // nodes with identical visibility.
    function do_line(node, version) {
        var prev = null
        while (node) {
            if (node.nexts[0] && node.nexts[0].version == version) {
                // branches now share our version: splice them into a
                // single sequential chain
                for (let i = 0; i < node.nexts.length; i++) {
                    delete node.nexts[i].version
                    delete node.nexts[i].sort_key
                    set_nnnext(node.nexts[i], i + 1 < node.nexts.length ? node.nexts[i + 1] : node.next)
                }
                node.next = node.nexts[0]
                node.nexts = []
            }
            if (node.deleted_by[version]) {
                // deletion has been absorbed into the bubble: empty the node
                if (node.annotations) Object.keys(node.annotations).forEach(k => node.annotations[k] = 0)
                node.elems = node.elems.slice(0, 0)
                node.deleted_by = {}
                if (prev) { node = prev; continue }  // retry merge with predecessor
            }
            var next = node.next
            // merge with the following node when one side is empty or
            // both are deleted by exactly the same versions
            if (!node.nexts.length && next && (!node.elems.length || !next.elems.length || (Object.keys(node.deleted_by).every(x => next.deleted_by[x]) && Object.keys(next.deleted_by).every(x => node.deleted_by[x])))) {
                if (next.annotations) {
                    node.annotations = node.annotations || {}
                    Object.entries(next.annotations).forEach(e => {
                        node.annotations[e[0]] = node.elems.length + e[1]
                    })
                }
                if (!node.elems.length) node.deleted_by = next.deleted_by
                node.elems = node.elems.concat(next.elems)
                node.end_cap = next.end_cap
                node.nexts = next.nexts
                node.next = next.next
                continue
            }
            for (let n of node.nexts) do_line(n, n.version)
            prev = node
            node = next
        }
    }
}
// Apply a new version to the resource: parse each patch, resolve its
// JSON path down to a sequence CRDT (materializing literal subtrees
// into CRDT nodes along the way), and splice the change in under
// `version`. A parentless version replaces the whole value.
// `sort_keys` (patch index -> key) orders concurrent inserts.
//
// Fix: the path-resolution loop header and the lit->arr conversion in
// resolve_path were garbled in this copy of the file (several lines
// were fused into `for (var i=0; i make_lit(x)))`); reconstructed to
// match the surviving surrounding lines and the upstream sync9 source.
function add_version(resource, version, parents, patches, sort_keys, is_anc) {
    let make_lit = x => (x && typeof(x) == 'object') ? {t: 'lit', S: x} : x
    if (!sort_keys) sort_keys = {}
    // no parents: this version defines the entire value
    if (!Object.keys(parents).length) {
        var parse = parse_patch(patches[0])
        resource.space_dag = make_lit(parse.value)
        parse.annotations && create_annotations(parse.annotations)
        return
    }
    // default ancestor test: everything except the new version itself
    // when parents are exactly the current frontier
    if (!is_anc) {
        if (parents == resource.current_version)
            is_anc = (_version) => _version != version
        else {
            var ancs = resource.ancestors(parents)
            is_anc = _version => ancs[_version]
        }
    }
    var annotations = {}
    patches.forEach((patch, i) => {
        var sort_key = sort_keys[i]
        var parse = parse_patch(patch)
        Object.assign(annotations, parse.annotations)
        var cur = resolve_path(parse)
        if (!parse.slice) {
            // whole-value assignment: replace everything visible with one elem
            if (cur.t != 'val') throw 'bad'
            var len = space_dag_length(cur.S, is_anc)
            space_dag_add_version(cur.S, version, [[0, len, [parse.delete ? make_lit({type: 'deleted'}) : make_lit(parse.value)]]], sort_key, is_anc)
        } else {
            if (typeof parse.value === 'string' && cur.t !== 'str')
                throw `Cannot splice string ${JSON.stringify(parse.value)} into non-string`
            if (parse.value instanceof Array && cur.t !== 'arr')
                throw `Cannot splice array ${JSON.stringify(parse.value)} into non-array`
            if (parse.value instanceof Array)
                parse.value = parse.value.map(x => make_lit(x))
            var r0 = parse.slice[0]
            var r1 = parse.slice[1]
            // negative indices (including -0) count back from the end
            if (r0 < 0 || Object.is(r0, -0) || r1 < 0 || Object.is(r1, -0)) {
                let len = space_dag_length(cur.S, is_anc)
                if (r0 < 0 || Object.is(r0, -0)) r0 = len + r0
                if (r1 < 0 || Object.is(r1, -0)) r1 = len + r1
            }
            space_dag_add_version(
                cur.S, version, [[r0, r1 - r0, parse.value]], sort_key, is_anc
            )
        }
    })
    create_annotations(annotations)
    // Attach 'location' annotations at their positions; the new version
    // counts as an ancestor so freshly inserted content is addressable.
    function create_annotations(annotations) {
        var prev_is_anc = is_anc
        is_anc = v => prev_is_anc(v) || v == version
        Object.entries(annotations).forEach(e => {
            e[1].slice = [0, 0]
            var cur = resolve_path(e[1])
            function helper(node, offset) {
                if (offset <= e[1].pos && e[1].pos <= offset + node.elems.length) {
                    node.annotations = node.annotations || {}
                    node.annotations[e[0]] = e[1].pos - offset
                    return false
                }
            }
            if (e[1].pos == 0) helper(cur.S, 0)
            else traverse_space_dag(cur.S, is_anc, helper)
        })
    }
    // Walk parse.path from the root, converting literal subtrees into
    // CRDT nodes as needed, and return the container the patch targets.
    function resolve_path(parse) {
        var cur = resource.space_dag
        if (!cur || typeof(cur) != 'object' || cur.t == 'lit')
            cur = resource.space_dag = {t: 'val', S: create_space_dag_node(null, [cur])}
        var prev_S = null
        var prev_i = 0
        for (var i = 0; i < parse.path.length; i++) {
            var key = parse.path[i]
            if (cur.t == 'val') cur = space_dag_get(prev_S = cur.S, prev_i = 0, is_anc)
            if (cur.t == 'lit') {
                // materialize the literal into an arr/obj CRDT node so we
                // can descend into it
                var new_cur = {}
                if (cur.S instanceof Array) {
                    new_cur.t = 'arr'
                    new_cur.S = create_space_dag_node(null, cur.S.map(x => make_lit(x)))
                } else {
                    if (typeof(cur.S) != 'object') throw 'bad'
                    new_cur.t = 'obj'
                    new_cur.S = {}
                    Object.entries(cur.S).forEach(e => new_cur.S[e[0]] = make_lit(e[1]))
                }
                cur = new_cur
                space_dag_set(prev_S, prev_i, cur, is_anc)
            }
            if (cur.t == 'obj') {
                let x = cur.S[key]
                if (!x || typeof(x) != 'object' || x.t == 'lit')
                    x = cur.S[key] = {t: 'val', S: create_space_dag_node(null, [x == undefined ? {t: 'lit', S: {type: 'deleted'}} : x])}
                cur = x
            } else if (i == parse.path.length - 1 && !parse.slice) {
                // final numeric index with no slice: treat as a 1-wide slice
                parse.slice = [key, key + 1]
                parse.value = (cur.t == 'str') ? parse.value : [parse.value]
            } else if (cur.t == 'arr') {
                cur = space_dag_get(prev_S = cur.S, prev_i = key, is_anc)
            } else throw 'bad'
        }
        if (parse.slice) {
            if (cur.t == 'val') cur = space_dag_get(prev_S = cur.S, prev_i = 0, is_anc)
            if (typeof(cur) == 'string') {
                cur = {t: 'str', S: create_space_dag_node(null, cur)}
                space_dag_set(prev_S, prev_i, cur, is_anc)
            } else if (cur.t == 'lit') {
                if (!(cur.S instanceof Array)) throw 'bad'
                cur = {t: 'arr', S: create_space_dag_node(null, cur.S.map(x => make_lit(x)))}
                space_dag_set(prev_S, prev_i, cur, is_anc)
            }
        }
        return cur
    }
}
// Read the resource's value (optionally restricted to an ancestor set)
// and post-process it for external consumption: 'location' annotation
// stubs become their resolved positions, {type: 'deleted'} members are
// dropped from objects, and escaped "_type"-style keys lose one
// leading underscore.
function read(x, is_anc) {
    if (!is_anc) is_anc = () => true
    var annotations = {}
    var raw = read_raw(x, is_anc, annotations)
    return finalize(raw)
    function finalize(value) {
        if (Array.isArray(value)) {
            // arrays are finalized in place, element by element
            for (var i = 0; i < value.length; i++) value[i] = finalize(value[i])
            return value
        }
        if (!value || typeof(value) != 'object') return value
        // location markers resolve to the position read_raw recorded
        if (value.type == 'location') return annotations[value.id]
        var out = {}
        for (var [k, v] of Object.entries(value)) {
            if (v && typeof(v) == 'object' && v.type == 'deleted') continue
            out[k.match(/^_+type$/) ? k.slice(1) : k] = finalize(v)
        }
        return out
    }
}
// Materialize the CRDT structure `x` into plain JSON. `is_anc` selects
// which versions are visible: a function, a version string (that
// version and its ancestors), or a version map. When an `annotations`
// object is supplied, positions of annotated nodes are recorded into
// it; otherwise location markers are stripped of their ids.
function read_raw(x, is_anc, annotations) {
    if (!is_anc) is_anc = () => true
    else if (typeof(is_anc) == 'string') {
        var ancs = x.ancestors({[is_anc]: true})
        is_anc = v => ancs[v]
    } else if (typeof(is_anc) == 'object') {
        var ancs = x.ancestors(is_anc)
        is_anc = v => ancs[v]
    }
    return finalize(rec_read(x))
    // convert one CRDT node (or raw resource) to a plain value
    function rec_read(x) {
        if (x && typeof(x) == 'object') {
            if (!x.t) return rec_read(x.space_dag)  // a whole resource
            if (x.t == 'lit') return JSON.parse(JSON.stringify(x.S))  // deep copy
            if (x.t == 'val') return rec_read(space_dag_get(x.S, 0, is_anc))
            if (x.t == 'obj') {
                var o = {}
                Object.entries(x.S).forEach(([k, v]) => o[k] = rec_read(v))
                return o
            }
            if (x.t == 'arr') {
                var a = []
                // traverse with view_deleted=true so annotations on
                // deleted nodes still resolve to a position
                traverse_space_dag(x.S, is_anc, (node, _, __, ___, ____, deleted) => {
                    if (annotations && node.annotations) Object.entries(node.annotations).forEach(e => {
                        annotations[e[0]] = a.length + (deleted ? 0 : e[1])
                    })
                    if (!deleted) {
                        node.elems.forEach((e) => {
                            a.push(rec_read(e))
                        })
                    }
                }, true)
                return a
            }
            if (x.t == 'str') {
                var s = []
                var len = 0
                traverse_space_dag(x.S, is_anc, (node, _, __, ___, ____, deleted) => {
                    if (annotations && node.annotations) Object.entries(node.annotations).forEach(e => {
                        annotations[e[0]] = len + (deleted ? 0 : e[1])
                    })
                    if (!deleted) {
                        s.push(node.elems)
                        len += node.elems.length
                    }
                }, true)
                return s.join('')
            }
            throw 'bad'
        } return x
    }
    // when nobody asked for annotations, strip location ids in place
    function finalize(x) {
        if (Array.isArray(x)) x.forEach(x => finalize(x))
        else if (x && typeof(x) == 'object') {
            if (!annotations && x.type == 'location') delete x.id
            else Object.values(x).forEach(x => finalize(x))
        }
        return x
    }
}
// Construct a fresh space-dag node. `version` tags the edit that
// created it (null for root/literal content), `elems` is its payload
// (array or string), `deleted_by` collects versions that deleted the
// elems, `nexts` holds concurrent sibling branches, and `next` the
// sequential continuation.
function create_space_dag_node(version, elems, end_cap, sort_key) {
    var node = {}
    node.version = version
    node.sort_key = sort_key
    node.elems = elems
    node.deleted_by = {}
    node.end_cap = end_cap
    node.nexts = []
    node.next = null
    return node
}
// Return the element at visible index `i` of space-dag S, where
// visibility is decided by `is_anc` (defaults to everything visible).
// Returns null when `i` is out of range.
function space_dag_get(S, i, is_anc) {
    var found = null
    var seen = 0
    traverse_space_dag(S, is_anc || (() => true), (node) => {
        var local = i - seen
        if (local < node.elems.length) {
            found = node.elems[local]
            return false  // stop the traversal — we have our element
        }
        seen += node.elems.length
    })
    return found
}
// Overwrite the element at visible index `i` of space-dag S with `v`.
// Indexing skips content hidden from `is_anc`, mirroring space_dag_get.
function space_dag_set(S, i, v, is_anc) {
    var seen = 0
    traverse_space_dag(S, is_anc || (() => true), (node) => {
        var local = i - seen
        if (local < node.elems.length) {
            node.elems[local] = v
            return false  // done — stop the traversal
        }
        seen += node.elems.length
    })
}
// Count the elements of S visible to `is_anc` (defaults to all).
function space_dag_length(S, is_anc) {
    var total = 0
    traverse_space_dag(S, is_anc || (() => true), (node) => {
        total += node.elems.length
    })
    return total
}
// Split `node` at element index x: elems[x..] move into a fresh tail
// node that inherits deleted_by, nexts and next, while `node` keeps
// elems[0..x) and points at the tail via .next. When `new_next` is
// given it becomes node's single branch; `end_cap` marks node capped.
// Annotations are repartitioned by position. Returns the tail node.
function space_dag_break_node(node, x, end_cap, new_next) {
    var tail = create_space_dag_node(null, node.elems.slice(x), node.end_cap)
    Object.assign(tail.deleted_by, node.deleted_by)
    tail.nexts = node.nexts
    tail.next = node.next
    node.elems = node.elems.slice(0, x)
    node.end_cap = end_cap
    node.nexts = new_next ? [new_next] : []
    node.next = tail
    // split annotations: positions <= x stay on node, the rest shift
    // onto the tail (re-based by -x)
    var annotations = node.annotations || {}
    delete node.annotations
    Object.entries(annotations).forEach(e => {
        if (e[1] <= x) {
            node.annotations = node.annotations || {}
            node.annotations[e[0]] = e[1]
        } else {
            tail.annotations = tail.annotations || {}
            tail.annotations[e[0]] = e[1] - x
        }
    })
    return tail
}
// Splice `splices` ([offset, del_count, insertion] tuples, offsets in
// the coordinate space of `is_anc`) into sequence S under `version`.
// Insertions become new branch nodes (ordered among concurrent
// siblings by sort_key/version); deletions mark deleted_by and split
// nodes at the boundaries.
//
// Fix: removed a leftover `if (!S) debugger` debugging statement that
// would pause execution with devtools open; a null S still fails fast
// on the traverse call below.
function space_dag_add_version(S, version, splices, sort_key, is_anc) {
    // insert `to` among concurrent siblings, keeping them sorted by
    // sort_key (falling back to version id)
    function add_to_nexts(nexts, to) {
        var i = binarySearch(nexts, function (x) {
            if ((to.sort_key || to.version) < (x.sort_key || x.version)) return -1
            if ((to.sort_key || to.version) > (x.sort_key || x.version)) return 1
            return 0
        })
        nexts.splice(i, 0, to)
    }
    var si = 0            // index of the splice currently being applied
    var delete_up_to = 0  // absolute offset through which deletion continues
    // `node` is a patch
    var process_patch = (node, offset, has_nexts, prev, _version, deleted) => {
        var s = splices[si]
        if (!s) return false  // all splices applied — stop traversing
        if (deleted) {
            // pure insert at the boundary of deleted content
            if (s[1] == 0 && s[0] == offset) {
                if (node.elems.length == 0 && !node.end_cap && has_nexts) return
                var new_node = create_space_dag_node(version, s[2], null, sort_key)
                if (node.elems.length == 0 && !node.end_cap)
                    add_to_nexts(node.nexts, new_node)
                else
                    space_dag_break_node(node, 0, undefined, new_node)
                si++
            }
            return
        }
        if (s[1] == 0) {
            // pure insert: place it at the end of this node or break the
            // node in the middle
            var d = s[0] - (offset + node.elems.length)
            if (d > 0) return  // insertion point is further along
            if (d == 0 && !node.end_cap && has_nexts) return
            var new_node = create_space_dag_node(version, s[2], null, sort_key)
            if (d == 0 && !node.end_cap) {
                add_to_nexts(node.nexts, new_node)
            } else {
                space_dag_break_node(node, s[0] - offset, undefined, new_node)
            }
            si++
            return
        }
        if (delete_up_to <= offset) {
            // start of a delete/replace range
            var d = s[0] - (offset + node.elems.length)
            if (d >= 0) return  // range starts beyond this node
            delete_up_to = s[0] + s[1]
            if (s[2]) {
                // replacement content rides on an end-capped break
                var new_node = create_space_dag_node(version, s[2], null, sort_key)
                if (s[0] == offset && prev && prev.end_cap) {
                    add_to_nexts(prev.nexts, new_node)
                } else {
                    space_dag_break_node(node, s[0] - offset, true, new_node)
                    return
                }
            } else {
                if (s[0] == offset) {
                } else {
                    space_dag_break_node(node, s[0] - offset)
                    return
                }
            }
        }
        if (delete_up_to > offset) {
            // inside a delete range: split at the end if needed, then mark
            if (delete_up_to <= offset + node.elems.length) {
                if (delete_up_to < offset + node.elems.length) {
                    space_dag_break_node(node, delete_up_to - offset)
                }
                si++
            }
            node.deleted_by[version] = true
            return
        }
    }
    var f = is_anc
    var exit_early = {}
    var offset = 0
    // Like traverse_space_dag, but process_patch also sees deleted
    // nodes, and offset only advances over visible (non-deleted) elems.
    function traverse(node, prev, version) {
        var has_nexts = node.nexts.find(next => f(next.version))
        var deleted = Object.keys(node.deleted_by).some(version => f(version))
        if (process_patch(node, offset, has_nexts, prev, version, deleted) == false)
            throw exit_early
        if (!deleted) {
            offset += node.elems.length
        }
        for (var next of node.nexts)
            if (f(next.version)) traverse(next, null, next.version)
        if (node.next) traverse(node.next, node, version)
    }
    try {
        traverse(S, null, S.version)
    } catch (e) {
        if (e != exit_early) throw e
    }
}
// Depth-first walk of the space-dag rooted at S.
//   f            - predicate over version ids: which branches to follow
//                  and which deleted_by entries count as deletions
//   cb(node, offset, has_nexts, prev, version, deleted)
//                - visitor; return false to abort the walk early.
//                  `offset` is the running elem count of visited nodes.
//   view_deleted - when true, cb also sees deleted nodes (with the
//                  `deleted` flag set)
//   tail_cb      - optional; invoked on the last node of each chain
function traverse_space_dag(S, f, cb, view_deleted, tail_cb) {
    var STOP = {}  // sentinel thrown to unwind the recursion early
    var offset = 0
    function walk(node, prev, version) {
        var has_nexts = node.nexts.find(n => f(n.version))
        var deleted = Object.keys(node.deleted_by).some(v => f(v))
        if (view_deleted || !deleted) {
            if (cb(node, offset, has_nexts, prev, version, deleted) == false)
                throw STOP
            offset += node.elems.length
        }
        node.nexts.forEach(n => {
            if (f(n.version)) walk(n, null, n.version)
        })
        if (node.next) walk(node.next, node, version)
        else if (tail_cb) tail_cb(node)
    }
    try {
        walk(S, null, S.version)
    } catch (e) {
        if (e != STOP) throw e
    }
}
var parse_patch = require('../util/utilities.js').parse_patch
// modified from https://stackoverflow.com/questions/22697936/binary-search-in-javascript
// Binary search over sorted `ar` driven by `compare_fn(element)`,
// which returns >0 when the target sorts after the element, <0 when
// it sorts before, and 0 on a match. Returns the index of a match,
// or the insertion point that keeps `ar` sorted when there is none.
function binarySearch(ar, compare_fn) {
    var lo = 0;
    var hi = ar.length - 1;
    while (lo <= hi) {
        var mid = (lo + hi) >> 1;
        var ordering = compare_fn(ar[mid]);
        if (ordering === 0) return mid;
        if (ordering > 0) lo = mid + 1;
        else hi = mid - 1;
    }
    return lo;
}
================================================
FILE: util/apply-patch.js
================================================
// Apply a patch to `obj`: descend along `range` (a path like ".foo.bar[3]"
// or ".foo[3:5]") and replace the addressed location with `content`.
//
// range   - path string made of segments ".field" and "[i]" / "[start:end]";
//           negative indices like "[-1]" or "[-0]" count from the end
// content - the new value (for "[start:end]" on an array, an array of
//           elements to splice in; on a string, a replacement substring)
//
// Returns the mutated `obj` — except when the patch replaces a slice of a
// top-level string, where a brand-new string is returned (strings are
// immutable in JS).
function apply_patch (obj, range, content) {
    // Descend down a bunch of objects until we get to the final object
    // The final object can be a slice
    // Set the value in the final object
    var path = range,
        new_stuff = content

    // We will break up the path into segments, like:
    //
    //   Path: ".foo.bar[3]"
    //
    //   Segments:
    //     - ".foo"
    //     - ".bar"
    //     - "[3]"
    var path_segment = /^(\.([^\.\[]+))|(\[((-?\d+):)?(-?\d+)\])/
    var curr_obj = obj,
        last_obj = null,
        last_field = null   // was an implicit global; now properly local

    // Then we'll iterate through each segment, and descend into the obj.
    //
    // When we reach the *last* segment, we set its value to `content`, and
    // then we're done!
    do {
        // Grab the next segment from the path
        var match = path_segment.exec(path),
            subpath = match[0],
            field = match[2],
            slice_start = match[5],
            slice_end = match[6]

        // Compare against null/undefined rather than truthiness, so that a
        // legitimate zero index (e.g. "[0:2]") is still treated as a slice.
        slice_start = slice_start == null ? slice_start : de_neg(slice_start)
        slice_end = slice_end == null ? slice_end : de_neg(slice_end)

        // If this is not the last segment, then let's iterate one step deeper
        // into the object until we find the thing we're supposed to replace.
        if (path.length !== subpath.length) {
            console.assert(slice_start == null, 'No splices allowed in middle of path')
            last_obj = curr_obj
            last_field = field
            curr_obj = curr_obj[field || slice_end]
            path = path.substr(subpath.length)
        }

        // Otherwise, we made it!  Let's replace the range with its new
        // contents!
        else {
            // There are 4 things we can set the values of:

            // Case 1: Object field
            if (field)
                curr_obj[field] = new_stuff

            // Case 2: Strings
            else if (typeof curr_obj == 'string') {
                console.assert(typeof new_stuff == 'string')
                if (slice_start == null) {
                    // A bare "[i]" on a string means: replace one character
                    slice_start = slice_end;
                    slice_end = slice_end + 1
                }
                if (last_obj) {
                    var s = last_obj[last_field]
                    last_obj[last_field] = (s.slice(0, slice_start)
                                            + new_stuff
                                            + s.slice(slice_end))
                } else
                    // Top-level string: can't mutate in place; return a new one
                    return obj.slice(0, slice_start) + new_stuff + obj.slice(slice_end)
            }

            // Then it's an Array!  We have two ways to set an Array:
            else {
                // Case 3: Array Splice (e.g. [3:9] = [1])
                if (slice_start != null)
                    [].splice.apply(curr_obj, [slice_start, slice_end - slice_start]
                                    .concat(new_stuff))

                // Case 4: Array Set (e.g. [3] = true)
                else {
                    console.assert(slice_end >= 0, 'Index '+subpath+' is too small')
                    console.assert(slice_end <= curr_obj.length - 1,
                                   'Index '+subpath+' is too big')
                    curr_obj[slice_end] = new_stuff
                }
            }

            return obj
        }
    } while (true)

    // This helper converts negative indices, like "[-9]" or "[-0]",
    // into offsets from the end of the current container.
    function de_neg (x) {
        return x[0] === '-'
            ? curr_obj.length - parseInt(x.substr(1), 10)
            : parseInt(x, 10)
    }
}
// When this file is executed directly (`node apply-patch.js`), print a few
// example patches next to a hand-written answer key so the output can be
// verified by eye.  (Not run when the module is require()'d.)
if (require.main === module) {
    // Tests!
    console.log('\nTests:')
    console.log(apply_patch({a: 'b'}, '.a', 'c'))          // object field set
    console.log(apply_patch([1,2,3], '[1]', 9))            // array index set
    console.log(apply_patch([1,2,3], '[1:-0]', [10,100]))  // splice through end
    console.log(apply_patch([1,2,{a:'b'}], '[2].b', 9))    // add nested field
    console.log(apply_patch([1,2,{a:'b'}], '[2].a', 99))   // set nested field
    // Answer key
    console.log(`\nCorrect Answers:
{ a: 'c' }
[ 1, 9, 3 ]
[ 1, 10, 100 ]
[ 1, 2, { a: 'b', b: 9 } ]
[ 1, 2, { a: 99 } ]
`)
}
================================================
FILE: util/braid-bundler.js
================================================
// Bundles up the client javascript file.

// Source files, in load order (later files may depend on earlier ones).
var files = [
    'util/require.js',
    'util/utilities.js',
    'sync9/sync9.js',
    'kernel/antimatter.js',
    'kernel/errors.js',
    'kernel/node.js',
    'kernel/pipe.js',
    'util/diff.js',
    'kernel/store.js',
    'kernel/websocket-client.js',
    'kernel/http-client.js',
    'braidify/braidify-client.js',
    'kernel/leadertab-shell.js',
]
var fs = require('fs')

// Translate relative directories (paths are relative to the repo root)
var file_at = (f) => require('path').join(__dirname, '..', f)

// Create builds/ directory if it doesn't exist.  {recursive: true} makes
// this a no-op when the directory is already there, avoiding the
// check-then-create race of existsSync + mkdirSync.
fs.mkdirSync(file_at('builds'), {recursive: true})

// Write the bundle: every source concatenated with a newline separator.
fs.writeFileSync(
    file_at('builds/braid-bundle.js'),
    files.map(f => fs.readFileSync(file_at(f))).join('\n')
)
================================================
FILE: util/diff.js
================================================
// Convert a diff-match-patch tuple list into splice format, where each
// entry is [position, number_of_deleted_chars, inserted_string] and
// positions are offsets into the ORIGINAL text.  An adjacent
// delete/insert pair collapses into a single splice.
// `factor` (default 1) may be -1 to swap the roles of insert and delete,
// i.e. to read the diff in the reverse direction.
function diff_convert_to_my_format(d, factor) {
    if (factor === undefined) factor = 1
    var splices = []
    var pos = 0
    // Fold one non-equal tuple into `splice`; returns false on DIFF_EQUAL.
    var absorb = function (tuple, splice) {
        if (tuple[0] == DIFF_EQUAL) return false
        if (tuple[0] == DIFF_INSERT * factor) {
            splice[2] = tuple[1]
        } else if (tuple[0] == DIFF_DELETE * factor) {
            splice[1] = tuple[1].length
            pos += splice[1]
        }
        return true
    }
    for (var i = 0; i < d.length; i++) {
        if (d[i][0] == DIFF_EQUAL) {
            pos += d[i][1].length
            continue
        }
        var splice = [pos, 0, '']
        absorb(d[i], splice)
        // A delete immediately followed by an insert (or vice versa)
        // becomes one combined splice.
        if (i + 1 < d.length && absorb(d[i + 1], splice)) i++
        splices.push(splice)
    }
    return splices
}
/**
 * This library modifies the diff-match-patch library by Neil Fraser
* by removing the patch and match functionality and certain advanced
* options in the diff function. The original license is as follows:
*
* ===
*
* Diff Match and Patch
*
* Copyright 2006 Google Inc.
* http://code.google.com/p/google-diff-match-patch/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * The data structure representing a diff is an array of tuples:
 * [[DIFF_DELETE, 'Hello'], [DIFF_INSERT, 'Goodbye'], [DIFF_EQUAL, ' world.']]
 * which means: delete 'Hello', add 'Goodbye' and keep ' world.'
 */
var DIFF_DELETE = -1;  // tuple op: text present only in the old string
var DIFF_INSERT = 1;   // tuple op: text present only in the new string
var DIFF_EQUAL = 0;    // tuple op: text common to both strings
/**
* Find the differences between two texts. Simplifies the problem by stripping
* any common prefix or suffix off the texts before diffing.
* @param {string} text1 Old string to be diffed.
* @param {string} text2 New string to be diffed.
* @param {Int} cursor_pos Expected edit position in text1 (optional)
* @return {Array} Array of diff tuples.
*/
/**
 * Find the differences between two texts.  Simplifies the problem by
 * stripping any common prefix or suffix off the texts before diffing.
 * @param {string} text1 Old string to be diffed.
 * @param {string} text2 New string to be diffed.
 * @param {Int} cursor_pos Expected edit position in text1 (optional)
 * @return {Array} Array of diff tuples.
 */
function diff_main(text1, text2, cursor_pos) {
    // Identical inputs: a single EQUAL run, or nothing at all when empty.
    if (text1 == text2)
        return text1 ? [[DIFF_EQUAL, text1]] : [];

    // An out-of-range cursor hint is simply ignored.
    if (cursor_pos < 0 || text1.length < cursor_pos)
        cursor_pos = null;

    // Peel off the shared prefix (speedup)...
    var prefix_len = diff_commonPrefix(text1, text2);
    var prefix = text1.substring(0, prefix_len);
    text1 = text1.substring(prefix_len);
    text2 = text2.substring(prefix_len);

    // ...and the shared suffix, so the core diff sees only the middle.
    var suffix_len = diff_commonSuffix(text1, text2);
    var suffix = text1.substring(text1.length - suffix_len);
    text1 = text1.substring(0, text1.length - suffix_len);
    text2 = text2.substring(0, text2.length - suffix_len);

    // Diff the middle block, then stitch the prefix/suffix back on.
    var diffs = diff_compute_(text1, text2);
    if (prefix) diffs.unshift([DIFF_EQUAL, prefix]);
    if (suffix) diffs.push([DIFF_EQUAL, suffix]);
    diff_cleanupMerge(diffs);

    return cursor_pos != null ? fix_cursor(diffs, cursor_pos) : diffs;
};
/**
* Find the differences between two texts. Assumes that the texts do not
* have any common prefix or suffix.
* @param {string} text1 Old string to be diffed.
* @param {string} text2 New string to be diffed.
* @return {Array} Array of diff tuples.
*/
/**
 * Find the differences between two texts.  Assumes that the texts do not
 * have any common prefix or suffix.
 * @param {string} text1 Old string to be diffed.
 * @param {string} text2 New string to be diffed.
 * @return {Array} Array of diff tuples.
 */
function diff_compute_(text1, text2) {
    // Degenerate cases: one side is empty (speedup).
    if (!text1) return [[DIFF_INSERT, text2]];
    if (!text2) return [[DIFF_DELETE, text1]];

    var reversed = text1.length > text2.length;
    var longtext = reversed ? text1 : text2;
    var shorttext = reversed ? text2 : text1;

    // When the shorter text sits inside the longer one, the diff is just
    // the surrounding pieces inserted (or deleted, when reversed).
    var at = longtext.indexOf(shorttext);
    if (at != -1) {
        var op = reversed ? DIFF_DELETE : DIFF_INSERT;
        return [[op, longtext.substring(0, at)],
                [DIFF_EQUAL, shorttext],
                [op, longtext.substring(at + shorttext.length)]];
    }

    // A single leftover character can't be an equality (that case was
    // handled above), so this must be a straight replacement.
    if (shorttext.length == 1)
        return [[DIFF_DELETE, text1], [DIFF_INSERT, text2]];

    // Try to split the problem around a large shared substring.
    var hm = diff_halfMatch_(text1, text2);
    if (hm) {
        // hm = [prefix1, suffix1, prefix2, suffix2, common middle]
        var diffs_a = diff_main(hm[0], hm[2]);
        var diffs_b = diff_main(hm[1], hm[3]);
        return diffs_a.concat([[DIFF_EQUAL, hm[4]]], diffs_b);
    }

    return diff_bisect_(text1, text2);
};
/**
* Find the 'middle snake' of a diff, split the problem in two
* and return the recursively constructed diff.
* See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
* @param {string} text1 Old string to be diffed.
* @param {string} text2 New string to be diffed.
* @return {Array} Array of diff tuples.
* @private
*/
function diff_bisect_(text1, text2) {
  // Cache the text lengths to prevent multiple calls.
  var text1_length = text1.length;
  var text2_length = text2.length;
  // At most ceil((len1+len2)/2) edit steps before the two fronts must meet.
  var max_d = Math.ceil((text1_length + text2_length) / 2);
  var v_offset = max_d;
  var v_length = 2 * max_d;
  // v1/v2 record, per diagonal k (indexed as v_offset + k), the furthest x
  // reached by the forward and the reverse search respectively.
  var v1 = new Array(v_length);
  var v2 = new Array(v_length);
  // Setting all elements to -1 is faster in Chrome & Firefox than mixing
  // integers and undefined.
  for (var x = 0; x < v_length; x++) {
    v1[x] = -1;
    v2[x] = -1;
  }
  v1[v_offset + 1] = 0;
  v2[v_offset + 1] = 0;
  var delta = text1_length - text2_length;
  // If the total number of characters is odd, then the front path will collide
  // with the reverse path.
  var front = (delta % 2 != 0);
  // Offsets for start and end of k loop.
  // Prevents mapping of space beyond the grid.
  var k1start = 0;
  var k1end = 0;
  var k2start = 0;
  var k2end = 0;
  for (var d = 0; d < max_d; d++) {
    // Walk the front path one step.
    for (var k1 = -d + k1start; k1 <= d - k1end; k1 += 2) {
      var k1_offset = v_offset + k1;
      var x1;
      // Extend from whichever neighboring diagonal reached further.
      if (k1 == -d || (k1 != d && v1[k1_offset - 1] < v1[k1_offset + 1])) {
        x1 = v1[k1_offset + 1];
      } else {
        x1 = v1[k1_offset - 1] + 1;
      }
      var y1 = x1 - k1;
      // Follow the "snake": skip over any run of matching characters.
      while (x1 < text1_length && y1 < text2_length &&
             text1.charAt(x1) == text2.charAt(y1)) {
        x1++;
        y1++;
      }
      v1[k1_offset] = x1;
      if (x1 > text1_length) {
        // Ran off the right of the graph.
        k1end += 2;
      } else if (y1 > text2_length) {
        // Ran off the bottom of the graph.
        k1start += 2;
      } else if (front) {
        var k2_offset = v_offset + delta - k1;
        if (k2_offset >= 0 && k2_offset < v_length && v2[k2_offset] != -1) {
          // Mirror x2 onto top-left coordinate system.
          var x2 = text1_length - v2[k2_offset];
          if (x1 >= x2) {
            // Overlap detected.
            return diff_bisectSplit_(text1, text2, x1, y1);
          }
        }
      }
    }
    // Walk the reverse path one step.
    for (var k2 = -d + k2start; k2 <= d - k2end; k2 += 2) {
      var k2_offset = v_offset + k2;
      var x2;
      if (k2 == -d || (k2 != d && v2[k2_offset - 1] < v2[k2_offset + 1])) {
        x2 = v2[k2_offset + 1];
      } else {
        x2 = v2[k2_offset - 1] + 1;
      }
      var y2 = x2 - k2;
      // Snake backwards from the ends of both strings.
      while (x2 < text1_length && y2 < text2_length &&
             text1.charAt(text1_length - x2 - 1) ==
             text2.charAt(text2_length - y2 - 1)) {
        x2++;
        y2++;
      }
      v2[k2_offset] = x2;
      if (x2 > text1_length) {
        // Ran off the left of the graph.
        k2end += 2;
      } else if (y2 > text2_length) {
        // Ran off the top of the graph.
        k2start += 2;
      } else if (!front) {
        var k1_offset = v_offset + delta - k2;
        if (k1_offset >= 0 && k1_offset < v_length && v1[k1_offset] != -1) {
          var x1 = v1[k1_offset];
          var y1 = v_offset + x1 - k1_offset;
          // Mirror x2 onto top-left coordinate system.
          x2 = text1_length - x2;
          if (x1 >= x2) {
            // Overlap detected.
            return diff_bisectSplit_(text1, text2, x1, y1);
          }
        }
      }
    }
  }
  // Diff took too long and hit the deadline or
  // number of diffs equals number of characters, no commonality at all.
  return [[DIFF_DELETE, text1], [DIFF_INSERT, text2]];
};
/**
* Given the location of the 'middle snake', split the diff in two parts
* and recurse.
* @param {string} text1 Old string to be diffed.
* @param {string} text2 New string to be diffed.
* @param {number} x Index of split point in text1.
* @param {number} y Index of split point in text2.
* @return {Array} Array of diff tuples.
*/
function diff_bisectSplit_(text1, text2, x, y) {
    // Cut both strings at the middle-snake coordinates...
    var head1 = text1.substring(0, x);
    var head2 = text2.substring(0, y);
    var tail1 = text1.substring(x);
    var tail2 = text2.substring(y);
    // ...then diff each half independently and join the results.
    var head_diffs = diff_main(head1, head2);
    var tail_diffs = diff_main(tail1, tail2);
    return head_diffs.concat(tail_diffs);
};
/**
* Determine the common prefix of two strings.
* @param {string} text1 First string.
* @param {string} text2 Second string.
* @return {number} The number of characters common to the start of each
* string.
*/
/**
 * Determine the common prefix of two strings.
 * @param {string} text1 First string.
 * @param {string} text2 Second string.
 * @return {number} The number of characters common to the start of each
 *     string.
 */
function diff_commonPrefix(text1, text2) {
    // Quick rejection: empty input or differing first characters.
    if (!text1 || !text2 || text1.charAt(0) != text2.charAt(0))
        return 0;
    // Scan forward until the first mismatch (or either string ends).
    var limit = Math.min(text1.length, text2.length);
    var n = 1;   // the first character is already known to match
    while (n < limit && text1.charAt(n) == text2.charAt(n))
        n++;
    return n;
};
/**
* Determine the common suffix of two strings.
* @param {string} text1 First string.
* @param {string} text2 Second string.
* @return {number} The number of characters common to the end of each string.
*/
/**
 * Determine the common suffix of two strings.
 * @param {string} text1 First string.
 * @param {string} text2 Second string.
 * @return {number} The number of characters common to the end of each string.
 */
function diff_commonSuffix(text1, text2) {
    // Quick rejection: empty input or differing last characters.
    if (!text1 || !text2 ||
        text1.charAt(text1.length - 1) != text2.charAt(text2.length - 1))
        return 0;
    // Scan backward from the ends until the first mismatch.
    var limit = Math.min(text1.length, text2.length);
    var n = 1;   // the final character is already known to match
    while (n < limit &&
           text1.charAt(text1.length - n - 1) == text2.charAt(text2.length - n - 1))
        n++;
    return n;
};
/**
* Do the two texts share a substring which is at least half the length of the
* longer text?
* This speedup can produce non-minimal diffs.
* @param {string} text1 First string.
* @param {string} text2 Second string.
* @return {Array.} Five element Array, containing the prefix of
* text1, the suffix of text1, the prefix of text2, the suffix of
* text2 and the common middle. Or null if there was no match.
*/
function diff_halfMatch_(text1, text2) {
  var longtext = text1.length > text2.length ? text1 : text2;
  var shorttext = text1.length > text2.length ? text2 : text1;
  // Too short for any half-length common substring to exist.
  if (longtext.length < 4 || shorttext.length * 2 < longtext.length) {
    return null; // Pointless.
  }

  /**
   * Does a substring of shorttext exist within longtext such that the substring
   * is at least half the length of longtext?
   * Closure, but does not reference any external variables.
   * @param {string} longtext Longer string.
   * @param {string} shorttext Shorter string.
   * @param {number} i Start index of quarter length substring within longtext.
   * @return {Array.} Five element Array, containing the prefix of
   *     longtext, the suffix of longtext, the prefix of shorttext, the suffix
   *     of shorttext and the common middle.  Or null if there was no match.
   * @private
   */
  function diff_halfMatchI_(longtext, shorttext, i) {
    // Start with a 1/4 length substring at position i as a seed.
    var seed = longtext.substring(i, i + Math.floor(longtext.length / 4));
    var j = -1;
    var best_common = '';
    var best_longtext_a, best_longtext_b, best_shorttext_a, best_shorttext_b;
    // Try every occurrence of the seed in shorttext, grow the match in
    // both directions, and remember the longest common region found.
    while ((j = shorttext.indexOf(seed, j + 1)) != -1) {
      var prefixLength = diff_commonPrefix(longtext.substring(i),
                                           shorttext.substring(j));
      var suffixLength = diff_commonSuffix(longtext.substring(0, i),
                                           shorttext.substring(0, j));
      if (best_common.length < suffixLength + prefixLength) {
        best_common = shorttext.substring(j - suffixLength, j) +
            shorttext.substring(j, j + prefixLength);
        best_longtext_a = longtext.substring(0, i - suffixLength);
        best_longtext_b = longtext.substring(i + prefixLength);
        best_shorttext_a = shorttext.substring(0, j - suffixLength);
        best_shorttext_b = shorttext.substring(j + prefixLength);
      }
    }
    // Only a common region covering at least half of longtext qualifies.
    if (best_common.length * 2 >= longtext.length) {
      return [best_longtext_a, best_longtext_b,
              best_shorttext_a, best_shorttext_b, best_common];
    } else {
      return null;
    }
  }

  // First check if the second quarter is the seed for a half-match.
  var hm1 = diff_halfMatchI_(longtext, shorttext,
                             Math.ceil(longtext.length / 4));
  // Check again based on the third quarter.
  var hm2 = diff_halfMatchI_(longtext, shorttext,
                             Math.ceil(longtext.length / 2));
  var hm;
  if (!hm1 && !hm2) {
    return null;
  } else if (!hm2) {
    hm = hm1;
  } else if (!hm1) {
    hm = hm2;
  } else {
    // Both matched.  Select the longest.
    hm = hm1[4].length > hm2[4].length ? hm1 : hm2;
  }

  // A half-match was found, sort out the return data.
  // The helper worked in longtext/shorttext order; map back to text1/text2.
  var text1_a, text1_b, text2_a, text2_b;
  if (text1.length > text2.length) {
    text1_a = hm[0];
    text1_b = hm[1];
    text2_a = hm[2];
    text2_b = hm[3];
  } else {
    text2_a = hm[0];
    text2_b = hm[1];
    text1_a = hm[2];
    text1_b = hm[3];
  }
  var mid_common = hm[4];
  return [text1_a, text1_b, text2_a, text2_b, mid_common];
};
/**
* Reorder and merge like edit sections. Merge equalities.
* Any edit section can move as long as it doesn't cross an equality.
* @param {Array} diffs Array of diff tuples.
*/
function diff_cleanupMerge(diffs) {
  diffs.push([DIFF_EQUAL, '']); // Add a dummy entry at the end.
  var pointer = 0;
  var count_delete = 0;    // deletes accumulated since the last equality
  var count_insert = 0;    // inserts accumulated since the last equality
  var text_delete = '';    // their concatenated deleted text
  var text_insert = '';    // their concatenated inserted text
  var commonlength;
  while (pointer < diffs.length) {
    switch (diffs[pointer][0]) {
      case DIFF_INSERT:
        count_insert++;
        text_insert += diffs[pointer][1];
        pointer++;
        break;
      case DIFF_DELETE:
        count_delete++;
        text_delete += diffs[pointer][1];
        pointer++;
        break;
      case DIFF_EQUAL:
        // Upon reaching an equality, check for prior redundancies.
        if (count_delete + count_insert > 1) {
          if (count_delete !== 0 && count_insert !== 0) {
            // Factor out any common prefixes of the delete/insert texts
            // into the preceding equality (creating one at the front if
            // there is none).
            commonlength = diff_commonPrefix(text_insert, text_delete);
            if (commonlength !== 0) {
              if ((pointer - count_delete - count_insert) > 0 &&
                  diffs[pointer - count_delete - count_insert - 1][0] ==
                  DIFF_EQUAL) {
                diffs[pointer - count_delete - count_insert - 1][1] +=
                    text_insert.substring(0, commonlength);
              } else {
                diffs.splice(0, 0, [DIFF_EQUAL,
                                    text_insert.substring(0, commonlength)]);
                pointer++;
              }
              text_insert = text_insert.substring(commonlength);
              text_delete = text_delete.substring(commonlength);
            }
            // Factor out any common suffixes into the current equality.
            commonlength = diff_commonSuffix(text_insert, text_delete);
            if (commonlength !== 0) {
              diffs[pointer][1] = text_insert.substring(text_insert.length -
                  commonlength) + diffs[pointer][1];
              text_insert = text_insert.substring(0, text_insert.length -
                  commonlength);
              text_delete = text_delete.substring(0, text_delete.length -
                  commonlength);
            }
          }
          // Delete the offending records and add the merged ones.
          if (count_delete === 0) {
            diffs.splice(pointer - count_insert,
                count_delete + count_insert, [DIFF_INSERT, text_insert]);
          } else if (count_insert === 0) {
            diffs.splice(pointer - count_delete,
                count_delete + count_insert, [DIFF_DELETE, text_delete]);
          } else {
            diffs.splice(pointer - count_delete - count_insert,
                count_delete + count_insert, [DIFF_DELETE, text_delete],
                [DIFF_INSERT, text_insert]);
          }
          // Reposition just past whatever was spliced in.
          pointer = pointer - count_delete - count_insert +
              (count_delete ? 1 : 0) + (count_insert ? 1 : 0) + 1;
        } else if (pointer !== 0 && diffs[pointer - 1][0] == DIFF_EQUAL) {
          // Merge this equality with the previous one.
          diffs[pointer - 1][1] += diffs[pointer][1];
          diffs.splice(pointer, 1);
        } else {
          pointer++;
        }
        count_insert = 0;
        count_delete = 0;
        text_delete = '';
        text_insert = '';
        break;
    }
  }
  if (diffs[diffs.length - 1][1] === '') {
    diffs.pop(); // Remove the dummy entry at the end.
  }
  // Second pass: look for single edits surrounded on both sides by equalities
  // which can be shifted sideways to eliminate an equality.
  // e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
  var changes = false;
  pointer = 1;
  // Intentionally ignore the first and last element (don't need checking).
  while (pointer < diffs.length - 1) {
    if (diffs[pointer - 1][0] == DIFF_EQUAL &&
        diffs[pointer + 1][0] == DIFF_EQUAL) {
      // This is a single edit surrounded by equalities.
      if (diffs[pointer][1].substring(diffs[pointer][1].length -
          diffs[pointer - 1][1].length) == diffs[pointer - 1][1]) {
        // Shift the edit over the previous equality.
        diffs[pointer][1] = diffs[pointer - 1][1] +
            diffs[pointer][1].substring(0, diffs[pointer][1].length -
                                        diffs[pointer - 1][1].length);
        diffs[pointer + 1][1] = diffs[pointer - 1][1] + diffs[pointer + 1][1];
        diffs.splice(pointer - 1, 1);
        changes = true;
      } else if (diffs[pointer][1].substring(0, diffs[pointer + 1][1].length) ==
          diffs[pointer + 1][1]) {
        // Shift the edit over the next equality.
        diffs[pointer - 1][1] += diffs[pointer + 1][1];
        diffs[pointer][1] =
            diffs[pointer][1].substring(diffs[pointer + 1][1].length) +
            diffs[pointer + 1][1];
        diffs.splice(pointer + 1, 1);
        changes = true;
      }
    }
    pointer++;
  }
  // If shifts were made, the diff needs reordering and another shift sweep.
  if (changes) {
    diff_cleanupMerge(diffs);
  }
};
/*
* Modify a diff such that the cursor position points to the start of a change:
* E.g.
* cursor_normalize_diff([[DIFF_EQUAL, 'abc']], 1)
* => [1, [[DIFF_EQUAL, 'a'], [DIFF_EQUAL, 'bc']]]
* cursor_normalize_diff([[DIFF_INSERT, 'new'], [DIFF_DELETE, 'xyz']], 2)
* => [2, [[DIFF_INSERT, 'new'], [DIFF_DELETE, 'xy'], [DIFF_DELETE, 'z']]]
*
* @param {Array} diffs Array of diff tuples
* @param {Int} cursor_pos Suggested edit position. Must not be out of bounds!
* @return {Array} A tuple [cursor location in the modified diff, modified diff]
*/
function cursor_normalize_diff (diffs, cursor_pos) {
    // A cursor at position 0 needs no splitting.  (DIFF_EQUAL happens to
    // equal the numeric index 0 here, matching the original return value.)
    if (cursor_pos === 0) return [DIFF_EQUAL, diffs];
    var consumed = 0;
    for (var i = 0; i < diffs.length; i++) {
        var tuple = diffs[i];
        // Inserts occupy no space in the original text, so skip them.
        if (tuple[0] !== DIFF_DELETE && tuple[0] !== DIFF_EQUAL) continue;
        var end = consumed + tuple[1].length;
        if (cursor_pos === end) return [i + 1, diffs];
        if (cursor_pos < end) {
            // Cursor lands inside this tuple: split it in two.
            // Work on a copy to avoid mutating the caller's array.
            diffs = diffs.slice();
            var cut = cursor_pos - consumed;
            diffs.splice(i, 1,
                         [tuple[0], tuple[1].slice(0, cut)],
                         [tuple[0], tuple[1].slice(cut)]);
            return [i + 1, diffs];
        }
        consumed = end;
    }
    throw new Error('cursor_pos is out of bounds!')
}
/*
* Modify a diff such that the edit position is "shifted" to the proposed edit location (cursor_position).
*
* Case 1)
* Check if a naive shift is possible:
* [0, X], [ 1, Y] -> [ 1, Y], [0, X] (if X + Y === Y + X)
* [0, X], [-1, Y] -> [-1, Y], [0, X] (if X + Y === Y + X) - holds same result
* Case 2)
* Check if the following shifts are possible:
* [0, 'pre'], [ 1, 'prefix'] -> [ 1, 'pre'], [0, 'pre'], [ 1, 'fix']
* [0, 'pre'], [-1, 'prefix'] -> [-1, 'pre'], [0, 'pre'], [-1, 'fix']
* ^ ^
* d d_next
*
* @param {Array} diffs Array of diff tuples
* @param {Int} cursor_pos Suggested edit position. Must not be out of bounds!
* @return {Array} Array of diff tuples
*/
function fix_cursor (diffs, cursor_pos) {
  // Split the diff so the cursor falls exactly on a tuple boundary.
  var norm = cursor_normalize_diff(diffs, cursor_pos);
  var ndiffs = norm[1];
  var cursor_pointer = norm[0];
  var d = ndiffs[cursor_pointer];        // tuple immediately after the cursor
  var d_next = ndiffs[cursor_pointer + 1];

  if (d == null) {
    // Text was deleted from end of original string,
    // cursor is now out of bounds in new string
    return diffs;
  } else if (d[0] !== DIFF_EQUAL) {
    // A modification happened at the cursor location.
    // This is the expected outcome, so we can return the original diff.
    return diffs;
  } else {
    if (d_next != null && d[1] + d_next[1] === d_next[1] + d[1]) {
      // Case 1)
      // It is possible to perform a naive shift
      // (string concatenation commutes, so swapping preserves the result)
      ndiffs.splice(cursor_pointer, 2, d_next, d)
      return merge_tuples(ndiffs, cursor_pointer, 2)
    } else if (d_next != null && d_next[1].indexOf(d[1]) === 0) {
      // Case 2)
      // d[1] is a prefix of d_next[1]
      // We can assume that d_next[0] !== 0, since d[0] === 0
      // Shift edit locations..
      ndiffs.splice(cursor_pointer, 2, [d_next[0], d[1]], [0, d[1]]);
      var suffix = d_next[1].slice(d[1].length);
      if (suffix.length > 0) {
        ndiffs.splice(cursor_pointer + 2, 0, [d_next[0], suffix]);
      }
      return merge_tuples(ndiffs, cursor_pointer, 3)
    } else {
      // Not possible to perform any modification
      return diffs;
    }
  }
}
/*
 * Try to merge tuples with their neighbors in a given range.
* E.g. [0, 'a'], [0, 'b'] -> [0, 'ab']
*
* @param {Array} diffs Array of diff tuples.
* @param {Int} start Position of the first element to merge (diffs[start] is also merged with diffs[start - 1]).
* @param {Int} length Number of consecutive elements to check.
* @return {Array} Array of merged diff tuples.
*/
// Merge adjacent tuples that share the same operation within the range
// [start-1, start+length).  E.g. [0,'a'],[0,'b'] -> [0,'ab'].
// Mutates and returns `diffs`.
function merge_tuples (diffs, start, length) {
  // Check from (start-1) to (start+length), walking backwards so a merge
  // doesn't disturb the indices still to be visited.
  for (var i = start + length - 1; i >= 0 && i >= start - 1; i--) {
    if (i + 1 < diffs.length) {
      var left_d = diffs[i];
      var right_d = diffs[i+1];
      // BUG FIX: compare the two OPERATIONS.  The original compared
      // left_d[0] (an op) against right_d[1] (the text payload), which can
      // never match for string payloads, so neighbors were never merged.
      if (left_d[0] === right_d[0]) {
        diffs.splice(i, 2, [left_d[0], left_d[1] + right_d[1]]);
      }
    }
  }
  return diffs;
}
module.exports = require.diff = {
diff_convert_to_my_format,
diff_main
}
================================================
FILE: util/require.js
================================================
// These 8 lines let browsers import modules with require().
function require (thing) {
thing = thing.split('/')
thing = thing[thing.length-1]
if (thing.slice(-3) === '.js')
thing = thing.slice(0,-3)
console.assert(require[thing], `require("${thing}") failed because
================================================
FILE: yarnball/yarnball.js
================================================
console.log('yarnball 0.001')
// Client-side cache of yarnball documents.  Each key shares one loom
// connection among all subscribers; every subscriber callback fires on
// each change.
function create_yarnball_client(base_url) {
    var client = {}
    var connections = {}

    // Subscribe `cb` to `key`; resolves with the current value once the
    // first read has arrived over the loom connection.
    client.get = async (key, cb) => {
        if (!connections[key]) connections[key] = {cbs: []}
        connections[key].cbs.push(cb)
        if (!connections[key].loom) {
            // Parked promise, resolved by the first on_change below
            connections[key].load_promise =
                new Promise(done => connections[key].load_done = done)
            connections[key].loom = create_loom_client(null, base_url + key, L => {
                var value = L.read()
                connections[key].last_val = value
                connections[key].load_done()
                for (let subscriber of connections[key].cbs) subscriber(value)
            })
        }
        await connections[key].load_promise
        // Connection may have been forgotten while we were waiting
        return connections[key]?.last_val
    }

    // Drop one subscriber; close the connection when none remain.
    client.forget = (key, cb) => {
        if (!connections[key]) return
        var at = connections[key].cbs.findIndex(x => x == cb)
        if (at < 0) return
        connections[key].cbs.splice(at, 1)
        if (!connections[key].cbs.length) {
            connections[key].loom.close()
            delete connections[key]
        }
    }

    // Apply patches locally and notify every subscriber of the new value.
    // Only valid once the document's first value has loaded.
    client.set = (key, ...patches) => {
        if (connections[key]?.last_val == undefined) throw 'can not yet set'
        connections[key].loom.set(...patches)
        var value = connections[key].loom.read()
        for (let subscriber of connections[key].cbs) subscriber(value)
    }

    return client
}
// Wraps a loom CRDT (create_loom) in a reconnecting WebSocket transport.
// `on_change(L, value)` fires after each remote 'welcome' or 'set' message.
// NOTE(review): relies on browser globals (WebSocket, alert) — presumably
// client-side only; confirm before using under Node.
function create_loom_client(L, url, on_change) {
    var ws = null
    // Outgoing loom messages go straight onto the socket as JSON; they are
    // silently dropped unless the socket is OPEN (readyState == 1).
    L = create_loom(L, (to, x) => {
        ws.readyState == 1 && ws.send(JSON.stringify(x))
    })
    L.close = () => {
        L.forget()
        // Detach every handler first so the reconnect timer never fires
        ws.onopen = () => {}
        ws.onmessage = () => {}
        ws.onclose = () => {}
        ws.close()
    }
    connect()
    function connect() {
        ws = new WebSocket(url)
        ws.onopen = () => {
            console.log('CONNECTED!')
            L.get()
        }
        ws.onmessage = x => {
            console.log(`RECV: ${x.data}`)
            x = JSON.parse(x.data)
            if (x.cmd == 'error') {
                // Server refused the subscription; stop retrying entirely
                alert('trouble syncing.. note you cannot sync to the same doc twice on the same machine')
                ws.onclose = () => {}
                ws.close()
                return
            }
            var y = L.receive(x)
            // Remember the remote peer id from the first message we see
            if (!ws.my_peer) ws.my_peer = x.peer
            if (x.cmd == 'welcome' || x.cmd == 'set') on_change(L, y)
        }
        ws.onclose = () => {
            // Record the fissure for the lost peer, then retry every 3s
            if (ws.my_peer) L.disconnect(ws.my_peer)
            setTimeout(connect, 3000)
        }
    }
    return L
}
function create_loom(L, send) {
L = L ?? {}
if (!L.id) L.id = Math.random().toString(36).slice(2)
if (!L.next_seq) L.next_seq = 0
L.S = L.S ?? null
L.T = L.T ?? {}
L.current_version = L.current_version ?? {}
L.peers = L.peers ?? {}
L.version_cache = L.version_cache ?? {}
L.fissures = L.fissures ?? {}
L.acked_boundary = L.acked_boundary ?? {}
L.unack_boundary = L.unack_boundary ?? {}
L.acks_in_process = L.acks_in_process ?? {}
var orig_send = send
send = (to, msg) => {
orig_send(to, {peer: L.id, conn: L.peers[to], ...msg})
}
L.get = peer => {
send(peer, {cmd: 'get', conn: Math.random().toString(36).slice(2)})
}
L.forget = peer => {
send(peer, {cmd: 'forget'})
}
L.disconnect = peer => {
if (!L.peers[peer]) return
var conn = L.peers[peer]
delete L.peers[peer]
var versions = {}
var ack_versions = ancestors(L.acked_boundary)
Object.keys(L.T).forEach(v => {
if (!ack_versions[v] || L.acked_boundary[v]) versions[v] = true
})
L.receive({cmd: 'fissure', fissure: {a: L.id, b: peer, conn, versions, time: Date.now()}})
}
L.set = (...patches) => {
L.receive({cmd: 'set', version: `${L.next_seq++}@${L.id}`, parents: {...L.current_version}, patches})
}
L.read = (is_anc) => {
if (!is_anc) is_anc = () => true
else if (typeof(is_anc) == 'string') {
var ancs = x.ancestors({[is_anc]: true})
is_anc = v => ancs[v]
} else if (typeof(is_anc) == 'object') {
var ancs = x.ancestors(is_anc)
is_anc = v => ancs[v]
}
return rec_read(L.S)
function rec_read(x) {
if (x && typeof(x) == 'object') {
if (x.t == 'lit') return JSON.parse(JSON.stringify(x.S))
if (x.t == 'val') return rec_read(space_dag_get(x.S, 0, is_anc))
if (x.t == 'obj') {
var o = {}
Object.entries(x.S).forEach(([k, v]) => {
var x = rec_read(v)
if (x != null) o[k] = x
})
return o
}
if (x.t == 'arr') {
var a = []
traverse_space_dag(x.S, is_anc, (node, _, __, ___, ____, deleted) => {
if (!deleted) node.elems.forEach((e) => a.push(rec_read(e)))
}, true)
return a
}
if (x.t == 'str') {
var s = []
traverse_space_dag(x.S, is_anc, (node, _, __, ___, ____, deleted) => {
if (!deleted) s.push(node.elems)
}, true)
return s.join('')
}
throw 'bad'
} return x
}
}
// Core protocol handler: processes one message from a peer connection and
// mutates the closed-over antimatter state L (peers, time-dag T, fissures,
// ack boundaries). Dispatches on `cmd`:
//   get/get_back - mutual subscription handshake, answered with 'welcome'
//   forget       - unsubscribe a peer
//   set          - a new version with patches
//   ack1/ack2    - local and global acknowledgment of a version
//   fissure      - record of a broken connection
//   welcome      - a peer's state dump to merge
L.receive = ({cmd, version, parents, patches, fissure, versions, fissures, unack_boundary, min_leaves, peer, conn}) => {
if (cmd == 'get' || cmd == 'get_back') {
// Register the subscribing peer; 'get_back' is the symmetric reply so
// both sides end up subscribed to each other.
if (L.peers[peer]) throw 'bad'
L.peers[peer] = conn
if (cmd == 'get') send(peer, {cmd: 'get_back'})
// Send the peer everything it is missing: versions not under `parents`,
// all known fissures, and the leaves of the shared history.
send(peer, {cmd: 'welcome',
versions: generate_braid(parents),
fissures: Object.values(L.fissures),
parents: parents && Object.keys(parents).length ? get_leaves(ancestors(parents, true)) : {}
})
} else if (cmd == 'forget') {
// Drop the peer and abandon any in-flight ack counting.
if (!L.peers[peer]) throw 'bad'
delete L.peers[peer]
L.acks_in_process = {}
} else if (cmd == 'set') {
// Reject sets whose parents we do not know yet.
// NOTE(review): `p` here has no let/var and leaks as an implicit global
// (contrast the `let p` loops below) — confirm and add `let`.
for (p in parents) if (!L.T[p]) return send(peer, {cmd: 'error'})
// A locally-originated set (falsy peer) or a never-seen version is added
// and rebroadcast to every other peer; acks are then counted against the
// number of peers it was forwarded to (the sender pre-counts as acked).
if (!peer || !L.T[version]) {
var rebased_splices = add_version(version, parents, patches)
for (let p of Object.keys(L.peers)) if (p != peer) send(p, {cmd: 'set', version, parents, patches})
L.acks_in_process[version] = {origin: peer, count: Object.keys(L.peers).length}
if (peer) L.acks_in_process[version].count--
} else if (L.acks_in_process[version]) L.acks_in_process[version].count--
check_ack_count(version)
// NOTE(review): rebased_splices is only assigned in the branch above, so
// a duplicate set returns undefined — confirm callers expect that.
return rebased_splices
} else if (cmd == 'ack1') {
// One downstream peer finished acking `version`; decrement the counter.
if (L.acks_in_process[version]) {
L.acks_in_process[version].count--
check_ack_count(version)
}
} else if (cmd == 'ack2') {
// Global acknowledgment: ignore unknown versions or ones already inside
// either boundary, otherwise record the full ack and gossip it onward.
if (!L.T[version]) return
if (ancestors(L.unack_boundary)[version]) return
if (ancestors(L.acked_boundary)[version]) return
add_full_ack_leaf(version)
for (let p of Object.keys(L.peers)) if (p != peer) send(p, {cmd: 'ack2', version})
} else if (cmd == 'fissure') {
// A fissure records a broken connection (a, b, conn). Store it once,
// reset ack counting, gossip it, and if we are the `b` side, generate
// the mirror fissure naming ourselves as `a`.
var key = fissure.a + ':' + fissure.b + ':' + fissure.conn
if (!L.fissures[key]) {
L.fissures[key] = fissure
L.acks_in_process = {}
for (let p of Object.keys(L.peers)) if (p != peer) send(p, {cmd: 'fissure', fissure})
if (fissure.b == L.id) L.receive({cmd: 'fissure', fissure: {...fissure, a: L.id, b: fissure.a}})
}
} else if (cmd == 'welcome') {
// Merge a peer's state dump: add unknown versions, adopt unknown
// fissures, then reconcile the unack/acked boundaries.
var versions_to_add = {}
versions.forEach(v => versions_to_add[v.version] = v.parents)
// A version we already have implies we have its ancestors too, so prune
// those from the to-add set.
versions.forEach(v => {
if (L.T[v.version]) {
remove_ancestors(v.version)
function remove_ancestors(v) {
if (versions_to_add[v]) {
Object.keys(versions_to_add[v]).forEach(remove_ancestors)
delete versions_to_add[v]
}
}
}
})
var send_error = () => send(peer, {cmd: 'error'})
var added_versions = []
// Add versions in the order sent; a version whose parents are still
// unknown means the welcome was malformed.
for (var v of versions) {
if (versions_to_add[v.version]) {
if (!Object.keys(v.parents).every(p => L.T[p])) return send_error()
add_version(v.version, v.parents, v.patches, v.sort_keys)
added_versions.push(v)
}
}
// Both boundaries must refer only to versions we now have.
if (((min_leaves && Object.keys(min_leaves).some(k => !L.T[k])) || (unack_boundary && Object.keys(unack_boundary).some(k => !L.T[k])))) return send_error()
// Adopt fissures we have not seen; mirror the ones naming us as `b`.
var new_fissures = []
var gen_fissures = []
fissures.forEach(f => {
var key = f.a + ':' + f.b + ':' + f.conn
if (!L.fissures[key]) {
new_fissures.push(f)
L.fissures[key] = f
if (f.b == L.id) gen_fissures.push({...f, a: L.id, b: f.a})
}
})
// Merge the sender's unack boundary into ours (default: our leaves).
// NOTE(review): ancestors() is declared below as
// (versions, ignore_nonexistent); passing L.T as the first argument makes
// the boundary act as the ignore flag and yields the ancestry of *every*
// version — confirm this is the intended call shape.
if (!unack_boundary) unack_boundary = {...L.current_version}
var our_conn_versions = ancestors(L.T, L.unack_boundary)
var new_conn_versions = ancestors(L.T, unack_boundary)
Object.keys(L.unack_boundary).forEach(x => {
if (new_conn_versions[x] && !unack_boundary[x])
delete L.unack_boundary[x]
})
Object.keys(unack_boundary).forEach(x => {
if (!our_conn_versions[x]) L.unack_boundary[x] = true
})
// min_leaves marks the oldest frontier this welcome covers; when absent,
// derive it from `parents` and the versions the peer sent but we skipped.
if (!min_leaves) {
if (versions.length === 0 && (!parents || Object.keys(parents).length === 0))
min_leaves = {...L.current_version}
else {
min_leaves = parents ? {...parents} : {}
versions.forEach(v => {
if (!versions_to_add[v.version]) min_leaves[v.version] = true
})
min_leaves = get_leaves(ancestors(min_leaves, true))
}
}
// Intersect min_leaves into the acked boundary.
var min_versions = ancestors(min_leaves)
var ack_versions = ancestors(L.acked_boundary)
Object.keys(L.acked_boundary).forEach(x => {
if (!min_versions[x]) delete L.acked_boundary[x]
})
Object.keys(min_leaves).forEach(x => {
if (ack_versions[x]) L.acked_boundary[x] = true
})
// Any in-flight ack counts are stale after a merge.
L.acks_in_process = {}
// Forward only what was new to everyone else.
// NOTE(review): `key` here is the var hoisted from the 'fissure' branch
// and is always undefined in this branch — confirm it can be dropped.
if (added_versions.length > 0 || new_fissures.length > 0) {
for (let p of Object.keys(L.peers)) if (p != peer) send(p, {cmd: 'welcome', key, versions: added_versions, unack_boundary,min_leaves, fissures: new_fissures})
}
// Process our generated mirror fissures through the normal path.
gen_fissures.forEach(f => L.receive({cmd: 'fissure', fissure: f}))
}
}
// A "lit" is a plain literal value: primitives (and null/undefined) pass
// through as-is, while objects/arrays are wrapped as {t: 'lit', S: raw}.
var is_lit = x => {
    if (!x) return true
    if (typeof(x) != 'object') return true
    return x.t == 'lit'
}
// Unwrap a lit wrapper back to its raw value; anything else passes through.
var get_lit = x => {
    if (x && typeof(x) == 'object' && x.t == 'lit') return x.S
    return x
}
// Wrap objects/arrays in a lit marker; primitives need no wrapping.
let make_lit = x => {
    if (x && typeof(x) == 'object') return {t: 'lit', S: x}
    return x
}
// Garbage collection: cancel matched/expired fissures, then find "bubbles"
// (runs of fully-acknowledged versions that nothing still references) and
// hand them to apply_bubbles to collapse into single versions.
function prune() {
var unremovable = {}
// NOTE(review): `unremovable` is never read below — appears to be dead.
// A fissure cancels against its mirror image (b:a:conn): both sides have
// reported the same break, so it is healed.
Object.entries(L.fissures).forEach(x => {
var other_key = x[1].b + ':' + x[1].a + ':' + x[1].conn
var other = L.fissures[other_key]
if (other) {
delete L.fissures[x[0]]
delete L.fissures[other_key]
}
})
// Expire fissures older than fissure_lifetime, stamping untimed ones now.
if (L.fissure_lifetime != null) {
var now = Date.now()
Object.entries(L.fissures).forEach(([k, f]) => {
if (f.time == null) f.time = now
if (f.time <= now - L.fissure_lifetime) {
delete L.fissures[k]
}
})
}
// keep_us: versions that must survive pruning — anything a live fissure
// still references, plus anything not fully acked or on the acked frontier.
var keep_us = {}
Object.values(L.fissures).forEach(f => {
Object.keys(f.versions).forEach(v => keep_us[v] = true)
})
// NOTE(review): ancestors() is declared as (versions, ignore_nonexistent);
// passing L.T first makes `acked` the ancestry of every version in T and
// the boundary act as the ignore flag — confirm this is intended.
var acked = ancestors(L.T, L.acked_boundary)
Object.keys(L.T).forEach(x => {
if (!acked[x] || L.acked_boundary[x]) keep_us[x] = true
})
// Invert the time-dag: children[parent] = set of direct child versions.
var children = {}
Object.entries(L.T).forEach(([v, parents]) => {
Object.keys(parents).forEach(parent => {
if (!children[parent]) children[parent] = {}
children[parent][v] = true
})
})
// to_bubble maps each absorbed version to its bubble tag; bubble_tops and
// bubble_bottoms track the extent of each bubble discovered so far.
var to_bubble = {}
var bubble_tops = {}
var bubble_bottoms = {}
// Tag `bottom` and its ancestors, stopping at `top`, with this bubble's tag.
function mark_bubble(bottom, top, tag) {
if (!to_bubble[bottom]) {
to_bubble[bottom] = tag
if (bottom !== top) Object.keys(L.T[bottom]).forEach(p => mark_bubble(p, top, tag))
}
}
// Depth-first walk from the current leaves: whenever a version is not yet
// absorbed (or is itself a bubble top), try to grow a bubble beneath it,
// chaining adjacent bubbles together via bubble_tops/bubble_bottoms.
var done = {}
function f(cur) {
if (!L.T[cur]) return
if (done[cur]) return
done[cur] = true
if (!to_bubble[cur] || bubble_tops[cur]) {
var bubble_top = find_one_bubble(cur)
if (bubble_top) {
delete to_bubble[cur]
mark_bubble(cur, bubble_top, bubble_tops[cur] || cur)
bubble_tops[bubble_top] = bubble_tops[cur] || cur
bubble_bottoms[bubble_tops[cur] || cur] = bubble_top
}
}
Object.keys(L.T[cur]).forEach(f)
}
Object.keys(L.current_version).forEach(f)
// Search downward from cur for a single version that dominates it (every
// path from cur reaches it) with no must-keep versions in between.
// Returns that dominating version, or null if none qualifies.
function find_one_bubble(cur) {
var seen = {[cur]: true}
var q = Object.keys(L.T[cur])
var expecting = Object.fromEntries(q.map(x => [x, true]))
while (q.length) {
cur = q.pop()
if (!L.T[cur]) return null
if (keep_us[cur]) return null
if (Object.keys(children[cur]).every(c => seen[c])) {
seen[cur] = true
delete expecting[cur]
if (!Object.keys(expecting).length) return cur
Object.keys(L.T[cur]).forEach(p => {
q.push(p)
expecting[p] = true
})
}
}
return null
}
// Reshape to the form apply_bubbles expects: version -> [top_tag, bottom].
to_bubble = Object.fromEntries(Object.entries(to_bubble).map(
([v, bub]) => [v, [bub, bubble_bottoms[bub]]]
))
apply_bubbles(to_bubble)
}
// `version` is now acknowledged by every peer: clear it and all of its
// ancestors from both ack boundaries and from in-flight ack counting,
// mark it as the new acked leaf, then garbage-collect.
function add_full_ack_leaf(version) {
    var marks = {}
    var stack = [version]
    while (stack.length) {
        var v = stack.pop()
        if (marks[v]) continue
        marks[v] = true
        delete L.unack_boundary[v]
        delete L.acked_boundary[v]
        delete L.acks_in_process[v]
        for (var parent of Object.keys(L.T[v])) stack.push(parent)
    }
    L.acked_boundary[version] = true
    prune(L)
}
// Fires once every peer we forwarded `version` to has acknowledged it.
// If the set came from a peer, bubble an ack1 back toward the origin;
// if we originated it, record the full ack and broadcast ack2.
function check_ack_count(version) {
    var pending = L.acks_in_process[version]
    if (!pending) return
    if (pending.count != 0) return
    if (pending.origin) {
        send(pending.origin, {cmd: 'ack1', version})
        return
    }
    add_full_ack_leaf(version)
    for (var p of Object.keys(L.peers)) send(p, {cmd: 'ack2', version})
}
// Returns the set-messages needed to reconstruct every version that is NOT
// an ancestor of `versions` (the requester's frontier). Cached messages in
// L.version_cache are reused; missing ones are regenerated and re-cached.
function generate_braid(versions) {
var anc = versions && Object.keys(versions).length ? ancestors(versions, true) : {}
var is_anc = x => anc[x]
if (Object.keys(L.T).length === 0) return []
return Object.entries(L.version_cache).filter(x => !is_anc(x[0])).map(([version, set_message]) => {
return L.version_cache[version] = set_message || generate_set_message(version)
})
// Rebuild the {version, parents, patches, sort_keys} message for one
// version from the current space-dag state.
function generate_set_message(version) {
// A root version (no parents) is encoded as one whole-document literal
// assignment, reading the state as seen by that version alone.
if (!Object.keys(L.T[version]).length) {
return {
version,
parents: {},
patches: [` = ${JSON.stringify(L.read(v => v == version))}`]
}
}
var is_lit = x => !x || typeof(x) !== 'object' || x.t === 'lit'
var get_lit = x => (x && typeof(x) === 'object' && x.t === 'lit') ? x.S : x
// Ancestors of this version (excluding itself) define the baseline the
// patches are expressed against.
var ancs = ancestors({[version]: true})
delete ancs[version]
var is_anc = x => ancs[x]
var path = []
var patches = []
var sort_keys = {}
recurse(L.S)
// Walk the JSON structure, accumulating the path string and emitting the
// splices this version contributed at each node.
function recurse(x) {
if (is_lit(x)) {
// Literals carry no per-version edits.
} else if (x.t === 'val') {
// A value slot: an edit is a plain `path = value` assignment.
space_dag_generate_braid(x.S, version, is_anc).forEach(s => {
if (s[2].length) {
patches.push(`${path.join('')} = ${JSON.stringify(s[2][0])}`)
if (s[3]) sort_keys[patches.length - 1] = s[3]
}
})
traverse_space_dag(x.S, is_anc, node => {
node.elems.forEach(recurse)
})
} else if (x.t === 'arr') {
// Array edits become range splices, then recurse per element with an
// index path segment.
space_dag_generate_braid(x.S, version, is_anc).forEach(s => {
patches.push(`${path.join('')}[${s[0]}:${s[0] + s[1]}] = ${JSON.stringify(s[2])}`)
if (s[3]) sort_keys[patches.length - 1] = s[3]
})
var i = 0
traverse_space_dag(x.S, is_anc, node => {
node.elems.forEach(e => {
path.push(`[${i++}]`)
recurse(e)
path.pop()
})
})
} else if (x.t === 'obj') {
// Objects: recurse per key with a quoted-key path segment.
Object.entries(x.S).forEach(e => {
path.push('[' + JSON.stringify(e[0]) + ']')
recurse(e[1])
path.pop()
})
} else if (x.t === 'str') {
// Strings: range splices only, nothing to recurse into.
space_dag_generate_braid(x.S, version, is_anc).forEach(s => {
patches.push(`${path.join('')}[${s[0]}:${s[0] + s[1]}] = ${JSON.stringify(s[2])}`)
if (s[3]) sort_keys[patches.length - 1] = s[3]
})
}
}
return {
version,
parents: {...L.T[version]},
patches,
sort_keys
}
}
}
// Applies the bubble map computed by prune(): rewrites version ids inside
// every space-dag, folds substructures that became fully literal back into
// plain 'lit' values, and collapses the time-dag accordingly.
// to_bubble maps version -> [bubble_top_tag, bubble_bottom] (see prune).
function apply_bubbles(to_bubble) {
// Bottom-up rewrite of the JSON structure; returns a replacement for x,
// possibly a plain literal when nothing version-dependent remains.
function recurse(x) {
if (is_lit(x)) return x
if (x.t == 'val') {
space_dag_apply_bubbles(x.S, to_bubble)
// Only the first element of each node matters for a value slot.
traverse_space_dag(x.S, () => true, node => {
node.elems = node.elems.slice(0, 1).map(recurse)
}, true)
// A single-node, single-literal dag degenerates to the literal itself.
if (x.S.nexts.length == 0 && !x.S.next && x.S.elems.length == 1 && is_lit(x.S.elems[0])) return x.S.elems[0]
return x
}
if (x.t == 'arr') {
space_dag_apply_bubbles(x.S, to_bubble)
traverse_space_dag(x.S, () => true, node => {
node.elems = node.elems.map(recurse)
}, true)
// A single undeleted node of all-literals becomes a literal array.
if (x.S.nexts.length == 0 && !x.S.next && x.S.elems.every(is_lit) && !Object.keys(x.S.deleted_by).length) return {t: 'lit', S: x.S.elems.map(get_lit)}
return x
}
if (x.t == 'obj') {
Object.entries(x.S).forEach(e => {
var y = x.S[e[0]] = recurse(e[1])
if (y == null) delete x.S[e[0]]
})
// All-literal values collapse the object into one literal.
if (Object.values(x.S).every(is_lit)) {
var o = {}
Object.entries(x.S).forEach(e => o[e[0]] = get_lit(e[1]))
return {t: 'lit', S: o}
}
return x
}
if (x.t == 'str') {
space_dag_apply_bubbles(x.S, to_bubble)
// A single undeleted node collapses to the raw string.
if (x.S.nexts.length == 0 && !x.S.next && !Object.keys(x.S.deleted_by).length) return x.S.elems
return x
}
}
L.S = recurse(L.S)
// Collapse the time-dag: the surviving id (bubble[0]) takes the bottom's
// parent set; absorbed versions disappear, and a surviving version's
// cached set-message is invalidated since its patches may have changed.
Object.entries(to_bubble).forEach(([version, bubble]) => {
if (version === bubble[1])
L.T[bubble[0]] = L.T[bubble[1]]
if (version !== bubble[0]) {
delete L.T[version]
delete L.version_cache[version]
} else L.version_cache[version] = null
})
// If history has collapsed to a single fully-acked leaf with no fissures,
// the entire state reduces to one literal snapshot.
var leaves = Object.keys(L.current_version)
var acked_boundary = Object.keys(L.acked_boundary)
var fiss = Object.keys(L.fissures)
if (leaves.length == 1 && acked_boundary.length == 1
&& leaves[0] == acked_boundary[0] && fiss.length == 0) {
L.T = { [leaves[0]]: {} }
L.S = make_lit(L.read())
}
}
// Inserts `version` (with the given parents and patch strings) into the
// time-dag L.T and applies each patch to the space-dag L.S.
//
// Parameters:
//   version   - new version id
//   parents   - map of parent version ids -> true
//   patches   - array of patch strings (see parse_patch)
//   sort_keys - optional map of patch index -> sort key
//   is_anc    - optional predicate "is this version an ancestor of the new
//               one"; derived from `parents` when omitted
// Returns an array (one entry per patch) of rebased splices from
// space_dag_add_version, or undefined for duplicates and root versions.
//
// NOTE(review): the original file was corrupted here — the text between the
// `<` of resolve_path's for-loop condition and the `=>` of the arr-literal
// map callback was missing (likely eaten as a pseudo-HTML tag during
// extraction). That span has been reconstructed to match the surviving
// `else` branch and the sibling antimatter implementations in this repo.
function add_version(version, parents, patches, sort_keys, is_anc) {
    if (L.T[version]) return              // duplicate: already known
    L.T[version] = {...parents}
    // Cache a deep copy of the raw set message for generate_braid.
    L.version_cache[version] = JSON.parse(JSON.stringify({
        version, parents, patches, sort_keys
    }))
    // The new version supersedes any parent that was a current leaf.
    Object.keys(parents).forEach(k => {
        if (L.current_version[k])
            delete L.current_version[k]
    })
    L.current_version[version] = true
    if (!sort_keys) sort_keys = {}
    // A parentless (root) version resets the whole document to a literal.
    if (!Object.keys(parents).length) {
        var parse = parse_patch(patches[0])
        L.S = make_lit(parse.value)
        return
    }
    // is_anc(v): is v an ancestor of the new version?
    if (!is_anc) {
        if (parents == L.current_version) {
            is_anc = _version => _version != version
        } else {
            var ancs = ancestors(parents)
            is_anc = _version => ancs[_version]
        }
    }
    return patches.map((patch, i) => {
        var sort_key = sort_keys[i]
        var parse = parse_patch(patch)
        var cur = resolve_path(parse)
        if (!parse.slice) {
            // Whole-value assignment: replace every visible element.
            if (cur.t != 'val') throw 'bad'
            var len = space_dag_length(cur.S, is_anc)
            space_dag_add_version(cur.S, version, [[0, len, [parse.delete ? null : make_lit(parse.value)]]], sort_key, is_anc)
        } else {
            // Range splice into a string or array.
            if (typeof parse.value === 'string' && cur.t !== 'str')
                throw `Cannot splice string ${JSON.stringify(parse.value)} into non-string`
            if (parse.value instanceof Array && cur.t !== 'arr')
                throw `Cannot splice array ${JSON.stringify(parse.value)} into non-array`
            if (parse.value instanceof Array)
                parse.value = parse.value.map(x => make_lit(x))
            var r0 = parse.slice[0]
            var r1 = parse.slice[1]
            // Negative indices (including -0) count from the end.
            if (r0 < 0 || Object.is(r0, -0) || r1 < 0 || Object.is(r1, -0)) {
                let len = space_dag_length(cur.S, is_anc)
                if (r0 < 0 || Object.is(r0, -0)) r0 = len + r0
                if (r1 < 0 || Object.is(r1, -0)) r1 = len + r1
            }
            return space_dag_add_version(cur.S, version, [[r0, r1 - r0, parse.value]], sort_key, is_anc)
        }
    })
    // Walks parse.path through L.S, materializing space-dag nodes for any
    // literals along the way, and returns the node the patch targets.
    function resolve_path(parse) {
        var cur = L.S
        if (!cur || typeof(cur) != 'object' || cur.t == 'lit')
            cur = L.S = {t: 'val', S: create_space_dag_node(null, [cur])}
        var prev_S = null
        var prev_i = 0
        for (var i = 0; i < parse.path.length; i++) {
            var key = parse.path[i]
            // A value slot wraps its content: step into the stored element.
            if (cur.t == 'val') cur = space_dag_get(prev_S = cur.S, prev_i = 0, is_anc)
            if (cur.t == 'lit') {
                // Expand a literal container into a live 'arr'/'obj' node so
                // we can descend into it, and write it back into its parent.
                var new_cur = {}
                if (cur.S instanceof Array) {
                    new_cur.t = 'arr'
                    new_cur.S = create_space_dag_node(null, cur.S.map(x => make_lit(x)))
                } else {
                    if (typeof(cur.S) != 'object') throw 'bad'
                    new_cur.t = 'obj'
                    new_cur.S = {}
                    Object.entries(cur.S).forEach(e => new_cur.S[e[0]] = make_lit(e[1]))
                }
                cur = new_cur
                space_dag_set(prev_S, prev_i, cur, is_anc)
            }
            if (cur.t == 'obj') {
                // Descend by key, materializing a value slot if needed.
                let x = cur.S[key]
                if (!x || typeof(x) != 'object' || x.t == 'lit')
                    x = cur.S[key] = {t: 'val', S: create_space_dag_node(null, [x == null ? null : x])}
                cur = x
            } else if (i == parse.path.length - 1 && !parse.slice) {
                // Final path element indexing into a sequence: rewrite the
                // assignment as a one-element slice.
                parse.slice = [key, key + 1]
                parse.value = (cur.t == 'str') ? parse.value : [parse.value]
            } else if (cur.t == 'arr') {
                cur = space_dag_get(prev_S = cur.S, prev_i = key, is_anc)
            } else throw 'bad'
        }
        if (parse.slice) {
            if (cur.t == 'val') cur = space_dag_get(prev_S = cur.S, prev_i = 0, is_anc)
            // Slicing into a raw string or literal array: promote it to a
            // live dag node first.
            if (typeof(cur) == 'string') {
                cur = {t: 'str', S: create_space_dag_node(null, cur)}
                space_dag_set(prev_S, prev_i, cur, is_anc)
            } else if (cur.t == 'lit') {
                if (!(cur.S instanceof Array)) throw 'bad'
                cur = {t: 'arr', S: create_space_dag_node(null, cur.S.map(x => make_lit(x)))}
                space_dag_set(prev_S, prev_i, cur, is_anc)
            }
        }
        return cur
    }
}
// Computes the set containing every version in `versions` plus all of its
// transitive parents in L.T, returned as {version_id: true}.
// Unknown versions throw unless ignore_nonexistent is truthy.
function ancestors(versions, ignore_nonexistent) {
    var result = {}
    var stack = Object.keys(versions)
    while (stack.length) {
        var v = stack.pop()
        if (result[v]) continue
        if (!L.T[v]) {
            if (ignore_nonexistent) continue
            throw `The version ${v} no existo`
        }
        result[v] = true
        for (var parent of Object.keys(L.T[v])) stack.push(parent)
    }
    return result
}
// Returns the subset of `versions` that no other member lists as a parent
// (i.e. the frontier of the set), preserving the original values.
function get_leaves(versions) {
    var leaves = {}
    for (var v of Object.keys(versions)) leaves[v] = versions[v]
    for (var v of Object.keys(versions))
        for (var parent of Object.keys(L.T[v]))
            delete leaves[parent]
    return leaves
}
return L
}
// Builds a fresh space-dag node. `elems` may be an array or a string;
// end_cap and sort_key stay undefined when not supplied, matching every
// existing call shape.
function create_space_dag_node(version, elems, end_cap, sort_key) {
    var node = {
        version,
        sort_key,
        elems,
        deleted_by: {},
        end_cap,
        nexts: [],
        next: null
    }
    return node
}
// Reverse-engineers the splices that `version` applied to this space-dag,
// relative to its ancestors (is_anc). Each splice is a tuple
// [offset, del_count, ins, sort_key, tag] where tag is 'i' (insert),
// 'r' (replace, i.e. insert under an end cap) or 'd' (delete).
function space_dag_generate_braid(S, version, is_anc) {
var splices = []
// Record an insertion at `offset`, merging into the previous splice when
// they are adjacent and compatible.
function add_ins(offset, ins, sort_key, end_cap) {
// NOTE(review): read_raw is defined elsewhere in this file — presumably
// it unwraps nested dag values to raw ones; confirm.
if (typeof(ins) !== 'string')
ins = ins.map(x => read_raw(x, () => false))
if (splices.length > 0) {
var prev = splices[splices.length - 1]
if (prev[0] + prev[1] === offset && !end_cap && (prev[4] === 'i' || (prev[4] === 'r' && prev[1] === 0))) {
prev[2] = prev[2].concat(ins)
return
}
}
splices.push([offset, 0, ins, sort_key, end_cap ? 'r' : 'i'])
}
// Record a deletion, merging into an adjacent non-insert splice.
function add_del(offset, del, ins) {
if (splices.length > 0) {
var prev = splices[splices.length - 1]
if (prev[0] + prev[1] === offset && prev[4] !== 'i') {
prev[1] += del
return
}
}
splices.push([offset, del, ins, null, 'd'])
}
// Walk the dag, tracking `offset` in the coordinate space visible to
// `version`'s ancestors (nodes authored by ancestors and not deleted by
// ancestors).
var offset = 0
function helper(node, _version, end_cap) {
if (_version === version) {
// A node authored by `version` itself is one of its insertions.
add_ins(offset, node.elems.slice(0), node.sort_key, end_cap)
} else if (node.deleted_by[version] && node.elems.length > 0) {
// A node that `version` deleted contributes a deletion.
add_del(offset, node.elems.length, node.elems.slice(0, 0))
}
if ((!_version || is_anc(_version)) && !Object.keys(node.deleted_by).some(is_anc)) {
offset += node.elems.length
}
node.nexts.forEach(next => helper(next, next.version, node.end_cap))
if (node.next) helper(node.next, _version)
}
helper(S, null)
splices.forEach(s => {
// if we have replaces with 0 deletes,
// make them have at least 1 delete..
// this can happen when there are multiple replaces of the same text,
// and our code above will associate those deletes with only one of them
if (s[4] === 'r' && s[1] === 0) s[1] = 1
})
return splices
}
// Rewrites a space-dag in place after bubble collapsing: renames node
// versions and deleted_by entries to their bubble's surviving id, then
// flattens and merges node chains that now share a single version.
// to_bubble maps version -> [surviving_id, bubble_bottom] (see prune).
function space_dag_apply_bubbles(S, to_bubble) {
// Pass 1: rename versions. A node losing its distinct version keeps its
// old id as a sort_key so ordering among siblings stays stable.
traverse_space_dag(S, () => true, node => {
if (to_bubble[node.version] && to_bubble[node.version][0] != node.version) {
if (!node.sort_key) node.sort_key = node.version
node.version = to_bubble[node.version][0]
}
for (var x of Object.keys(node.deleted_by)) {
if (to_bubble[x]) {
delete node.deleted_by[x]
node.deleted_by[to_bubble[x][0]] = true
}
}
}, true)
// Append `next` after the last node of `node`'s next-chain.
function set_nnnext(node, next) {
while (node.next) node = node.next
node.next = next
}
do_line(S, S.version)
// Pass 2: walk each "line" (next-chain) of nodes that share `version`.
// Branches (nexts) whose version now equals the line's version are folded
// back into the main chain; elements deleted by the line's own version are
// dropped; adjacent nodes with identical deletion sets are coalesced.
function do_line(node, version) {
var prev = null
while (node) {
// Fold same-version branches back into a linear chain.
if (node.nexts[0] && node.nexts[0].version == version) {
for (let i = 0; i < node.nexts.length; i++) {
delete node.nexts[i].version
delete node.nexts[i].sort_key
set_nnnext(node.nexts[i], i + 1 < node.nexts.length ? node.nexts[i + 1] : node.next)
}
node.next = node.nexts[0]
node.nexts = []
}
// Content deleted by the line's own version is gone for good; back up
// one node so the emptied node can be merged with its predecessor.
if (node.deleted_by[version]) {
node.elems = node.elems.slice(0, 0)
node.deleted_by = {}
if (prev) { node = prev; continue }
}
// Coalesce with the next node when either side is empty or both carry
// exactly the same deletion set.
var next = node.next
if (!node.nexts.length && next && (!node.elems.length || !next.elems.length || (Object.keys(node.deleted_by).every(x => next.deleted_by[x]) && Object.keys(next.deleted_by).every(x => node.deleted_by[x])))) {
if (!node.elems.length) node.deleted_by = next.deleted_by
node.elems = node.elems.concat(next.elems)
node.end_cap = next.end_cap
node.nexts = next.nexts
node.next = next.next
continue
}
// Recurse into remaining branch lines.
for (let n of node.nexts) do_line(n, n.version)
prev = node
node = next
}
}
}
// Returns the element at visible index i in the space-dag S, where
// visibility is filtered by is_anc (defaults to "everything visible").
// Returns null when i is past the end.
function space_dag_get(S, i, is_anc) {
    var found = null
    var seen = 0
    traverse_space_dag(S, is_anc || (() => true), (node) => {
        var local = i - seen
        if (local < node.elems.length) {
            found = node.elems[local]
            return false        // stop the traversal early
        }
        seen += node.elems.length
    })
    return found
}
// Overwrites the element at visible index i in the space-dag S with v,
// where visibility is filtered by is_anc (defaults to everything).
function space_dag_set(S, i, v, is_anc) {
    var seen = 0
    traverse_space_dag(S, is_anc || (() => true), (node) => {
        var local = i - seen
        if (local < node.elems.length) {
            node.elems[local] = v
            return false        // stop the traversal early
        }
        seen += node.elems.length
    })
}
// Counts the elements of S visible under the is_anc filter (all elements
// when is_anc is omitted).
function space_dag_length(S, is_anc) {
    var total = 0
    traverse_space_dag(S, is_anc || (() => true), node => {
        total += node.elems.length
    })
    return total
}
// Splits `node` at element offset x: the node keeps elems[0..x) and the
// returned tail gets elems[x..] together with the node's old deletion
// marks, branches, and chain. The node's end_cap is replaced by the given
// one, and its branch list becomes [new_next] when supplied.
function space_dag_break_node(node, x, end_cap, new_next) {
    var head_elems = node.elems.slice(0, x)
    var tail = create_space_dag_node(null, node.elems.slice(x), node.end_cap)
    Object.assign(tail.deleted_by, node.deleted_by)
    tail.nexts = node.nexts
    tail.next = node.next
    node.elems = head_elems
    node.end_cap = end_cap
    node.nexts = new_next ? [new_next] : []
    node.next = tail
    return tail
}
// Applies `splices` — [offset, delete_count, insert_elems] triples in
// ascending offset order, in coordinates visible to is_anc — to the
// space-dag S as edits attributed to `version`.
// Returns the same splices rebased into the coordinate space of ALL
// non-deleted content (rebase_offset), for callers tracking cursors.
function space_dag_add_version(S, version, splices, sort_key, is_anc) {
var rebased_splices = []
// Insert `to` into a nexts list, ordered by sort_key (version fallback).
function add_to_nexts(nexts, to) {
var i = binarySearch(nexts, function (x) {
if ((to.sort_key || to.version) < (x.sort_key || x.version)) return -1
if ((to.sort_key || to.version) > (x.sort_key || x.version)) return 1
return 0
})
nexts.splice(i, 0, to)
}
var si = 0
// Visible positions below delete_up_to are being consumed by the current
// delete/replace splice.
var delete_up_to = 0
// Called per visited node; returns false to stop the whole traversal.
var process_patch = (node, offset, has_nexts, prev, _version, deleted) => {
var s = splices[si]
if (!s) return false
// Case 1: the node's content is deleted — a pure insert here attaches
// as a branch (or a split) rather than splicing into elems.
if (deleted) {
if (s[1] == 0 && s[0] == offset) {
if (node.elems.length == 0 && !node.end_cap && has_nexts) return
var new_node = create_space_dag_node(version, s[2], null, sort_key)
rebased_splices.push([rebase_offset, 0, s[2]])
if (node.elems.length == 0 && !node.end_cap)
add_to_nexts(node.nexts, new_node)
else
space_dag_break_node(node, 0, undefined, new_node)
si++
}
return
}
// Case 2: pure insertion (no deletes) inside or at the end of a node.
if (s[1] == 0) {
var d = s[0] - (offset + node.elems.length)
if (d > 0) return
if (d == 0 && !node.end_cap && has_nexts) return
var new_node = create_space_dag_node(version, s[2], null, sort_key)
rebased_splices.push([rebase_offset + s[0] - offset, 0, s[2]])
if (d == 0 && !node.end_cap) {
add_to_nexts(node.nexts, new_node)
} else {
space_dag_break_node(node, s[0] - offset, undefined, new_node)
}
si++
return
}
// Case 3: start of a delete/replace range inside this node.
if (delete_up_to <= offset) {
var d = s[0] - (offset + node.elems.length)
if (d >= 0) return
delete_up_to = s[0] + s[1]
if (s[2]) {
// Replace: the inserted content goes in front of the deletion.
var new_node = create_space_dag_node(version, s[2], null, sort_key)
rebased_splices.push([rebase_offset + s[0] - offset, 0, s[2]])
if (s[0] == offset && prev && prev.end_cap) {
add_to_nexts(prev.nexts, new_node)
} else {
space_dag_break_node(node, s[0] - offset, true, new_node)
return
}
} else {
if (s[0] == offset) {
} else {
// Split so the deletion starts on a node boundary.
space_dag_break_node(node, s[0] - offset)
return
}
}
}
// Consume whole nodes until the delete range is exhausted, marking each
// as deleted by `version`.
if (delete_up_to > offset) {
if (delete_up_to <= offset + node.elems.length) {
if (delete_up_to < offset + node.elems.length) {
space_dag_break_node(node, delete_up_to - offset)
}
si++
}
node.deleted_by[version] = true
rebased_splices.push([rebase_offset, node.elems.length, ''])
return
}
}
var f = is_anc
var exit_early = {}
// offset: visible position under is_anc. rebase_offset: position among
// ALL non-deleted elements, used for the returned rebased splices.
var offset = 0
var rebase_offset = 0
function traverse(node, prev, version) {
var rebase_deleted = Object.keys(node.deleted_by).length > 0
if (!version || f(version)) {
var has_nexts = node.nexts.find(next => f(next.version))
var deleted = Object.keys(node.deleted_by).some(version => f(version))
if (process_patch(node, offset, has_nexts, prev, version, deleted) == false) throw exit_early
if (!deleted) offset += node.elems.length
}
if (!rebase_deleted) rebase_offset += node.elems.length
for (var next of node.nexts) traverse(next, null, next.version)
if (node.next) traverse(node.next, node, version)
}
try {
traverse(S, null, S.version)
} catch (e) {
// exit_early is thrown deliberately to stop once all splices are done.
if (e != exit_early) throw e
}
return rebased_splices
}
// In-order traversal of a space-dag. `f` filters which versions are
// followed; cb(node, offset, has_nexts, prev, version, deleted) is invoked
// for each visible node (deleted nodes are skipped unless view_deleted)
// and may return false to stop the whole walk. `offset` accumulates the
// element count of visited nodes. tail_cb, when given, fires on the final
// node of each next-chain.
function traverse_space_dag(S, f, cb, view_deleted, tail_cb) {
// Early exit is implemented by throwing this sentinel from deep recursion.
var exit_early = {}
var offset = 0
function helper(node, prev, version) {
var has_nexts = node.nexts.find(next => f(next.version))
var deleted = Object.keys(node.deleted_by).some(version => f(version))
if (view_deleted || !deleted) {
if (cb(node, offset, has_nexts, prev, version, deleted) == false)
throw exit_early
offset += node.elems.length
}
// Branches first (each carries its own version), then the main chain
// (which inherits the current line's version).
for (var next of node.nexts)
if (f(next.version)) helper(next, null, next.version)
if (node.next) helper(node.next, node, version)
else if (tail_cb) tail_cb(node)
}
try {
helper(S, null, S.version)
} catch (e) {
if (e != exit_early) throw e
}
}
// Parses a patch string such as `a.b[3:5] = [1, 2]`, `["k"] = 7`, or
// `delete x[2]` into {path: [...], slice?: [lo, hi], value?: any,
// delete?: true}. Path segments may be bare words, bracketed numbers, or
// bracketed JSON strings; a `[lo:hi]` range becomes `slice`.
function parse_patch(patch) {
    var result = { path : [] }
    var re = /^(delete)\s+|\.?([^\.\[ =]+)|\[((\-?\d+)(:\-?\d+)?|'(\\'|[^'])*'|"(\\"|[^"])*")\]|\s*=\s*([\s\S]*)/g
    for (var m = re.exec(patch); m; m = re.exec(patch)) {
        if (m[1]) {
            result.delete = true
        } else if (m[2]) {
            result.path.push(m[2])
        } else if (m[3] && m[5]) {
            result.slice = [JSON.parse(m[4]), JSON.parse(m[5].substr(1))]
        } else if (m[3]) {
            result.path.push(JSON.parse(m[3]))
        } else if (m[8]) {
            result.value = JSON.parse(m[8])
        }
    }
    return result
}
// modified from https://stackoverflow.com/questions/22697936/binary-search-in-javascript
// Binary search over a sorted array. compare_fn(element) returns a
// positive number when the target lies after `element`, negative when
// before, and 0 on a match. Returns the matching index, or the insertion
// point that keeps the array ordered when there is no match.
function binarySearch(ar, compare_fn) {
    var lo = 0;
    var hi = ar.length - 1;
    while (lo <= hi) {
        var mid = (lo + hi) >> 1;
        var cmp = compare_fn(ar[mid]);
        if (cmp === 0) return mid;
        if (cmp > 0) lo = mid + 1;
        else hi = mid - 1;
    }
    return lo;
}