git ssb

1+

Dominic / secure-scuttlebutt



Commit ce09cc52ffa6179ead8f994580143324501aa661

Merge branch 'rm-legacy'

Dominic Tarr committed on 1/15/2019, 11:19:02 AM
Parent: c01134463891ae1d25f93fc89424b752ead934e2
Parent: 7d1179b29f604c4ca98619d4d66822474f29955f

Files changed

codec.jschanged
create.jschanged
index.jschanged
minimal.jschanged
package.jsonchanged
test/add.jschanged
test/end-to-end.jschanged
test/feed.jschanged
test/history.jschanged
test/links.jschanged
test/log.jschanged
test/mesages-by-type.jschanged
test/msg-encoding.jschanged
test/validation.jschanged
test/write-stream.jschanged
test/migration.jsdeleted
api.mdadded
defaults.jsdeleted
legacy.jsdeleted
lib/validators.jsadded
codec.jsView
@@ -3,4 +3,5 @@
33 encode: JSON.stringify,
44 buffer: false,
55 type: 'ssb'
66 }
7+
create.jsView
@@ -1,11 +1,152 @@
1-var level = require('level')
2-var sublevel = require('level-sublevel/bytewise')
3-var SSB = require('./')
1+'use strict'
42
3+var join = require('path').join
4+var EventEmitter = require('events')
5+
6+var pull = require('pull-stream')
7+var ref = require('ssb-ref')
8+var ssbKeys = require('ssb-keys')
9+
10+var u = require('./util')
11+
12+function isString (s) {
13+ return typeof s === 'string'
14+}
15+
16+function errorCB (err) {
17+ if (err) throw err
18+}
19+
520 module.exports = function (path, opts, keys) {
6- opts = opts || require('./defaults')
7- return SSB(
8- sublevel(level(path, {
9- valueEncoding: require('./codec')
10- })), opts, keys, path)
21+ //_ was legacy db. removed that, but for backwards compatibilty reasons do not change interface
22+ if(!path) throw new Error('path must be provided')
23+
24+ keys = keys || ssbKeys.generate()
25+
26+ var db = require('./db')(join(opts.path || path, 'flume'), keys, opts)
27+
28+ // UGLY HACK, but...
29+ // fairly sure that something up the stack expects ssb to be an event emitter.
30+ db.__proto__ = new EventEmitter() // eslint-disable-line
31+
32+ db.opts = opts
33+
34+ var _get = db.get
35+
36+ db.get = function (key, cb) {
37+ let isPrivate = false
38+ let unbox
39+ if (typeof key === 'object') {
40+ isPrivate = key.private === true
41+ unbox = key.unbox
42+ key = key.id
43+ }
44+
45+ if (ref.isMsg(key)) {
46+ return db.keys.get(key, function (err, data) {
47+ if (err) return cb(err)
48+
49+ if (isPrivate && unbox) {
50+ data = db.unbox(data, unbox)
51+ }
52+
53+ let result
54+
55+ if (isPrivate) {
56+ result = data.value
57+ } else {
58+ result = u.originalValue(data.value)
59+ }
60+
61+ cb(null, result)
62+ })
63+ } else if (ref.isMsgLink(key)) {
64+ var link = ref.parseLink(key)
65+ return db.get({
66+ id: link.link,
67+ private: true,
68+ unbox: link.query.unbox.replace(/\s/g, '+')
69+ }, cb)
70+ } else if (Number.isInteger(key)) {
71+ _get(key, cb) // seq
72+ } else {
73+ throw new Error('ssb-db.get: key *must* be a ssb message id or a flume offset')
74+ }
75+ }
76+
77+ db.add = function (msg, cb) {
78+ db.queue(msg, function (err, data) {
79+ if (err) cb(err)
80+ else db.flush(function () { cb(null, data) })
81+ })
82+ }
83+
84+ db.createFeed = function (keys) {
85+ if (!keys) keys = ssbKeys.generate()
86+ function add (content, cb) {
87+ // LEGACY: hacks to support add as a continuable
88+ if (!cb) { return function (cb) { add(content, cb) } }
89+
90+ db.append({ content: content, keys: keys }, cb)
91+ }
92+ return {
93+ add: add,
94+ publish: add,
95+ id: keys.id,
96+ keys: keys
97+ }
98+ }
99+
100+ db.createRawLogStream = function (opts) {
101+ return db.stream(opts)
102+ }
103+
104+ // pull in the features that are needed to pass the tests
105+ // and that sbot, etc uses but are slow.
106+ require('./extras')(db, opts, keys)
107+
108+ // writeStream - used in (legacy) replication.
109+ db.createWriteStream = function (cb) {
110+ cb = cb || errorCB
111+ return pull(
112+ pull.asyncMap(function (data, cb) {
113+ db.queue(data, function (err, msg) {
114+ if (err) {
115+ db.emit('invalid', err, msg)
116+ }
117+ setImmediate(cb)
118+ })
119+ }),
120+ pull.drain(null, function (err) {
121+ if (err) return cb(err)
122+ db.flush(cb)
123+ })
124+ )
125+ }
126+
127+ // should be private
128+ db.createHistoryStream = db.clock.createHistoryStream
129+
130+ // called with [id, seq] or "<id>:<seq>"
131+ db.getAtSequence = function (seqid, cb) {
132+ // will NOT expose private plaintext
133+ db.clock.get(isString(seqid) ? seqid.split(':') : seqid, function (err, value) {
134+ if (err) cb(err)
135+ else cb(null, u.originalData(value))
136+ })
137+ }
138+
139+ db.getVectorClock = function (_, cb) {
140+ if (!cb) cb = _
141+ db.last.get(function (err, h) {
142+ if (err) return cb(err)
143+ var clock = {}
144+ for (var k in h) { clock[k] = h[k].sequence }
145+ cb(null, clock)
146+ })
147+ }
148+
149+ return db
11150 }
151+
152+
index.jsView
@@ -1,174 +1,142 @@
1-'use strict'
1+//var SecretStack = require('secret-stack')
2+var create = require('./create')
3+var ssbKeys = require('ssb-keys')
4+var path = require('path')
5+var osenv = require('osenv')
6+var mkdirp = require('mkdirp')
7+var rimraf = require('rimraf')
8+var mdm = require('mdmanifest')
9+var valid = require('./lib/validators')
10+var pkg = require('./package.json')
211
3-var join = require('path').join
4-var EventEmitter = require('events')
12+function isString(s) { return 'string' === typeof s }
13+function isObject(o) { return 'object' === typeof o }
14+function isFunction (f) { return 'function' === typeof f }
15+// create SecretStack definition
16+var fs = require('fs')
17+var manifest = mdm.manifest(fs.readFileSync(path.join(__dirname, 'api.md'), 'utf-8'))
518
6-var pull = require('pull-stream')
7-var ref = require('ssb-ref')
8-var ssbKeys = require('ssb-keys')
19+manifest.seq = 'async'
20+manifest.usage = 'sync'
21+manifest.clock = 'async'
22+manifest.version = 'sync'
923
10-var u = require('./util')
24+module.exports = {
25+ manifest: manifest,
26+ permissions: {
27+ master: {allow: null, deny: null},
28+ anonymous: {allow: ['createHistoryStream'], deny: null}
29+ },
30+ init: function (api, opts) {
1131
12-function isString (s) {
13- return typeof s === 'string'
14-}
32+ // .temp: use a /tmp data directory
33+ // (useful for testing)
34+ if(opts.temp) {
35+ var name = isString(opts.temp) ? opts.temp : ''+Date.now()
36+ opts.path = path.join(osenv.tmpdir(), name)
37+ rimraf.sync(opts.path)
38+ }
1539
16-function errorCB (err) {
17- if (err) throw err
18-}
40+ // load/create secure scuttlebutt data directory
41+ mkdirp.sync(opts.path)
1942
20-module.exports = function (_db, opts, keys, path) {
21- path = path || _db.location
43+ if(!opts.keys)
44+ opts.keys = ssbKeys.generate('ed25519', opts.seed && Buffer.from(opts.seed, 'base64'))
2245
23- keys = keys || ssbKeys.generate()
46+ if(!opts.path)
47+ throw new Error('opts.path *must* be provided, or use opts.temp=name to create a test instance')
2448
25- var db = require('./db')(join(opts.path || path, 'flume'), keys, opts)
26-
27- // legacy database
28- if (_db) require('./legacy')(_db, db)
29- else db.ready.set(true)
30-
31- db.sublevel = function (a, b) {
32- return _db.sublevel(a, b)
33- }
34-
35- // UGLY HACK, but...
36- // fairly sure that something up the stack expects ssb to be an event emitter.
37- db.__proto__ = new EventEmitter() // eslint-disable-line
38-
39- db.opts = opts
40-
41- var _get = db.get
42-
43- db.get = function (key, cb) {
44- let isPrivate = false
45- let unbox
46- if (typeof key === 'object') {
47- isPrivate = key.private === true
48- unbox = key.unbox
49- key = key.id
49+ // main interface
50+ var ssb = create(opts.path, opts, opts.keys)
51+ //treat the main feed as remote, because it's likely handled like that by others.
52+ var feed = ssb.createFeed(opts.keys, {remote: true})
53+ var _close = api.close
54+ var close = function (arg, cb) {
55+ if('function' === typeof arg) cb = arg
56+ // override to close the SSB database
57+ ssb.close(function (err) {
58+ if (err) throw err
59+ console.log("fallback to close")
60+ _close(cb) //multiserver doesn't take a callback on close.
61+ })
5062 }
5163
52- if (ref.isMsg(key)) {
53- return db.keys.get(key, function (err, data) {
54- if (err) return cb(err)
55-
56- if (isPrivate && unbox) {
57- data = db.unbox(data, unbox)
64+ function since () {
65+ var plugs = {}
66+ var sync = true
67+ for(var k in ssb) {
68+ if(ssb[k] && isObject(ssb[k]) && isFunction(ssb[k].since)) {
69+ plugs[k] = ssb[k].since.value
70+ sync = sync && (plugs[k] === ssb.since.value)
5871 }
59-
60- let result
61-
62- if (isPrivate) {
63- result = data.value
64- } else {
65- result = u.originalValue(data.value)
66- }
67-
68- cb(null, result)
69- })
70- } else if (ref.isMsgLink(key)) {
71- var link = ref.parseLink(key)
72- return db.get({
73- id: link.link,
74- private: true,
75- unbox: link.query.unbox.replace(/\s/g, '+')
76- }, cb)
77- } else if (Number.isInteger(key)) {
78- _get(key, cb) // seq
79- } else {
80- throw new Error('ssb-db.get: key *must* be a ssb message id or a flume offset')
72+ }
73+ return {
74+ since: ssb.since.value,
75+ plugins: plugs,
76+ sync: sync,
77+ }
8178 }
82- }
79+ var self
80+ return self = {
81+ id : feed.id,
82+ keys : opts.keys,
8383
84- db.add = function (msg, cb) {
85- db.queue(msg, function (err, data) {
86- if (err) cb(err)
87- else db.flush(function () { cb(null, data) })
88- })
89- }
84+ ready : function () {
85+ return ssb.ready.value
86+ },
9087
91- db.createFeed = function (keys) {
92- if (!keys) keys = ssbKeys.generate()
93- function add (content, cb) {
94- // LEGACY: hacks to support add as a continuable
95- if (!cb) { return function (cb) { add(content, cb) } }
88+ progress : function () {
89+ return ssb.progress
90+ },
9691
97- db.append({ content: content, keys: keys }, cb)
98- }
99- return {
100- add: add,
101- publish: add,
102- id: keys.id,
103- keys: keys
104- }
105- }
92+ status : function () {
93+ return {progress: self.progress(), db: ssb.status, sync: since() }
94+ },
10695
107- db.createRawLogStream = function (opts) {
108- return db.stream(opts)
109- }
96+ version : function () {
97+ return pkg.version
98+ },
11099
111- // pull in the features that are needed to pass the tests
112- // and that sbot, etc uses but are slow.
113- require('./extras')(db, opts, keys)
100+ //temporary!
101+ _flumeUse :
102+ function (name, flumeview) {
103+ ssb.use(name, flumeview)
104+ return ssb[name]
105+ },
114106
115- // writeStream - used in (legacy) replication.
116- db.createWriteStream = function (cb) {
117- cb = cb || errorCB
118- return pull(
119- pull.asyncMap(function (data, cb) {
120- db.queue(data, function (err, msg) {
121- if (err) {
122- db.emit('invalid', err, msg)
123- }
124- setImmediate(cb)
125- })
126- }),
127- pull.drain(null, function (err) {
128- if (err) return cb(err)
129- db.flush(cb)
130- })
131- )
132- }
107+ // usage : valid.sync(usage, 'string?|boolean?'),
108+ close : close,
133109
134- // should be private
135- db.createHistoryStream = db.clock.createHistoryStream
110+ publish : valid.async(feed.add, 'string|msgContent'),
111+ add : valid.async(ssb.add, 'msg'),
112+ queue : valid.async(ssb.queue, 'msg'),
113+ get : valid.async(ssb.get, 'msgLink|number|object'),
136114
137- // called with [id, seq] or "<id>:<seq>"
138- db.getAtSequence = function (seqid, cb) {
139- // will NOT expose private plaintext
140- db.clock.get(isString(seqid) ? seqid.split(':') : seqid, function (err, value) {
141- if (err) cb(err)
142- else cb(null, u.originalData(value))
143- })
144- }
115+ post : ssb.post,
116+ addMap : ssb.addMap,
145117
146- db.getVectorClock = function (_, cb) {
147- if (!cb) cb = _
148- db.last.get(function (err, h) {
149- if (err) return cb(err)
150- var clock = {}
151- for (var k in h) { clock[k] = h[k].sequence }
152- cb(null, clock)
153- })
154- }
118+ since : since,
155119
156- if (_db) {
157- var close = db.close
158- db.close = function (cb) {
159- var n = 2
160- _db.close(next); close(next)
161-
162- function next (err) {
163- if (err && n > 0) {
164- n = -1
165- return cb(err)
166- }
167- if (--n) return
168- cb()
169- }
120+ getPublicKey : ssb.getPublicKey,
121+ latest : ssb.latest,
122+ getLatest : valid.async(ssb.getLatest, 'feedId'),
123+ latestSequence : valid.async(ssb.latestSequence, 'feedId'),
124+ createFeed : ssb.createFeed,
125+ whoami : function () { return { id: feed.id } },
126+ query : ssb.query,
127+ createFeedStream : valid.source(ssb.createFeedStream, 'readStreamOpts?'),
128+ createHistoryStream : valid.source(ssb.createHistoryStream, ['createHistoryStreamOpts'], ['feedId', 'number?', 'boolean?']),
129+ createLogStream : valid.source(ssb.createLogStream, 'readStreamOpts?'),
130+ createUserStream : valid.source(ssb.createUserStream, 'createUserStreamOpts'),
131+ links : valid.source(ssb.links, 'linksOpts'),
132+ sublevel : ssb.sublevel,
133+ messagesByType : valid.source(ssb.messagesByType, 'string|messagesByTypeOpts'),
134+ createWriteStream : ssb.createWriteStream,
135+ getVectorClock : ssb.getVectorClock,
136+ getAtSequence : ssb.getAtSequence,
137+ addUnboxer : ssb.addUnboxer,
138+ box : ssb.box,
170139 }
171140 }
141+}
172142
173- return db
174-}
minimal.jsView
@@ -103,9 +103,9 @@
103103 }
104104
105105 // NOTE: must use db.ready.set(true) at when migration is complete
106106 // false says the database is not ready yet!
107- var db = Flume(log, false, chainMaps)
107+ var db = Flume(log, true, chainMaps)
108108 .use('last', require('./indexes/last')())
109109
110110 var state = V.initial()
111111 var ready = false
package.jsonView
@@ -15,17 +15,14 @@
1515 "flumelog-offset": "^3.3.1",
1616 "flumeview-hashtable": "^1.0.3",
1717 "flumeview-level": "^3.0.5",
1818 "flumeview-reduce": "^1.3.9",
19- "level": "^4.0.0",
20- "level-sublevel": "^6.6.2",
2119 "ltgt": "^2.2.0",
20+ "mdmanifest": "^1.0.8",
2221 "monotonic-timestamp": "~0.0.8",
22+ "muxrpc-validation": "^3.0.0",
2323 "obv": "0.0.1",
2424 "pull-cont": "^0.1.1",
25- "pull-level": "^2.0.3",
26- "pull-live": "^1.0.1",
27- "pull-paramap": "^1.1.6",
2825 "pull-stream": "^3.4.0",
2926 "ssb-keys": "^7.1.3",
3027 "ssb-msgs": "^5.0.0",
3128 "ssb-ref": "^2.12.0",
@@ -37,8 +34,9 @@
3734 "mkdirp": "^0.5.1",
3835 "osenv": "^0.1.5",
3936 "pull-abortable": "~4.1.0",
4037 "rimraf": "^2.6.2",
38+ "secret-stack": "^5.0.0",
4139 "ssb-feed": "^2.2.1",
4240 "tape": "^4.8.0",
4341 "typewiselite": "~1.0.0"
4442 },
@@ -63,4 +61,5 @@
6361 ],
6462 "files": "test/defaults.js"
6563 }
6664 }
65+
test/add.jsView
@@ -76,5 +76,5 @@
7676 })
7777 })
7878 }
7979
80-if (!module.parent) { module.exports(require('../defaults')) }
80+if (!module.parent) { module.exports({}) }
test/end-to-end.jsView
@@ -170,5 +170,5 @@
170170 })
171171 })
172172 }
173173
174-if (!module.parent) { module.exports(require('../defaults')) }
174+if (!module.parent) { module.exports({}) }
test/feed.jsView
@@ -158,5 +158,5 @@
158158 })
159159 })
160160 }
161161
162-if (!module.parent) { module.exports(require('../defaults')) }
162+if (!module.parent) { module.exports({}) }
test/history.jsView
@@ -192,5 +192,5 @@
192192 )
193193 })
194194 }
195195
196-if (!module.parent) { module.exports(require('../defaults')) }
196+if (!module.parent) { module.exports({}) }
test/links.jsView
@@ -131,5 +131,5 @@
131131 })
132132 })
133133 }
134134
135-if (!module.parent) { module.exports(require('../defaults')) }
135+if (!module.parent) { module.exports({}) }
test/log.jsView
@@ -137,5 +137,5 @@
137137 })
138138 })
139139 }
140140
141-if (!module.parent) { module.exports(require('../defaults')) }
141+if (!module.parent) { module.exports({}) }
test/mesages-by-type.jsView
@@ -76,5 +76,5 @@
7676 })
7777 })
7878 }
7979
80-if (!module.parent) { module.exports(require('../defaults')) }
80+if (!module.parent) { module.exports({}) }
test/msg-encoding.jsView
@@ -80,5 +80,5 @@
8080 })
8181 })
8282 }
8383
84-if (!module.parent) { module.exports(require('../defaults')) }
84+if (!module.parent) { module.exports({}) }
test/validation.jsView
@@ -168,5 +168,5 @@
168168 )
169169 })
170170 }
171171
172-if (!module.parent) { module.exports(require('../defaults')) }
172+if (!module.parent) { module.exports(require('../')) }
test/write-stream.jsView
@@ -67,5 +67,5 @@
6767 )
6868 })
6969 }
7070
71-if (!module.parent) { module.exports(require('../defaults')) }
71+if (!module.parent) { module.exports({}) }
test/migration.jsView
@@ -1,116 +1,0 @@
1-var createFeed = require('ssb-feed')
2-var ssbKeys = require('ssb-keys')
3-var timestamp = require('monotonic-timestamp')
4-var level = require('level')
5-var sublevel = require('level-sublevel')
6-var latest = {}
7-var log = []
8-var db
9-
10-var tape = require('tape')
11-
12-tape('generate fake feed', function (t) {
13- var start = Date.now()
14- var feed = createFeed({
15- getLatest: function (id, cb) {
16- cb(null, latest[id])
17- },
18- add: function (msg, cb) {
19- latest[msg.author] = { key: '%' + ssbKeys.hash(JSON.stringify(msg, null, 2)), value: msg }
20- log.push(msg)
21- cb()
22- }
23- }, ssbKeys.generate())
24-
25- var l = 10000
26- while (l--) { feed.add({ type: 'test', text: 'hello1', l: l }, function () {}) }
27-
28- console.log('generate', Date.now() - start)
29- t.end()
30-})
31-
32-tape('populate legacy database', function (t) {
33- var start = Date.now()
34- db = sublevel(level('/tmp/test-ssb-feed_' + Date.now(), {
35- valueEncoding: require('../codec')
36- }))
37-
38- require('../legacy')(db)
39-
40- ;(function next () {
41- var batch = log.splice(0, 1000)
42- db.batch(batch.map(function (msg) {
43- var key = '%' + ssbKeys.hash(JSON.stringify(msg, null, 2))
44- return {
45- key: key,
46- value: {
47- key: key, value: msg, timestamp: +timestamp()
48- },
49- type: 'put'
50- }
51- }), function (err) {
52- if (err) throw err
53-
54- if (log.length) {
55- console.log(log.length)
56- setTimeout(next)
57- } else {
58- console.log('legacy-write', Date.now() - start)
59- t.end()
60- }
61- })
62- })()
63-})
64-
65-tape('migrate', function (t) {
66- var start = Date.now()
67- var flume = require('../db')('/tmp/test-ssb-migration_' + Date.now())
68-
69- var int = setInterval(function () {
70- console.log(flume.progress)
71- }, 100)
72-
73- flume.ready(function (isReady) {
74- if (isReady) {
75- console.log('ready!', flume.since.value)
76- console.log(flume.progress)
77- console.log('migrate', Date.now() - start)
78- clearInterval(int)
79- t.equal(flume.progress.current, flume.progress.target)
80- t.end()
81- }
82- })
83-
84- require('../legacy')(db, flume)
85-})
86-
87-tape('progress looks right on empty database', function (t) {
88- var db = sublevel(level('/tmp/test-ssb-feed_' + Date.now(), {
89- valueEncoding: require('../codec')
90- }))
91-
92- var flume = require('../db')('/tmp/test-ssb-migration_' + Date.now())
93-
94- flume.ready(function (b) {
95- if (b) {
96- console.log('ready?', flume.progress)
97- t.ok(flume.progress, 'progress object is defined')
98- t.notOk(flume.progress.migration, 'progress.migration is undefined')
99- setTimeout(function () {
100- t.equal(
101- flume.progress.indexes.current,
102- -1,
103- 'current is -1'
104- )
105- t.equal(
106- flume.progress.indexes.target,
107- -1,
108- 'target is -1'
109- )
110- t.end()
111- }, 200)
112- }
113- })
114-
115- require('../legacy')(db, flume)
116-})
api.mdView
@@ -1,0 +1,341 @@
1+# ssb-server
2+
3+Secure-scuttlebutt API server
4+
5+
6+
7+## get: async
8+
9+Get a message by its hash-id.
10+
11+```bash
12+get {msgid}
13+```
14+
15+```js
16+get(msgid, cb)
17+```
18+
19+
20+
21+## createFeedStream: source
22+
23+(feed) Fetch messages ordered by their claimed timestamps.
24+
25+```bash
26+feed [--live] [--gt index] [--gte index] [--lt index] [--lte index] [--reverse] [--keys] [--values] [--limit n]
27+```
28+
29+```js
30+createFeedStream({ live:, gt:, gte:, lt:, lte:, reverse:, keys:, values:, limit:, fillCache:, keyEncoding:, valueEncoding: })
31+```
32+
33+Create a stream of the data in the database, ordered by the timestamp claimed by the author.
34+NOTE - the timestamp is not verified, and may be incorrect.
 35+The range queries (gt, gte, lt, lte) filter against this claimed timestamp.
36+
37+ - `live` (boolean, default: `false`): Keep the stream open and emit new messages as they are received.
38+ - `gt` (greater than), `gte` (greater than or equal) define the lower bound of the range to be streamed. Only records where the key is greater than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
39+ - `lt` (less than), `lte` (less than or equal) define the higher bound of the range to be streamed. Only key/value pairs where the key is less than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
40+ - `reverse` (boolean, default: `false`): a boolean, set true and the stream output will be reversed. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
41+ - `keys` (boolean, default: `true`): whether the `data` event should contain keys. If set to `true` and `values` set to `false` then `data` events will simply be keys, rather than objects with a `key` property.
42+ - `values` (boolean, default: `true`): whether the `data` event should contain values. If set to `true` and `keys` set to `false` then `data` events will simply be values, rather than objects with a `value` property.
43+ - `limit` (number, default: `-1`): limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the data first. A value of `-1` means there is no limit. When `reverse=true` the highest keys will be returned instead of the lowest keys.
 44+ - `fillCache` (boolean, default: `false`): whether LevelDB's LRU-cache should be filled with data read.
45+ - `keyEncoding` / `valueEncoding` (string): the encoding applied to each read piece of data.
46+
47+
48+
49+## createLogStream: source
50+
51+(log) Fetch messages ordered by the time received.
52+
53+```bash
54+log [--live] [--gt index] [--gte index] [--lt index] [--lte index] [--reverse] [--keys] [--values] [--limit n]
55+```
56+
57+```js
58+createLogStream({ live:, gt:, gte:, lt:, lte:, reverse:, keys:, values:, limit:, fillCache:, keyEncoding:, valueEncoding: })
59+```
60+
61+Creates a stream of the messages that have been written to this instance, in the order they arrived.
62+The objects in this stream will be of the form:
63+
64+```
65+{ key: Hash, value: Message, timestamp: timestamp }
66+```
67+
68+`timestamp` is the time which the message was received.
69+It is generated by [monotonic-timestamp](https://github.com/dominictarr/monotonic-timestamp).
 70+The range queries (gt, gte, lt, lte) filter against this receive timestamp.
71+
72+
73+ - `live` (boolean, default: `false`): Keep the stream open and emit new messages as they are received.
74+ - `gt` (greater than), `gte` (greater than or equal) define the lower bound of the range to be streamed. Only records where the key is greater than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
75+ - `lt` (less than), `lte` (less than or equal) define the higher bound of the range to be streamed. Only key/value pairs where the key is less than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
76+ - `reverse` (boolean, default: `false`): a boolean, set true and the stream output will be reversed. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
77+ - `keys` (boolean, default: `true`): whether the `data` event should contain keys. If set to `true` and `values` set to `false` then `data` events will simply be keys, rather than objects with a `key` property.
78+ - `values` (boolean, default: `false`): whether the `data` event should contain values. If set to `true` and `keys` set to `false` then `data` events will simply be values, rather than objects with a `value` property.
79+ - `limit` (number, default: `-1`): limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the data first. A value of `-1` means there is no limit. When `reverse=true` the highest keys will be returned instead of the lowest keys.
 80+ - `fillCache` (boolean, default: `false`): whether LevelDB's LRU-cache should be filled with data read.
81+ - `keyEncoding` / `valueEncoding` (string): the encoding applied to each read piece of data.
82+
83+
84+
85+## messagesByType: source
86+
87+(logt) Retrieve messages with a given type, ordered by receive-time.
88+
89+
90+```bash
91+logt --type {type} [--live] [--gt index] [--gte index] [--lt index] [--lte index] [--reverse] [--keys] [--values] [--limit n]
92+```
93+
94+```js
95+messagesByType({ type:, live:, gt:, gte:, lt:, lte:, reverse:, keys:, values:, limit:, fillCache:, keyEncoding:, valueEncoding: })
96+```
97+
98+All messages must have a type, so this is a good way to select messages that an application might use.
 99+Like in createLogStream, the range queries (gt, gte, lt, lte) filter against the receive timestamp.
100+
101+ - `type` (string): The type of the messages to emit.
102+ - `live` (boolean, default: `false`): Keep the stream open and emit new messages as they are received.
103+ - `gt` (greater than), `gte` (greater than or equal) define the lower bound of the range to be streamed. Only records where the key is greater than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
104+ - `lt` (less than), `lte` (less than or equal) define the higher bound of the range to be streamed. Only key/value pairs where the key is less than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
105+ - `reverse` (boolean, default: `false`): a boolean, set true and the stream output will be reversed. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
106+ - `keys` (boolean, default: `true`): whether the `data` event should contain keys. If set to `true` and `values` set to `false` then `data` events will simply be keys, rather than objects with a `key` property.
107+ - `values` (boolean, default: `true`): whether the `data` event should contain values. If set to `true` and `keys` set to `false` then `data` events will simply be values, rather than objects with a `value` property.
108+ - `limit` (number, default: `-1`): limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the data first. A value of `-1` means there is no limit. When `reverse=true` the highest keys will be returned instead of the lowest keys.
109+ - `fillCache` (boolean, default: `false`): whether LevelDB's LRU-cache should be filled with data read.
110+ - `keyEncoding` / `valueEncoding` (string): the encoding applied to each read piece of data.
111+
112+
113+
114+## createHistoryStream: source
115+
116+(hist) Fetch messages from a specific user, ordered by sequence numbers.
117+
118+```bash
119+hist {feedid} [seq] [live]
120+hist --id {feedid} [--seq n] [--live] [--limit n] [--keys] [--values]
121+```
122+
123+```js
124+createHistoryStream(id, seq, live)
125+createHistoryStream({ id:, seq:, live:, limit:, keys:, values: })
126+```
127+
128+`createHistoryStream` and `createUserStream` serve the same purpose.
129+`createHistoryStream` exists as a separate call because it provides fewer range parameters, which makes it safer for RPC between untrusted peers.
130+
131+ - `id` (FeedID, required): The id of the feed to fetch.
132+ - `seq` (number, default: `0`): If `seq > 0`, then only stream messages with sequence numbers greater than `seq`.
133+ - `live` (boolean, default: `false`): Keep the stream open and emit new messages as they are received.
134+ - `keys` (boolean, default: `true`): whether the `data` event should contain keys. If set to `true` and `values` set to `false` then `data` events will simply be keys, rather than objects with a `key` property.
135+ - `values` (boolean, default: `true`): whether the `data` event should contain values. If set to `true` and `keys` set to `false` then `data` events will simply be values, rather than objects with a `value` property.
136+ - `limit` (number, default: `-1`): limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the data first. A value of `-1` means there is no limit. When `reverse=true` the highest keys will be returned instead of the lowest keys.
137+
138+
139+## createUserStream: source
140+
141+Fetch messages from a specific user, ordered by sequence numbers.
142+
143+```bash
144+createUserStream --id {feedid} [--live] [--gt index] [--gte index] [--lt index] [--lte index] [--reverse] [--keys] [--values] [--limit n]
145+```
146+
147+```js
148+createUserStream({ id:, live:, gt:, gte:, lt:, lte:, reverse:, keys:, values:, limit:, fillCache:, keyEncoding:, valueEncoding: })
149+```
150+
151+`createHistoryStream` and `createUserStream` serve the same purpose.
152+`createHistoryStream` exists as a separate call because it provides fewer range parameters, which makes it safer for RPC between untrusted peers.
153+
154+The range queries (gt, gte, lt, lte) filter against the sequence number.
155+
156+ - `id` (FeedID, required): The id of the feed to fetch.
157+ - `live` (boolean, default: `false`): Keep the stream open and emit new messages as they are received.
158+ - `gt` (greater than), `gte` (greater than or equal) define the lower bound of the range to be streamed. Only records where the key is greater than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
159+ - `lt` (less than), `lte` (less than or equal) define the higher bound of the range to be streamed. Only key/value pairs where the key is less than (or equal to) this option will be included in the range. When `reverse=true` the order will be reversed, but the records streamed will be the same.
160+ - `reverse` (boolean, default: `false`): a boolean, set true and the stream output will be reversed. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
161+ - `keys` (boolean, default: `true`): whether the `data` event should contain keys. If set to `true` and `values` set to `false` then `data` events will simply be keys, rather than objects with a `key` property.
162+ - `values` (boolean, default: `true`): whether the `data` event should contain values. If set to `true` and `keys` set to `false` then `data` events will simply be values, rather than objects with a `value` property.
163+ - `limit` (number, default: `-1`): limit the number of results collected by this stream. This number represents a *maximum* number of results and may not be reached if you get to the end of the data first. A value of `-1` means there is no limit. When `reverse=true` the highest keys will be returned instead of the lowest keys.
164+ - `fillCache` (boolean, default: `false`): whether LevelDB's LRU-cache should be filled with data read.
165+ - `keyEncoding` / `valueEncoding` (string): the encoding applied to each read piece of data.
166+
167+
168+## createWriteStream: sink
169+
170+write a number of messages to the local store.
171+will error if messages are not valid, but will accept
172+messages that the ssb-server doesn't replicate.
173+
174+
175+## links: source
176+
177+Get a stream of messages, feeds, or blobs that are linked to/from an id.
178+
179+```bash
180+links [--source id|filter] [--dest id|filter] [--rel value] [--keys] [--values] [--live] [--reverse]
181+```
182+
183+```js
184+links({ source:, dest:, rel:, keys:, values:, live:, reverse: })
185+```
186+
187+The objects in this stream will be of the form:
188+
189+```
190+{ source: ID, rel: String, dest: ID, key: MsgID }
191+```
192+
193+ - `source` (string, optional): An id or filter, specifying where the link should originate from. To filter, just use the sigil of the type you want: `@` for feeds, `%` for messages, and `&` for blobs.
194+ - `dest` (string, optional): An id or filter, specifying where the link should point to. To filter, just use the sigil of the type you want: `@` for feeds, `%` for messages, and `&` for blobs.
195+ - `rel` (string, optional): Filters the links by the relation string.
196+ - `live` (boolean, default: `false`): Keep the stream open and emit new messages as they are received.
197+ - `reverse` (boolean, default: `false`): a boolean, set true and the stream output will be reversed. Beware that due to the way LevelDB works, a reverse seek will be slower than a forward seek.
198+ - `keys` (boolean, default: `true`): whether the `data` event should contain keys. If set to `true` and `values` set to `false` then `data` events will simply be keys, rather than objects with a `key` property.
199+ - `values` (boolean, default: `true`): whether the `data` event should contain values. If set to `true` and `keys` set to `false` then `data` events will simply be values, rather than objects with a `value` property.
200+
201+
202+## add: async
203+
204+Add a well-formed message to the database.
205+
206+```bash
207+cat ./message.json | add
208+add --author {feedid} --sequence {number} --previous {msgid} --timestamp {number} --hash sha256 --signature {sig} --content.type {type} --content.{...}
209+```
210+
211+```js
212+add({ author:, sequence:, previous:, timestamp:, hash: 'sha256', signature:, content: { type:, ... } }, cb)
213+```
214+
215+ - `author` (FeedID): Public key of the author of the message.
216+ - `sequence` (number): Sequence number of the message. (Starts from 1.)
217+ - `previous` (MsgID): Hash-id of the previous message in the feed (null for seq=1).
218+ - `timestamp` (number): Unix timestamp for the publish time.
219+ - `hash` (string): The hash algorithm used in the message, should always be `sha256`.
220+ - `signature` (string): A signature computed using the author pubkey and the content of the message (less the `signature` attribute).
221+ - `content` (object): The content of the message.
222+ - `.type` (string): The object's type.
223+
224+
225+## publish: async
226+
227+Construct a message using ssb-server's current user, and add it to the DB.
228+
229+```bash
230+cat ./message-content.json | publish
231+publish --type {string} [--other-attributes...]
232+```
233+
234+```js
235+publish({ type:, ... }, cb)
236+```
237+
238+This is the recommended method for publishing new messages, as it handles the tasks of correctly setting the message's timestamp, sequence number, previous-hash, and signature.
239+
240+ - `content` (object): The content of the message.
241+ - `.type` (string): The object's type.
242+
243+
244+
245+
246+## getAddress: sync
247+
248+Get the address of the server. Default scope is public.
249+
250+```bash
251+getAddress {scope}
252+```
253+
254+```js
255+getAddress(scope, cb)
256+```
257+
258+
259+
260+## getLatest: async
261+
262+Get the latest message in the database by the given feedid.
263+
264+```bash
265+getLatest {feedid}
266+```
267+
268+```js
269+getLatest(id, cb)
270+```
271+
272+
273+
274+## latest: source
275+
276+Get the seq numbers of the latest messages of all users in the database.
277+
278+```bash
279+latest
280+```
281+
282+```js
283+latest()
284+```
285+
286+
287+
288+## latestSequence: async
289+
290+Get the sequence and local timestamp of the last received message from
291+a given `feedId`.
292+
293+```bash
294+latestSequence {feedId}
295+```
296+
297+```js
298+latestSequence({feedId})
299+```
300+
301+
302+
303+## whoami: sync
304+
305+Get information about the current ssb-server user.
306+
307+```bash
308+whoami
309+```
310+
311+```js
312+whoami(cb)
313+```
314+
315+Outputs information in the following form:
316+
317+```
318+{ id: FeedID }
319+```
320+
321+
322+
323+## progress: sync
324+
325+returns an object reflecting the progress state of various plugins.
326+the return value is a `{}` with subobjects showing `{start,current,target}`
327+to represent progress. Currently implemented are `migration` (legacy->flume)
328+migration progress and `indexes` (index regeneration).
329+
330+
331+## status: sync
332+
333+returns an object reflecting the status of various ssb operations,
334+such as db read activity, connection statuses, etc, etc. The purpose is to provide
335+an overview of how ssb is working.
336+
337+## getVectorClock: async
338+
339+## version: sync
340+
341+return the current version number of the running server
defaults.jsView
@@ -1,1 +1,0 @@
1-
legacy.jsView
@@ -1,128 +1,0 @@
1-'use strict'
2-var pull = require('pull-stream')
3-var pl = require('pull-level')
4-var Live = require('pull-live')
5-var paramap = require('pull-paramap')
6-var u = require('./util')
7-var stdopts = u.options
8-var msgFmt = u.format
9-var timestamp = require('monotonic-timestamp')
10-
// Legacy (pre-flume) log storage on level-sublevel, plus a one-shot
// migration of its contents into the new flumedb log.
module.exports = function (db, flumedb) {
  // Read the legacy log with `opts` and call back with only the last record.
  function one (opts, cb) {
    pull(
      db.createLogStream(opts),
      pull.collect(function (err, ary) {
        cb(err, ary[ary.length - 1])
      })
    )
  }

  var logDB = db.sublevel('log')
  // level-sublevel pre-hook: every write to `db` also adds a 'log' entry
  // keyed by a monotonic local timestamp, valued with the message key.
  db.pre(function (op, add, _batch) {
    var id = op.key
    // index by sequence number

    var localtime = op.timestamp = timestamp()

    add({
      key: localtime,
      value: id,
      type: 'put',
      prefix: logDB
    })
  })

  // Wrap a stream constructor so the returned pull-stream yields at most
  // opts.limit real records; `sync` markers do not count toward the limit.
  function Limit (fn) {
    return function (opts) {
      if (opts && opts.limit && opts.limit > 0) {
        var limit = opts.limit
        var read = fn(opts)
        return function (abort, cb) {
          if (limit--) {
            return read(abort, function (err, data) {
              if (data && data.sync) limit++
              cb(err, data)
            })
          } else read(true, cb) // limit exhausted: abort upstream
        }
      } else { return fn(opts) }
    }
  }

  // Log stream over the legacy store: old records come from the 'log'
  // sublevel (timestamp -> key) and are joined with their stored values;
  // the live side tails the main db.
  db.createLogStream = Limit(Live(function (opts) {
    opts = stdopts(opts)
    var keys = opts.keys; delete opts.keys
    var values = opts.values; delete opts.values
    return pull(
      pl.old(logDB, stdopts(opts)),
      // lookup2(keys, values, 'timestamp')
      paramap(function (data, cb) {
        if (values === false) return cb(null, { key: data.value })
        var key = data.value
        var seq = data.key
        db.get(key, function (err, value) {
          if (err) cb(err)
          else cb(null, msgFmt(keys, values, { key: key, value: value, timestamp: seq }))
        })
      })
    )
  }, function (opts) {
    return pl.live(db, stdopts(opts))
  }))

  if (flumedb) {
    // migration progress, exposed as flumedb.progress.migration
    var prog = { current: 0, start: 0, target: 0 }

    // Compare the newest legacy record with the newest flume record to
    // decide whether (and from which timestamp) migration is needed.
    one({ reverse: true, limit: 1 }, function (_, last) {
      if (!last) ready() // empty legacy database.
      else {
        flumedb.since.once(function (v) {
          if (v === -1) {
            load(null) // flume log is empty: migrate everything
          } else {
            flumedb.get(v, function (err, data) {
              if (err) throw err
              if (data.timestamp < last.timestamp) {
                load(data.timestamp) // resume from where flume left off
              } else ready()
            })
          }
        })
      }

      // Count the records newer than `since` (for progress reporting),
      // then copy them across.
      function load (since) {
        // fast track for more accurate progress
        flumedb.progress.migration = prog
        var c = 0
        pull(
          pl.old(logDB, { gt: since, values: false }),
          pull.drain(function () {
            c++
          }, function () {
            prog.target = c
            migrate()
          })
        )

        // Copy legacy records into flume via rawAppend (preserving the
        // original receive timestamps), 32 in flight at a time.
        function migrate () {
          // actual upgrade
          pull(
            db.createLogStream({ gt: since }),
            paramap(function (data, cb) {
              prog.current += 1
              flumedb.rawAppend(data, cb)
            }, 32),
            pull.drain(null, ready)
          )
        }
      }
      // Mark the flume db usable (also reached when nothing to migrate).
      // NOTE(review): any drain/migration error is ignored here — confirm intended.
      function ready (_) {
        flumedb.ready.set(true)
      }
    })
  }
}
126-
127-
128-
lib/validators.jsView
@@ -1,0 +1,239 @@
1+var valid = require('muxrpc-validation')
2+var zerr = require('zerr')
3+var ref = require('ssb-ref')
4+
5+// errors
// Reusable usage-error constructors; zerr fills the % placeholders with
// the arguments supplied at call time.
var MissingAttr = zerr('Usage', 'Param % must have a .% of type "%"')
var AttrType = zerr('Usage', '.% of param % must be of type "%"')
8+
// True when `v` is a sigil filter: '@' (feeds), '%' (messages), '&' (blobs).
// Uses strict equality: the loose `==` of the original adds nothing for
// string-literal comparisons and invites accidental coercion.
function isFilter (v) {
  return v === '@' || v === '%' || v === '&'
}
12+
13+module.exports = valid({
14+ msgId: function (v) {
15+ if (!ref.isMsg(v))
16+ return 'type'
17+ },
18+ msgLink: function (v) {
19+ if (!ref.isMsgLink(v))
20+ return 'type'
21+ },
22+ feedId: function (v) {
23+ if (!ref.isFeed(v))
24+ return 'type'
25+ },
26+ blobId: function (v) {
27+ if (!ref.isBlob(v))
28+ return 'type'
29+ },
30+
31+ msgContent: function (v, n) {
32+ var err = this.get('object')(v, n)
33+ if (err) return err
34+ if (!v.type || typeof v.type != 'string')
35+ return MissingAttr(n, 'type', 'string')
36+ },
37+
38+ msg: function (v, n) {
39+ var err = this.get('object')(v, n)
40+ if (err)
41+ return err
42+
43+ //allow content to be string. (i.e. for encrypted messages)
44+ //or object with type string
45+ if(!v.content)
46+ return MissingAttr(n, 'content', 'object|string')
47+ else if(typeof v.content === 'string')
48+ ; //check if it's base64?
49+ else if('object' === typeof v.content) {
50+ if(!v.content.type || typeof v.content.type != 'string')
51+ return MissingAttr(n, 'content.type', 'string')
52+ }
53+ else
54+ return MissingAttr(n, 'content', 'object|string')
55+
56+ // .author
57+ if (!ref.isFeed(v.author))
58+ return MissingAttr(n, 'author', 'feedId')
59+
60+ // .sequence
61+ if (typeof v.sequence != 'number')
62+ return MissingAttr(n, 'sequence', 'number')
63+
64+ // .previous
65+ if (v.sequence > 1 && !ref.isMsg(v.previous))
66+ return MissingAttr(n, 'previous', 'msgId')
67+ else if(v.sequence == 1 && v.previous != null)
68+ return MissingAttr(n, 'previous', 'null')
69+
70+ // .timestamp
71+ if (typeof v.timestamp != 'number')
72+ return MissingAttr(n, 'timestamp', 'number')
73+
74+ // .hash
75+ if (v.hash != 'sha256')
76+ return zerr('Usage', 'Param % must have .hash set to "sha256"')(n)
77+
78+ // .signature
79+ if (typeof v.signature != 'string')
80+ return MissingAttr(n, 'signature', 'string')
81+ },
82+
83+ readStreamOpts: function (v, n) {
84+ var err = this.get('object')(v, n)
85+ if (err)
86+ return err
87+
88+ // .live
89+ if (v.live && typeof v.live != 'boolean' && typeof v.live != 'number')
90+ return AttrType(n, 'live', 'boolean')
91+
92+ // .reverse
93+ if (v.reverse && typeof v.reverse != 'boolean' && typeof v.reverse != 'number')
94+ return AttrType(n, 'reverse', 'boolean')
95+
96+ // .keys
97+ if (v.keys && typeof v.keys != 'boolean' && typeof v.keys != 'number')
98+ return AttrType(n, 'keys', 'boolean')
99+
100+ // .values
101+ if (v.values && typeof v.values != 'boolean' && typeof v.values != 'number')
102+ return AttrType(n, 'values', 'boolean')
103+
104+ // .limit
105+ if (v.limit && typeof v.limit != 'number')
106+ return AttrType(n, 'limit', 'number')
107+
108+ // .fillCache
109+ if (v.fillCache && typeof v.fillCache != 'boolean' && typeof v.fillCache != 'number')
110+ return AttrType(n, 'fillCache', 'boolean')
111+ },
112+
113+ createHistoryStreamOpts: function (v, n) {
114+ // .id
115+ if (!ref.isFeed(v.id))
116+ return MissingAttr(n, 'id', 'feedId')
117+
118+ // .seq
119+ if (v.seq && typeof v.seq != 'number')
120+ return AttrType(n, 'seq', 'number')
121+
122+ // .live
123+ if (v.live && typeof v.live != 'boolean' && typeof v.live != 'number')
124+ return AttrType(n, 'live', 'boolean')
125+
126+ // .limit
127+ if (v.limit && typeof v.limit != 'number')
128+ return AttrType(n, 'limit', 'number')
129+
130+ // .keys
131+ if (v.keys && typeof v.keys != 'boolean' && typeof v.keys != 'number')
132+ return AttrType(n, 'keys', 'boolean')
133+
134+ // .values
135+ if (v.values && typeof v.values != 'boolean' && typeof v.values != 'number')
136+ return AttrType(n, 'values', 'boolean')
137+ },
138+
139+ createUserStreamOpts: function (v, n) {
140+ var err = this.get('readStreamOpts')(v, n)
141+ if (err)
142+ return err
143+
144+ // .id
145+ if (!ref.isFeed(v.id))
146+ return MissingAttr(n, 'id', 'feedId')
147+ },
148+
149+ messagesByTypeOpts: function (v, n) {
150+ var err = this.get('readStreamOpts')(v, n)
151+ if (err)
152+ return err
153+
154+ // .type
155+ if (typeof v.type != 'string')
156+ return MissingAttr(n, 'type', 'string')
157+ },
158+
159+ linksOpts: function (v, n) {
160+ var err = this.get('object')(v, n)
161+ if (err)
162+ return err
163+
164+ // .source
165+ if (v.source && !ref.isLink(v.source) && !isFilter(v.source))
166+ return AttrType(n, 'source', 'id|filter')
167+
168+ // .dest
169+ if (v.dest && !ref.isLink(v.dest) && !isFilter(v.dest))
170+ return AttrType(n, 'dest', 'id|filter')
171+
172+ // .rel
173+ if (v.rel && typeof v.rel != 'string')
174+ return AttrType(n, 'rel', 'string')
175+
176+ // .live
177+ if (v.live && typeof v.live != 'boolean' && typeof v.live != 'number')
178+ return AttrType(n, 'live', 'boolean')
179+
180+ // .reverse
181+ if (v.reverse && typeof v.reverse != 'boolean' && typeof v.reverse != 'number')
182+ return AttrType(n, 'reverse', 'boolean')
183+
184+ // .keys
185+ if (v.keys && typeof v.keys != 'boolean' && typeof v.keys != 'number')
186+ return AttrType(n, 'keys', 'boolean')
187+
188+ // .values
189+ if (v.values && typeof v.values != 'boolean' && typeof v.values != 'number')
190+ return AttrType(n, 'values', 'boolean')
191+ },
192+
193+ isBlockedOpts: function (v, n) {
194+ var err = this.get('object')(v, n)
195+ if (err)
196+ return err
197+
198+ // .source
199+ if (v.source && !ref.isFeed(v.source))
200+ return AttrType(n, 'source', 'feedId')
201+
202+ // .dest
203+ if (v.dest && !ref.isFeed(v.dest))
204+ return AttrType(n, 'dest', 'feedId')
205+ },
206+
207+ createFriendStreamOpts: function (v, n) {
208+ var err = this.get('object')(v, n)
209+ if (err)
210+ return err
211+
212+ // .start
213+ if (v.start && !ref.isFeed(v.start))
214+ return AttrType(n, 'start', 'feedId')
215+
216+ // .graph
217+ if (v.graph && typeof v.graph != 'string')
218+ return AttrType(n, 'graph', 'string')
219+
220+ // .dunbar
221+ if (v.dunbar && typeof v.dunbar != 'number')
222+ return AttrType(n, 'dunbar', 'number')
223+
224+ // .hops
225+ if (v.hops && typeof v.hops != 'number')
226+ return AttrType(n, 'hops', 'number')
227+ }
228+})
229+
230+
231+
232+
233+
234+
235+
236+
237+
238+
239+

Built with git-ssb-web