git ssb

4+

Dominic / scuttlebot



Commit e014565b8902bbeec5a7b8e1495873cce9bb8255

Merge branch 'master' into flume

Dominic Tarr committed on 4/15/2017, 9:33:24 PM
Parent: d05b0a6b310964c2e5fd32ad53278ff8db0603eb
Parent: 545a1edbeabbc77f82598a24f6a42ca839daf0fa

Files changed

.gitignore changed
package.json changed
plugins/plugins.js changed
plugins/replicate.js added
.gitignore View
@@ -2,5 +2,8 @@
22 .privatekey
33 node_modules
44 sbot.js
55 .ssbrc
6-stuck.log
6 +*.log
7 +
8 +
9 +
package.json View
@@ -1,8 +1,8 @@
11 {
22 "name": "scuttlebot",
33 "description": "network protocol layer for secure-scuttlebutt",
4- "version": "9.5.0",
4 + "version": "9.6.1",
55 "homepage": "https://github.com/ssbc/scuttlebot",
66 "repository": {
77 "type": "git",
88 "url": "git://github.com/ssbc/scuttlebot.git"
@@ -11,8 +11,9 @@
1111 "atomic-file": "0.0.1",
1212 "bash-color": "~0.0.3",
1313 "broadcast-stream": "~0.0.0",
1414 "cont": "~1.0.3",
15 + "cross-spawn": "^5.1.0",
1516 "deep-equal": "^1.0.1",
1617 "explain-error": "^1.0.3",
1718 "flumeview-reduce": "^1.0.2",
1819 "graphreduce": "^3.0.0",
@@ -39,9 +40,9 @@
3940 "pull-flatmap": "0.0.1",
4041 "pull-inactivity": "~2.1.1",
4142 "pull-level": "^2.0.2",
4243 "pull-many": "~1.0.6",
43- "pull-next": "0.0.2",
44 + "pull-next": "^1.0.0",
4445 "pull-notify": "0.1.1",
4546 "pull-paramap": "~1.2.1",
4647 "pull-ping": "^2.0.2",
4748 "pull-pushable": "^2.0.1",
@@ -77,10 +78,13 @@
7778 "bin": {
7879 "sbot": "./sbot.js"
7980 },
8081 "scripts": {
81- "prepublish": "npm ls && npm test && noderify bin.js > sbot.js",
82 + "build": "noderify bin.js > sbot.js",
83 + "prepublish": "npm ls && npm test && npm run build",
8284 "test": "set -e; for t in test/*.js; do node $t; done"
8385 },
8486 "author": "Paul Frazee <pfrazee@gmail.com>",
8587 "license": "MIT"
8688 }
89 +
90 +
plugins/plugins.js View
@@ -5,9 +5,9 @@
55 var cat = require('pull-cat')
66 var many = require('pull-many')
77 var pushable = require('pull-pushable')
88 var toPull = require('stream-to-pull-stream')
9-var spawn = require('child_process').spawn
9 +var spawn = require('cross-spawn')
1010 var mkdirp = require('mkdirp')
1111 var osenv = require('osenv')
1212 var rimraf = require('rimraf')
1313 var mv = require('mv')
plugins/replicate.js View
@@ -1,0 +1,372 @@
1 +'use strict'
2 +var pull = require('pull-stream')
3 +var pullNext = require('pull-next')
4 +var para = require('pull-paramap')
5 +var Notify = require('pull-notify')
6 +var Cat = require('pull-cat')
7 +var Debounce = require('observ-debounce')
8 +var mdm = require('mdmanifest')
9 +var apidoc = require('../lib/apidocs').replicate
10 +var deepEqual = require('deep-equal')
11 +
12 +var Pushable = require('pull-pushable')
13 +
14 +// compatibility function for old implementations of `latestSequence`
15 +function toSeq (s) {
16 + return 'number' === typeof s ? s : s.sequence
17 +}
18 +
19 +// if one of these shows up in a replication stream, the stream is dead
20 +var streamErrors = {
21 + 'unexpected end of parent stream': true, // stream closed okay
22 + 'unexpected hangup': true, // stream closed probably okay
23 + 'read EHOSTUNREACH': true,
24 + 'read ECONNRESET': true,
25 + 'read ENETDOWN': true,
26 + 'read ETIMEDOUT': true,
27 + 'write ECONNRESET': true,
28 + 'write EPIPE': true,
29 + 'stream is closed': true // rpc method called after stream ended
30 +}
31 +
32 +module.exports = {
33 + name: 'replicate',
34 + version: '2.0.0',
35 + manifest: mdm.manifest(apidoc),
36 + //replicate: replicate,
37 + init: function (sbot, config) {
38 + var debounce = Debounce(200)
39 + var listeners = {}
40 + var notify = Notify()
41 + var newPeer = Notify()
42 +
43 + var start = null
44 + var count = 0
45 + var rate = 0
46 + var loadedFriends = false
47 + var toSend = {}
48 + var peerHas = {}
49 + var pendingFeedsForPeer = {}
50 + var lastProgress = null
51 +
52 + debounce(function () {
53 + // only list loaded feeds once we know about all of them!
54 + var feeds = loadedFriends ? Object.keys(toSend).length : null
55 + var legacyProgress = 0
56 + var legacyTotal = 0
57 +
58 + var pendingFeeds = new Set()
59 + var pendingPeers = {}
60 + var legacyToRecv = {}
61 +
62 + Object.keys(pendingFeedsForPeer).forEach(function (peerId) {
63 + if (pendingFeedsForPeer[peerId] && pendingFeedsForPeer[peerId].size) {
64 + Object.keys(toSend).forEach(function (feedId) {
65 + if (peerHas[peerId] && peerHas[peerId][feedId]) {
66 + if (peerHas[peerId][feedId] > toSend[feedId]) {
67 + pendingFeeds.add(feedId)
68 + }
69 + }
70 + })
71 + pendingPeers[peerId] = pendingFeedsForPeer[peerId].size
72 + }
73 + })
74 +
75 + for (var k in toSend) {
76 + legacyProgress += toSend[k]
77 + }
78 +
79 + for (var id in peerHas) {
80 + for (var k in peerHas[id]) {
81 + legacyToRecv[k] = Math.max(peerHas[id][k], legacyToRecv[k] || 0)
82 + }
83 + }
84 +
85 + for (var k in legacyToRecv) {
86 + if (toSend[k] !== null) {
87 + legacyTotal += legacyToRecv[k]
88 + }
89 + }
90 +
91 + var progress = {
92 + id: sbot.id,
93 + rate, // rate of messages written to sbot
94 + feeds, // total number of feeds we want to replicate
95 + pendingPeers, // number of pending feeds per peer
96 + incompleteFeeds: pendingFeeds.size, // number of feeds with pending messages to download
97 +
98 + // LEGACY: Preserving old api. Needed for test/random.js to pass
99 + progress: legacyProgress,
100 + total: legacyTotal
101 + }
102 +
103 + if (!deepEqual(progress, lastProgress)) {
104 + lastProgress = progress
105 + notify(progress)
106 + }
107 + })
108 +
109 + pull(
110 + sbot.createLogStream({old: false, live: true, sync: false, keys: false}),
111 + pull.drain(function (e) {
112 + //track writes per second, mainly used for developing initial sync.
113 + if(!start) start = Date.now()
114 + var time = (Date.now() - start)/1000
115 + if(time >= 1) {
116 + rate = count / time
117 + start = Date.now()
118 + count = 0
119 + }
120 + var pushable = listeners[e.author]
121 +
122 + if(pushable && pushable.sequence == e.sequence) {
123 + pushable.sequence ++
124 + pushable.forEach(function (p) {
125 + p.push(e)
126 + })
127 + }
128 + count ++
129 + addPeer({id: e.author, sequence: e.sequence})
130 + })
131 + )
132 +
133 + sbot.createHistoryStream.hook(function (fn, args) {
134 + var upto = args[0] || {}
135 + var seq = upto.sequence || upto.seq
136 +
137 + if(this._emit) this._emit('call:createHistoryStream', args[0])
138 +
139 + //if we are calling this locally, skip cleverness
140 + if(this===sbot) return fn.call(this, upto)
141 +
142 + // keep track of each requested value, per feed / per peer.
143 + peerHas[this.id] = peerHas[this.id] || {}
144 + peerHas[this.id][upto.id] = seq - 1 // peer requests +1 from actual last seq
145 +
146 + debounce.set()
147 +
148 + //handle creating lots of histor streams efficiently.
149 + //maybe this could be optimized in map-filter-reduce queries instead?
150 + if(toSend[upto.id] == null || (seq > toSend[upto.id])) {
151 + upto.old = false
152 + if(!upto.live) return pull.empty()
153 + var pushable = listeners[upto.id] = listeners[upto.id] || []
154 + var p = Pushable(function () {
155 + var i = pushable.indexOf(p)
156 + pushable.splice(i, 1)
157 + })
158 + pushable.push(p)
159 + pushable.sequence = seq
160 + return p
161 + }
162 + return fn.call(this, upto)
163 + })
164 +
165 + // collect the IDs of feeds we want to request
166 + var opts = config.replication || {}
167 + opts.hops = opts.hops || 3
168 + opts.dunbar = opts.dunbar || 150
169 + opts.live = true
170 + opts.meta = true
171 +
172 + function localPeers () {
173 + if(!sbot.gossip) return
174 + sbot.gossip.peers().forEach(function (e) {
175 + if (e.source === 'local' && toSend[e.key] == null) {
176 + sbot.latestSequence(e.key, function (err, seq) {
177 + addPeer({id: e.key, sequence: err ? 0 : toSeq(seq)})
178 + })
179 + }
180 + })
181 + }
182 +
183 + //also request local peers.
184 + if (sbot.gossip) {
185 + // if we have the gossip plugin active, then include new local peers
186 + // so that you can put a name to someone on your local network.
187 + var int = setInterval(localPeers, 1000)
188 + if(int.unref) int.unref()
189 + localPeers()
190 + }
191 +
192 + function friendsLoaded () {
193 + loadedFriends = true
194 + debounce.set()
195 + }
196 +
197 + function addPeer (upto) {
198 + if(upto.sync) return friendsLoaded()
199 + if(!upto.id) return console.log('invalid', upto)
200 +
201 + if(toSend[upto.id] == null) {
202 + toSend[upto.id] = Math.max(toSend[upto.id] || 0, upto.sequence || upto.seq || 0)
203 + newPeer({id: upto.id, sequence: toSend[upto.id] , type: 'new' })
204 + } else {
205 + toSend[upto.id] = Math.max(toSend[upto.id] || 0, upto.sequence || upto.seq || 0)
206 + }
207 +
208 + debounce.set()
209 + }
210 +
211 +
212 + // create read-streams for the desired feeds
213 + pull(
214 + sbot.friends.createFriendStream(opts),
215 + // filter out duplicates, and also keep track of what we expect to receive
216 + // lookup the latest sequence from each user
217 + para(function (data, cb) {
218 + if(data.sync) return cb(null, data)
219 + var id = data.id || data
220 + sbot.latestSequence(id, function (err, seq) {
221 + cb(null, {
222 + id: id, sequence: err ? 0 : toSeq(seq)
223 + })
224 + })
225 + }, 32),
226 + pull.drain(addPeer, friendsLoaded)
227 + )
228 +
229 + function upto (opts) {
230 + opts = opts || {}
231 + var ary = Object.keys(toSend).map(function (k) {
232 + return { id: k, sequence: toSend[k] }
233 + })
234 + if(opts.live)
235 + return Cat([pull.values(ary), pull.once({sync: true}), newPeer.listen()])
236 +
237 + return pull.values(ary)
238 + }
239 +
240 + sbot.on('rpc:connect', function(rpc) {
241 + // this is the cli client, just ignore.
242 + if(rpc.id === sbot.id) return
243 +
244 + // check for local peers, or manual connections.
245 + localPeers()
246 +
247 + var drain
248 + sbot.emit('replicate:start', rpc)
249 + rpc.on('closed', function () {
250 + sbot.emit('replicate:finish', toSend)
251 +
252 + // if we disconnect from a peer, remove it from sync progress
253 + delete pendingFeedsForPeer[rpc.id]
254 + debounce.set()
255 + })
256 + var errorsSeen = {}
257 + pull(
258 + upto({live: opts.live}),
259 + drain = pull.drain(function (upto) {
260 + if(upto.sync) return
261 +
262 + // track sync start progress
263 + pendingFeedsForPeer[rpc.id] = pendingFeedsForPeer[rpc.id] || new Set()
264 + pendingFeedsForPeer[rpc.id].add(upto.id)
265 + debounce.set()
266 +
267 + pull(
268 + rpc.createHistoryStream({
269 + id: upto.id,
270 + seq: (upto.sequence || upto.seq || 0) + 1,
271 + live: true,
272 + keys: false
273 + }),
274 +
275 + // track sync completed progress
276 + pull.through(detectSync(rpc.id, upto, toSend, peerHas, function () {
277 + if (pendingFeedsForPeer[rpc.id]) {
278 + pendingFeedsForPeer[rpc.id].delete(upto.id)
279 + debounce.set()
280 + }
281 + })),
282 +
283 + sbot.createWriteStream(function (err) {
284 + if(err && !(err.message in errorsSeen)) {
285 + errorsSeen[err.message] = true
286 + if(err.message in streamErrors) {
287 + drain.abort()
288 + if(err.message === 'unexpected end of parent stream') {
289 + if (err instanceof Error) {
290 + // stream closed okay locally
291 + } else {
292 + // pre-emptively destroy the stream, assuming the other
293 + // end is packet-stream 2.0.0 sending end messages.
294 + rpc.close(err)
295 + }
296 + }
297 + } else {
298 + console.error('Error replicating with ' + rpc.id + ':\n ',
299 + err.stack)
300 + }
301 + }
302 +
303 + // if stream closes, remove from pending progress
304 + if (pendingFeedsForPeer[rpc.id]) {
305 + pendingFeedsForPeer[rpc.id].delete(upto.id)
306 + debounce.set()
307 + }
308 + })
309 + )
310 +
311 + }, function (err) {
312 + if(err && err !== true)
313 + sbot.emit('log:error', ['replication', rpc.id, 'error', err])
314 + })
315 + )
316 + })
317 +
318 + return {
319 + changes: notify.listen,
320 + upto: upto
321 + }
322 + }
323 +}
324 +
325 +function detectSync (peerId, upto, toSend, peerHas, onSync) {
326 + // HACK: createHistoryStream does not emit sync event, so we don't
327 + // know when it switches to live. Do it manually!
328 +
329 + var sync = false
330 + var last = (upto.sequence || upto.seq || 0)
331 +
332 + // check sync after 500ms, hopefully we have the info from the peer by then
333 + setTimeout(function () {
334 + if (peerHas[peerId] && peerHas[peerId][upto.id] != null) {
335 + checkSync()
336 + } else {
337 + // if we get here, the peer hasn't yet asked for this feed, or is not responding
338 + // we can assume it doesn't have the feed, so lets call sync
339 + broadcastSync()
340 + }
341 + }, 500)
342 +
343 + return function (msg) {
344 + if (msg.sync) {
345 + // surprise! This peer actually has a sync event!
346 + broadcastSync()
347 + return false
348 + }
349 +
350 + last = msg.sequence
351 + checkSync()
352 + return true
353 + }
354 +
355 + function checkSync () {
356 + if (!sync) {
357 + var availableSeq = peerHas[peerId] && peerHas[peerId][upto.id]
358 + if (availableSeq === last || availableSeq < toSend[upto.id]) {
359 + // we've reached the maximum sequence this server has told us it knows about
360 + // or we don't need anything from this server
361 + broadcastSync()
362 + }
363 + }
364 + }
365 +
366 + function broadcastSync () {
367 + if (!sync) {
368 + sync = true
369 + onSync && onSync()
370 + }
371 + }
372 +}

Built with git-ssb-web