Commit 73b5b187d94ac2f96f31922f4249e72df021ec1c
cleanup
wanderer committed on 6/28/2017, 9:12:34 PM
Parent: 3c9e6f994621504f855b1992f1e25e2bb1bf389b
Files changed
exoInterface.js   changed
index.js          changed
portManager.js    changed
scheduler.js      changed
tests/index.js    changed
deleteMessage.js  added
dfsChecker.js     added
exoInterface.js
@@ -1,6 +1,7 @@
+const Message = require('primea-message')
 const PortManager = require('./portManager.js')
-const Message = require('primea-message')
+const DeleteMessage = require('./deleteMessage')
 
 module.exports = class ExoInterface {
   /**
    * the ExoInterface manages the varous message passing functions and provides
@@ -18,73 +19,48 @@
     this.container = new opts.container.Constructor(this, opts.container.args)
 
     this.ticks = 0
     this.containerState = 'idle'
-    this._pendingSends = new Map()
 
     // create the port manager
     this.ports = new PortManager(Object.assign({
       exInterface: this
     }, opts))
   }
 
-  _addWork (promise) {
-    this._outStandingWork = Promise.all([this._outStandingWork, promise])
-  }
-
   /**
    * adds a message to this containers message queue
    * @param {string} portName
    * @param {object} message
    */
   queue (portName, message) {
     message._hops++
-    this.ports.queue(portName, message)
-    if (this.containerState !== 'running') {
-      this.containerState = 'running'
-      if (portName) {
+    if (portName) {
+      this.ports.queue(portName, message)
+      if (this.containerState !== 'running') {
+        this.containerState = 'running'
         this._runNextMessage()
-      } else {
-        this.run(message, true)
       }
+    } else {
+      // initailiazation message
+      this.containerState = 'running'
+      this.run(message, true)
     }
   }
 
   // waits for the next message
   async _runNextMessage () {
     // check if the ports are saturated, if so we don't have to wait on the
     // scheduler
-    let message = this.ports.peekNextMessage()
-    let saturated = this.ports.isSaturated()
-    let oldestTime = this.hypervisor.scheduler.smallest()
+    const message = await this.ports.getNextMessage()
 
-    while (!saturated &&
-           !(message && oldestTime >= message._fromTicks ||
-             !message && oldestTime === this.ticks)) {
-      const ticksToWait = message ? message._fromTicks : this.ticks
-
-      await Promise.race([
-        this.hypervisor.scheduler.wait(ticksToWait, this.id).then(m => {
-          message = this.ports.peekNextMessage()
-        }),
-        this.ports.olderMessage(message).then(m => {
-          message = m
-        }),
-        this.ports.whenSaturated().then(() => {
-          saturated = true
-          message = this.ports.peekNextMessage()
-        })
-      ])
-
-      oldestTime = this.hypervisor.scheduler.smallest()
-      saturated = this.ports.isSaturated()
-    }
-
     if (!message) {
       // if no more messages then shut down
-      this.hypervisor.scheduler.done(this)
+      this.hypervisor.scheduler.done(this.id)
     } else {
       message.fromPort.messages.shift()
+      // if the message we recived had more ticks then we currently have the
+      // update it
       if (message._fromTicks > this.ticks) {
         this.ticks = message._fromTicks
       }
       this.hypervisor.scheduler.update(this)
@@ -101,13 +77,12 @@
    */
   async run (message, init = false) {
     let result
     message.ports.forEach(port => this.ports._unboundPorts.add(port))
-    if (message.data === 'delete') {
+    if (message.constructor === DeleteMessage) {
       this.ports._delete(message.fromName)
     } else {
       const method = init ? 'initailize' : 'run'
-
       try {
         result = await this.container[method](message) || {}
       } catch (e) {
         result = {
@@ -163,8 +138,9 @@
       const id = port.destId
       const instance = await this.hypervisor.getInstance(id)
       instance.queue(port.destName, message)
     } else {
+      // port is unbound
       port.destPort.messages.push(message)
     }
   }
 }
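With this change, deletion is signalled by the message's class rather than by a magic `data: 'delete'` payload. A minimal sketch of that dispatch, using only `primea-message` and the new `deleteMessage.js` module (the `dispatch` helper below is illustrative and not part of the commit):

// dispatch-sketch.js
const Message = require('primea-message')
const DeleteMessage = require('./deleteMessage')

// stand-in for the branch at the top of ExoInterface.run()
function dispatch (message) {
  if (message.constructor === DeleteMessage) {
    return 'delete the port the message arrived on'
  }
  return 'hand the message to the container'
}

console.log(dispatch(new DeleteMessage()))           // delete the port the message arrived on
console.log(dispatch(new Message({data: 'work'})))   // hand the message to the container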
index.js
@@ -1,16 +1,18 @@
 const Graph = require('ipld-graph-builder')
 const Message = require('primea-message')
 const ExoInterface = require('./exoInterface.js')
 const Scheduler = require('./scheduler.js')
+const DFSchecker = require('./dfsChecker.js')
 
 const ROOT_ID = 'zdpuAm6aTdLVMUuiZypxkwtA7sKm7BWERy8MPbaCrFsmiyzxr'
 
 module.exports = class Hypervisor {
   /**
    * The Hypervisor manages the container instances by instantiating them and
    * destorying them when possible. It also facilitates localating Containers
    * @param {Graph} dag an instance of [ipfs.dag](https://github.com/ipfs/interface-ipfs-core/tree/master/API/dag#dag-api)
+   * @param {object} state - the starting state
    */
   constructor (dag, state = {}) {
     this.graph = new Graph(dag)
     this.scheduler = new Scheduler()
@@ -27,16 +29,8 @@
     this._nodesToCheck.add(id)
   }
 
   /**
-   * removes a potaintail node in the state graph to check for garbage collection
-   * @param {string} id
-   */
-  removeNodeToCheck (id) {
-    this._nodesToCheck.delete(id)
-  }
-
-  /**
    * given a port, this finds the corridsponeding endpoint port of the channel
    * @param {object} port
    * @returns {Promise}
    */
@@ -119,19 +113,8 @@
     return exoInterface
   }
 
   /**
-   * deletes container from the state
-   * @param {string} id
-   */
-  deleteInstance (id) {
-    if (id !== ROOT_ID) {
-      this._nodesToCheck.delete(id)
-      delete this.state[id]
-    }
-  }
-
-  /**
    * creates a state root starting from a given container and a given number of
    * ticks
    * @param {Number} ticks the number of ticks at which to create the state root
    * @returns {Promise}
@@ -166,70 +149,4 @@
   async getHashFromObj (obj) {
     return (await this.graph.flush(obj))['/']
   }
 }
-
-// Implements a parrilizable DFS check for graph connictivity given a set of nodes
-// and a root node. Stating for the set of node to check this does a DFS and
-// will return a set a nodes if any that is not connected to the root node.
-async function DFSchecker (graph, state, root, nodes) {
-  const checkedNodesSet = new Set()
-  let hasRootSet = new Set()
-  const promises = []
-
-  for (const id of nodes) {
-    // create a set for each of the starting nodes to track the nodes the DFS has
-    // has traversed
-    const checkedNodes = new Set()
-    checkedNodesSet.add(checkedNodes)
-    promises.push(check(id, checkedNodes))
-  }
-
-  // wait for all the search to complete
-  await Promise.all(promises)
-  // remove the set of nodes that are connected to the root
-  checkedNodesSet.delete(hasRootSet)
-  let unLinkedNodesArray = []
-
-  // combine the unconnected sets into a single array
-  for (const set of checkedNodesSet) {
-    unLinkedNodesArray = unLinkedNodesArray.concat([...set])
-  }
-  return unLinkedNodesArray
-
-  // does the DFS starting with a single node ID
-  async function check (id, checkedNodes) {
-    if (!checkedNodesSet.has(checkedNodes) || // check if this DFS is still searching
-        checkedNodes.has(id) || // check if this DFS has alread seen the node
-        hasRootSet === checkedNodes) { // check that this DFS has alread found the root node
-      return
-    }
-
-    // check if any of the the other DFSs have seen this node and if so merge
-    // the sets and stop searching
-    for (const set of checkedNodesSet) {
-      if (set.has(id)) {
-        checkedNodes.forEach(id => set.add(id))
-        checkedNodesSet.delete(checkedNodes)
-        return
-      }
-    }
-
-    // mark the node 'checked'
-    checkedNodes.add(id)
-
-    // check to see if we are at the root
-    if (id === root) {
-      hasRootSet = checkedNodes
-      return
-    }
-
-    const node = state[id]['/']
-    const promises = []
-    // iterate through the nodes ports and recursivly check them
-    for (const name in node.ports) {
-      const port = node.ports[name]
-      promises.push(check(port.destId, checkedNodes))
-    }
-    return Promise.all(promises)
-  }
-}
portManager.js
@@ -1,6 +1,6 @@
 const BN = require('bn.js')
-const Message = require('primea-message')
+const DeleteMessage = require('./deleteMessage')
 
 // decides which message to go first
 function messageArbiter (nameA, nameB) {
   const a = this.ports[nameA].messages[0]
@@ -26,18 +26,16 @@
  * The port manager manages the the ports. This inculdes creation, deletion
  * fetching and waiting on ports
  * @param {Object} opts
  * @param {Object} opts.state
- * @param {Object} opts.entryPort
- * @param {Object} opts.parentPort
  * @param {Object} opts.hypervisor
  * @param {Object} opts.exoInterface
  */
  constructor (opts) {
    Object.assign(this, opts)
    this.ports = this.state.ports
+    // tracks unbounded ports that we have
    this._unboundPorts = new Set()
-    this._waitingPorts = {}
    this._saturationPromise = new Promise((resolve, reject) => {
      this._saturationResolve = resolve
    })
    this._oldestMessagePromise = new Promise((resolve, reject) => {
@@ -56,10 +54,14 @@
    } else if (this.ports[name]) {
      throw new Error('cannot bind port to a name that is alread bound')
    } else {
      this._unboundPorts.delete(port)
-      this.hypervisor.removeNodeToCheck(this.id)
 
+      port.messages.forEach(message => {
+        message._fromPort = port
+        message.fromName = name
+      })
+
      // save the port instance
      this.ports[name] = port
 
      // update the dest port
@@ -78,15 +80,15 @@
  async unbind (name) {
    const port = this.ports[name]
    delete this.ports[name]
    this._unboundPorts.add(port)
+    this.hypervisor.addNodeToCheck(this.id)
 
    // update the destination port
    const destPort = await this.hypervisor.getDestPort(port)
    delete destPort.destName
    delete destPort.destId
    destPort.destPort = port
-    this.hypervisor.addNodeToCheck(this.id)
    return port
  }
 
  /**
@@ -94,11 +96,9 @@
   * @param {string} name
   */
  delete (name) {
    const port = this.ports[name]
-    this.exInterface.send(port, new Message({
-      data: 'delete'
-    }))
+    this.exInterface.send(port, new DeleteMessage())
    this._delete(name)
  }
 
  _delete (name) {
@@ -110,15 +110,13 @@
   * clears any unbounded ports referances
   */
  clearUnboundedPorts () {
    this._unboundPorts.forEach(port => {
-      this.exInterface.send(port, new Message({
-        data: 'delete'
-      }))
+      this.exInterface.send(port, new DeleteMessage())
    })
    this._unboundPorts.clear()
    if (Object.keys(this.ports).length === 0) {
-      this.hypervisor.deleteInstance(this.id)
+      this.hypervisor.addNodeToCheck(this.id)
    }
  }
 
  /**
@@ -134,26 +132,28 @@
   * queues a message on a port
   * @param {Message} message
   */
  queue (name, message) {
-    if (name) {
-      const port = this.ports[name]
-      if (port.messages.push(message) === 1 && message._fromTicks < this._messageTickThreshold) {
-        message._fromPort = port
-        message.fromName = name
+    const port = this.ports[name]
+
+    message._fromPort = port
+    message.fromName = name
+
+    if (port.messages.push(message) === 1) {
+      if (this._isSaturated()) {
+        this._saturationResolve()
+        this._saturationPromise = new Promise((resolve, reject) => {
+          this._saturationResolve = resolve
+        })
+      }
+
+      if (message._fromTicks < this._messageTickThreshold) {
        this._oldestMessageResolve(message)
        this._oldestMessagePromise = new Promise((resolve, reject) => {
          this._oldestMessageResolve = resolve
        })
        this._messageTickThreshold = Infinity
      }
-
-      if (this.isSaturated()) {
-        this._saturationResolve()
-        this._saturationPromise = new Promise((resolve, reject) => {
-          this._saturationResolve = resolve
-        })
-      }
    }
  }
 
  /**
@@ -186,10 +186,9 @@
 
    // create a new channel for the container
    const ports = this.createChannel()
    this._unboundPorts.delete(ports[1])
-    const promise = this.hypervisor.createInstance(type, data, [ports[1]], id)
-    this.exInterface._addWork(promise)
+    this.hypervisor.createInstance(type, data, [ports[1]], id)
 
    return ports[0]
  }
 
@@ -212,41 +211,66 @@
    this._unboundPorts.add(port2)
    return [port1, port2]
  }
 
-  /**
-   * find and returns the next message
-   * @returns {object}
-   */
-  peekNextMessage () {
+  // find and returns the next message
+  _peekNextMessage () {
    const names = Object.keys(this.ports)
    if (names.length) {
      const portName = names.reduce(messageArbiter.bind(this))
      const port = this.ports[portName]
-      const message = port.messages[0]
-
-      if (message) {
-        message._fromPort = port
-        message.fromName = portName
-        return message
-      }
+      return port.messages[0]
    }
  }
 
  /**
-   * tests wether or not all the ports have a message
-   * @returns {boolean}
+   * Waits for the the next message if any
+   * @returns {Promise}
   */
-  isSaturated () {
+  async getNextMessage () {
+    let message = this._peekNextMessage()
+    let saturated = this._isSaturated()
+    let oldestTime = this.hypervisor.scheduler.oldest()
+
+    while (!saturated && // end if there are messages on all the ports
+      // end if we have a message older then slowest containers
+      !((message && oldestTime >= message._fromTicks) ||
+      // end if there are no messages and this container is the oldest contaner
+      (!message && oldestTime === this.exInterface.ticks))) {
+      const ticksToWait = message ? message._fromTicks : this.exInterface.ticks
+
+      await Promise.race([
+        this.hypervisor.scheduler.wait(ticksToWait, this.id).then(() => {
+          message = this._peekNextMessage()
+        }),
+        this._olderMessage(message).then(m => {
+          message = m
+        }),
+        this._whenSaturated().then(() => {
+          saturated = true
+          message = this._peekNextMessage()
+        })
+      ])
+
+      oldestTime = this.hypervisor.scheduler.oldest()
+    }
+    return message
+  }
+
+  // tests wether or not all the ports have a message
+  _isSaturated () {
    const keys = Object.keys(this.ports)
    return keys.length ? keys.every(name => this.ports[name].messages.length) : 0
  }
 
-  whenSaturated () {
+  // returns a promise that resolve when the ports are saturated
+  _whenSaturated () {
    return this._saturationPromise
  }
 
-  olderMessage (message) {
+  // returns a promise that resolve when a message older then the given message
+  // is recived
+  _olderMessage (message) {
    this._messageTickThreshold = message ? message._fromTicks : 0
    return this._oldestMessagePromise
  }
 }
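The `getNextMessage` loop that moved here from exoInterface.js races three promises: a scheduler wait, `_olderMessage`, and `_whenSaturated`. The last two rely on a resolve-then-re-arm idiom that `queue()` drives whenever a port receives its first message. A small self-contained sketch of that idiom (the `Notifier` class and its names are illustrative, not part of this module):

// notifier-sketch.js
class Notifier {
  constructor () {
    this._arm()
  }

  // create a fresh promise and capture its resolver
  _arm () {
    this._promise = new Promise(resolve => {
      this._resolve = resolve
    })
  }

  // consumers await the currently armed promise
  when () {
    return this._promise
  }

  // producers resolve the current waiters, then re-arm for the next event
  fire (value) {
    this._resolve(value)
    this._arm()
  }
}

// mirrors queue() resolving _saturationPromise once every port has a message
const saturation = new Notifier()
saturation.when().then(() => console.log('all ports have a message'))
saturation.fire()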
scheduler.js
@@ -1,22 +1,23 @@
 const binarySearchInsert = require('binary-search-insert')
 
-const comparator = function (a, b) {
-  return a.ticks - b.ticks
-}
-
-const instancesComparator = function (a, b) {
-  return a[1].ticks - b[1].ticks
-}
-
 module.exports = class Scheduler {
+  /**
+   * The Sceduler manages the run cycle of the containes and figures out which
+   * order they should run in
+   */
   constructor () {
     this._waits = []
     this._running = new Set()
     this._loadingInstances = new Map()
     this.instances = new Map()
   }
 
+  /**
+   * locks the scheduler from clearing waits untill the lock is resolved
+   * @param {string} id
+   * @return {function} the resolve function to call once it to unlock
+   */
   getLock (id) {
     let r
     const promise = new Promise((resolve, reject) => {
       r = resolve
@@ -27,31 +28,56 @@
     this._loadingInstances.set(id, promise)
     return r
   }
 
+  /**
+   * updates an instance with a new tick count
+   * @param {Object} instance - a container instance
+   */
   update (instance) {
     this._update(instance)
     this._checkWaits()
   }
 
   _update (instance) {
     this._running.add(instance.id)
+    // sorts the container instance map by tick count
     this.instances.delete(instance.id)
     const instanceArray = [...this.instances]
-    binarySearchInsert(instanceArray, instancesComparator, [instance.id, instance])
+    binarySearchInsert(instanceArray, comparator, [instance.id, instance])
     this.instances = new Map(instanceArray)
+
+    function comparator (a, b) {
+      return a[1].ticks - b[1].ticks
+    }
   }
 
+  /**
+   * returns a container
+   * @param {string} id
+   * @return {object}
+   */
   getInstance (id) {
     return this.instances.get(id) || this._loadingInstances.get(id)
   }
 
-  done (instance) {
-    this._running.delete(instance.id)
-    this.instances.delete(instance.id)
+  /**
+   * deletes an instance from the scheduler
+   * @param {string} id - the containers id
+   */
+  done (id) {
+    this._running.delete(id)
+    this.instances.delete(id)
     this._checkWaits()
   }
 
+  /**
+   * returns a promise that resolves once all containers have reached the given
+   * number of ticks
+   * @param {interger} ticks - the number of ticks to wait
+   * @param {string} id - optional id of the container that is waiting
+   * @return {Promise}
+   */
   wait (ticks = Infinity, id) {
     this._running.delete(id)
     return new Promise((resolve, reject) => {
       binarySearchInsert(this._waits, comparator, {
@@ -59,38 +85,51 @@
         resolve: resolve
       })
       this._checkWaits()
     })
+
+    function comparator (a, b) {
+      return a.ticks - b.ticks
+    }
   }
 
-  smallest () {
-    return this.instances.size ? [...this.instances][0][1].ticks : 0
+  /**
+   * returns the oldest container's ticks
+   * @return {integer}
+   */
+  oldest () {
+    const nextValue = this.instances.values().next().value
+    return nextValue ? nextValue.ticks : 0
   }
 
+  // checks outstanding waits to see if they can be resolved
   _checkWaits () {
     if (!this._loadingInstances.size) {
       // if there are no running containers
-      if (!this.isRunning()) {
+      if (!this.instances.size) {
         // clear any remanding waits
         this._waits.forEach(wait => wait.resolve())
         this._waits = []
       } else if (!this._running.size) {
-        const smallest = this._waits[0].ticks
+        // if there are no containers running find the oldest wait and update
+        // the oldest containers to it ticks
+        const oldest = this._waits[0].ticks
         for (let instance of this.instances) {
           instance = instance[1]
-          if (instance.ticks > smallest) {
+          if (instance.ticks > oldest) {
             break
           } else {
-            instance.ticks = smallest
+            instance.ticks = oldest
             this._update(instance)
           }
         }
         return this._checkWaits()
       } else {
-        const smallest = this.smallest()
+        // find the old container and see if to can resolve any of the waits
+        const oldest = this.oldest()
        for (const index in this._waits) {
          const wait = this._waits[index]
-          if (wait.ticks <= smallest) {
+          if (wait.ticks <= oldest) {
            wait.resolve()
          } else {
            this._waits.splice(0, index)
            break
@@ -98,9 +137,5 @@
          }
        }
      }
    }
  }
-
-  isRunning () {
-    return this.instances.size
-  }
 }
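The scheduler now keys containers by id in `done()` and exposes `oldest()` in place of `smallest()`. A small sketch of how the reworked calls might fit together, using stub container objects (the stubs and the require path are assumptions for illustration):

// scheduler-demo.js
const Scheduler = require('./scheduler.js')

const scheduler = new Scheduler()
const a = { id: 'a', ticks: 0 }
const b = { id: 'b', ticks: 0 }

scheduler.update(a)
scheduler.update(b)

// resolves once every remaining container has reached at least 5 ticks
scheduler.wait(5, 'b').then(() => {
  console.log('oldest container is now at', scheduler.oldest(), 'ticks')
})

// container 'a' does some work and then leaves the scheduler; with nothing
// left running, the waiting container is advanced to the wait's tick count
// and the wait above resolves
a.ticks = 10
scheduler.update(a)
scheduler.done('a')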
tests/index.js
@@ -46,8 +46,29 @@
   const stateRoot = await hypervisor.createStateRoot(Infinity)
   t.deepEquals(stateRoot, expectedState, 'expected root!')
 })
 
+tape('basic - do not store containers with no ports bound', async t => {
+  t.plan(1)
+  const expectedState = {
+    '/': 'zdpuAx5LRRwTgzPipKEPgh7MHUKu4Pd1BYjDqBcf9whgzvrqf'
+  }
+
+  class testVMContainer extends BaseContainer {
+    initailize () {}
+  }
+
+  const hypervisor = new Hypervisor(node.dag)
+  hypervisor.registerContainer('test', testVMContainer)
+
+  const root = await hypervisor.createInstance('test')
+  const port = root.ports.create('test')
+  root.ports.bind('one', port)
+
+  const stateRoot = await hypervisor.createStateRoot(Infinity)
+  t.deepEquals(stateRoot, expectedState, 'expected root!')
+})
+
 tape('one child contract with saturated ports', async t => {
   t.plan(2)
   let message
   const expectedState = {
deleteMessage.js
@@ -1,0 +1,3 @@
+const Message = require('primea-message')
+
+module.exports = class DeleteMessage extends Message {}
dfsChecker.js
@@ -1,0 +1,71 @@
+/**
+ * Implements a parrilizable DFS check for graph connictivity given a set of nodes
+ * and a root node. Stating for the set of node to check this does a DFS and
+ * will return a set a nodes if any that is not connected to the root node.
+ * @param {object} graph - an instance of ipld-graph-builder
+ * @param {object} state - the state containing all of the containers to search
+ * @param {string} root - the root id
+ * @param {Set} nodes - a set of nodes to start searching from
+ */
+module.exports = async function DFSchecker (graph, state, root, nodes) {
+  const checkedNodesSet = new Set()
+  let hasRootSet = new Set()
+  const promises = []
+
+  for (const id of nodes) {
+    // create a set for each of the starting nodes to track the nodes the DFS has
+    // has traversed
+    const checkedNodes = new Set()
+    checkedNodesSet.add(checkedNodes)
+    promises.push(check(id, checkedNodes))
+  }
+
+  // wait for all the search to complete
+  await Promise.all(promises)
+  // remove the set of nodes that are connected to the root
+  checkedNodesSet.delete(hasRootSet)
+  let unLinkedNodesArray = []
+
+  // combine the unconnected sets into a single array
+  for (const set of checkedNodesSet) {
+    unLinkedNodesArray = unLinkedNodesArray.concat([...set])
+  }
+  return unLinkedNodesArray
+
+  // does the DFS starting with a single node ID
+  async function check (id, checkedNodes) {
+    if (!checkedNodesSet.has(checkedNodes) || // check if this DFS is still searching
+        checkedNodes.has(id) || // check if this DFS has alread seen the node
+        hasRootSet === checkedNodes) { // check that this DFS has alread found the root node
+      return
+    }
+
+    // check if any of the the other DFSs have seen this node and if so merge
+    // the sets and stop searching
+    for (const set of checkedNodesSet) {
+      if (set.has(id)) {
+        checkedNodes.forEach(id => set.add(id))
+        checkedNodesSet.delete(checkedNodes)
+        return
+      }
+    }
+
+    // mark the node 'checked'
+    checkedNodes.add(id)
+
+    // check to see if we are at the root
+    if (id === root) {
+      hasRootSet = checkedNodes
+      return
+    }
+
+    const node = state[id]['/']
+    const promises = []
+    // iterate through the nodes ports and recursivly check them
+    for (const name in node.ports) {
+      const port = node.ports[name]
+      promises.push(check(port.destId, checkedNodes))
+    }
+    return Promise.all(promises)
+  }
+}
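A toy run of the extracted checker against a hand-built state (the ids and port shapes below are made up for illustration, and `null` stands in for the graph argument, which the traversal itself does not read):

// dfs-demo.js
const DFSchecker = require('./dfsChecker.js')

// minimal state shaped the way the checker reads it: state[id]['/'].ports[name].destId
const state = {
  root:     { '/': { ports: { child: { destId: 'a' } } } },
  a:        { '/': { ports: { parent: { destId: 'root' } } } },
  orphaned: { '/': { ports: {} } }
}

// start the search from the two candidate nodes; only 'orphaned' cannot reach 'root'
DFSchecker(null, state, 'root', new Set(['a', 'orphaned'])).then(unlinked => {
  console.log(unlinked) // expected: [ 'orphaned' ]
})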