Commit 864bc27090fe788a4bf5a86c00246ca6e4391dd0
docs
wanderer committed on 6/21/2017, 10:29:16 PM
Parent: 4fdd4668b21eec221f7e9be2df9b212b29feffdc
Files changed
exoInterface.js | changed
index.js | changed
portManager.js | changed
scheduler.js | changed
tests/index.js | changed
exoInterface.js

@@ -18,9 +18,8 @@
     this.container = new opts.container.Constructor(this, opts.container.args)

     this.ticks = 0
     this.containerState = 'idle'
-    this._waitingMap = new Map()

     // create the port manager
     this.ports = new PortManager(Object.assign({
       exInterface: this
@@ -28,9 +28,10 @@
   }

   /**
    * adds a message to this container's message queue
-   * @param {Message} message
+   * @param {string} portName
+   * @param {object} message
    */
   queue (portName, message) {
     message._hops++
     this.ports.queue(portName, message)
@@ -43,24 +43,28 @@
       }
     }
   }

+  // waits for the next message
   async _runNextMessage () {
+    // check if the ports are saturated; if so we don't have to wait on the
+    // scheduler
     if (!this.ports.isSaturated()) {
       await this.hypervisor.scheduler.wait(this.ticks, this.id)
     }

-    if (this.ports.hasMessages()) {
-      let message = this.ports.peekNextMessage()
+    let message = this.ports.peekNextMessage()
+    if (message) {
       if (this.ticks < message._fromTicks) {
         this.ticks = message._fromTicks
         // check for tied messages
         this.hypervisor.scheduler.update(this)
-        await this.hypervisor.scheduler.wait(this.ticks, this.id)
+        if (!this.ports.isSaturated()) {
+          await this.hypervisor.scheduler.wait(this.ticks, this.id)
+          message = this.ports.peekNextMessage()
+        }
       }
-      message = this.ports.nextMessage()
-      this.currentMessage = message
-
+      message.fromPort.messages.shift()
       // run the next message
       this.run(message)
     } else {
       // if no more messages, then shut down
@@ -75,11 +79,9 @@
    * @returns {Promise}
    */
   async run (message, init = false) {
     let result
-    message.ports.forEach(port => {
-      this.ports._unboundPorts.add(port)
-    })
+    message.ports.forEach(port => this.ports._unboundPorts.add(port))
     if (message.data === 'delete') {
       this.ports._delete(message.fromName)
     } else {
       const method = init ? 'initailize' : 'run'
@@ -134,8 +136,9 @@

     // if (this.currentMessage !== message && !message.responsePort) {
     //   this.currentMessage._addSubMessage(message)
     // }
+
     if (port.destId) {
       const id = port.destId
       const instance = await this.hypervisor.getInstance(id)
       instance.queue(port.destName, message)
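The reworked _runNextMessage waits on the scheduler only when the ports are not saturated: once every bound port holds a message, the next canonical message is already determined, so there is nothing to wait for. A minimal standalone sketch of that idea (the ports and scheduler objects here are illustrative stand-ins, not this codebase's real classes):

// stand-in scheduler: pretend every other container has already passed `ticks`
const scheduler = {
  wait: async (ticks) => {}
}

// stand-in port manager with a single port's message queue
const ports = {
  messages: [{_fromTicks: 5, data: 'hello'}],
  isSaturated () { return this.messages.length > 0 },
  peekNextMessage () { return this.messages[0] }
}

async function runNextMessage (container) {
  // only block on the scheduler when an earlier message could still arrive
  if (!container.ports.isSaturated()) {
    await container.scheduler.wait(container.ticks)
  }
  const message = container.ports.peekNextMessage()
  if (message) {
    // fast-forward this container's clock to the message's send time
    if (container.ticks < message._fromTicks) {
      container.ticks = message._fromTicks
    }
    container.ports.messages.shift()
    return message
  }
  return null // the real code shuts the container down here
}

const container = {ticks: 0, scheduler, ports}
runNextMessage(container).then(m => console.log(m.data, container.ticks)) // 'hello' 5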
index.js

@@ -13,36 +13,45 @@
    */
   constructor (dag, state = {}) {
     this.graph = new Graph(dag)
     this.scheduler = new Scheduler()
-    this._state = state
+    this.state = state
     this._containerTypes = {}
     this._nodesToCheck = new Set()
   }

-  getDestPort (port) {
-    if (port.destPort) {
-      return port.destPort
-    } else {
-      return this.graph.get(this._state, `${port.destId}/ports/${port.destName}`)
-    }
+  /**
+   * adds a potential node in the state graph to check for garbage collection
+   * @param {string} id
+   */
+  addNodeToCheck (id) {
+    this._nodesToCheck.add(id)
   }

   /**
+   * removes a potential node in the state graph to check for garbage collection
+   * @param {string} id
    */
-  async getInstance (id) {
-    let instance = this.scheduler.getInstance(id)
-    if (instance) {
-      return instance
+  removeNodeToCheck (id) {
+    this._nodesToCheck.delete(id)
+  }
+
+  /**
+   * given a port, this finds the corresponding endpoint port of the channel
+   * @param {object} port
+   * @returns {Promise}
+   */
+  getDestPort (port) {
+    if (port.destPort) {
+      return port.destPort
     } else {
-      const lock = this.scheduler.getLock()
-      instance = await this._loadInstance(id, lock)
-      return instance
+      return this.graph.get(this.state, `${port.destId}/ports/${port.destName}`)
     }
   }

+  // loads an instance of a container from the state
   async _loadInstance (id, lock) {
-    const state = await this.graph.get(this._state, id)
+    const state = await this.graph.get(this.state, id)
     const container = this._containerTypes[state.type]

     // create a new kernel instance
     const exoInterface = new ExoInterface({
@@ -57,9 +66,37 @@
     this.scheduler.releaseLock(lock)
     return exoInterface
   }

+  /**
+   * gets an existing container instance
+   * @param {string} id - the container's ID
+   * @returns {Promise}
+   */
+  async getInstance (id) {
+    let instance = this.scheduler.getInstance(id)
+    if (instance) {
+      return instance
+    } else {
+      const lock = this.scheduler.getLock()
+      instance = await this._loadInstance(id, lock)
+      return instance
+    }
+  }
+
+  /**
+   * creates a new container instance and saves it in the state
+   * @param {string} type - the type of container to create
+   * @param {*} code
+   * @param {array} entryPorts
+   * @param {object} id
+   * @param {object} id.nonce
+   * @param {object} id.parent
+   * @returns {Promise}
+   */
   async createInstance (type, code, entryPorts = [], id = {nonce: 0, parent: null}) {
+    // create a lock to prevent the scheduler from resolving waits before the
+    // new container is loaded
     const lock = this.scheduler.getLock()
     id = await this.getHashFromObj(id)
     const state = {
       nonce: [0],
@@ -67,21 +104,28 @@
       type: type,
       code: code
     }

-    await this.graph.set(this._state, id, state)
+    // save the container in the state
+    await this.graph.set(this.state, id, state)
+    // create the container instance
     const exoInterface = await this._loadInstance(id, lock)
+    // send the initialization message
     exoInterface.queue(null, new Message({
       ports: entryPorts
     }))

     return exoInterface
   }

+  /**
+   * deletes a container from the state
+   * @param {string} id
+   */
   deleteInstance (id) {
     if (id !== ROOT_ID) {
       this._nodesToCheck.delete(id)
-      delete this._state[id]
+      delete this.state[id]
     }
   }

   /**
@@ -89,15 +133,15 @@
    * ticks
    * @param {Number} ticks - the number of ticks at which to create the state root
    * @returns {Promise}
    */
-  async createStateRoot (ticks = Infinity) {
+  async createStateRoot (ticks) {
     await this.scheduler.wait(ticks)
-    const unlinked = await DFSchecker(this.graph, this._state, ROOT_ID, this._nodesToCheck)
+    const unlinked = await DFSchecker(this.graph, this.state, ROOT_ID, this._nodesToCheck)
     unlinked.forEach(id => {
-      delete this._state[id]
+      delete this.state[id]
     })
-    return this.graph.flush(this._state)
+    return this.graph.flush(this.state)
   }

   /**
    * registers a container with the hypervisor
@@ -111,55 +155,76 @@
       args: args
     }
   }

+  /**
+   * gets a hash from a POJO
+   * @param {object} obj
+   * @return {Promise}
+   */
   async getHashFromObj (obj) {
     return (await this.graph.flush(obj))['/']
   }
 }

+// Implements a parallelizable DFS check for graph connectivity given a set of
+// nodes and a root node. Starting from the set of nodes to check, this does a
+// DFS and returns a set of nodes, if any, that is not connected to the root node.
 async function DFSchecker (graph, state, root, nodes) {
   const checkedNodesSet = new Set()
   let hasRootSet = new Set()
   const promises = []

   for (const id of nodes) {
+    // create a set for each of the starting nodes to track the nodes the DFS
+    // has traversed
     const checkedNodes = new Set()
     checkedNodesSet.add(checkedNodes)
     promises.push(check(id, checkedNodes))
   }

+  // wait for all the searches to complete
   await Promise.all(promises)
+  // remove the set of nodes that are connected to the root
   checkedNodesSet.delete(hasRootSet)
   let unLinkedNodesArray = []

+  // combine the unconnected sets into a single array
   for (const set of checkedNodesSet) {
     unLinkedNodesArray = unLinkedNodesArray.concat([...set])
   }
   return unLinkedNodesArray

+  // does the DFS starting with a single node ID
   async function check (id, checkedNodes) {
-    if (!checkedNodesSet.has(checkedNodes) || checkedNodes.has(id) || hasRootSet === checkedNodes) {
+    if (!checkedNodesSet.has(checkedNodes) || // check if this DFS is still searching
+        checkedNodes.has(id) || // check if this DFS has already seen the node
+        hasRootSet === checkedNodes) { // check if this DFS has already found the root node
       return
     }

+    // check if any of the other DFSs have seen this node and if so merge
+    // the sets and stop searching
     for (const set of checkedNodesSet) {
       if (set.has(id)) {
         checkedNodes.forEach(id => set.add(id))
         checkedNodesSet.delete(checkedNodes)
         return
       }
     }

+    // mark the node 'checked'
     checkedNodes.add(id)

+    // check to see if we are at the root
     if (id === root) {
       hasRootSet = checkedNodes
       return
     }

-    const node = await graph.get(state, id)
+    const node = state[id]['/']
     const promises = []
+    // iterate through the node's ports and recursively check them
     for (const name in node.ports) {
       const port = node.ports[name]
       promises.push(check(port.destId, checkedNodes))
     }
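The new comments on DFSchecker describe the garbage-collection strategy: run one DFS per suspect node, merge searches that collide, and report every traversal set that never reaches the root. A condensed synchronous sketch of the same idea, using a plain edges adjacency map as a stand-in for the port graph stored in the state:

function findUnlinked (edges, root, suspects) {
  const searches = new Set()
  let rootSet = null

  function check (id, seen) {
    if (!searches.has(seen) || seen.has(id) || seen === rootSet) return
    // merge with any other search that has already visited this node
    for (const other of searches) {
      if (other.has(id)) {
        seen.forEach(n => other.add(n))
        searches.delete(seen)
        return
      }
    }
    seen.add(id)
    if (id === root) {
      rootSet = seen
      return
    }
    for (const dest of edges[id] || []) {
      check(dest, seen)
    }
  }

  for (const id of suspects) {
    const seen = new Set()
    searches.add(seen)
    check(id, seen)
  }

  // every remaining search that isn't the root's set is unreachable
  searches.delete(rootSet)
  return [...searches].reduce((acc, set) => acc.concat([...set]), [])
}

// 'c' and 'd' form a subgraph that is cut off from the root 'a'
console.log(findUnlinked({a: ['b'], b: ['a'], c: ['d'], d: ['c']}, 'a', ['b', 'c']))
// => ['c', 'd']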
portManager.js

@@ -50,8 +50,9 @@
     } else if (this.ports[name]) {
       throw new Error('cannot bind port to a name that is already bound')
     } else {
       this._unboundPorts.delete(port)
+      this.hypervisor.removeNodeToCheck(this.id)

       // save the port instance
       this.ports[name] = port

@@ -64,38 +65,45 @@
   }

   /**
    * unbinds a port given its name
-   * @param {String} name
-   * @returns {boolean} whether or not the port was deleted
+   * @param {string} name
+   * @returns {Promise}
    */
   async unbind (name) {
     const port = this.ports[name]
     delete this.ports[name]
     this._unboundPorts.add(port)

-    let destPort = await this.hypervisor.getDestPort(port)
-
+    // update the destination port
+    const destPort = await this.hypervisor.getDestPort(port)
     delete destPort.destName
     delete destPort.destId
     destPort.destPort = port
-    this.hypervisor._nodesToCheck.add(this.id)
+    this.hypervisor.addNodeToCheck(this.id)
     return port
   }

+  /**
+   * deletes a port given the name it is bound to
+   * @param {string} name
+   */
   delete (name) {
     const port = this.ports[name]
-    this._delete(name)
     this.exInterface.send(port, new Message({
       data: 'delete'
     }))
+    this._delete(name)
   }

   _delete (name) {
-    this.hypervisor._nodesToCheck.add(this.id)
+    this.hypervisor.addNodeToCheck(this.id)
     delete this.ports[name]
   }

+  /**
+   * clears any unbound port references
+   */
   clearUnboundedPorts () {
     this._unboundPorts.forEach(port => {
       this.exInterface.send(port, new Message({
         data: 'delete'
@@ -135,33 +143,38 @@
     return this.ports[name]
   }

   /**
-   * creates a new Port given the container type
+   * creates a new container, returning a port to it
    * @param {String} type
    * @param {*} data - the data to populate the initial state with
-   * @returns {Promise}
+   * @returns {Object}
    */
   create (type, data) {
-    // const container = this.hypervisor._containerTypes[type]
     let nonce = this.state.nonce

     const id = {
       nonce: nonce,
       parent: this.id
     }

+    // increment the nonce
+    nonce = new BN(nonce)
+    nonce.iaddn(1)
+    this.state.nonce = nonce.toArray()
+
+    // create a new channel for the container
     const ports = this.createChannel()
     this._unboundPorts.delete(ports[1])
    this.hypervisor.createInstance(type, data, [ports[1]], id)

-    // increment the nonce
-    nonce = new BN(nonce)
-    nonce.iaddn(1)
-    this.state.nonce = nonce.toArray()
     return ports[0]
   }

+  /**
+   * creates a channel and returns the created ports in an array
+   * @returns {array}
+   */
   createChannel () {
     const port1 = {
       messages: []
     }
@@ -177,31 +190,29 @@
     return [port1, port2]
   }

   /**
-   * gets the next canonical message given an array of ports to choose from
-   * @param {Array} ports
-   * @returns {Promise}
+   * finds and returns the next message
+   * @returns {object}
    */
-  nextMessage () {
-    const message = this.peekNextMessage()
-    message._fromPort.messages.shift()
-    return message
-  }
-
   peekNextMessage () {
-    const portName = Object.keys(this.ports).reduce(messageArbiter.bind(this))
-    const port = this.ports[portName]
-    const message = port.messages[0]
-    message._fromPort = port
-    message.fromName = portName
-    return message
+    const names = Object.keys(this.ports)
+    if (names.length) {
+      const portName = names.reduce(messageArbiter.bind(this))
+      const port = this.ports[portName]
+      const message = port.messages[0]
+      if (message) {
+        message._fromPort = port
+        message.fromName = portName
+        return message
+      }
+    }
   }

-  hasMessages () {
-    return Object.keys(this.ports).some(name => this.ports[name].messages.length)
-  }
-
+  /**
+   * tests whether or not all the ports have a message
+   * @returns {boolean}
+   */
   isSaturated () {
     return Object.keys(this.ports).every(name => this.ports[name].messages.length)
   }
 }
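create() now increments the nonce before wiring up the channel, but the child's deterministic id still captures the pre-increment value because the id object is built first. A small sketch of those two pieces, assuming bn.js semantics and a simplified channel shape in which destPort holds a direct object reference until bind() replaces it with a destId/destName pair:

const BN = require('bn.js')

// simplified stand-in for createChannel(): two ports that point at each
// other directly through `destPort` while both ends are still unbound
function createChannel () {
  const port1 = {messages: []}
  const port2 = {messages: [], destPort: port1}
  port1.destPort = port2
  return [port1, port2]
}

const state = {nonce: [0]}

// the id captures the current nonce...
const id = {nonce: state.nonce, parent: 'parentId'}

// ...and the nonce is then incremented so the next child gets a fresh id
const nonce = new BN(state.nonce)
nonce.iaddn(1)
state.nonce = nonce.toArray()

const [parentEnd, childEnd] = createChannel()
console.log(state.nonce) // [1]
console.log(parentEnd.destPort === childEnd) // true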
scheduler.js

@@ -43,17 +43,16 @@
     this.instances.delete(instance.id)
     this._checkWaits()
   }

-  wait (ticks, id) {
+  wait (ticks = Infinity) {
     if (!this.locks.size && ticks <= this.smallest()) {
       return
     } else {
       return new Promise((resolve, reject) => {
         binarySearchInsert(this._waits, comparator, {
           ticks: ticks,
-          resolve: resolve,
-          id: id
+          resolve: resolve
         })
       })
     }
   }
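With the id field dropped and ticks defaulting to Infinity, createStateRoot() can call scheduler.wait() with no arguments to block until every instance is finished. A condensed sketch of the wait-queue mechanics, assuming the binary-search-insert module this file already uses, and an illustrative oldest field in place of the real smallest() over the instance map:

const binarySearchInsert = require('binary-search-insert')

const comparator = (a, b) => a.ticks - b.ticks

class MiniScheduler {
  constructor () {
    this._waits = []
    this.locks = new Set()
    this.oldest = 0 // smallest tick count of any running instance
  }

  wait (ticks = Infinity) {
    // resolve immediately if nothing is locked and all instances are past `ticks`
    if (!this.locks.size && ticks <= this.oldest) return
    return new Promise(resolve => {
      binarySearchInsert(this._waits, comparator, {ticks, resolve})
    })
  }

  // release every wait at or below the oldest instance's tick count
  _checkWaits () {
    while (this._waits.length && this._waits[0].ticks <= this.oldest) {
      this._waits.shift().resolve()
    }
  }
}

const s = new MiniScheduler()
s.wait(10).then(() => console.log('every instance reached tick 10'))
s.oldest = 12
s._checkWaits() // logs once the promise resolves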
tests/index.js

@@ -46,8 +46,45 @@
   const stateRoot = await hypervisor.createStateRoot(Infinity)
   t.deepEquals(stateRoot, expectedState, 'expected root!')
 })

+tape('one child contract with saturated ports', async t => {
+  t.plan(2)
+  let message
+  const expectedState = {
+    '/': 'zdpuAtVcH6MUnvt2RXnLsDXyLB3CBSQ7aydfh2ogSKGCejJCQ'
+  }
+
+  class testVMContainer2 extends BaseContainer {
+    run (m) {
+      t.true(m === message, 'should receive a message')
+    }
+  }
+
+  class testVMContainer extends BaseContainer {
+    run (m) {
+      const port = this.exInterface.ports.create('test2')
+      this.exInterface.ports.bind('child', port)
+      this.exInterface.incrementTicks(2)
+      this.exInterface.send(port, m)
+    }
+  }
+
+  const hypervisor = new Hypervisor(node.dag)
+  hypervisor.registerContainer('test', testVMContainer)
+  hypervisor.registerContainer('test2', testVMContainer2)
+
+  let root = await hypervisor.createInstance('test')
+  let port = root.ports.create('test')
+
+  root.ports.bind('first', port)
+  message = root.createMessage()
+
+  root.send(port, message)
+  const stateRoot = await hypervisor.createStateRoot(Infinity)
+  t.deepEquals(stateRoot, expectedState, 'expected state')
+})
+
 tape('one child contract', async t => {
   t.plan(4)
   let message
   const expectedState = {
@@ -150,9 +187,8 @@
         this.exInterface.ports.bind('two', two)

         this.exInterface.send(one, this.exInterface.createMessage())
         this.exInterface.send(two, this.exInterface.createMessage())
-
       } else if (runs === 1) {
         runs++
         t.equals(m.data, 'second', 'should receive the second message')
       } else if (runs === 2) {
@@ -486,8 +522,61 @@

   t.end()
 })

+tape('should not remove connected nodes', async t => {
+  const expectedSr = {
+    '/': 'zdpuAwsZTd5mRZBCYA1FJSHrpYDPgSZSiaTQp9xkUeajaoMHM'
+  }
+  class Root extends BaseContainer {
+    run (m) {
+      if (m.ports.length) {
+        const port = this.exInterface.ports.get('test1')
+        this.exInterface.send(port, m)
+        this.exInterface.ports.unbind('test1')
+        // this.exInterface.ports.unbind('test2')
+      } else {
+        const port1 = this.exInterface.ports.create('sub')
+        this.exInterface.ports.bind('test1', port1)
+        const port2 = this.exInterface.ports.create('sub')
+        this.exInterface.ports.bind('test2', port2)
+        this.exInterface.send(port2, this.exInterface.createMessage({data: 'getChannel'}))
+      }
+    }
+  }
+
+  class Sub extends BaseContainer {
+    run (message) {
+      if (message.data === 'getChannel') {
+        const ports = this.exInterface.ports.createChannel()
+        this.exInterface.ports.bind('channel', ports[0])
+        this.exInterface.send(message.fromPort, this.exInterface.createMessage({
+          data: 'bindPort',
+          ports: [ports[1]]
+        }))
+      } else if (message.data === 'bindPort') {
+        this.exInterface.ports.bind('channel', message.ports[0])
+      }
+    }
+  }
+
+  const hypervisor = new Hypervisor(node.dag)
+
+  hypervisor.registerContainer('root', Root)
+  hypervisor.registerContainer('sub', Sub)
+
+  const root = await hypervisor.createInstance('root')
+  const port = root.ports.create('root')
+  root.ports.bind('first', port)
+  root.send(port, root.createMessage())
+  const sr = await hypervisor.createStateRoot()
+  t.deepEquals(sr, expectedSr, 'should produce the correct state root')
+  // await hypervisor.graph.tree(sr, Infinity)
+  // console.log(JSON.stringify(sr, null, 2))
+
+  t.end()
+})
+
 tape('should remove multiple subgraphs', async t => {
   const expectedSr = {
     '/': 'zdpuAmi9tkYTpoVsZvqQgxpQFRhCgYFVv4W3fjjfVhf1j8swv'
   }