Commit 4d55c78be4ffb7631d68b6f18e18e09d8bd4b510
fix port binding error
Signed-off-by: wanderer <mjbecze@gmail.com>
wanderer committed on 8/11/2017, 2:18:58 PM
Parent: d0da8197e290a0f7d27a4b08cbc89d00cf1ca258
Files changed:
  index.js
  portManager.js
  scheduler.js
  tests/index.js
index.js

@@ -177,9 +177,8 @@
     for (const id of unlinked) {
       await this.tree.delete(id)
     }
 
-    // console.log(JSON.stringify(this.state, null, 2))
     return this.graph.flush(this.state)
   }
 
   /**
portManager.js

@@ -54,18 +54,17 @@
       throw new Error('cannot bind port to a name that is alread bound')
     } else {
       this._unboundPorts.delete(port)
 
-      port.messages.forEach(message => {
-        message._fromPort = port
-        message.fromName = name
-      })
-
       // save the port instance
       this.ports[name] = port
 
       // update the dest port
       const destPort = await this.hypervisor.getDestPort(port)
+      port.messages.forEach(message => {
+        message._fromPort = port
+        message.fromName = name
+      })
       destPort.destName = name
       destPort.destId = this.id
       delete destPort.destPort
     }
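This reordering is the heart of the fix: `bind` suspends at `await this.hypervisor.getDestPort(port)`, and any message queued on the port during that suspension used to miss the `_fromPort`/`fromName` stamping because the `forEach` had already run. Stamping after the await closes that window. A minimal sketch of the race, using a hypothetical `delay` helper in place of the real hypervisor call:

  const delay = ms => new Promise(resolve => setTimeout(resolve, ms))

  // toy stand-in for the old, broken ordering; not the real portManager API
  async function bindStampsEarly (port, name) {
    port.messages.forEach(m => { m.fromName = name }) // runs before the await
    await delay(10) // models the hypervisor.getDestPort(port) round trip
  }

  async function demo () {
    const port = { messages: [] }
    const binding = bindStampsEarly(port, 'first')
    port.messages.push({ data: 'late' }) // queued while bind is suspended
    await binding
    console.log(port.messages[0].fromName) // undefined: the late message was missed
  }
  demo()

With the stamping moved after the await, the `forEach` sees everything queued up to that point.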
@@ -93,12 +92,12 @@
   /**
    * delete an port given the name it is bound to
    * @param {string} name
    */
-  delete (name) {
+  async delete (name) {
     const port = this.ports[name]
+    await this.kernel.send(port, new DeleteMessage())
     this._delete(name)
-    return this.kernel.send(port, new DeleteMessage())
   }
 
   _delete (name) {
     this.hypervisor.addNodeToCheck(this.id)
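`delete` becomes `async`, and the `DeleteMessage` is now sent and awaited before `_delete` removes the binding, instead of unbinding first and returning the send promise. Presumably the send needs the name binding still intact when the message leaves. A reduced sketch of the ordering with toy stand-ins (not the real kernel API):

  // toy stand-ins, assumed for illustration only
  async function send (port, message) {
    message.fromName = port.name // the send reads the live binding
  }

  async function deletePort (ports, name) {
    const port = ports[name]
    await send(port, { type: 'delete' }) // stamped while still bound
    delete ports[name] // stands in for _delete(name): unbind only afterwards
  }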
@@ -199,9 +198,9 @@
    */
   async getNextMessage () {
     let message = this._peekNextMessage()
     let saturated = this._isSaturated()
-    let oldestTime = this.hypervisor.scheduler.oldest()
+    let oldestTime = this.hypervisor.scheduler.leastNumberOfTicks()
 
     while (!saturated && // end if there are messages on all the ports
       // end if we have a message older then slowest containers
       !((message && oldestTime >= message._fromTicks) ||
@@ -221,9 +220,9 @@
           message = this._peekNextMessage()
         })
       ])
 
-      oldestTime = this.hypervisor.scheduler.oldest()
+      oldestTime = this.hypervisor.scheduler.leastNumberOfTicks()
     }
 
     return message
   }
scheduler.js

@@ -96,9 +96,9 @@
   /**
    * returns the oldest container's ticks
    * @return {integer}
    */
-  oldest () {
+  leastNumberOfTicks () {
     const nextValue = this.instances.values().next().value
     return nextValue ? nextValue.ticks : 0
   }
 
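The rename makes the contract plainer: the method returns the tick count of the first entry in `instances`, which is the minimum only because that Map's iteration order is maintained sorted by ticks (0 when no instance is running). A rough self-contained model of that invariant, not the real scheduler:

  class SchedulerModel {
    constructor () {
      this.instances = new Map() // id -> { ticks }, iteration order kept sorted by ticks
    }

    _insert (id, instance) {
      // rebuild the Map so iteration order stays sorted by ticks
      const entries = [...this.instances, [id, instance]]
        .sort(([, a], [, b]) => a.ticks - b.ticks)
      this.instances = new Map(entries)
    }

    leastNumberOfTicks () {
      const nextValue = this.instances.values().next().value
      return nextValue ? nextValue.ticks : 0
    }
  }

  const s = new SchedulerModel()
  s._insert('a', { ticks: 70 })
  s._insert('b', { ticks: 20 })
  console.log(s.leastNumberOfTicks()) // 20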
@@ -110,12 +110,12 @@
       this._waits.forEach(wait => wait.resolve())
       this._waits = []
     } else {
       // find the old container and see if to can resolve any of the waits
-      const oldest = this.oldest()
+      const least = this.leastNumberOfTicks()
       for (const index in this._waits) {
         const wait = this._waits[index]
-        if (wait.ticks <= oldest) {
+        if (wait.ticks <= least) {
           wait.resolve()
           this._running.add(wait.id)
         } else {
           this._waits.splice(0, index)
tests/index.js

@@ -55,9 +55,9 @@
     rootContainer.send(portRef1, message)
 
     const stateRoot = await hypervisor.createStateRoot(Infinity)
     t.deepEquals(stateRoot, expectedState, 'expected root!')
-    t.equals(hypervisor.scheduler.oldest(), 0)
+    t.equals(hypervisor.scheduler.leastNumberOfTicks(), 0)
   } catch (e) {
     console.log(e)
   }
 })
@@ -307,8 +307,9 @@
   t.plan(3)
   let runs = 0
 
   class Root extends BaseContainer {
+    onIdle () {}
     async onMessage (m) {
       if (!runs) {
         runs++
         const [portRef1, portRef2] = this.kernel.ports.createChannel()
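The new no-op `onIdle () {}` presumably overrides whatever `BaseContainer` does by default when the message queue drains, keeping this container alive between the test's messages. The default is not shown in this diff; a guess at its shape, for illustration only:

  // hypothetical BaseContainer default, assumed for illustration
  class BaseContainer {
    onIdle () {
      this.kernel.shutdown() // stop once there is nothing left to process
    }
  }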
@@ -334,12 +335,12 @@
         runs++
         t.equals(m.data, 'first', 'should recive the first message')
       } else if (runs === 2) {
         runs++
-        t.equals(m.data, 'second', 'should recive the first message')
+        t.equals(m.data, 'second', 'should recive the second message')
       } else if (runs === 3) {
         runs++
-        t.equals(m.data, 'third', 'should recived the second message')
+        t.equals(m.data, 'third', 'should recived the third message')
       }
     }
     static get typeId () {
       return 299
@@ -389,9 +390,9 @@
   hypervisor.registerContainer(First)
   hypervisor.registerContainer(Second)
   hypervisor.registerContainer(Waiter)
 
-  const root = await hypervisor.createInstance(Root.typeId)
+  let root = await hypervisor.createInstance(Root.typeId)
   const [portRef1, portRef2] = root.ports.createChannel()
   const [portRef3, portRef4] = root.ports.createChannel()
 
   const message = root.createMessage()
@@ -406,11 +407,12 @@
       ports: [portRef4]
     }))
   ])
 
+  // root = await hypervisor.getInstance(root.id)
   root.incrementTicks(100)
   await root.send(portRef1, root.createMessage({data: 'testss'}))
-  hypervisor.scheduler.done(root.id)
+  root.shutdown()
 } catch (e) {
   console.log(e)
 }
})
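Also note `hypervisor.scheduler.done(root.id)` becomes `root.shutdown()`: the test ends the container through its own lifecycle call instead of reaching into the scheduler. What `shutdown` does beyond reporting completion is not visible in this diff; a hypothetical minimal shape:

  // hypothetical kernel method, assumed to delegate to the scheduler
  shutdown () {
    this.hypervisor.scheduler.done(this.id)
  }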
@@ -461,12 +463,14 @@
   hypervisor.registerContainer(First)
 
   const root = await hypervisor.createInstance(Root.typeId)
   const [portRef1, portRef2] = root.ports.createChannel()
-  await root.ports.bind('first', portRef1)
-  await root.createInstance(Root.typeId, root.createMessage({
-    ports: [portRef2]
-  }))
+  await Promise.all([
+    root.ports.bind('first', portRef1),
+    root.createInstance(Root.typeId, root.createMessage({
+      ports: [portRef2]
+    }))
+  ])
 
   const message = root.createMessage()
   await root.send(portRef1, message)
   await hypervisor.createStateRoot()
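The last test now issues the bind and the child-instance creation concurrently. Under `Promise.all` both operations start before either resolves, which interleaves `bind`'s internal await with message delivery: the exact window the portManager reordering above is meant to cover. A generic sketch of the difference, with placeholder async functions:

  async function demo () {
    const bindPort = async () => 'bound' // placeholder for root.ports.bind(...)
    const spawn = async () => 'spawned' // placeholder for root.createInstance(...)

    // sequential: spawn starts only after bindPort has resolved
    await bindPort()
    await spawn()

    // concurrent: both start at once, so their await points interleave
    await Promise.all([bindPort(), spawn()])
  }
  demo()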