tests/index.js
const tape = require('tape')
const IPFS = require('ipfs')
const AbstractContainer = require('primea-abstract-container')
const Message = require('primea-message')
const Hypervisor = require('../')

// start ipfs
const node = new IPFS({
  start: false
})

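// base class for all test containers below: on creation it stores any code
// carried by the message and binds the first incoming port to 'root'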
class BaseContainer extends AbstractContainer {
  onCreation (message) {
    this.kernel.state.code = message.data.byteLength ? message.data : undefined
    const port = message.ports[0]
    if (port) {
      return this.kernel.ports.bind('root', port)
    }
  }
  static get typeId () {
    return 9
  }
}

node.on('ready', () => {
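  // basic: a root container creates one child over a channel and sends it a
  // message, then checks the resulting state root and the scheduler's tick count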
  tape('basic', async t => {
    t.plan(3)
    let message
    const expectedState = {
      '/': 'zdpuApGUFnjcY3eBeVPFfnEgGunPz8vyXVJbrkgBmYwrbVDpA'
    }

    class testVMContainer extends BaseContainer {
      onMessage (m) {
        t.true(m === message, 'should receive a message')
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)
      hypervisor.registerContainer(testVMContainer)

      const rootContainer = await hypervisor.createInstance(testVMContainer.typeId)

      const [portRef1, portRef2] = rootContainer.ports.createChannel()
      const initMessage = rootContainer.createMessage({
        data: Buffer.from('test code'),
        ports: [portRef2]
      })

      await rootContainer.createInstance(testVMContainer.typeId, initMessage)

      await rootContainer.ports.bind('first', portRef1)
      message = rootContainer.createMessage()
      rootContainer.send(portRef1, message)

      const stateRoot = await hypervisor.createStateRoot(Infinity)
      t.deepEquals(stateRoot, expectedState, 'expected root!')
      t.equals(hypervisor.scheduler.leastNumberOfTicks(), 0)
    } catch (e) {
      console.log(e)
    }
  })

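  // containers that never bind any port should not be persisted in the state root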
  tape('basic - do not store containers with no ports bound', async t => {
    t.plan(1)
    const expectedState = {
      '/': 'zdpuAop4nt8pqzg7duciSYbZmWfDaBiz87RCtGCbb35ewUrbW'
    }

    class testVMContainer extends BaseContainer {
      onCreation () {}
    }

    try {
      const hypervisor = new Hypervisor(node.dag)
      hypervisor.registerContainer(testVMContainer)

      const root = await hypervisor.createInstance(testVMContainer.typeId)
      const [portRef1, portRef2] = root.ports.createChannel()

      await root.ports.bind('one', portRef1)
      await root.createInstance(testVMContainer.typeId, root.createMessage({
        ports: [portRef2]
      }))

      const stateRoot = await hypervisor.createStateRoot(Infinity)

      // await hypervisor.graph.tree(stateRoot, Infinity, true)
      // console.log(JSON.stringify(stateRoot, null, 2))
      t.deepEquals(stateRoot, expectedState, 'expected root!')
    } catch (e) {
      console.log(e)
    }
  })

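  // a child container is created from within onMessage and the message is
  // forwarded to it; afterwards the root instance is reloaded by id and the
  // message is replayed through the 'first' port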
  tape('one child contract', async t => {
    t.plan(4)
    let message
    const expectedState = {
      '/': 'zdpuArCqpDZtEqjrXrRhMiYLE7QQ1szVr1qLVkiwtDLincGWU'
    }
    let hasResolved = false

    class testVMContainer2 extends BaseContainer {
      onMessage (m) {
        t.true(m === message, 'should receive a message')
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.incrementTicks(1)
            hasResolved = true
            resolve()
          }, 200)
        })
      }

      static get typeId () {
        return 99
      }
    }

    class testVMContainer extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.createInstance(testVMContainer2.typeId, this.kernel.createMessage({
          ports: [portRef2]
        }))
        await this.kernel.send(portRef1, m)
        this.kernel.incrementTicks(1)
        return this.kernel.ports.bind('child', portRef1)
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    hypervisor.registerContainer(testVMContainer2)

    let root = await hypervisor.createInstance(testVMContainer.typeId)
    const rootId = root.id
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.createInstance(testVMContainer.typeId, root.createMessage({
      ports: [portRef2]
    }))

    await root.ports.bind('first', portRef1)
    message = root.createMessage()

    await root.send(portRef1, message)
    const stateRoot = await hypervisor.createStateRoot(Infinity)
    t.true(hasResolved, 'should resolve before generating the state root')

    // await hypervisor.graph.tree(stateRoot, Infinity, true)
    // console.log(JSON.stringify(stateRoot, null, 2))
    t.deepEquals(stateRoot, expectedState, 'expected state')

    // test reviving the state
    class testVMContainer3 extends BaseContainer {
      onMessage (m) {
        const port = this.kernel.ports.get('child')
        this.kernel.send(port, m)
        this.kernel.incrementTicks(1)
      }
    }

    hypervisor.registerContainer(testVMContainer3)
    root = await hypervisor.getInstance(rootId)
    const port = root.ports.get('first')
    root.send(port, message)
  })

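  // throwing inside onMessage should revert any changes made while handling
  // the message, leaving the previous state root intact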
  tape('traps', async t => {
    t.plan(1)
    class Root extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const [portRef3, portRef4] = this.kernel.ports.createChannel()
        const [portRef5, portRef6] = this.kernel.ports.createChannel()

        await Promise.all([
          this.kernel.ports.bind('one', portRef1),
          this.kernel.ports.bind('two', portRef3),
          this.kernel.ports.bind('three', portRef5)
        ])

        const message1 = this.kernel.createMessage({
          ports: [portRef2]
        })
        const message2 = this.kernel.createMessage({
          ports: [portRef4]
        })
        const message3 = this.kernel.createMessage({
          ports: [portRef6]
        })

        await Promise.all([
          this.kernel.createInstance(Root.typeId, message1),
          this.kernel.createInstance(Root.typeId, message2),
          this.kernel.createInstance(Root.typeId, message3)
        ])

        throw new Error('it is a trap!!!')
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    const root = await hypervisor.createInstance(Root.typeId)
    await root.message(root.createMessage())
    const stateRoot = await hypervisor.createStateRoot()

    t.deepEquals(stateRoot, {
      '/': 'zdpuAoifKuJkWz9Fjvt79NmGq3tcefhfCyq8iM8YhcFdV9bmZ'
    }, 'should revert the state')
  })

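  // messages sent over different ports must be delivered in the right order
  // even when the first responder (Waiter) only resolves after a timeout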
  tape('receiving older messages', async t => {
    t.plan(2)
    let runs = 0

    class Root extends BaseContainer {
      async onMessage (m) {
        if (!runs) {
          runs++
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()

          const message1 = this.kernel.createMessage({
            ports: [portRef2]
          })
          const message2 = this.kernel.createMessage({
            ports: [portRef4]
          })

          await Promise.all([
            this.kernel.createInstance(First.typeId, message1),
            this.kernel.send(portRef1, this.kernel.createMessage()),
            this.kernel.send(portRef3, this.kernel.createMessage()),
            this.kernel.ports.bind('one', portRef1),
            this.kernel.ports.bind('two', portRef3)
          ])
          return this.kernel.createInstance(Waiter.typeId, message2)
        } else if (runs === 1) {
          runs++
          t.equals(m.data, 'first', 'should receive the first message')
        } else if (runs === 2) {
          runs++
          t.equals(m.data, 'second', 'should receive the second message')
        } else if (runs === 3) {
          runs++
          // t.equals(m.data, 'third', 'should receive the third message')
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'second'
        }))
      }
      static get typeId () {
        return 29
      }
    }

    class Waiter extends BaseContainer {
      onMessage (m) {
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.send(m.fromPort, this.kernel.createMessage({
              data: 'first'
            })).then(resolve)
          }, 200)
        })
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)
      hypervisor.registerContainer(Waiter)

      const root = await hypervisor.createInstance(Root.typeId)
      const [portRef1, portRef2] = root.ports.createChannel()

      const message = root.createMessage()
      await Promise.all([
        root.send(portRef1, message),
        root.ports.bind('first', portRef1),
        root.createInstance(Root.typeId, root.createMessage({
          ports: [portRef2]
        }))
      ])
    } catch (e) {
      console.log(e)
    }
  })

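  // message ordering when containers consume different numbers of ticks: the
  // replies must still arrive as 'first', 'second' and 'third'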
  tape('saturation', async t => {
    t.plan(3)
    let runs = 0

    class Root extends BaseContainer {
      onIdle () {}
      async onMessage (m) {
        if (!runs) {
          runs++
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()

          const message1 = this.kernel.createMessage({
            ports: [portRef2]
          })
          const message2 = this.kernel.createMessage({
            ports: [portRef4]
          })

          this.kernel.incrementTicks(6)
          return Promise.all([
            this.kernel.createInstance(First.typeId, message1),
            this.kernel.createInstance(Second.typeId, message2),
            this.kernel.send(portRef1, this.kernel.createMessage()),
            this.kernel.send(portRef3, this.kernel.createMessage()),
            this.kernel.ports.bind('one', portRef1),
            this.kernel.ports.bind('two', portRef3)
          ])
        } else if (runs === 1) {
          runs++
          t.equals(m.data, 'first', 'should receive the first message')
        } else if (runs === 2) {
          runs++
          t.equals(m.data, 'second', 'should receive the second message')
        } else if (runs === 3) {
          runs++
          t.equals(m.data, 'third', 'should receive the third message')
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'second'
        }))
      }
      static get typeId () {
        return 29
      }
    }

    class Second extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(3)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'third'
        }))
      }
      static get typeId () {
        return 2
      }
    }

    class Waiter extends BaseContainer {
      onCreation (m) {
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.send(m.ports[0], this.kernel.createMessage({
              data: 'first'
            })).then(resolve)
          }, 200)
        })
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)
      hypervisor.registerContainer(Second)
      hypervisor.registerContainer(Waiter)

      let root = await hypervisor.createInstance(Root.typeId)
      const [portRef1, portRef2] = root.ports.createChannel()
      const [portRef3, portRef4] = root.ports.createChannel()

      const message = root.createMessage()
      await Promise.all([
        root.send(portRef1, message),
        root.ports.bind('first', portRef1),
        root.createInstance(Root.typeId, root.createMessage({
          ports: [portRef2]
        })),
        root.ports.bind('second', portRef3),
        root.createInstance(Waiter.typeId, root.createMessage({
          ports: [portRef4]
        }))
      ])

      // root = await hypervisor.getInstance(root.id)
      root.incrementTicks(100)
      await root.send(portRef1, root.createMessage({data: 'testss'}))
      root.shutdown()
    } catch (e) {
      console.log(e)
    }
  })

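  // two messages sent to the same port in one run should be handled by the
  // same container instance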
  tape('send to the same container at the same time', async t => {
    t.plan(2)

    let runs = 0
    let instance

    class Root extends BaseContainer {
      async onMessage (m) {
        let one = this.kernel.ports.get('one')
        if (!one) {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const message1 = this.kernel.createMessage({
            ports: [portRef2]
          })
          await this.kernel.createInstance(First.typeId, message1)
          return this.kernel.ports.bind('one', portRef1)
        } else {
          return Promise.all([
            this.kernel.send(one, this.kernel.createMessage()),
            this.kernel.send(one, this.kernel.createMessage())
          ])
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        ++runs
        if (runs === 2) {
          t.equals(instance, this, 'should have same instances')
        } else {
          instance = this
        }
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)

      const root = await hypervisor.createInstance(Root.typeId)
      const [portRef1, portRef2] = root.ports.createChannel()
      await Promise.all([
        root.ports.bind('first', portRef1),
        root.createInstance(Root.typeId, root.createMessage({
          ports: [portRef2]
        }))
      ])

      const message = root.createMessage()
      await root.send(portRef1, message)
      await hypervisor.createStateRoot()
      await root.send(portRef1, root.createMessage())
      await hypervisor.createStateRoot()
      t.equals(runs, 2)
    } catch (e) {
      console.log(e)
    }
  })

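  // port API sanity checks: a bound port can neither be put into a message nor
  // bound again, while an unbound port can be sent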
  tape('checking ports', async t => {
    t.plan(4)
    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(BaseContainer)

    const root = await hypervisor.createInstance(BaseContainer.typeId)

    const [portRef1, portRef2] = root.ports.createChannel()
    root.createInstance(BaseContainer.typeId, root.createMessage({
      ports: [portRef2]
    }))
    await root.ports.bind('test', portRef1)

    try {
      root.createMessage({
        ports: [portRef1]
      })
    } catch (e) {
      t.pass('should throw if sending a port that is bound')
    }

    try {
      await root.ports.bind('test', portRef1)
    } catch (e) {
      t.pass('should throw if binding an already bound port')
    }

    try {
      const [portRef3] = root.ports.createChannel()
      await root.ports.bind('test', portRef3)
    } catch (e) {
      t.pass('should throw if binding an already bound name')
    }

    await root.ports.unbind('test')
    const message = root.createMessage({
      ports: [portRef1]
    })
    t.equals(message.ports[0], portRef1, 'should create a message if the port is unbound')
  })

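  // the child deletes its only bound port ('root'), so it should no longer
  // contribute to the resulting state root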
  tape('port deletion', async t => {
    const expectedSr = {
      '/': 'zdpuAopMy53q2uvL2a4fhVEAvwXjSDW28fh8zhQUj598tb5md'
    }
    class Root extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const message1 = this.kernel.createMessage({
          ports: [portRef2]
        })

        await this.kernel.createInstance(First.typeId, message1)
        await this.kernel.send(portRef1, this.kernel.createMessage())
        this.kernel.incrementTicks(6)
        return this.kernel.ports.bind('one', portRef1)
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.ports.delete('root')
      }
      static get typeId () {
        return 299
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(First)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(Root.typeId, root.createMessage({
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)

    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')
    await hypervisor.graph.tree(sr, Infinity, true)

    t.end()
  })

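  // instances created without any ports should be cleared from the state root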
  tape('clear unbounded ports', async t => {
    const expectedSr = {
      '/': 'zdpuAopMy53q2uvL2a4fhVEAvwXjSDW28fh8zhQUj598tb5md'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        return this.kernel.createInstance(Root.typeId)
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(Root)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(Root.typeId, root.createMessage({
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)
    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')

    t.end()
  })

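  // a subgraph that is never connected back to the root should be garbage
  // collected when the state root is created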
  tape('should remove subgraphs', async t => {
    const expectedSr = {
      '/': 'zdpuAopMy53q2uvL2a4fhVEAvwXjSDW28fh8zhQUj598tb5md'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        const [, portRef2] = this.kernel.ports.createChannel()
        return this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
          ports: [portRef2]
        }))
      }
    }

    class Sub extends BaseContainer {
      async onInitailize (message) {
        await this.kernel.ports.bind('root', message.ports[0])
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.ports.bind('child', portRef1)
        await this.kernel.createInstance(Root.typeId, this.kernel.createMessage({
          ports: [portRef2]
        }))
      }
      static get typeId () {
        return 299
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(Sub)

      const root = await hypervisor.createInstance(Root.typeId)
      const [portRef1, portRef2] = root.ports.createChannel()
      await root.ports.bind('first', portRef1)
      await root.createInstance(Root.typeId, root.createMessage({
        ports: [portRef2]
      }))

      await root.send(portRef1, root.createMessage())
      const sr = await hypervisor.createStateRoot()

      t.deepEquals(sr, expectedSr, 'should produce the correct state root')
      t.end()
    } catch (e) {
      console.log(e)
    }
  })

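  // nodes that stay reachable through the channel set up between the two Sub
  // containers must survive garbage collection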
  tape('should not remove connected nodes', async t => {
    const expectedSr = {
      '/': 'zdpuApKrsvsWknDML2Mme9FyZfRnVZ1hTCoKzkooYAWT3dUDV'
    }
    class Root extends BaseContainer {
      async onMessage (m) {
        if (m.ports.length) {
          const port = this.kernel.ports.get('test1')
          await this.kernel.send(port, m)
          return this.kernel.ports.unbind('test1')
        } else {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          await this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
            ports: [portRef2]
          }))
          await this.kernel.ports.bind('test1', portRef1)

          const [portRef3, portRef4] = this.kernel.ports.createChannel()
          await this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
            ports: [portRef4]
          }))
          await this.kernel.ports.bind('test2', portRef3)
          await this.kernel.send(portRef3, this.kernel.createMessage({
            data: 'getChannel'
          }))
        }
      }
    }

    class Sub extends BaseContainer {
      async onMessage (message) {
        if (message.data === 'getChannel') {
          const ports = this.kernel.ports.createChannel()
          await this.kernel.send(message.fromPort, this.kernel.createMessage({
            data: 'bindPort',
            ports: [ports[1]]
          }))
          return this.kernel.ports.bind('channel', ports[0])
        } else if (message.data === 'bindPort') {
          return this.kernel.ports.bind('channel', message.ports[0])
        }
      }
      static get typeId () {
        return 299
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(Sub)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(Root.typeId, root.createMessage({
      ports: [portRef2]
    }))

    await root.send(portRef1, root.createMessage())
    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')
    // await hypervisor.graph.tree(sr, Infinity)

    t.end()
  })

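  // unbinding both test ports should allow both subgraphs to be collected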
  tape('should remove multiple subgraphs', async t => {
    const expectedSr = {
      '/': 'zdpuArkZ5yNowNnU4qJ8vayAUncgibQP9goDP1CwFxdmPJF9D'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        if (m.ports.length) {
          const port = this.kernel.ports.get('test1')
          return Promise.all([
            this.kernel.send(port, m),
            this.kernel.ports.unbind('test1'),
            this.kernel.ports.unbind('test2')
          ])
        } else {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()
          return Promise.all([
            this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
              ports: [portRef2]
            })),
            this.kernel.ports.bind('test1', portRef1),
            this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
              ports: [portRef4]
            })),
            this.kernel.ports.bind('test2', portRef3),
            this.kernel.send(portRef3, this.kernel.createMessage({
              data: 'getChannel'
            }))
          ])
        }
      }
    }

    class Sub extends BaseContainer {
      async onMessage (message) {
        if (message.data === 'getChannel') {
          const ports = this.kernel.ports.createChannel()
          await this.kernel.send(message.fromPort, this.kernel.createMessage({
            data: 'bindPort',
            ports: [ports[1]]
          }))
          return this.kernel.ports.bind('channel', ports[0])
        } else if (message.data === 'bindPort') {
          return this.kernel.ports.bind('channel', message.ports[0])
        }
      }
      static get typeId () {
        return 299
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(Sub)

      const root = await hypervisor.createInstance(Root.typeId)

      const [portRef1, portRef2] = root.ports.createChannel()
      await Promise.all([
        root.ports.bind('first', portRef1),
        root.createInstance(Root.typeId, root.createMessage({
          ports: [portRef2]
        })),
        root.send(portRef1, root.createMessage())
      ])

      const sr = await hypervisor.createStateRoot()
      t.deepEquals(sr, expectedSr, 'should produce the correct state root')

      t.end()
    } catch (e) {
      console.log(e)
    }
  })

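  // a value returned from onMessage comes back over the message's response
  // port; getResponsePort returns the same port for the same message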
  tape('response ports', async t => {
    t.plan(2)
    let runs = 0
    const returnValue = 'this is a test'

    class testVMContainer extends BaseContainer {
      onMessage (m) {
        runs++
        if (runs === 1) {
          return returnValue
        } else {
          t.equals(m.data, returnValue, 'should have correct return value')
        }
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(testVMContainer)

    const rootContainer = await hypervisor.createInstance(testVMContainer.typeId)

    const [portRef1, portRef2] = rootContainer.ports.createChannel()
    const initMessage = rootContainer.createMessage({
      ports: [portRef2]
    })

    rootContainer.createInstance(testVMContainer.typeId, initMessage)

    await rootContainer.ports.bind('first', portRef1)
    const message = rootContainer.createMessage()
    const rPort = rootContainer.getResponsePort(message)
    const rPort2 = rootContainer.getResponsePort(message)

    t.equals(rPort2, rPort)

    rootContainer.send(portRef1, message)
    await rootContainer.ports.bind('response', rPort)
  })

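  // onStartup should fire when the container instance is started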
  tape('start up', async t => {
    t.plan(1)
    class testVMContainer extends BaseContainer {
      onMessage () {}
      onStartup () {
        t.true(true, 'should start up')
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    await hypervisor.createInstance(testVMContainer.typeId)
    hypervisor.getInstance(hypervisor.ROOT_ID)
  })

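  // roughly 1 MB of code should round-trip through the state unchanged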
  tape('large code size', async t => {
    t.plan(1)
    const content = Buffer.from(new ArrayBuffer(1000000))
    class testVMContainer extends BaseContainer {
      onMessage () {}
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    await hypervisor.createInstance(testVMContainer.typeId, new Message({data: content}))
    const instance = await hypervisor.getInstance(hypervisor.ROOT_ID)
    t.equals(content.length, instance.code.length)
  })
})