const tape = require('tape')
const IPFS = require('ipfs')
const AbstractContainer = require('primea-abstract-container')
const Message = require('primea-message')
const Hypervisor = require('../')

// start ipfs
const node = new IPFS({
  start: false
})

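// Shared fixture for the tests below: a minimal container that, on creation,
// binds the first port it was handed (if any) under the name 'root'.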
class BaseContainer extends AbstractContainer {
  onCreation (message) {
    const port = message.ports[0]
    if (port) {
      return this.kernel.ports.bind('root', port)
    }
  }
  static get typeId () {
    return 9
  }
}

node.on('ready', () => {
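  // Sanity check: spawn a child container over a fresh channel, send it a
  // message, and verify that the exact message object arrives and that the
  // resulting state root matches the expected hash.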
  tape('basic', async t => {
    t.plan(3)
    let message
    const expectedState = {
      '/': 'zdpuAqbcQhgu2T2MBgHbYu1MtHXyZzNsCaQjTPTR6NN9s5hbk'
    }

    class testVMContainer extends BaseContainer {
      onMessage (m) {
        t.true(m === message, 'should receive a message')
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)
      hypervisor.registerContainer(testVMContainer)

      const rootContainer = await hypervisor.createInstance(new Message({
        data: {
          type: testVMContainer.typeId
        }
      }))

      const [portRef1, portRef2] = rootContainer.ports.createChannel()
      const initMessage = rootContainer.createMessage({
        data: {
          code: Buffer.from('test code'),
          type: testVMContainer.typeId
        },
        ports: [portRef2]
      })

      await rootContainer.createInstance(initMessage)

      await rootContainer.ports.bind('first', portRef1)
      message = rootContainer.createMessage()
      await rootContainer.send(portRef1, message)

      // console.log(JSON.stringify(hypervisor.state, null, 2))
      const stateRoot = await hypervisor.createStateRoot(Infinity)
      t.deepEquals(stateRoot, expectedState, 'expected root!')

      t.equals(hypervisor.scheduler.leastNumberOfTicks(), 0)
    } catch (e) {
      console.log(e)
    }
  })

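  // A container whose onCreation binds nothing should not be persisted; only
  // the resulting state root is checked.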
  tape('basic - do not store containers with no ports bound', async t => {
    t.plan(1)
    const expectedState = {
      '/': 'zdpuAop4nt8pqzg7duciSYbZmWfDaBiz87RCtGCbb35ewUrbW'
    }

    class testVMContainer extends BaseContainer {
      onCreation () {}
    }

    try {
      const hypervisor = new Hypervisor(node.dag)
      hypervisor.registerContainer(testVMContainer)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: testVMContainer.typeId
        }
      }))
      const [portRef1, portRef2] = root.ports.createChannel()

      await root.ports.bind('one', portRef1)
      await root.createInstance(root.createMessage({
        data: {
          type: testVMContainer.typeId
        },
        ports: [portRef2]
      }))

      const stateRoot = await hypervisor.createStateRoot(Infinity)

      // await hypervisor.graph.tree(stateRoot, Infinity, true)
      // console.log(JSON.stringify(stateRoot, null, 2))
      t.deepEquals(stateRoot, expectedState, 'expected root!')
    } catch (e) {
      console.log(e)
    }
  })

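  // Exercises nested instance creation: the child spawns a grandchild that
  // resolves its message asynchronously, so createStateRoot has to wait for
  // it. The saved root is then revived via getInstance and messaged again.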
  tape('one child contract', async t => {
    t.plan(4)
    let message
    const expectedState = {
      '/': 'zdpuB2Huo3ro3Fv9mpMhnUcL3jjd37T6MJ6jEd8GvA2cpvaYR'
    }
    let hasResolved = false

    class testVMContainer2 extends BaseContainer {
      onMessage (m) {
        t.true(m === message, 'should receive a message')
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.incrementTicks(1)
            hasResolved = true
            resolve()
          }, 200)
        })
      }

      static get typeId () {
        return 99
      }
    }

    class testVMContainer extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.createInstance(this.kernel.createMessage({
          data: {
            type: testVMContainer2.typeId
          },
          ports: [portRef2]
        }))
        await this.kernel.send(portRef1, m)
        this.kernel.incrementTicks(1)
        return this.kernel.ports.bind('child', portRef1)
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    hypervisor.registerContainer(testVMContainer2)

    let root = await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId
      }
    }))
    const rootId = root.id
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.createInstance(root.createMessage({
      data: {
        type: testVMContainer.typeId
      },
      ports: [portRef2]
    }))

    await root.ports.bind('first', portRef1)
    message = root.createMessage()

    await root.send(portRef1, message)
    const stateRoot = await hypervisor.createStateRoot(Infinity)
    t.true(hasResolved, 'should resolve before generating the state root')

    // await hypervisor.graph.tree(stateRoot, Infinity, true)
    // console.log(JSON.stringify(stateRoot, null, 2))
    t.deepEquals(stateRoot, expectedState, 'expected state')

    // test reviving the state
    class testVMContainer3 extends BaseContainer {
      onMessage (m) {
        const port = this.kernel.ports.get('child')
        this.kernel.send(port, m)
        this.kernel.incrementTicks(1)
      }
    }

    hypervisor.registerContainer(testVMContainer3)
    root = await hypervisor.getInstance(rootId)
    const port = root.ports.get('first')
    root.send(port, message)
  })

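  // A container that throws during onMessage should have all of its pending
  // changes (new channels, binds, child instances) reverted in the state root.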
  tape('traps', async t => {
    t.plan(1)
    class Root extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const [portRef3, portRef4] = this.kernel.ports.createChannel()
        const [portRef5, portRef6] = this.kernel.ports.createChannel()

        await Promise.all([
          this.kernel.ports.bind('one', portRef1),
          this.kernel.ports.bind('two', portRef3),
          this.kernel.ports.bind('three', portRef5)
        ])

        const message1 = this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        })
        const message2 = this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef4]
        })
        const message3 = this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef6]
        })

        await Promise.all([
          this.kernel.createInstance(message1),
          this.kernel.createInstance(message2),
          this.kernel.createInstance(message3)
        ])

        throw new Error('it is a trap!!!')
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))
    await root.message(root.createMessage())
    const stateRoot = await hypervisor.createStateRoot()

    t.deepEquals(stateRoot, {
      '/': 'zdpuAwAZnRgD7ZKH8ssU9UdpFTsw3Q4gecKKyRoDsD4obhpJm'
    }, 'should revert the state')
  })

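  // Message ordering: a slow container ('Waiter') replies after a timeout
  // while a fast one ('First') replies right away after burning ticks; the
  // root should still see 'first' before 'second'.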
  tape('receiving older messages', async t => {
    t.plan(2)
    let runs = 0

    class Root extends BaseContainer {
      async onMessage (m) {
        if (!runs) {
          runs++
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()

          const message1 = this.kernel.createMessage({
            data: {
              type: First.typeId
            },
            ports: [portRef2]
          })
          const message2 = this.kernel.createMessage({
            data: {
              type: Waiter.typeId
            },
            ports: [portRef4]
          })

          await Promise.all([
            this.kernel.createInstance(message1),
            this.kernel.send(portRef1, this.kernel.createMessage()),
            this.kernel.send(portRef3, this.kernel.createMessage()),
            this.kernel.ports.bind('one', portRef1),
            this.kernel.ports.bind('two', portRef3)
          ])
          return this.kernel.createInstance(message2)
        } else if (runs === 1) {
          runs++
          t.equals(m.data, 'first', 'should receive the first message')
        } else if (runs === 2) {
          runs++
          t.equals(m.data, 'second', 'should receive the second message')
        } else if (runs === 3) {
          runs++
          // t.equals(m.data, 'third', 'should receive the third message')
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'second'
        }))
      }
      static get typeId () {
        return 29
      }
    }

    class Waiter extends BaseContainer {
      onMessage (m) {
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.send(m.fromPort, this.kernel.createMessage({
              data: 'first'
            })).then(resolve)
          }, 200)
        })
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)
      hypervisor.registerContainer(Waiter)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))
      const [portRef1, portRef2] = root.ports.createChannel()

      const message = root.createMessage()
      await Promise.all([
        root.send(portRef1, message),
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        }))
      ])
    } catch (e) {
      console.log(e)
    }
  })

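  // Same ordering idea with an extra container and larger tick counts; the
  // root expects 'first', 'second' and 'third' in that order before it is
  // shut down.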
  tape('saturation', async t => {
    t.plan(3)
    let runs = 0

    class Root extends BaseContainer {
      onIdle () {}
      async onMessage (m) {
        if (!runs) {
          runs++
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()

          const message1 = this.kernel.createMessage({
            data: {
              type: First.typeId
            },
            ports: [portRef2]
          })
          const message2 = this.kernel.createMessage({
            data: {
              type: Second.typeId
            },
            ports: [portRef4]
          })

          this.kernel.incrementTicks(6)
          return Promise.all([
            this.kernel.createInstance(message1),
            this.kernel.createInstance(message2),
            this.kernel.send(portRef1, this.kernel.createMessage()),
            this.kernel.send(portRef3, this.kernel.createMessage()),
            this.kernel.ports.bind('one', portRef1),
            this.kernel.ports.bind('two', portRef3)
          ])
        } else if (runs === 1) {
          runs++
          t.equals(m.data, 'first', 'should receive the first message')
        } else if (runs === 2) {
          runs++
          t.equals(m.data, 'second', 'should receive the second message')
        } else if (runs === 3) {
          runs++
          t.equals(m.data, 'third', 'should receive the third message')
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'second'
        }))
      }
      static get typeId () {
        return 29
      }
    }

    class Second extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(3)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'third'
        }))
      }
      static get typeId () {
        return 2
      }
    }

    class Waiter extends BaseContainer {
      onCreation (m) {
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.send(m.ports[0], this.kernel.createMessage({
              data: 'first'
            })).then(resolve)
          }, 200)
        })
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)
      hypervisor.registerContainer(Second)
      hypervisor.registerContainer(Waiter)

      let root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))
      const [portRef1, portRef2] = root.ports.createChannel()
      const [portRef3, portRef4] = root.ports.createChannel()

      const message = root.createMessage()
      await Promise.all([
        root.send(portRef1, message),
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        })),
        root.ports.bind('second', portRef3),
        root.createInstance(root.createMessage({
          data: {
            type: Waiter.typeId
          },
          ports: [portRef4]
        }))
      ])

      // root = await hypervisor.getInstance(root.id)
      root.incrementTicks(100)
      await root.send(portRef1, root.createMessage({
        data: 'testss'
      }))
      root.shutdown()
    } catch (e) {
      console.log(e)
    }
  })

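  // Two messages sent to the same port in one turn should be handled by the
  // same loaded container instance, not by two separate copies.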
  tape('send to the same container at the same time', async t => {
    t.plan(2)

    let runs = 0
    let instance

    class Root extends BaseContainer {
      async onMessage (m) {
        let one = this.kernel.ports.get('one')
        if (!one) {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const message1 = this.kernel.createMessage({
            data: {
              type: First.typeId
            },
            ports: [portRef2]
          })
          await this.kernel.createInstance(message1)
          return this.kernel.ports.bind('one', portRef1)
        } else {
          return Promise.all([
            this.kernel.send(one, this.kernel.createMessage()),
            this.kernel.send(one, this.kernel.createMessage())
          ])
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        ++runs
        if (runs === 2) {
          t.equals(instance, this, 'should have same instances')
        } else {
          instance = this
        }
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))
      const [portRef1, portRef2] = root.ports.createChannel()
      await Promise.all([
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        }))
      ])

      const message = root.createMessage()
      await root.send(portRef1, message)
      await hypervisor.createStateRoot()
      await root.send(portRef1, root.createMessage())
      await hypervisor.createStateRoot()
      t.equals(runs, 2)
    } catch (e) {
      console.log(e)
    }
  })

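  // Port API error cases: sending a bound port, rebinding an already bound
  // port, and reusing a bound name should all throw; an unbound port can be
  // put into a message again.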
  tape('checking ports', async t => {
    t.plan(4)
    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(BaseContainer)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: BaseContainer.typeId
      }
    }))

    const [portRef1, portRef2] = root.ports.createChannel()
    root.createInstance(root.createMessage({
      data: {
        type: BaseContainer.typeId
      },
      ports: [portRef2]
    }))
    await root.ports.bind('test', portRef1)

    try {
      root.createMessage({
        ports: [portRef1]
      })
    } catch (e) {
      t.pass('should throw if sending a port that is bound')
    }

    try {
      await root.ports.bind('test', portRef1)
    } catch (e) {
      t.pass('should throw if binding an already bound port')
    }

    try {
      const [portRef3] = root.ports.createChannel()
      await root.ports.bind('test', portRef3)
    } catch (e) {
      t.pass('should throw if binding an already bound name')
    }

    await root.ports.unbind('test')
    const message = root.createMessage({
      ports: [portRef1]
    })
    t.equals(message.ports[0], portRef1, 'should create a message if the port is unbound')
  })

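  // The child deletes its inherited 'root' port; the resulting state root is
  // checked against the same hash as the other garbage-collection tests below.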
  tape('port deletion', async t => {
    const expectedSr = {
      '/': 'zdpuAxKfu5nMTfpz6uHPqXdHZFQDZdRUer8zcQ6nvC4pTQsop'
    }
    class Root extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const message1 = this.kernel.createMessage({
          data: {
            type: First.typeId
          },
          ports: [portRef2]
        })

        await this.kernel.createInstance(message1)
        await this.kernel.send(portRef1, this.kernel.createMessage())
        this.kernel.incrementTicks(6)
        return this.kernel.ports.bind('one', portRef1)
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.ports.delete('root')
      }
      static get typeId () {
        return 299
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(First)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(root.createMessage({
      data: {
        type: Root.typeId
      },
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)

    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')
    await hypervisor.graph.tree(sr, Infinity, true)

    t.end()
  })

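  // A child created without passing along any ports should not affect the
  // state root once unbound ports are cleared.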
  tape('clear unbound ports', async t => {
    const expectedSr = {
      '/': 'zdpuAxKfu5nMTfpz6uHPqXdHZFQDZdRUer8zcQ6nvC4pTQsop'
    }

    class Root extends BaseContainer {
      onMessage (m) {
        return this.kernel.createInstance(new Message({
          data: {
            type: Root.typeId
          }
        }))
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(Root)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(root.createMessage({
      data: {
        type: Root.typeId
      },
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)
    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')

    t.end()
  })

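  // A subgraph that is only reachable through a channel that never gets bound
  // should be collected when the state root is created.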
  tape('should remove subgraphs', async t => {
    const expectedSr = {
      '/': 'zdpuAxKfu5nMTfpz6uHPqXdHZFQDZdRUer8zcQ6nvC4pTQsop'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        const [, portRef2] = this.kernel.ports.createChannel()
        return this.kernel.createInstance(this.kernel.createMessage({
          data: {
            type: Sub.typeId
          },
          ports: [portRef2]
        }))
      }
    }

    class Sub extends BaseContainer {
      async onInitailize (message) {
        await this.kernel.ports.bind('root', message.ports[0])
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.ports.bind('child', portRef1)
        await this.kernel.createInstance(this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        }))
      }
      static get typeId () {
        return 299
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(Sub)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))
      const [portRef1, portRef2] = root.ports.createChannel()
      await root.ports.bind('first', portRef1)
      await root.createInstance(root.createMessage({
        data: {
          type: Root.typeId
        },
        ports: [portRef2]
      }))

      await root.send(portRef1, root.createMessage())
      const sr = await hypervisor.createStateRoot()

      t.deepEquals(sr, expectedSr, 'should produce the correct state root')
      t.end()
    } catch (e) {
      console.log(e)
    }
  })

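  // Counterpart to the test above: nodes that stay reachable through a
  // forwarded channel must survive the unbinding of the original port.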
  tape('should not remove connected nodes', async t => {
    const expectedSr = {
      '/': 'zdpuAr4A3i1t6B7BkLT9C7DoxwvFnNg74gEzyqhpFj7nqVBy6'
    }
    class Root extends BaseContainer {
      async onMessage (m) {
        if (m.ports.length) {
          const port = this.kernel.ports.get('test1')
          await this.kernel.send(port, m)
          return this.kernel.ports.unbind('test1')
        } else {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          await this.kernel.createInstance(this.kernel.createMessage({
            data: {
              type: Sub.typeId
            },
            ports: [portRef2]
          }))
          await this.kernel.ports.bind('test1', portRef1)

          const [portRef3, portRef4] = this.kernel.ports.createChannel()
          await this.kernel.createInstance(this.kernel.createMessage({
            data: {
              type: Sub.typeId
            },
            ports: [portRef4]
          }))
          await this.kernel.ports.bind('test2', portRef3)
          await this.kernel.send(portRef3, this.kernel.createMessage({
            data: 'getChannel'
          }))
        }
      }
    }

    class Sub extends BaseContainer {
      async onMessage (message) {
        if (message.data === 'getChannel') {
          const ports = this.kernel.ports.createChannel()
          await this.kernel.send(message.fromPort, this.kernel.createMessage({
            data: 'bindPort',
            ports: [ports[1]]
          }))
          return this.kernel.ports.bind('channel', ports[0])
        } else if (message.data === 'bindPort') {
          return this.kernel.ports.bind('channel', message.ports[0])
        }
      }
      static get typeId () {
        return 299
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(Sub)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(root.createMessage({
      data: {
        type: Root.typeId
      },
      ports: [portRef2]
    }))

    await root.send(portRef1, root.createMessage())
    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')
    // await hypervisor.graph.tree(sr, Infinity)

    t.end()
  })

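  // Unbinding both 'test1' and 'test2' after forwarding a channel leaves two
  // disconnected subgraphs, both of which should be collected.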
  tape('should remove multiple subgraphs', async t => {
    const expectedSr = {
      '/': 'zdpuAzYGmZeZsi5Zer7LXCTm1AsmqpUMJAXZnEeFW2UVDZj2P'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        if (m.ports.length) {
          const port = this.kernel.ports.get('test1')
          return Promise.all([
            this.kernel.send(port, m),
            this.kernel.ports.unbind('test1'),
            this.kernel.ports.unbind('test2')
          ])
        } else {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()
          return Promise.all([
            this.kernel.createInstance(this.kernel.createMessage({
              data: {
                type: Sub.typeId
              },
              ports: [portRef2]
            })),
            this.kernel.ports.bind('test1', portRef1),
            this.kernel.createInstance(this.kernel.createMessage({
              data: {
                type: Sub.typeId
              },
              ports: [portRef4]
            })),
            this.kernel.ports.bind('test2', portRef3),
            this.kernel.send(portRef3, this.kernel.createMessage({
              data: 'getChannel'
            }))
          ])
        }
      }
    }

    class Sub extends BaseContainer {
      async onMessage (message) {
        if (message.data === 'getChannel') {
          const ports = this.kernel.ports.createChannel()
          await this.kernel.send(message.fromPort, this.kernel.createMessage({
            data: 'bindPort',
            ports: [ports[1]]
          }))
          return this.kernel.ports.bind('channel', ports[0])
        } else if (message.data === 'bindPort') {
          return this.kernel.ports.bind('channel', message.ports[0])
        }
      }
      static get typeId () {
        return 299
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(Sub)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))

      const [portRef1, portRef2] = root.ports.createChannel()
      await Promise.all([
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        })),
        root.send(portRef1, root.createMessage())
      ])

      const sr = await hypervisor.createStateRoot()
      t.deepEquals(sr, expectedSr, 'should produce the correct state root')

      t.end()
    } catch (e) {
      console.log(e)
    }
  })

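  // getResponsePort should return the same port for repeated calls on one
  // message, and the value a handler returns should come back as data on the
  // response port.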
  tape('response ports', async t => {
    t.plan(2)
    let runs = 0
    const returnValue = 'this is a test'

    class testVMContainer extends BaseContainer {
      onMessage (m) {
        runs++
        if (runs === 1) {
          return returnValue
        } else {
          t.equals(m.data, returnValue, 'should have correct return value')
        }
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(testVMContainer)

    const rootContainer = await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId
      }
    }))

    const [portRef1, portRef2] = rootContainer.ports.createChannel()
    const initMessage = rootContainer.createMessage({
      data: {
        type: testVMContainer.typeId
      },
      ports: [portRef2]
    })

    rootContainer.createInstance(initMessage)

    await rootContainer.ports.bind('first', portRef1)
    const message = rootContainer.createMessage()
    const rPort = rootContainer.getResponsePort(message)
    const rPort2 = rootContainer.getResponsePort(message)

    t.equals(rPort2, rPort)

    rootContainer.send(portRef1, message)
    await rootContainer.ports.bind('response', rPort)
  })

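  // onStartup should fire exactly once, presumably when the stored instance
  // is loaded again via getInstance.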
  tape('start up', async t => {
    t.plan(1)
    class testVMContainer extends BaseContainer {
      onMessage () {}
      onStartup () {
        t.true(true, 'should start up')
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId
      }
    }))
    hypervisor.getInstance(hypervisor.ROOT_ID)
  })

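  // A ~1 MB code buffer should round-trip through the DAG store intact.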
  tape('large code size', async t => {
    t.plan(1)
    const content = Buffer.from(new ArrayBuffer(1000000))
    class testVMContainer extends BaseContainer {
      onMessage () {}
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId,
        code: content
      }
    }))
    const instance = await hypervisor.getInstance(hypervisor.ROOT_ID)
    t.equals(content.length, instance.code.length)
  })
})