const tape = require('tape')
const IPFS = require('ipfs')
const AbstractContainer = require('primea-abstract-container')
const Message = require('primea-message')
const Hypervisor = require('../')

// start ipfs
const node = new IPFS({
  start: false
})

class BaseContainer extends AbstractContainer {
  onCreation (message) {
    const port = message.ports[0]
    if (port) {
      return this.kernel.ports.bind('root', port)
    }
  }
  static get typeId () {
    return 9
  }
}

node.on('ready', () => {
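  // basic: spawn a child container over a new channel, send it a message and
  // check the resulting state root and the scheduler's tick count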
  tape('basic', async t => {
    t.plan(3)
    let message
    const expectedState = {
      '/': 'zdpuAqbcQhgu2T2MBgHbYu1MtHXyZzNsCaQjTPTR6NN9s5hbk'
    }

    class testVMContainer extends BaseContainer {
      onMessage (m) {
        t.true(m === message, 'should receive a message')
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)
      hypervisor.registerContainer(testVMContainer)

      const rootContainer = await hypervisor.createInstance(new Message({
        data: {
          type: testVMContainer.typeId
        }
      }))

      hypervisor.pin(rootContainer)

      const [portRef1, portRef2] = rootContainer.ports.createChannel()
      const initMessage = rootContainer.createMessage({
        data: {
          code: Buffer.from('test code'),
          type: testVMContainer.typeId
        },
        ports: [portRef2]
      })

      await rootContainer.createInstance(initMessage)

      await rootContainer.ports.bind('first', portRef1)
      message = rootContainer.createMessage()
      await rootContainer.send(portRef1, message)

      // console.log(JSON.stringify(hypervisor.state, null, 2))
      const stateRoot = await hypervisor.createStateRoot(Infinity)
      t.deepEquals(stateRoot, expectedState, 'expected root!')

      t.equals(hypervisor.scheduler.leastNumberOfTicks(), 0)
    } catch (e) {
      console.log(e)
    }
  })

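  // the state root should not include containers that never bound any ports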
  tape('basic - do not store containers with no ports bound', async t => {
    t.plan(1)
    const expectedState = {
      '/': 'zdpuAop4nt8pqzg7duciSYbZmWfDaBiz87RCtGCbb35ewUrbW'
    }

    class testVMContainer extends BaseContainer {
      onCreation () {}
    }

    try {
      const hypervisor = new Hypervisor(node.dag)
      hypervisor.registerContainer(testVMContainer)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: testVMContainer.typeId
        }
      }))

      hypervisor.pin(root)
      const [portRef1, portRef2] = root.ports.createChannel()

      await root.ports.bind('one', portRef1)
      await root.createInstance(root.createMessage({
        data: {
          type: testVMContainer.typeId
        },
        ports: [portRef2]
      }))

      const stateRoot = await hypervisor.createStateRoot(Infinity)

      // await hypervisor.graph.tree(stateRoot, Infinity, true)
      // console.log(JSON.stringify(stateRoot, null, 2))
      t.deepEquals(stateRoot, expectedState, 'expected root!')
    } catch (e) {
      console.log(e)
    }
  })

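  // a container creates a child, relays a message to it, and the persisted
  // state can later be revived and messaged again with a new container type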
  tape('one child contract', async t => {
    t.plan(4)
    let message
    const expectedState = {
      '/': 'zdpuB2Huo3ro3Fv9mpMhnUcL3jjd37T6MJ6jEd8GvA2cpvaYR'
    }
    let hasResolved = false

    class testVMContainer2 extends BaseContainer {
      onMessage (m) {
        t.true(m === message, 'should receive a message')
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.incrementTicks(1)
            hasResolved = true
            resolve()
          }, 200)
        })
      }

      static get typeId () {
        return 99
      }
    }

    class testVMContainer extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.createInstance(this.kernel.createMessage({
          data: {
            type: testVMContainer2.typeId
          },
          ports: [portRef2]
        }))
        await this.kernel.send(portRef1, m)
        this.kernel.incrementTicks(1)
        return this.kernel.ports.bind('child', portRef1)
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    hypervisor.registerContainer(testVMContainer2)

    let root = await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId
      }
    }))

    hypervisor.pin(root)

    const rootId = root.id
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.createInstance(root.createMessage({
      data: {
        type: testVMContainer.typeId
      },
      ports: [portRef2]
    }))

    await root.ports.bind('first', portRef1)
    message = root.createMessage()

    await root.send(portRef1, message)
    const stateRoot = await hypervisor.createStateRoot(Infinity)
    t.true(hasResolved, 'should resolve before generating the state root')

    // await hypervisor.graph.tree(stateRoot, Infinity, true)
    // console.log(JSON.stringify(stateRoot, null, 2))
    t.deepEquals(stateRoot, expectedState, 'expected state')

    // test reviving the state
    class testVMContainer3 extends BaseContainer {
      onMessage (m) {
        const port = this.kernel.ports.get('child')
        this.kernel.send(port, m)
        this.kernel.incrementTicks(1)
      }
    }

    hypervisor.registerContainer(testVMContainer3)
    root = await hypervisor.getInstance(rootId)
    const port = root.ports.get('first')
    root.send(port, message)
  })

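  // an exception thrown from onMessage should roll the container's state back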
  tape('traps', async t => {
    t.plan(1)
    class Root extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const [portRef3, portRef4] = this.kernel.ports.createChannel()
        const [portRef5, portRef6] = this.kernel.ports.createChannel()

        await Promise.all([
          this.kernel.ports.bind('one', portRef1),
          this.kernel.ports.bind('two', portRef3),
          this.kernel.ports.bind('three', portRef5)
        ])

        const message1 = this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        })
        const message2 = this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef4]
        })
        const message3 = this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef6]
        })

        await Promise.all([
          this.kernel.createInstance(message1),
          this.kernel.createInstance(message2),
          this.kernel.createInstance(message3)
        ])

        throw new Error('it is a trap!!!')
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))

    hypervisor.pin(root)

    await root.message(root.createMessage())
    const stateRoot = await hypervisor.createStateRoot()

    t.deepEquals(stateRoot, {
      '/': 'zdpuAwAZnRgD7ZKH8ssU9UdpFTsw3Q4gecKKyRoDsD4obhpJm'
    }, 'should revert the state')
  })

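  // message ordering is decided by ticks rather than wall-clock time: the
  // 'first' reply is delayed by 200ms but must still arrive before 'second'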
  tape('receiving older messages', async t => {
    t.plan(2)
    let runs = 0

    class Root extends BaseContainer {
      async onMessage (m) {
        if (!runs) {
          runs++
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()

          const message1 = this.kernel.createMessage({
            data: {
              type: First.typeId
            },
            ports: [portRef2]
          })
          const message2 = this.kernel.createMessage({
            data: {
              type: Waiter.typeId
            },
            ports: [portRef4]
          })

          await Promise.all([
            this.kernel.createInstance(message1),
            this.kernel.send(portRef1, this.kernel.createMessage()),
            this.kernel.send(portRef3, this.kernel.createMessage()),
            this.kernel.ports.bind('one', portRef1),
            this.kernel.ports.bind('two', portRef3)
          ])
          return this.kernel.createInstance(message2)
        } else if (runs === 1) {
          runs++
          t.equals(m.data, 'first', 'should receive the first message')
        } else if (runs === 2) {
          runs++
          t.equals(m.data, 'second', 'should receive the second message')
        } else if (runs === 3) {
          runs++
          // t.equals(m.data, 'third', 'should receive the third message')
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'second'
        }))
      }
      static get typeId () {
        return 29
      }
    }

    class Waiter extends BaseContainer {
      onMessage (m) {
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.send(m.fromPort, this.kernel.createMessage({
              data: 'first'
            })).then(resolve)
          }, 200)
        })
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)
      hypervisor.registerContainer(Waiter)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))

      hypervisor.pin(root)
      const [portRef1, portRef2] = root.ports.createChannel()

      const message = root.createMessage()
      await Promise.all([
        root.send(portRef1, message),
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        }))
      ])
    } catch (e) {
      console.log(e)
    }
  })

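  // saturation: even with many messages in flight, delivery order follows the
  // senders' tick counts ('first', then 'second', then 'third')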
  tape('saturation', async t => {
    t.plan(3)
    let runs = 0

    class Root extends BaseContainer {
      onIdle () {}
      async onMessage (m) {
        if (!runs) {
          runs++
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()

          const message1 = this.kernel.createMessage({
            data: {
              type: First.typeId
            },
            ports: [portRef2]
          })
          const message2 = this.kernel.createMessage({
            data: {
              type: Second.typeId
            },
            ports: [portRef4]
          })

          this.kernel.incrementTicks(6)
          return Promise.all([
            this.kernel.createInstance(message1),
            this.kernel.createInstance(message2),
            this.kernel.send(portRef1, this.kernel.createMessage()),
            this.kernel.send(portRef3, this.kernel.createMessage()),
            this.kernel.ports.bind('one', portRef1),
            this.kernel.ports.bind('two', portRef3)
          ])
        } else if (runs === 1) {
          runs++
          t.equals(m.data, 'first', 'should receive the first message')
        } else if (runs === 2) {
          runs++
          t.equals(m.data, 'second', 'should receive the second message')
        } else if (runs === 3) {
          runs++
          t.equals(m.data, 'third', 'should receive the third message')
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'second'
        }))
      }
      static get typeId () {
        return 29
      }
    }

    class Second extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(3)
        return this.kernel.send(m.fromPort, this.kernel.createMessage({
          data: 'third'
        }))
      }
      static get typeId () {
        return 2
      }
    }

    class Waiter extends BaseContainer {
      onCreation (m) {
        return new Promise((resolve, reject) => {
          setTimeout(() => {
            this.kernel.send(m.ports[0], this.kernel.createMessage({
              data: 'first'
            })).then(resolve)
          }, 200)
        })
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)
      hypervisor.registerContainer(Second)
      hypervisor.registerContainer(Waiter)

      let root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))

      hypervisor.pin(root)

      const [portRef1, portRef2] = root.ports.createChannel()
      const [portRef3, portRef4] = root.ports.createChannel()

      const message = root.createMessage()
      await Promise.all([
        root.send(portRef1, message),
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        })),
        root.ports.bind('second', portRef3),
        root.createInstance(root.createMessage({
          data: {
            type: Waiter.typeId
          },
          ports: [portRef4]
        }))
      ])

      // root = await hypervisor.getInstance(root.id)
      root.incrementTicks(100)
      await root.send(portRef1, root.createMessage({
        data: 'testss'
      }))
      root.shutdown()
    } catch (e) {
      console.log(e)
    }
  })

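  // two messages sent to the same container in one go should be handled by a
  // single instance of that container, not two separate ones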
  tape('send to the same container at the same time', async t => {
    t.plan(2)

    let runs = 0
    let instance

    class Root extends BaseContainer {
      async onMessage (m) {
        let one = this.kernel.ports.get('one')
        if (!one) {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const message1 = this.kernel.createMessage({
            data: {
              type: First.typeId
            },
            ports: [portRef2]
          })
          await this.kernel.createInstance(message1)
          return this.kernel.ports.bind('one', portRef1)
        } else {
          return Promise.all([
            this.kernel.send(one, this.kernel.createMessage()),
            this.kernel.send(one, this.kernel.createMessage())
          ])
        }
      }
      static get typeId () {
        return 299
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        ++runs
        if (runs === 2) {
          t.equals(instance, this, 'should have same instances')
        } else {
          instance = this
        }
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(First)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))

      hypervisor.pin(root)

      const [portRef1, portRef2] = root.ports.createChannel()
      await Promise.all([
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        }))
      ])

      const message = root.createMessage()
      await root.send(portRef1, message)
      await hypervisor.createStateRoot()
      await root.send(portRef1, root.createMessage())
      await hypervisor.createStateRoot()
      t.equals(runs, 2)
    } catch (e) {
      console.log(e)
    }
  })

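  // port bookkeeping: bound ports cannot be put in a message or bound twice,
  // and a bound name cannot be reused until the port is unbound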
  tape('checking ports', async t => {
    t.plan(4)
    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(BaseContainer)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: BaseContainer.typeId
      }
    }))

    hypervisor.pin(root)

    const [portRef1, portRef2] = root.ports.createChannel()
    root.createInstance(root.createMessage({
      data: {
        type: BaseContainer.typeId
      },
      ports: [portRef2]
    }))
    await root.ports.bind('test', portRef1)

    try {
      root.createMessage({
        ports: [portRef1]
      })
    } catch (e) {
      t.pass('should throw if sending a port that is bound')
    }

    try {
      await root.ports.bind('test', portRef1)
    } catch (e) {
      t.pass('should throw if binding an already bound port')
    }

    try {
      const [portRef3] = root.ports.createChannel()
      await root.ports.bind('test', portRef3)
    } catch (e) {
      t.pass('should throw if binding an already bound name')
    }

    await root.ports.unbind('test')
    const message = root.createMessage({
      ports: [portRef1]
    })
    t.equals(message.ports[0], portRef1, 'should create a message if the port is unbound')
  })

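  // once a container deletes its only port it becomes unreachable and should
  // no longer appear in the state root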
  tape('port deletion', async t => {
    const expectedSr = {
      '/': 'zdpuAxKfu5nMTfpz6uHPqXdHZFQDZdRUer8zcQ6nvC4pTQsop'
    }
    class Root extends BaseContainer {
      async onMessage (m) {
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const message1 = this.kernel.createMessage({
          data: {
            type: First.typeId
          },
          ports: [portRef2]
        })

        await this.kernel.createInstance(message1)
        await this.kernel.send(portRef1, this.kernel.createMessage())
        this.kernel.incrementTicks(6)
        return this.kernel.ports.bind('one', portRef1)
      }
    }

    class First extends BaseContainer {
      onMessage (m) {
        this.kernel.incrementTicks(2)
        return this.kernel.ports.delete('root')
      }
      static get typeId () {
        return 299
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(First)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))

    hypervisor.pin(root)

    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(root.createMessage({
      data: {
        type: Root.typeId
      },
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)

    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')
    await hypervisor.graph.tree(sr, Infinity, true)

    t.end()
  })

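  // containers created without keeping any bound port to them should be
  // cleared when the state root is generated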
  tape('clear unbounded ports', async t => {
    const expectedSr = {
      '/': 'zdpuAxKfu5nMTfpz6uHPqXdHZFQDZdRUer8zcQ6nvC4pTQsop'
    }

    class Root extends BaseContainer {
      onMessage (m) {
        return this.kernel.createInstance(new Message({
          data: {
            type: Root.typeId
          }
        }))
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(Root)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))

    hypervisor.pin(root)

    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(root.createMessage({
      data: {
        type: Root.typeId
      },
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)
    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')

    t.end()
  })

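  // a whole subgraph of containers that is only reachable through a dangling
  // channel should be garbage collected from the state root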
  tape('should remove subgraphs', async t => {
    const expectedSr = {
      '/': 'zdpuAxKfu5nMTfpz6uHPqXdHZFQDZdRUer8zcQ6nvC4pTQsop'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        const [, portRef2] = this.kernel.ports.createChannel()
        return this.kernel.createInstance(this.kernel.createMessage({
          data: {
            type: Sub.typeId
          },
          ports: [portRef2]
        }))
      }
    }

    class Sub extends BaseContainer {
      async onInitailize (message) {
        await this.kernel.ports.bind('root', message.ports[0])
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.ports.bind('child', portRef1)
        await this.kernel.createInstance(this.kernel.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        }))
      }
      static get typeId () {
        return 299
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(Sub)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))

      hypervisor.pin(root)

      const [portRef1, portRef2] = root.ports.createChannel()
      await root.ports.bind('first', portRef1)
      await root.createInstance(root.createMessage({
        data: {
          type: Root.typeId
        },
        ports: [portRef2]
      }))

      await root.send(portRef1, root.createMessage())
      const sr = await hypervisor.createStateRoot()

      t.deepEquals(sr, expectedSr, 'should produce the correct state root')
      t.end()
    } catch (e) {
      console.log(e)
    }
  })

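  // containers that stay connected to the pinned root through some chain of
  // channels must survive garbage collection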
  tape('should not remove connected nodes', async t => {
    const expectedSr = {
      '/': 'zdpuAr4A3i1t6B7BkLT9C7DoxwvFnNg74gEzyqhpFj7nqVBy6'
    }
    class Root extends BaseContainer {
      async onMessage (m) {
        if (m.ports.length) {
          const port = this.kernel.ports.get('test1')
          await this.kernel.send(port, m)
          return this.kernel.ports.unbind('test1')
        } else {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          await this.kernel.createInstance(this.kernel.createMessage({
            data: {
              type: Sub.typeId
            },
            ports: [portRef2]
          }))
          await this.kernel.ports.bind('test1', portRef1)

          const [portRef3, portRef4] = this.kernel.ports.createChannel()
          await this.kernel.createInstance(this.kernel.createMessage({
            data: {
              type: Sub.typeId
            },
            ports: [portRef4]
          }))
          await this.kernel.ports.bind('test2', portRef3)
          await this.kernel.send(portRef3, this.kernel.createMessage({
            data: 'getChannel'
          }))
        }
      }
    }

    class Sub extends BaseContainer {
      async onMessage (message) {
        if (message.data === 'getChannel') {
          const ports = this.kernel.ports.createChannel()
          await this.kernel.send(message.fromPort, this.kernel.createMessage({
            data: 'bindPort',
            ports: [ports[1]]
          }))
          return this.kernel.ports.bind('channel', ports[0])
        } else if (message.data === 'bindPort') {
          return this.kernel.ports.bind('channel', message.ports[0])
        }
      }
      static get typeId () {
        return 299
      }
    }

    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(Sub)

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: Root.typeId
      }
    }))

    hypervisor.pin(root)

    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(root.createMessage({
      data: {
        type: Root.typeId
      },
      ports: [portRef2]
    }))

    await root.send(portRef1, root.createMessage())
    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the correct state root')
    // await hypervisor.graph.tree(sr, Infinity)

    t.end()
  })

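  // when both bindings into a pair of subgraphs are dropped, every container
  // in those subgraphs should be removed from the state root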
  tape('should remove multiple subgraphs', async t => {
    const expectedSr = {
      '/': 'zdpuAzYGmZeZsi5Zer7LXCTm1AsmqpUMJAXZnEeFW2UVDZj2P'
    }
    class Root extends BaseContainer {
      onMessage (m) {
        if (m.ports.length) {
          const port = this.kernel.ports.get('test1')
          return Promise.all([
            this.kernel.send(port, m),
            this.kernel.ports.unbind('test1'),
            this.kernel.ports.unbind('test2')
          ])
        } else {
          const [portRef1, portRef2] = this.kernel.ports.createChannel()
          const [portRef3, portRef4] = this.kernel.ports.createChannel()
          return Promise.all([
            this.kernel.createInstance(this.kernel.createMessage({
              data: {
                type: Sub.typeId
              },
              ports: [portRef2]
            })),
            this.kernel.ports.bind('test1', portRef1),
            this.kernel.createInstance(this.kernel.createMessage({
              data: {
                type: Sub.typeId
              },
              ports: [portRef4]
            })),
            this.kernel.ports.bind('test2', portRef3),
            this.kernel.send(portRef3, this.kernel.createMessage({
              data: 'getChannel'
            }))
          ])
        }
      }
    }

    class Sub extends BaseContainer {
      async onMessage (message) {
        if (message.data === 'getChannel') {
          const ports = this.kernel.ports.createChannel()
          await this.kernel.send(message.fromPort, this.kernel.createMessage({
            data: 'bindPort',
            ports: [ports[1]]
          }))
          return this.kernel.ports.bind('channel', ports[0])
        } else if (message.data === 'bindPort') {
          return this.kernel.ports.bind('channel', message.ports[0])
        }
      }
      static get typeId () {
        return 299
      }
    }

    try {
      const hypervisor = new Hypervisor(node.dag)

      hypervisor.registerContainer(Root)
      hypervisor.registerContainer(Sub)

      const root = await hypervisor.createInstance(new Message({
        data: {
          type: Root.typeId
        }
      }))

      hypervisor.pin(root)

      const [portRef1, portRef2] = root.ports.createChannel()
      await Promise.all([
        root.ports.bind('first', portRef1),
        root.createInstance(root.createMessage({
          data: {
            type: Root.typeId
          },
          ports: [portRef2]
        })),
        root.send(portRef1, root.createMessage())
      ])

      const sr = await hypervisor.createStateRoot()
      t.deepEquals(sr, expectedSr, 'should produce the correct state root')

      t.end()
    } catch (e) {
      console.log(e)
    }
  })

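  // a message's response port is created lazily and reused, and the value
  // returned from onMessage comes back over it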
  tape('response ports', async t => {
    t.plan(2)
    let runs = 0
    const returnValue = 'this is a test'

    class testVMContainer extends BaseContainer {
      onMessage (m) {
        runs++
        if (runs === 1) {
          return returnValue
        } else {
          t.equals(m.data, returnValue, 'should have correct return value')
        }
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)

    const rootContainer = await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId
      }
    }))

    hypervisor.pin(rootContainer)

    const [portRef1, portRef2] = rootContainer.ports.createChannel()
    const initMessage = rootContainer.createMessage({
      data: {
        type: testVMContainer.typeId
      },
      ports: [portRef2]
    })

    rootContainer.createInstance(initMessage)

    await rootContainer.ports.bind('first', portRef1)
    const message = rootContainer.createMessage()
    const rPort = rootContainer.getResponsePort(message)
    const rPort2 = rootContainer.getResponsePort(message)

    t.equals(rPort2, rPort)

    rootContainer.send(portRef1, message)
    await rootContainer.ports.bind('response', rPort)
  })

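  // the onStartup hook should fire when a container instance is started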
  tape('start up', async t => {
    t.plan(1)
    class testVMContainer extends BaseContainer {
      onMessage () {}
      onStartup () {
        t.true(true, 'should start up')
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId
      }
    }))
    hypervisor.getInstance(hypervisor.ROOT_ID)
  })

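  // a ~1MB code blob should round-trip through the DAG unchanged in length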
  tape('large code size', async t => {
    t.plan(1)
    const content = Buffer.from(new ArrayBuffer(1000000))
    class testVMContainer extends BaseContainer {
      onMessage () {}
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)
    await hypervisor.createInstance(new Message({
      data: {
        type: testVMContainer.typeId,
        code: content
      }
    }))
    const instance = await hypervisor.getInstance(hypervisor.ROOT_ID)
    t.equals(content.length, instance.code.length)
  })

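  // a container can ask the hypervisor's creation service (via its port) to
  // create further containers on its behalf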
  tape('creation service messaging', async t => {
    t.plan(1)
    class TestVMContainer extends BaseContainer {
      async onCreation (m) {
        const creationPort = m.ports[0]
        const [port1, port2] = this.kernel.ports.createChannel()
        await this.kernel.ports.bind('child', port1)

        const message = this.kernel.createMessage({
          data: {
            type: TestVMContainer2.typeId
          },
          ports: [port2]
        })
        return this.kernel.send(creationPort, message)
      }
      onMessage () {

      }
    }

    class TestVMContainer2 extends BaseContainer {
      onMessage () {

      }

      static get typeId () {
        return 66
      }
    }

    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(TestVMContainer)
    hypervisor.registerContainer(TestVMContainer2)

    const port = hypervisor.creationService.getPort()

    const root = await hypervisor.createInstance(new Message({
      data: {
        type: TestVMContainer.typeId
      },
      ports: [port]
    }))

    hypervisor.pin(root)

    const stateRoot = await hypervisor.createStateRoot()
    // await hypervisor.graph.tree(hypervisor.state, Infinity, true)
    const expectedSR = {
      '/': 'zdpuAonuhk7ZhdghJh4saaUCskY5mXZ6M9BcV9iAhCanAQx9i'
    }
    t.deepEquals(stateRoot, expectedSR)
  })
})