Files: d0da8197e290a0f7d27a4b08cbc89d00cf1ca258 / tests / index.js
25993 bytesRaw
const tape = require('tape')
const IPFS = require('ipfs')
const AbstractContainer = require('primea-abstract-container')
const Message = require('primea-message')
const Hypervisor = require('../')

// Start an IPFS node whose DAG API (node.dag) backs the hypervisor's state
// storage in every test below. `start: false` keeps networking offline —
// only the local DAG store is needed; tests run once 'ready' fires.
const node = new IPFS({
  start: false
})
11 | |
// Shared test fixture: on creation, stores any non-empty payload as the
// container's code and binds the first supplied port under the name 'root'.
class BaseContainer extends AbstractContainer {
  onCreation (message) {
    const {data, ports} = message
    this.kernel.state.code = data.byteLength ? data : undefined
    const [rootPort] = ports
    if (!rootPort) {
      return
    }
    return this.kernel.ports.bind('root', rootPort)
  }

  static get typeId () {
    return 9
  }
}
24 | |
25 | node.on('ready', () => { |
tape('basic', async t => {
  t.plan(3)
  let message
  // Hash of the expected post-run state tree; any behavioural drift in the
  // hypervisor shows up here as a different root.
  const expectedState = {
    '/': 'zdpuApGUFnjcY3eBeVPFfnEgGunPz8vyXVJbrkgBmYwrbVDpA'
  }

  class testVMContainer extends BaseContainer {
    onMessage (m) {
      // The child must receive the exact message object the root sent.
      t.true(m === message, 'should recive a message')
    }
  }

  try {
    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)

    const rootContainer = await hypervisor.createInstance(testVMContainer.typeId)

    // Create a channel and hand one end to a new child via its init message.
    const [portRef1, portRef2] = rootContainer.ports.createChannel()
    const initMessage = rootContainer.createMessage({
      data: Buffer.from('test code'),
      ports: [portRef2]
    })

    await rootContainer.createInstance(testVMContainer.typeId, initMessage)

    await rootContainer.ports.bind('first', portRef1)
    message = rootContainer.createMessage()
    rootContainer.send(portRef1, message)

    const stateRoot = await hypervisor.createStateRoot(Infinity)
    t.deepEquals(stateRoot, expectedState, 'expected root!')
    t.equals(hypervisor.scheduler.oldest(), 0)
  } catch (e) {
    // Fix: report failures instead of swallowing them with console.log —
    // with t.plan(3) a swallowed error previously surfaced only as a vague
    // "plan != count" failure with no stack trace in the TAP output.
    t.fail(e)
  }
})
64 | |
tape('basic - do not store containers with no ports bound', async t => {
  t.plan(1)
  const expectedState = {
    '/': 'zdpuAop4nt8pqzg7duciSYbZmWfDaBiz87RCtGCbb35ewUrbW'
  }

  // Override the base fixture so creation neither stores code nor binds the
  // incoming port — the child ends up with nothing bound.
  class testVMContainer extends BaseContainer {
    onCreation () {}
  }

  try {
    const hypervisor = new Hypervisor(node.dag)
    hypervisor.registerContainer(testVMContainer)

    const root = await hypervisor.createInstance(testVMContainer.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()

    await root.ports.bind('one', portRef1)
    const childInit = root.createMessage({
      ports: [portRef2]
    })
    await root.createInstance(testVMContainer.typeId, childInit)

    const stateRoot = await hypervisor.createStateRoot(Infinity)

    t.deepEquals(stateRoot, expectedState, 'expected root!')
  } catch (e) {
    console.log(e)
  }
})
96 | |
tape('one child contract', async t => {
  t.plan(4)
  let message
  const expectedState = {
    '/': 'zdpuArCqpDZtEqjrXrRhMiYLE7QQ1szVr1qLVkiwtDLincGWU'
  }
  // Set inside the grandchild's delayed onMessage; proves createStateRoot
  // waits for in-flight message handling to settle before hashing.
  let hasResolved = false

  // Grandchild: handles the forwarded message asynchronously (200ms timer)
  // and burns one tick while doing so.
  class testVMContainer2 extends BaseContainer {
    onMessage (m) {
      t.true(m === message, 'should recive a message')
      return new Promise((resolve, reject) => {
        setTimeout(() => {
          this.kernel.incrementTicks(1)
          hasResolved = true
          resolve()
        }, 200)
      })
    }

    static get typeId () {
      return 99
    }
  }

  // Child: on its first message, spawns the grandchild over a fresh channel,
  // forwards the message to it, and keeps its end bound as 'child'.
  class testVMContainer extends BaseContainer {
    async onMessage (m) {
      const [portRef1, portRef2] = this.kernel.ports.createChannel()
      await this.kernel.createInstance(testVMContainer2.typeId, this.kernel.createMessage({
        ports: [portRef2]
      }))
      await this.kernel.send(portRef1, m)
      this.kernel.incrementTicks(1)
      return this.kernel.ports.bind('child', portRef1)
    }
  }

  const hypervisor = new Hypervisor(node.dag)
  hypervisor.registerContainer(testVMContainer)
  hypervisor.registerContainer(testVMContainer2)

  let root = await hypervisor.createInstance(testVMContainer.typeId)
  const rootId = root.id
  const [portRef1, portRef2] = root.ports.createChannel()
  await root.createInstance(testVMContainer.typeId, root.createMessage({
    ports: [portRef2]
  }))

  await root.ports.bind('first', portRef1)
  message = root.createMessage()

  await root.send(portRef1, message)
  const stateRoot = await hypervisor.createStateRoot(Infinity)
  t.true(hasResolved, 'should resolve before generating the state root')

  // await hypervisor.graph.tree(stateRoot, Infinity, true)
  // console.log(JSON.stringify(stateRoot, null, 2))
  t.deepEquals(stateRoot, expectedState, 'expected state')

  // Revive the persisted state: reload the root instance by id and send the
  // message again through the restored 'first' port (triggers the 4th assert
  // via the grandchild's onMessage).
  class testVMContainer3 extends BaseContainer {
    onMessage (m) {
      const port = this.kernel.ports.get('child')
      this.kernel.send(port, m)
      this.kernel.incrementTicks(1)
    }
  }

  hypervisor.registerContainer(testVMContainer3)
  root = await hypervisor.getInstance(rootId)
  const port = root.ports.get('first')
  root.send(port, message)
})
170 | |
tape('traps', async t => {
  t.plan(1)
  class Root extends BaseContainer {
    async onMessage (m) {
      const [portRef1, portRef2] = this.kernel.ports.createChannel()
      const [portRef3, portRef4] = this.kernel.ports.createChannel()
      const [portRef5, portRef6] = this.kernel.ports.createChannel()

      // Fix: Promise.all takes a single iterable. The three bind promises
      // were previously passed as separate arguments, so Promise.all tried
      // to iterate the first promise and rejected with a TypeError before
      // ever reaching the intended trap below.
      await Promise.all([
        this.kernel.ports.bind('one', portRef1),
        this.kernel.ports.bind('two', portRef3),
        this.kernel.ports.bind('three', portRef5)
      ])

      const message1 = this.kernel.createMessage({
        ports: [portRef2]
      })
      const message2 = this.kernel.createMessage({
        ports: [portRef4]
      })
      const message3 = this.kernel.createMessage({
        ports: [portRef6]
      })

      await Promise.all([
        this.kernel.createInstance(Root.typeId, message1),
        this.kernel.createInstance(Root.typeId, message2),
        this.kernel.createInstance(Root.typeId, message3)
      ])

      // Throwing from onMessage is a "trap": all state changes made while
      // handling the message must be reverted by the hypervisor.
      throw new Error('it is a trap!!!')
    }
  }

  const hypervisor = new Hypervisor(node.dag)

  hypervisor.registerContainer(Root)
  const root = await hypervisor.createInstance(Root.typeId)
  await root.message(root.createMessage())
  const stateRoot = await hypervisor.createStateRoot()

  t.deepEquals(stateRoot, {
    '/': 'zdpuAoifKuJkWz9Fjvt79NmGq3tcefhfCyq8iM8YhcFdV9bmZ'
  }, 'should revert the state')
})
216 | |
tape('recieving older messages', async t => {
  t.plan(2)
  // Counts deliveries handled by Root; selects the branch taken below.
  let runs = 0

  class Root extends BaseContainer {
    async onMessage (m) {
      if (!runs) {
        // First delivery: spawn First and Waiter, each on its own channel,
        // and send one message down each channel.
        runs++
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const [portRef3, portRef4] = this.kernel.ports.createChannel()

        const message1 = this.kernel.createMessage({
          ports: [portRef2]
        })
        const message2 = this.kernel.createMessage({
          ports: [portRef4]
        })

        await Promise.all([
          this.kernel.createInstance(First.typeId, message1),
          this.kernel.send(portRef1, this.kernel.createMessage()),
          this.kernel.send(portRef3, this.kernel.createMessage()),
          this.kernel.ports.bind('one', portRef1),
          this.kernel.ports.bind('two', portRef3)
        ])
        return this.kernel.createInstance(Waiter.typeId, message2)
      } else if (runs === 1) {
        runs++
        // Waiter's reply arrives 200ms late in wall-clock time, yet the
        // scheduler must still deliver it before First's quicker reply.
        t.equals(m.data, 'first', 'should recive the first message')
      } else if (runs === 2) {
        runs++
        t.equals(m.data, 'second', 'should recive the second message')
      } else if (runs === 3) {
        runs++
        // t.equals(m.data, 'third', 'should recived the second message')
      }
    }
    static get typeId () {
      return 299
    }
  }

  // Replies immediately with 'second', consuming two ticks first.
  class First extends BaseContainer {
    onMessage (m) {
      this.kernel.incrementTicks(2)
      return this.kernel.send(m.fromPort, this.kernel.createMessage({
        data: 'second'
      }))
    }
    static get typeId () {
      return 29
    }
  }

  // Replies with 'first' only after a 200ms real-time delay, no ticks.
  class Waiter extends BaseContainer {
    onMessage (m) {
      return new Promise((resolve, reject) => {
        setTimeout(() => {
          this.kernel.send(m.fromPort, this.kernel.createMessage({
            data: 'first'
          })).then(resolve)
        }, 200)
      })
    }
  }

  try {
    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(First)
    hypervisor.registerContainer(Waiter)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()

    const message = root.createMessage()
    await Promise.all([
      root.send(portRef1, message),
      root.ports.bind('first', portRef1),
      root.createInstance(Root.typeId, root.createMessage({
        ports: [portRef2]
      }))
    ])
  } catch (e) {
    console.log(e)
  }
})
305 | |
tape('saturation', async t => {
  t.plan(3)
  // Counts deliveries handled by Root; selects the branch taken below.
  let runs = 0

  class Root extends BaseContainer {
    async onMessage (m) {
      if (!runs) {
        // First delivery: spawn First and Second on separate channels,
        // message both, and burn six ticks up front.
        runs++
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const [portRef3, portRef4] = this.kernel.ports.createChannel()

        const message1 = this.kernel.createMessage({
          ports: [portRef2]
        })
        const message2 = this.kernel.createMessage({
          ports: [portRef4]
        })

        this.kernel.incrementTicks(6)
        return Promise.all([
          this.kernel.createInstance(First.typeId, message1),
          this.kernel.createInstance(Second.typeId, message2),
          this.kernel.send(portRef1, this.kernel.createMessage()),
          this.kernel.send(portRef3, this.kernel.createMessage()),
          this.kernel.ports.bind('one', portRef1),
          this.kernel.ports.bind('two', portRef3)
        ])
      } else if (runs === 1) {
        runs++
        // Replies are expected back ordered by ticks consumed: Waiter (none),
        // then First (2), then Second (3).
        t.equals(m.data, 'first', 'should recive the first message')
      } else if (runs === 2) {
        runs++
        t.equals(m.data, 'second', 'should recive the first message')
      } else if (runs === 3) {
        runs++
        t.equals(m.data, 'third', 'should recived the second message')
      }
    }
    static get typeId () {
      return 299
    }
  }

  // Replies with 'second' after consuming two ticks.
  class First extends BaseContainer {
    onMessage (m) {
      this.kernel.incrementTicks(2)
      return this.kernel.send(m.fromPort, this.kernel.createMessage({
        data: 'second'
      }))
    }
    static get typeId () {
      return 29
    }
  }

  // Replies with 'third' after consuming three ticks.
  class Second extends BaseContainer {
    onMessage (m) {
      this.kernel.incrementTicks(3)
      return this.kernel.send(m.fromPort, this.kernel.createMessage({
        data: 'third'
      }))
    }
    static get typeId () {
      return 2
    }
  }

  // Sends 'first' on its creation port after a 200ms real-time delay.
  class Waiter extends BaseContainer {
    onCreation (m) {
      return new Promise((resolve, reject) => {
        setTimeout(() => {
          this.kernel.send(m.ports[0], this.kernel.createMessage({
            data: 'first'
          })).then(resolve)
        }, 200)
      })
    }
  }

  try {
    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(First)
    hypervisor.registerContainer(Second)
    hypervisor.registerContainer(Waiter)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()
    const [portRef3, portRef4] = root.ports.createChannel()

    const message = root.createMessage()
    await Promise.all([
      root.send(portRef1, message),
      root.ports.bind('first', portRef1),
      root.createInstance(Root.typeId, root.createMessage({
        ports: [portRef2]
      })),
      // NOTE(review): 'sencond' looks like a typo for 'second', but it is
      // only a local port name — renaming it would change the state hash.
      root.ports.bind('sencond', portRef3),
      root.createInstance(Waiter.typeId, root.createMessage({
        ports: [portRef4]
      }))
    ])

    // Saturate the root's tick count so the children's replies drain.
    root.incrementTicks(100)
    await root.send(portRef1, root.createMessage({data: 'testss'}))
    hypervisor.scheduler.done(root.id)
  } catch (e) {
    console.log(e)
  }
})
417 | |
tape('send to the same container at the same time', async t => {
  t.plan(2)

  let runs = 0
  // Captures the First instance seen on the initial delivery so the second
  // delivery can be compared against it.
  let instance

  class Root extends BaseContainer {
    async onMessage (m) {
      let one = this.kernel.ports.get('one')
      if (!one) {
        // First message: spawn the child and bind the channel as 'one'.
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const message1 = this.kernel.createMessage({
          ports: [portRef2]
        })
        await this.kernel.createInstance(First.typeId, message1)
        return this.kernel.ports.bind('one', portRef1)
      } else {
        // Second message: fire two sends at the same port concurrently.
        return Promise.all([
          this.kernel.send(one, this.kernel.createMessage()),
          this.kernel.send(one, this.kernel.createMessage())
        ])
      }
    }
    static get typeId () {
      return 299
    }
  }

  // Both concurrent deliveries must hit the same loaded instance rather
  // than two separately-revived copies.
  class First extends BaseContainer {
    onMessage (m) {
      ++runs
      if (runs === 2) {
        t.equals(instance, this, 'should have same instances')
      } else {
        instance = this
      }
    }
  }

  try {
    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(First)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(Root.typeId, root.createMessage({
      ports: [portRef2]
    }))

    const message = root.createMessage()
    await root.send(portRef1, message)
    // Flush between the two sends so the second delivery revives the
    // containers from the persisted state root.
    await hypervisor.createStateRoot()
    await root.send(portRef1, root.createMessage())
    await hypervisor.createStateRoot()
    t.equals(runs, 2)
  } catch (e) {
    console.log(e)
  }
})
480 | |
tape('checking ports', async t => {
  t.plan(4)
  const hypervisor = new Hypervisor(node.dag)
  hypervisor.registerContainer(BaseContainer)

  const root = await hypervisor.createInstance(BaseContainer.typeId)

  const [portRef1, portRef2] = root.ports.createChannel()
  // Fix: await the instance creation instead of leaving the promise floating
  // (a rejection there would previously have been unhandled).
  await root.createInstance(BaseContainer.typeId, root.createMessage({
    ports: [portRef2]
  }))
  await root.ports.bind('test', portRef1)

  // A bound port must not be packed into an outgoing message.
  try {
    root.createMessage({
      ports: [portRef1]
    })
  } catch (e) {
    // Fix: "thow" -> "throw" in the assertion messages below.
    t.pass('should throw if sending a port that is bound')
  }

  // Rebinding the same port is rejected.
  try {
    await root.ports.bind('test', portRef1)
  } catch (e) {
    t.pass('should throw if binding an already bound port')
  }

  // Reusing an already-taken name is rejected.
  try {
    const [portRef3] = root.ports.createChannel()
    await root.ports.bind('test', portRef3)
  } catch (e) {
    t.pass('should throw if binding an already bound name')
  }

  // After unbinding, the port may be attached to a message again.
  await root.ports.unbind('test')
  const message = root.createMessage({
    ports: [portRef1]
  })
  t.equals(message.ports[0], portRef1, 'should create a message if the port is unbound')
})
521 | |
tape('port deletion', async t => {
  const expectedSr = {
    '/': 'zdpuAopMy53q2uvL2a4fhVEAvwXjSDW28fh8zhQUj598tb5md'
  }
  class Root extends BaseContainer {
    async onMessage (m) {
      const [portRef1, portRef2] = this.kernel.ports.createChannel()
      const message1 = this.kernel.createMessage({
        ports: [portRef2]
      })

      await this.kernel.createInstance(First.typeId, message1)
      await this.kernel.send(portRef1, this.kernel.createMessage())
      this.kernel.incrementTicks(6)
      return this.kernel.ports.bind('one', portRef1)
    }
  }

  // Deletes its 'root' port on receipt, severing the link back to Root —
  // the now-unreachable child should not survive into the state root
  // (expectedSr matches the hash used by the subgraph-removal tests).
  class First extends BaseContainer {
    onMessage (m) {
      this.kernel.incrementTicks(2)
      return this.kernel.ports.delete('root')
    }
    static get typeId () {
      return 299
    }
  }

  const hypervisor = new Hypervisor(node.dag)

  hypervisor.registerContainer(Root)
  hypervisor.registerContainer(First)

  const root = await hypervisor.createInstance(Root.typeId)
  const [portRef1, portRef2] = root.ports.createChannel()
  await root.ports.bind('first', portRef1)
  await root.createInstance(Root.typeId, root.createMessage({
    ports: [portRef2]
  }))

  const message = root.createMessage()
  await root.send(portRef1, message)

  const sr = await hypervisor.createStateRoot()
  t.deepEquals(sr, expectedSr, 'should produce the corret state root')
  // NOTE(review): looks like leftover debug output — this walks the whole
  // state tree after the assertion already ran; consider removing.
  await hypervisor.graph.tree(sr, Infinity, true)

  t.end()
})
571 | |
tape('clear unbounded ports', async t => {
  const expectedSr = {
    '/': 'zdpuAopMy53q2uvL2a4fhVEAvwXjSDW28fh8zhQUj598tb5md'
  }

  // On each message, spawn a child whose creation message carries no ports;
  // such a container can never be reached and should be dropped from the
  // persisted state.
  class Root extends BaseContainer {
    onMessage (m) {
      return this.kernel.createInstance(Root.typeId)
    }
  }

  const hypervisor = new Hypervisor(node.dag)
  hypervisor.registerContainer(Root)

  const root = await hypervisor.createInstance(Root.typeId)
  const [portRef1, portRef2] = root.ports.createChannel()
  await root.ports.bind('first', portRef1)
  const childInit = root.createMessage({
    ports: [portRef2]
  })
  await root.createInstance(Root.typeId, childInit)

  await root.send(portRef1, root.createMessage())
  const sr = await hypervisor.createStateRoot()
  t.deepEquals(sr, expectedSr, 'should produce the corret state root')

  t.end()
})
599 | |
tape('should remove subgraphs', async t => {
  const expectedSr = {
    '/': 'zdpuAopMy53q2uvL2a4fhVEAvwXjSDW28fh8zhQUj598tb5md'
  }
  class Root extends BaseContainer {
    onMessage (m) {
      // The near end of the channel is deliberately discarded, so the whole
      // Sub subtree is unreachable from any bound port.
      const [, portRef2] = this.kernel.ports.createChannel()
      return this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
        ports: [portRef2]
      }))
    }
  }

  class Sub extends BaseContainer {
    // NOTE(review): 'onInitailize' matches the (misspelled) hook name used
    // by primea-abstract-container — verify against that package before
    // "correcting" the spelling here.
    async onInitailize (message) {
      await this.kernel.ports.bind('root', message.ports[0])
      const [portRef1, portRef2] = this.kernel.ports.createChannel()
      await this.kernel.ports.bind('child', portRef1)
      await this.kernel.createInstance(Root.typeId, this.kernel.createMessage({
        ports: [portRef2]
      }))
    }
    static get typeId () {
      return 299
    }
  }

  try {
    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(Sub)

    const root = await hypervisor.createInstance(Root.typeId)
    const [portRef1, portRef2] = root.ports.createChannel()
    await root.ports.bind('first', portRef1)
    await root.createInstance(Root.typeId, root.createMessage({
      ports: [portRef2]
    }))

    await root.send(portRef1, root.createMessage())
    const sr = await hypervisor.createStateRoot()

    t.deepEquals(sr, expectedSr, 'should produce the corret state root')
    t.end()
  } catch (e) {
    console.log(e)
  }
})
649 | |
tape('should not remove connected nodes', async t => {
  const expectedSr = {
    '/': 'zdpuApKrsvsWknDML2Mme9FyZfRnVZ1hTCoKzkooYAWT3dUDV'
  }
  class Root extends BaseContainer {
    async onMessage (m) {
      if (m.ports.length) {
        // Second delivery (carrying a port): forward it to the first Sub,
        // then unbind 'test1'. The Subs remain linked to each other via
        // their 'channel' ports, so neither may be garbage-collected.
        const port = this.kernel.ports.get('test1')
        await this.kernel.send(port, m)
        return this.kernel.ports.unbind('test1')
      } else {
        // First delivery: spawn two Subs and ask the second one to open a
        // channel ('getChannel') that will connect the two Subs directly.
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        await this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
          ports: [portRef2]
        }))
        await this.kernel.ports.bind('test1', portRef1)

        const [portRef3, portRef4] = this.kernel.ports.createChannel()
        await this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
          ports: [portRef4]
        }))
        await this.kernel.ports.bind('test2', portRef3)
        await this.kernel.send(portRef3, this.kernel.createMessage({
          data: 'getChannel'
        }))
      }
    }
  }

  class Sub extends BaseContainer {
    async onMessage (message) {
      if (message.data === 'getChannel') {
        // Open a channel, keep one end, and route the other end back through
        // the requester (Root relays it to the sibling Sub).
        const ports = this.kernel.ports.createChannel()
        await this.kernel.send(message.fromPort, this.kernel.createMessage({
          data: 'bindPort',
          ports: [ports[1]]
        }))
        return this.kernel.ports.bind('channel', ports[0])
      } else if (message.data === 'bindPort') {
        return this.kernel.ports.bind('channel', message.ports[0])
      }
    }
    static get typeId () {
      return 299
    }
  }

  const hypervisor = new Hypervisor(node.dag)

  hypervisor.registerContainer(Root)
  hypervisor.registerContainer(Sub)

  const root = await hypervisor.createInstance(Root.typeId)
  const [portRef1, portRef2] = root.ports.createChannel()
  await root.ports.bind('first', portRef1)
  await root.createInstance(Root.typeId, root.createMessage({
    ports: [portRef2]
  }))

  await root.send(portRef1, root.createMessage())
  const sr = await hypervisor.createStateRoot()
  t.deepEquals(sr, expectedSr, 'should produce the corret state root')
  // await hypervisor.graph.tree(sr, Infinity)

  t.end()
})
716 | |
tape('should remove multiple subgraphs', async t => {
  const expectedSr = {
    '/': 'zdpuArkZ5yNowNnU4qJ8vayAUncgibQP9goDP1CwFxdmPJF9D'
  }
  class Root extends BaseContainer {
    onMessage (m) {
      if (m.ports.length) {
        // Second delivery: pass the port along, then unbind BOTH channels —
        // the two interconnected Subs become unreachable together.
        const port = this.kernel.ports.get('test1')
        return Promise.all([
          this.kernel.send(port, m),
          this.kernel.ports.unbind('test1'),
          this.kernel.ports.unbind('test2')
        ])
      } else {
        // First delivery: spawn two Subs and have the second one open the
        // cross-link channel via 'getChannel' (same setup as the
        // "should not remove connected nodes" test above).
        const [portRef1, portRef2] = this.kernel.ports.createChannel()
        const [portRef3, portRef4] = this.kernel.ports.createChannel()
        return Promise.all([
          this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
            ports: [portRef2]
          })),
          this.kernel.ports.bind('test1', portRef1),
          this.kernel.createInstance(Sub.typeId, this.kernel.createMessage({
            ports: [portRef4]
          })),
          this.kernel.ports.bind('test2', portRef3),
          this.kernel.send(portRef3, this.kernel.createMessage({
            data: 'getChannel'
          }))
        ])
      }
    }
  }

  class Sub extends BaseContainer {
    async onMessage (message) {
      if (message.data === 'getChannel') {
        // Keep one end of a new channel; ship the other end back through the
        // requester so the sibling Sub can bind it.
        const ports = this.kernel.ports.createChannel()
        await this.kernel.send(message.fromPort, this.kernel.createMessage({
          data: 'bindPort',
          ports: [ports[1]]
        }))
        return this.kernel.ports.bind('channel', ports[0])
      } else if (message.data === 'bindPort') {
        return this.kernel.ports.bind('channel', message.ports[0])
      }
    }
    static get typeId () {
      return 299
    }
  }

  try {
    const hypervisor = new Hypervisor(node.dag)

    hypervisor.registerContainer(Root)
    hypervisor.registerContainer(Sub)

    const root = await hypervisor.createInstance(Root.typeId)

    const [portRef1, portRef2] = root.ports.createChannel()
    await Promise.all([
      root.ports.bind('first', portRef1),
      root.createInstance(Root.typeId, root.createMessage({
        ports: [portRef2]
      })),
      root.send(portRef1, root.createMessage())
    ])

    const sr = await hypervisor.createStateRoot()
    t.deepEquals(sr, expectedSr, 'should produce the corret state root')

    t.end()
  } catch (e) {
    console.log(e)
  }
})
793 | |
tape('response ports', async t => {
  t.plan(2)
  let runs = 0
  const returnValue = 'this is a test'

  // First delivery returns a value (which the kernel routes back over the
  // message's response port); the second delivery is that response and must
  // carry the returned data.
  class testVMContainer extends BaseContainer {
    onMessage (m) {
      runs++
      if (runs !== 1) {
        t.equals(m.data, returnValue, 'should have correct return value')
        return
      }
      return returnValue
    }
  }

  const hypervisor = new Hypervisor(node.dag)

  hypervisor.registerContainer(testVMContainer)

  const rootContainer = await hypervisor.createInstance(testVMContainer.typeId)

  const [portRef1, portRef2] = rootContainer.ports.createChannel()
  const initMessage = rootContainer.createMessage({
    ports: [portRef2]
  })

  rootContainer.createInstance(testVMContainer.typeId, initMessage)

  await rootContainer.ports.bind('first', portRef1)
  const msg = rootContainer.createMessage()

  // Requesting the response port twice must hand back the same port.
  const responsePort = rootContainer.getResponsePort(msg)
  const responsePortAgain = rootContainer.getResponsePort(msg)

  t.equals(responsePortAgain, responsePort)

  rootContainer.send(portRef1, msg)
  await rootContainer.ports.bind('response', responsePort)
})
833 | |
tape('start up', async t => {
  t.plan(1)
  class testVMContainer extends BaseContainer {
    onMessage () {}
    // Fires while the instance is loaded via getInstance below; the single
    // planned assertion proves the startup hook ran.
    onStartup () {
      t.true(true, 'should start up')
    }
  }

  const hypervisor = new Hypervisor(node.dag)
  hypervisor.registerContainer(testVMContainer)
  await hypervisor.createInstance(testVMContainer.typeId)
  // Fix: await the load instead of leaving the promise floating, so a
  // rejection fails the test rather than becoming an unhandled rejection.
  await hypervisor.getInstance(hypervisor.ROOT_ID)
})
848 | |
tape('large code size', async t => {
  t.plan(1)
  // 1 MB zero-filled payload, exercising storage of large code blobs.
  const content = Buffer.from(new ArrayBuffer(1000000))

  class testVMContainer extends BaseContainer {
    onMessage () {}
  }

  const hypervisor = new Hypervisor(node.dag)
  hypervisor.registerContainer(testVMContainer)

  const creationMessage = new Message({data: content})
  await hypervisor.createInstance(testVMContainer.typeId, creationMessage)

  // Reload the instance and confirm the full payload round-tripped.
  const instance = await hypervisor.getInstance(hypervisor.ROOT_ID)
  t.equals(content.length, instance.code.length)
})
862 | }) |
863 |
Built with git-ssb-web