Commit ac93636d794481083a1cd4d475925e93a6da01be
first test passing
wanderer committed on 6/16/2017, 5:18:40 PM
Parent: 3c9cd5e3087d85ea2a475af7ae2f57b4f12e872c
Files changed
exoInterface.js
index.js
package.json
portManager.js
scheduler.js
tests/index.js
exoInterface.js

@@ -1,9 +1,8 @@
-const EventEmitter = require('events')
 const PortManager = require('./portManager.js')
 const Message = require('primea-message')

-module.exports = class ExoInterface extends EventEmitter {
+module.exports = class ExoInterface {
   /**
    * the ExoInterface manages the varous message passing functions and provides
    * an interface for the containers to use
    * @param {Object} opts
@@ -12,9 +11,8 @@
    * @param {Object} opts.hypervisor
    * @param {Object} opts.Container
    */
   constructor (opts) {
-    super()
     this.state = opts.state
     this.hypervisor = opts.hypervisor
     this.id = opts.id
     this.container = new opts.container.Constructor(this, opts.container.args)
@@ -26,48 +24,47 @@
     // create the port manager
     this.ports = new PortManager(Object.assign({
       exoInterface: this
     }, opts))
-
-    // once we get an result we run the next message
-    this.on('result', this._runNextMessage)
   }

   /**
    * adds a message to this containers message queue
    * @param {Message} message
    */
   queue (portName, message) {
     message._hops++
+    this.ports.addUnboundedPorts(message.ports)
     if (this.containerState !== 'running') {
-      this._updateContainerState('running')
+      this.containerState = 'running'
       if (portName) {
         this._runNextMessage()
       } else {
-        this.run(message, true).then(() => {
-          this._runNextMessage()
-        })
+        this.run(message, true)
       }
     }
   }

   _updateContainerState (containerState, message) {
     this.containerState = containerState
-    this.emit(containerState, message)
   }

   async _runNextMessage () {
-    if (this.ports.hasMessages()) {
-      const message = this.ports.nextMessage()
-      this.ticks = message._ticks
-      this.hypervisor.scheduler.update(this, this.ticks)
-      await this.hypbervisor.scheduler.wait(this.ticks)
-      this.currentMessage = message
-      // run the next message
-      this.run(message)
-    } else {
-      // if no more messages then shut down
-      this._updateContainerState('idle')
+    try {
+      if (this.ports.hasMessages()) {
+        await this.hypervisor.scheduler.wait(this.ticks)
+        const message = this.ports.nextMessage()
+        this.ticks = message._ticks
+        this.hypervisor.scheduler.update(this, this.ticks)
+        this.currentMessage = message
+        // run the next message
+        this.run(message)
+      } else {
+        // if no more messages then shut down
+        this.hypervisor.scheduler.done(this)
+      }
+    } catch (e) {
+      console.log(e)
     }
   }

   /**
@@ -75,22 +72,20 @@
    * The Kernel Stores all of its state in the Environment. The Interface is used
    * to by the VM to retrive infromation from the Environment.
    * @returns {Promise}
    */
-  async run (message, init) {
+  async run (message, init = false) {
     let result
+    const method = init ? 'initailize' : 'run'
     try {
-      if (init) {
-        result = await this.container.run(message) || {}
-      } else {
-        result = await this.container.initailize(message) || {}
-      }
+      result = await this.container[method](message) || {}
     } catch (e) {
       result = {
         exception: true,
         exceptionError: e
       }
     }
+    this._runNextMessage()
     return result
   }

   /**
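Note: the run() rewrite above also fixes an inverted branch: the old code called container.run() when init was true and container.initailize() otherwise. A minimal standalone sketch of the computed-dispatch pattern, with an illustrative container stub ('initailize' is the method name as the codebase spells it):

const container = {
  initailize: async message => ({ phase: 'init' }),
  run: async message => ({ phase: 'run' })
}

async function run (message, init = false) {
  let result
  const method = init ? 'initailize' : 'run'
  try {
    // dispatch on the computed method name instead of branching
    result = await container[method](message) || {}
  } catch (e) {
    result = { exception: true, exceptionError: e }
  }
  return result
}

run({}, true).then(result => console.log(result)) // => { phase: 'init' }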
index.js

@@ -1,20 +1,30 @@
 const Graph = require('ipld-graph-builder')
+const Message = require('primea-message')
 const ExoInterface = require('./exoInterface.js')
-const Message = require('primea-message')
+const Scheduler = require('./scheduler.js')

 module.exports = class Hypervisor {
   /**
    * The Hypervisor manages the container instances by instantiating them and
    * destorying them when possible. It also facilitates localating Containers
    * @param {Graph} dag an instance of [ipfs.dag](https://github.com/ipfs/interface-ipfs-core/tree/master/API/dag#dag-api)
    */
   constructor (dag, state = {}) {
+    this.graph = new Graph(dag)
+    this.scheduler = new Scheduler()
     this._state = state
-    this.graph = new Graph(dag)
     this._containerTypes = {}
   }

+  getDestPort (port) {
+    if (port.destPort) {
+      return port.destPort
+    } else {
+      return this.graph.get(this._state, `${port.destId}/ports/${port.destName}`)
+    }
+  }
+
   /**
    */
   async getInstance (id) {
     let instance = await this.scheduler.instances.get(id)
@@ -22,47 +32,43 @@
     if (!instance) {
       const promise = this._loadInstance(id)
       this.scheduler.instances.set(id, promise)
       instance = await promise
-      instance.once('idle', () => {
-        // once the container is done shut it down
-        this.scheduler.done(instance)
-      })
     }
     return instance
   }

   async _loadInstance (id) {
     const state = await this.graph.get(this._state, id)
-    const Container = this._containerTypes[state.type]
+    const container = this._containerTypes[state.type]

     // create a new kernel instance
     const exoInterface = new ExoInterface({
       hypervisor: this,
       state: state,
-      Container: Container
+      container: container,
+      id: id
     })

     // save the newly created instance
     this.scheduler.update(exoInterface)
     return exoInterface
   }

-  async createInstance (id, type, code, entryPort) {
+  async createInstance (type, code, entryPorts = [], id = {nonce: 0, parent: null}) {
+    id = await this.getHashFromObj(id)
     const state = {
-      '/': {
-        nonce: 0,
-        ports: {},
-        type: type,
-        id: {
-          '/': id
-        },
-        code: code
-      }
+      nonce: [0],
+      ports: {},
+      type: type,
+      code: code
     }
+
     await this.graph.set(this._state, id, state)
     const exoInterface = await this._loadInstance(id)
-    exoInterface.queue(null, new Message(entryPort))
+    exoInterface.queue(null, new Message({
+      ports: entryPorts
+    }))

     return exoInterface
   }

@@ -88,5 +94,9 @@
       Constructor: Constructor,
       args: args
     }
   }
+
+  async getHashFromObj (obj) {
+    return (await this.graph.flush(obj))['/']
+  }
 }
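For reference, createInstance now derives the instance id by hashing an id object ({nonce, parent}) through the IPLD graph, via the new getHashFromObj helper. A rough sketch of the derivation, assuming dag is an ipfs.dag instance as in the constructor docs (idFor is an illustrative helper, not part of this commit):

const Graph = require('ipld-graph-builder')

// mirrors getHashFromObj: flush() commits the object to the dag
// and returns it carrying a '/' merkle link
async function idFor (dag, nonce, parent) {
  const graph = new Graph(dag)
  return (await graph.flush({ nonce: nonce, parent: parent }))['/']
}

// the default root id in createInstance would be derived as:
// const rootId = await idFor(node.dag, 0, null)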
package.json

@@ -31,9 +31,8 @@
   "license": "MPL-2.0",
   "dependencies": {
     "binary-search-insert": "^1.0.3",
     "bn.js": "^4.11.6",
-    "events": "^1.1.1",
     "ipld-graph-builder": "1.1.5",
     "primea-message": "0.0.1"
   },
   "devDependencies": {
portManager.js

@@ -35,34 +35,36 @@
    * @param {Object} opts.exoInterface
    */
   constructor (opts) {
     Object.assign(this, opts)
-    this._unboundPort = new WeakSet()
+    this.ports = this.state.ports
+    this._unboundPorts = new WeakSet()
     this._waitingPorts = {}
   }

+  addUnboundedPorts (ports) {
+    ports.forEach(port => {
+      this._unboundPorts.add(port)
+    })
+  }
+
   /**
    * binds a port to a name
    * @param {Object} port - the port to bind
    * @param {String} name - the name of the port
    */
-  async bind (port, name) {
+  async bind (name, port) {
     if (this.isBound(port)) {
       throw new Error('cannot bind a port that is already bound')
     } else if (this.ports[name]) {
       throw new Error('cannot bind port to a name that is alread bound')
     }

-    let destPort = port.destPort
-    // if the dest is unbound
-    if (destPort) {
-      delete destPort.destPort
-    } else {
-      destPort = await this.hypervisor.getPort(port)
-    }
+    const destPort = await this.hypervisor.getDestPort(port)

     destPort.destName = name
     destPort.destId = this.id
+    delete destPort.destPort

     // save the port instance
     this.ports[name] = port
   }
@@ -81,9 +83,9 @@
     if (destPort) {
       delete destPort.destName
       delete destPort.destId
     } else {
-      destPort = await this.hypervisor.getPort(port)
+      destPort = await this.hypervisor.getDestPort(port)
     }
     if (del) {
       delete destPort.destPort
     } else {
@@ -97,9 +99,9 @@
    * @param {Object} port
    * @return {Boolean}
    */
   isBound (port) {
-    return !this._unboundPort.has(port)
+    return !this._unboundPorts.has(port)
   }

   /**
    * queues a message on a port
@@ -128,12 +130,17 @@
    * @param {String} type
    * @param {*} data - the data to populate the initail state with
    * @returns {Promise}
    */
-  async create (type, data) {
+  create (type, data) {
     // const container = this.hypervisor._containerTypes[type]
-    let nonce = this.state['/'].nonce
+    let nonce = this.state.nonce

+    const id = {
+      nonce: nonce,
+      parent: this.id
+    }
+
     const entryPort = {
       messages: []
     }
@@ -143,20 +150,15 @@
     }

     entryPort.destPort = port

-    const id = await this.getIdHash({
-      nonce: nonce,
-      parent: this.id
-    })
+    this.hypervisor.createInstance(type, data, [entryPort], id)

-    await this.hypervisor.createInstance(id, type, data, entryPort)
-
     // incerment the nonce
     nonce = new BN(nonce)
     nonce.iaddn(1)
-    this.state['/'].nonce = nonce.toArray()
-    this._unboundPort.add(port)
+    this.state.nonce = nonce.toArray()
+    this._unboundPorts.add(port)
     return port
   }

   /**
@@ -189,15 +191,11 @@
    * @returns {Promise}
    */
   nextMessage () {
     const portName = Object.keys(this.ports).reduce(messageArbiter)
-    return this.ports[portName].message.shift()
+    return this.ports[portName].messages.shift()
   }

-  hasMessage () {
-    return Object.keys(this.ports).some(name => this.ports[name].message.length)
+  hasMessages () {
+    return Object.keys(this.ports).some(name => this.ports[name].messages.length)
   }
-
-  async getIdHash (idObj) {
-    return (await this.graph.flush(idObj))['/']
-  }
 }
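Callers should note that bind() flipped its argument order from (port, name) to (name, port), and that ports handed out by create() start life in _unboundPorts until bound. A usage sketch mirroring the updated test:

// exInterface is a container's ExoInterface instance, injected by the hypervisor
const port = await exInterface.ports.create('test') // returns an unbound port
await exInterface.ports.bind('first', port)         // name first, then port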
scheduler.js

@@ -10,9 +10,9 @@
     this.instances = new Map()
   }

   update (instance, ticks = this.oldest()) {
-    this.instance.delete(instance.id)
+    this.instances.delete(instance.id)
     const instanceArray = [...this.instances]
     binarySearchInsert(instanceArray, comparator, [instance.id, {
       ticks: ticks,
       instance: instance
@@ -20,11 +20,11 @@
     this.instances = new Map(instanceArray)
     this._checkWaits()
   }

-  done (id) {
-    this._instance.delete(id)
-    if (this._instance.size) {
+  done (instance) {
+    this.instances.delete(instance.id)
+    if (this.instances.size) {
       this._checkWaits()
     } else {
       // clear any remanding waits
       this._waits.forEach(wait => {
@@ -34,18 +34,23 @@
   }

   wait (ticks) {
-    return new Promise((resolve, reject) => {
-      binarySearchInsert(this._waits, comparator, {
-        ticks: ticks,
-        resolve: resolve
+    if (ticks <= this.oldest()) {
+      return
+    } else {
+      return new Promise((resolve, reject) => {
+        binarySearchInsert(this._waits, comparator, {
+          ticks: ticks,
+          resolve: resolve
+        })
       })
-    })
+    }
   }

   oldest () {
-    return [...this.instances][0].ticks
+    const oldest = [...this.instances][0]
+    return oldest ? oldest[1].ticks : 0
   }

   _checkWaits () {
     const oldest = this.oldest()
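This fixes two related scheduler bugs: oldest() no longer throws on an empty instance map, and wait() short-circuits when the requested tick count is not ahead of the oldest instance. A small sketch of the resulting behaviour, assuming scheduler.js exports the class directly as index.js requires it:

const Scheduler = require('./scheduler.js')

const scheduler = new Scheduler()
// with no instances, [...this.instances][0] is undefined, so oldest() returns 0
console.log(scheduler.oldest()) // => 0
// a wait at or below the oldest tick count returns undefined synchronously,
// so awaiting it does not park the caller
console.log(scheduler.wait(0))  // => undefined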
tests/index.js

@@ -1,61 +1,52 @@
 const tape = require('tape')
 const IPFS = require('ipfs')
-const levelup = require('levelup')
-const LevelPromise = require('level-promise')
-const memdown = require('memdown')
 const Hypervisor = require('../')

-// set up the db
-const db = levelup('/some/location', {
-  db: memdown
-})
-LevelPromise(db)
-
 // start ipfs
 const node = new IPFS({
   start: false
 })

 class BaseContainer {
-  constructor (kernel) {
-    this.kernel = kernel
+  constructor (exInterface) {
+    this.exInterface = exInterface
   }
-
-  static createState (code) {
-    return {
-      nonce: [0],
-      ports: {}
-    }
-  }
 }

 node.on('ready', () => {
-  tape.only('basic', async t => {
+  tape('basic', async t => {
     t.plan(2)
     let message
     const expectedState = {
-      '/': 'zdpuAntkdU7yBJojcBT5Q9wBhrK56NmLnwpHPKaEGMFnAXpv7'
+      '/': 'zdpuAyGKaZ3nbBQdgESbEgVYr81TcAFB6LE2MQQPWLZaYxuF3'
     }

     class testVMContainer extends BaseContainer {
+      async initailize (message) {
+        const port = message.ports[0]
+        if (port) {
+          await this.exInterface.ports.bind('root', port)
+        }
+      }
       run (m) {
         t.true(m === message, 'should recive a message')
       }
     }

-    const hypervisor = new Hypervisor(node.dag, db)
+    const hypervisor = new Hypervisor(node.dag)
     hypervisor.registerContainer('test', testVMContainer)

     const rootContainer = await hypervisor.createInstance('test')
-    const port = rootContainer.ports.create('test')
+    const port = await rootContainer.ports.create('test')
     message = rootContainer.createMessage()
-    rootContainer.ports.bind(port, 'first')
-
+    await rootContainer.ports.bind('first', port)
     await rootContainer.send(port, message)

-    const stateRoot = await hypervisor.createStateRoot(rootContainer, Infinity)
-    // t.deepEquals(stateRoot, expectedState, 'expected root!')
+    const stateRoot = await hypervisor.createStateRoot(Infinity)
+    // await hypervisor.graph.tree(stateRoot, Infinity)
+    // console.log(JSON.stringify(stateRoot, null, 2))
+    t.deepEquals(stateRoot, expectedState, 'expected root!')
   })

   tape('one child contract', async t => {
     t.plan(4)
@@ -65,30 +56,39 @@
     }
     let hasResolved = false

     class testVMContainer2 extends BaseContainer {
+      async initailize (m) {
+        await this.exInterface.ports.bind('root', port)
+      }
       run (m) {
         t.true(m === message, 'should recive a message 2')
         return new Promise((resolve, reject) => {
           setTimeout(() => {
-            this.kernel.incrementTicks(1)
+            this.exInterface.incrementTicks(1)
             hasResolved = true
             resolve()
           }, 200)
         })
       }
     }

     class testVMContainer extends BaseContainer {
+      async initailize (m) {
+        const port = message.ports[0]
+        if (port) {
+          await this.exInterface.ports.bind('root', port)
+        }
+      }
       async run (m) {
         const port = this.kernel.ports.create('test2')
         this.kernel.ports.bind(port, 'child')
         await this.kernel.send(port, m)
         this.kernel.incrementTicks(1)
       }
     }

-    const hypervisor = new Hypervisor(node.dag, db)
+    const hypervisor = new Hypervisor(node.dag)
     hypervisor.registerContainer('test', testVMContainer)
     hypervisor.registerContainer('test2', testVMContainer2)

     let root = await hypervisor.createInstance('test')
@@ -117,9 +117,9 @@

     root.send(port, message)
   })

-  tape('ping pong', async t => {
+  tape.skip('ping pong', async t => {
     class Ping extends BaseContainer {
       async run (m) {
         let port = this.kernel.ports.get('child')
         if (!port) {
@@ -155,9 +155,9 @@

     t.end()
   })

-  tape('queing multiple messages', async t => {
+  tape.skip('queing multiple messages', async t => {
     t.plan(2)
     let runs = 0

     class Root extends BaseContainer {
@@ -210,9 +210,9 @@

     await hypervisor.graph.tree(root.state, Infinity)
   })

-  tape('traps', async t => {
+  tape.skip('traps', async t => {
     t.plan(1)
     class Root extends BaseContainer {
       async run (m) {
         const one = this.kernel.ports.create('child')
@@ -240,9 +240,9 @@
       }
     }, 'should revert the state')
   })

-  tape('message should arrive in the correct oder if sent in order', async t => {
+  tape.skip('message should arrive in the correct oder if sent in order', async t => {
     t.plan(2)

     class Root extends BaseContainer {
       async run (m) {
@@ -295,9 +295,9 @@

     root.send(port, root.createMessage())
   })

-  tape('message should arrive in the correct order, even if sent out of order', async t => {
+  tape.skip('message should arrive in the correct order, even if sent out of order', async t => {
     t.plan(2)

     class Root extends BaseContainer {
       run (m) {
@@ -348,9 +348,9 @@

     root.send(port, root.createMessage())
   })

-  tape('message should arrive in the correct order, even in a tie of ticks', async t => {
+  tape.skip('message should arrive in the correct order, even in a tie of ticks', async t => {
     t.plan(2)

     class Root extends BaseContainer {
       async run (m) {
@@ -406,9 +406,9 @@
     root.ports.bind(port, 'first')
     root.send(port, root.createMessage())
   })

-  tape('message should arrive in the correct order, even in a tie of ticks', async t => {
+  tape.skip('message should arrive in the correct order, even in a tie of ticks', async t => {
     t.plan(2)

     class Root extends BaseContainer {
       run (m) {
@@ -464,9 +464,9 @@

     root.send(port, root.createMessage())
   })

-  tape('message should arrive in the correct order, with a tie in ticks but with differnt proity', async t => {
+  tape.skip('message should arrive in the correct order, with a tie in ticks but with differnt proity', async t => {
     t.plan(2)

     class Root extends BaseContainer {
       run (m) {
@@ -523,9 +523,9 @@
     root.ports.bind(port, 'first')
     root.send(port, root.createMessage())
   })

-  tape('message should arrive in the correct order, with a tie in ticks but with differnt proity', async t => {
+  tape.skip('message should arrive in the correct order, with a tie in ticks but with differnt proity', async t => {
     t.plan(2)

     class Root extends BaseContainer {
       run (m) {
@@ -583,9 +583,9 @@
     root.ports.bind(port, 'first')
     root.send(port, root.createMessage())
   })

-  tape('should order parent messages correctly', async t => {
+  tape.skip('should order parent messages correctly', async t => {
     t.plan(1)
     class Middle extends BaseContainer {
       run (m) {
         if (!this.runs) {
@@ -629,9 +629,9 @@
     await root.send(port, root.createMessage())
     root.send(port, root.createMessage())
   })

-  tape('get container instance by path', async t => {
+  tape.skip('get container instance by path', async t => {
     t.plan(1)
     const hypervisor = new Hypervisor(node.dag)
     hypervisor.registerContainer('base', BaseContainer)

@@ -651,9 +651,9 @@
     const foundThird = await hypervisor.getInstanceByPath(root, 'first/second/third')
     t.equals(third, foundThird, 'should find by path')
   })

-  tape('checking ports', async t => {
+  tape.skip('checking ports', async t => {
     t.plan(5)
     const hypervisor = new Hypervisor(node.dag)
     hypervisor.registerContainer('base', BaseContainer)

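With EventEmitter gone and initailize() in the container lifecycle, the smallest useful container now looks roughly like the one in the basic test. A hedged sketch (EchoContainer and the 'echo' type name are illustrative, not part of this commit):

class EchoContainer {
  constructor (exInterface) {
    // the hypervisor injects the ExoInterface that wraps this container
    this.exInterface = exInterface
  }

  async initailize (message) {
    // the entry message may carry the port connecting us to our creator
    const port = message.ports[0]
    if (port) {
      await this.exInterface.ports.bind('root', port)
    }
  }

  run (message) {
    // ordinary messages arrive here once the scheduler releases them
    console.log('received', message)
  }
}

// hypervisor.registerContainer('echo', EchoContainer)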