Commit a977e58eb3a89178bf97c0a5b20a7172be8c9b5f
added docs
wanderer committed on 5/20/2017, 12:59:17 PMParent: c5cdc89599c7a0c7c34af77912e1b919bf4cbf26
Files changed
index.js | changed |
portManager.js | changed |
exoInterface.js | added |
kernel.js | deleted |
index.js | ||
---|---|---|
@@ -1,39 +1,62 @@ | ||
1 | 1 | const Graph = require('ipld-graph-builder') |
2 | -const Kernel = require('./kernel.js') | |
2 | +const ExoInterface = require('./exoInterface.js') | |
3 | 3 | |
4 | 4 | module.exports = class Hypervisor { |
5 | + /** | |
6 | + * The Hypervisor manages the container instances by instantiating them and | |
7 | + * destroying them when possible. It also facilitates locating Containers | 
8 | + * @param {Graph} dag an instance of [ipfs.dag](https://github.com/ipfs/interface-ipfs-core/tree/master/API/dag#dag-api) | |
9 | + */ | |
5 | 10 | constructor (dag) { |
6 | 11 | this.graph = new Graph(dag) |
7 | 12 | this._runningContainers = new Map() |
8 | 13 | this._containerTypes = {} |
9 | 14 | } |
10 | 15 | |
11 | 16 | /** |
12 | - * get a contrainer instance given the connecting port | |
17 | + * get a container instance given its entry port and its mounting port | 
18 | + * @param {Object} port the entry port for the container | |
19 | + * @param {Object} parentPort the entry port of the parent container | |
13 | 20 | */ |
14 | - async getInstance (port, parentPort) { | |
15 | - let kernel = this._runningContainers.get(port) | |
16 | - if (!kernel) { | |
17 | - kernel = await this.createInstance(port.type, port.link, port, parentPort) | |
18 | - kernel.on('idle', () => { | |
21 | + async getOrCreateInstance (port, parentPort) { | |
22 | + let instance = this._runningContainers.get(port) | |
23 | + // if there is no container running, create one | 
24 | + if (!instance) { | |
25 | + instance = await this.createInstance(port.type, port.link, port, parentPort) | |
26 | + instance.on('idle', () => { | |
27 | + // once the container is done shut it down | |
19 | 28 | this._runningContainers.delete(port) |
20 | 29 | }) |
21 | 30 | } |
22 | - return kernel | |
31 | + return instance | |
23 | 32 | } |
24 | 33 | |
25 | - // given a port, wait untill its source contract has reached the threshold | |
26 | - // tick count | |
34 | + /** | |
35 | + * given a port, wait until its source contract has reached the threshold | 
36 | + * tick count | |
37 | + * @param {Object} port the port to wait on | |
38 | + * @param {Number} threshold the number of ticks to wait before resolving | |
39 | + * @param {Object} fromPort the entryPort of the container requesting the | |
40 | + * wait. Used internally so that waits don't become cyclic | |
41 | + */ | |
27 | 42 | async wait (port, threshold, fromPort) { |
28 | - let kernel = this._runningContainers.get(port) | |
29 | - if (kernel) { | |
30 | - return kernel.wait(threshold, fromPort) | |
43 | + let instance = this._runningContainers.get(port) | |
44 | + if (instance) { | |
45 | + return instance.wait(threshold, fromPort) | |
31 | 46 | } else { |
32 | 47 | return threshold |
33 | 48 | } |
34 | 49 | } |
35 | 50 | |
51 | + /** | |
52 | + * creates an instance given the container type, starting state, entry port | |
53 | + * and the parentPort | |
54 | + * @param {String} type the type of VM to load | 
55 | + * @param {Object} state the starting state of the VM | 
56 | + * @param {Object} entryPort the entry port | 
57 | + * @param {Object} parentPort the parent port | 
58 | + */ | |
36 | 59 | async createInstance (type, state, entryPort = null, parentPort) { |
37 | 60 | const Container = this._containerTypes[type] |
38 | 61 | |
39 | 62 | if (!state) { |
@@ -42,27 +65,38 @@ | ||
42 | 65 | } |
43 | 66 | } |
44 | 67 | |
45 | 68 | // create a new kernel instance |
46 | - const kernel = new Kernel({ | |
69 | + const exoInterface = new ExoInterface({ | |
47 | 70 | entryPort: entryPort, |
48 | 71 | parentPort: parentPort, |
49 | 72 | hypervisor: this, |
50 | 73 | state: state, |
51 | 74 | Container: Container |
52 | 75 | }) |
53 | 76 | |
54 | 77 | // save the newly created instance |
55 | - this._runningContainers.set(entryPort, kernel) | |
56 | - await kernel.start() | |
57 | - return kernel | |
78 | + this._runningContainers.set(entryPort, exoInterface) | |
79 | + await exoInterface.start() | |
80 | + return exoInterface | |
58 | 81 | } |
59 | 82 | |
83 | + /** | |
84 | + * creates a state root starting from a given container and a given number of | |
85 | + * ticks | |
86 | + * @param {Container} container an container instance | |
87 | + * @param {Number} ticks the number of ticks at which to create the state root | |
88 | + */ | |
60 | 89 | async createStateRoot (container, ticks) { |
61 | 90 | await container.wait(ticks) |
62 | 91 | return this.graph.flush(container.state) |
63 | 92 | } |
64 | 93 | |
94 | + /** | |
95 | + * registers a container with the hypervisor | 
96 | + * @param {String} type the name of the type | 
97 | + * @param {Class} vm a Class for instantiating the container | 
98 | + */ | |
65 | 99 | registerContainer (type, vm) { |
66 | 100 | this._containerTypes[type] = vm |
67 | 101 | } |
68 | 102 | } |
portManager.js | ||
---|---|---|
@@ -40,8 +40,9 @@ | ||
40 | 40 | // skip the root, since it doesn't have a parent |
41 | 41 | if (this.parentPort !== undefined) { |
42 | 42 | this._bindRef(this.parentPort, ENTRY) |
43 | 43 | } |
44 | + | |
44 | 45 | // map ports to their ids |
45 | 46 | this.ports = await this.hypervisor.graph.get(this.state, 'ports') |
46 | 47 | Object.keys(this.ports).map(name => { |
47 | 48 | const port = this.ports[name] |
@@ -78,9 +79,9 @@ | ||
78 | 79 | return this._portMap.has(port) |
79 | 80 | } |
80 | 81 | |
81 | 82 | create (type) { |
82 | - const VM = this.hypervisor._containerTypes[type] | |
83 | + const Container = this.hypervisor._containerTypes[type] | |
83 | 84 | const parentId = this.entryPort ? this.entryPort.id : null |
84 | 85 | let nonce = this.state['/'].nonce |
85 | 86 | |
86 | 87 | const portRef = { |
@@ -92,9 +93,9 @@ | ||
92 | 93 | } |
93 | 94 | }, |
94 | 95 | 'type': type, |
95 | 96 | 'link': { |
96 | - '/': VM.createState() | |
97 | + '/': Container.createState() | |
97 | 98 | } |
98 | 99 | } |
99 | 100 | |
100 | 101 | // increment the nonce |
@@ -120,9 +121,9 @@ | ||
120 | 121 | } |
121 | 122 | |
122 | 123 | async getNextMessage () { |
123 | 124 | if (this._portMap.size) { |
124 | - await this.wait(this.kernel.ticks, this.entryPort) | |
125 | + await this.wait(this.exoInterface.ticks, this.entryPort) | |
125 | 126 | const portMap = [...this._portMap].reduce(messageArbiter) |
126 | 127 | return portMap[1].shift() |
127 | 128 | } |
128 | 129 | } |
exoInterface.js | ||
---|---|---|
@@ -1,0 +1,145 @@ | ||
1 | +const clearObject = require('object-clear') | |
2 | +const clone = require('clone') | |
3 | +const EventEmitter = require('events') | |
4 | +const PortManager = require('./portManager.js') | |
5 | + | |
6 | +module.exports = class ExoInterface extends EventEmitter { | |
7 | + /** | |
8 | + * the ExoInterface manages the various message passing functions and provides | 
9 | + * an interface for the containers to use | |
10 | + * @param {Object} opts | |
11 | + * @param {Object} opts.state | |
12 | + * @param {Object} opts.entryPort | |
13 | + * @param {Object} opts.parentPort | |
14 | + * @param {Object} opts.hypervisor | |
15 | + * @param {Object} opts.Container | |
16 | + */ | |
17 | + constructor (opts) { | |
18 | + super() | |
19 | + this.state = opts.state | |
20 | + this.entryPort = opts.entryPort | |
21 | + this.hypervisor = opts.hypervisor | |
22 | + | |
23 | + this.containerState = 'idle' | |
24 | + this.ticks = 0 | |
25 | + | |
26 | + // create the port manager | |
27 | + this.ports = new PortManager(Object.assign({ | |
28 | + exoInterface: this | |
29 | + }, opts)) | |
30 | + | |
31 | + this._waitingMap = new Map() | |
32 | + this.container = new opts.Container(this) | |
33 | + | |
34 | + // once we get an result we run the next message | |
35 | + this.on('result', this._runNextMessage) | |
36 | + | |
37 | + // on idle clear all the 'waits' | 
38 | + this.on('idle', () => { | |
39 | + for (const [, waiter] of this._waitingMap) { | |
40 | + waiter.resolve(this.ticks) | |
41 | + } | |
42 | + }) | |
43 | + } | |
44 | + | |
45 | + start () { | |
46 | + return this.ports.start() | |
47 | + } | |
48 | + | |
49 | + queue (message) { | |
50 | + message._hops++ | |
51 | + this.ports.queue(message) | |
52 | + if (this.containerState !== 'running') { | |
53 | + this._updateContainerState('running') | |
54 | + this._runNextMessage() | |
55 | + } | |
56 | + } | |
57 | + | |
58 | + _updateContainerState (containerState, message) { | |
59 | + this.containerState = containerState | |
60 | + this.emit(containerState, message) | |
61 | + } | |
62 | + | |
63 | + async _runNextMessage () { | |
64 | + const message = await this.ports.getNextMessage() | |
65 | + if (message) { | |
66 | + // run the next message | |
67 | + this.run(message) | |
68 | + } else { | |
69 | + // if no more messages then shut down | |
70 | + this._updateContainerState('idle') | |
71 | + } | |
72 | + } | |
73 | + | |
74 | + /** | |
75 | + * run the kernel's code with a given environment | 
76 | + * The Kernel stores all of its state in the Environment. The Interface is used | 
77 | + * by the VM to retrieve information from the Environment. | 
78 | + */ | |
79 | + async run (message) { | |
80 | + const oldState = clone(this.state, false, 3) | |
81 | + let result | |
82 | + try { | |
83 | + result = await this.container.run(message) || {} | |
84 | + } catch (e) { | |
85 | + // revert the state | |
86 | + clearObject(this.state) | |
87 | + Object.assign(this.state, oldState) | |
88 | + | |
89 | + result = { | |
90 | + exception: true, | |
91 | + exceptionError: e | |
92 | + } | |
93 | + } | |
94 | + | |
95 | + this.emit('result', result) | |
96 | + return result | |
97 | + } | |
98 | + | |
99 | + // returns a promise that resolves once the kernel hits the threshold tick | 
100 | + // count | |
101 | + wait (threshold, fromPort) { | |
102 | + if (threshold <= this.ticks) { | |
103 | + return this.ticks | |
104 | + } else if (this.containerState === 'idle') { | |
105 | + return this.ports.wait(threshold, fromPort) | |
106 | + } else { | |
107 | + return new Promise((resolve, reject) => { | |
108 | + this._waitingMap.set(fromPort, { | |
109 | + threshold: threshold, | |
110 | + resolve: resolve, | |
111 | + from: fromPort | |
112 | + }) | |
113 | + }) | |
114 | + } | |
115 | + } | |
116 | + | |
117 | + incrementTicks (count) { | |
118 | + this.ticks += count | |
119 | + for (const [fromPort, waiter] of this._waitingMap) { | |
120 | + if (waiter.threshold < this.ticks) { | |
121 | + this._waitingMap.delete(fromPort) | |
122 | + waiter.resolve(this.ticks) | |
123 | + } | |
124 | + } | |
125 | + } | |
126 | + | |
127 | + async send (portRef, message) { | |
128 | + if (!this.ports.isValidPort(portRef)) { | |
129 | + throw new Error('invalid port referance') | |
130 | + } | |
131 | + | |
132 | + // set the port that the message came from | |
133 | + message._fromPort = this.entryPort | |
134 | + message._fromPortTicks = this.ticks | |
135 | + | |
136 | + const instance = await this.hypervisor.getOrCreateInstance(portRef, this.entryPort) | |
137 | + instance.queue(message) | |
138 | + | |
139 | + const waiter = this._waitingMap.get(portRef) | |
140 | + if (waiter) { | |
141 | + waiter.resolve(this.ticks) | |
142 | + this._waitingMap.delete(portRef) | |
143 | + } | |
144 | + } | |
145 | +} |
kernel.js | ||
---|---|---|
@@ -1,135 +1,0 @@ | ||
1 | -const clearObject = require('object-clear') | |
2 | -const clone = require('clone') | |
3 | -const EventEmitter = require('events') | |
4 | -const PortManager = require('./portManager.js') | |
5 | - | |
6 | -module.exports = class Kernel extends EventEmitter { | |
7 | - constructor (opts) { | |
8 | - super() | |
9 | - this.state = opts.state | |
10 | - this.entryPort = opts.entryPort | |
11 | - this.hypervisor = opts.hypervisor | |
12 | - | |
13 | - this.vmState = 'idle' | |
14 | - this.ticks = 0 | |
15 | - | |
16 | - // create the port manager | |
17 | - this.ports = new PortManager({ | |
18 | - kernel: this, | |
19 | - hypervisor: opts.hypervisor, | |
20 | - state: opts.state, | |
21 | - entryPort: opts.entryPort, | |
22 | - parentPort: opts.parentPort | |
23 | - }) | |
24 | - | |
25 | - this.vm = new opts.Container(this) | |
26 | - this._waitingMap = new Map() | |
27 | - | |
28 | - this.on('result', this._runNextMessage) | |
29 | - this.on('idle', () => { | |
30 | - for (const [, waiter] of this._waitingMap) { | |
31 | - waiter.resolve(this.ticks) | |
32 | - } | |
33 | - }) | |
34 | - } | |
35 | - | |
36 | - start () { | |
37 | - return this.ports.start() | |
38 | - } | |
39 | - | |
40 | - queue (message) { | |
41 | - message._hops++ | |
42 | - this.ports.queue(message) | |
43 | - if (this.vmState !== 'running') { | |
44 | - this._updateVmState('running') | |
45 | - this._runNextMessage() | |
46 | - } | |
47 | - } | |
48 | - | |
49 | - _updateVmState (vmState, message) { | |
50 | - this.vmState = vmState | |
51 | - this.emit(vmState, message) | |
52 | - } | |
53 | - | |
54 | - async _runNextMessage () { | |
55 | - const message = await this.ports.getNextMessage() | |
56 | - if (message) { | |
57 | - // run the next message | |
58 | - this.run(message) | |
59 | - } else { | |
60 | - // if no more messages then shut down | |
61 | - this._updateVmState('idle') | |
62 | - } | |
63 | - } | |
64 | - | |
65 | - /** | |
66 | - * run the kernels code with a given enviroment | |
67 | - * The Kernel Stores all of its state in the Environment. The Interface is used | |
68 | - * to by the VM to retrive infromation from the Environment. | |
69 | - */ | |
70 | - async run (message) { | |
71 | - const oldState = clone(this.state, false, 3) | |
72 | - let result | |
73 | - try { | |
74 | - result = await this.vm.run(message) || {} | |
75 | - } catch (e) { | |
76 | - // revert the state | |
77 | - clearObject(this.state) | |
78 | - Object.assign(this.state, oldState) | |
79 | - | |
80 | - result = { | |
81 | - exception: true, | |
82 | - exceptionError: e | |
83 | - } | |
84 | - } | |
85 | - | |
86 | - this.emit('result', result) | |
87 | - return result | |
88 | - } | |
89 | - | |
90 | - // returns a promise that resolves once the kernel hits the threshould tick | |
91 | - // count | |
92 | - wait (threshold, fromPort) { | |
93 | - if (threshold <= this.ticks) { | |
94 | - return this.ticks | |
95 | - } else if (this.vmState === 'idle') { | |
96 | - return this.ports.wait(threshold, fromPort) | |
97 | - } else { | |
98 | - return new Promise((resolve, reject) => { | |
99 | - this._waitingMap.set(fromPort, { | |
100 | - threshold: threshold, | |
101 | - resolve: resolve, | |
102 | - from: fromPort | |
103 | - }) | |
104 | - }) | |
105 | - } | |
106 | - } | |
107 | - | |
108 | - incrementTicks (count) { | |
109 | - this.ticks += count | |
110 | - for (const [fromPort, waiter] of this._waitingMap) { | |
111 | - if (waiter.threshold < this.ticks) { | |
112 | - this._waitingMap.delete(fromPort) | |
113 | - waiter.resolve(this.ticks) | |
114 | - } | |
115 | - } | |
116 | - } | |
117 | - | |
118 | - async send (portRef, message) { | |
119 | - if (!this.ports.isValidPort(portRef)) { | |
120 | - throw new Error('invalid port referance') | |
121 | - } | |
122 | - | |
123 | - message._fromPort = this.entryPort | |
124 | - message._fromPortTicks = this.ticks | |
125 | - | |
126 | - const vm = await this.hypervisor.getInstance(portRef, this.entryPort) | |
127 | - vm.queue(message) | |
128 | - | |
129 | - const waiter = this._waitingMap.get(portRef) | |
130 | - if (waiter) { | |
131 | - waiter.resolve(this.ticks) | |
132 | - this._waitingMap.delete(portRef) | |
133 | - } | |
134 | - } | |
135 | -} |
Built with git-ssb-web