const ort = require('onnxruntime');

// use an async context to call onnxruntime functions.
async function main() {
    try {
        // create a new session and load the specific model.
        //
        // the model in this example contains a single MatMul node
        // it has 2 inputs: 'a' (float32, 3x4) and 'b' (float32, 4x3)
        // it has 1 output: 'c' (float32, 3x3)
        const session = await ort.InferenceSession.create('./model.onnx');

        // prepare inputs. a tensor needs its corresponding TypedArray as data
        const dataA = Float32Array.from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
        const dataB = Float32Array.from([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]);
        const tensorA = new ort.Tensor('float32', dataA, [3, 4]);
        const tensorB = new ort.Tensor('float32', dataB, [4, 3]);

        // prepare feeds. use model input names as keys.
        const feeds = { a: tensorA, b: tensorB };

        // feed inputs and run
        const results = await session.run(feeds);

        // read from results
        const dataC = results.c.data;
        console.log(`data of result tensor 'c': ${dataC}`);

    } catch (e) {
        console.error(`failed to run inference on the ONNX model: ${e}.`);
    }
}

main();
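
// For reference, the expected output of the 3x4 by 4x3 MatMul above can be
// computed directly in plain JavaScript. This is a standalone sketch with no
// ONNX Runtime dependency; it assumes the same row-major input data as the
// sample and is purely illustrative of what results.c.data should contain.
function matmulReference(a, b, m, k, n) {
    // a is m x k, b is k x n, both stored row-major; the result is m x n.
    const c = new Float32Array(m * n);
    for (let i = 0; i < m; i++) {
        for (let j = 0; j < n; j++) {
            let sum = 0;
            for (let p = 0; p < k; p++) {
                sum += a[i * k + p] * b[p * n + j];
            }
            c[i * n + j] = sum;
        }
    }
    return c;
}

const refA = Float32Array.from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
const refB = Float32Array.from([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]);

// expected tensor 'c': 700, 800, 900, 1580, 1840, 2100, 2460, 2880, 3300
console.log(matmulReference(refA, refB, 3, 4, 3));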