mirror of
https://github.com/saymrwulf/onnxruntime.git
synced 2026-05-14 20:48:00 +00:00
### Description

This change enhances the Node.js binding with the following features:

- support WebGPU EP
- lazy initialization of `OrtEnv`
- being able to initialize ORT with the default log level setting from `ort.env.logLevel`
- session options:
  - `enableProfiling` and `profileFilePrefix`: support profiling.
  - `externalData`: explicit external data (optional in Node.js binding)
  - `optimizedModelFilePath`: allow dumping the optimized model for diagnosis purposes
  - `preferredOutputLocation`: support IO binding.

---

`Tensor.download()` is not implemented in this PR.

Build pipeline update is not included in this PR.
81 lines
2.4 KiB
TypeScript
81 lines
2.4 KiB
TypeScript
// Copyright (c) Microsoft Corporation. All rights reserved.
|
|
// Licensed under the MIT License.
|
|
|
|
import { Backend, InferenceSession, InferenceSessionHandler, SessionHandler } from 'onnxruntime-common';
|
|
|
|
import { Binding, binding, initOrt } from './binding';
|
|
|
|
class OnnxruntimeSessionHandler implements InferenceSessionHandler {
|
|
#inferenceSession: Binding.InferenceSession;
|
|
|
|
constructor(pathOrBuffer: string | Uint8Array, options: InferenceSession.SessionOptions) {
|
|
initOrt();
|
|
|
|
this.#inferenceSession = new binding.InferenceSession();
|
|
if (typeof pathOrBuffer === 'string') {
|
|
this.#inferenceSession.loadModel(pathOrBuffer, options);
|
|
} else {
|
|
this.#inferenceSession.loadModel(pathOrBuffer.buffer, pathOrBuffer.byteOffset, pathOrBuffer.byteLength, options);
|
|
}
|
|
this.inputNames = this.#inferenceSession.inputNames;
|
|
this.outputNames = this.#inferenceSession.outputNames;
|
|
}
|
|
|
|
async dispose(): Promise<void> {
|
|
this.#inferenceSession.dispose();
|
|
}
|
|
|
|
readonly inputNames: string[];
|
|
readonly outputNames: string[];
|
|
|
|
startProfiling(): void {
|
|
// startProfiling is a no-op.
|
|
//
|
|
// if sessionOptions.enableProfiling is true, profiling will be enabled when the model is loaded.
|
|
}
|
|
endProfiling(): void {
|
|
this.#inferenceSession.endProfiling();
|
|
}
|
|
|
|
async run(
|
|
feeds: SessionHandler.FeedsType,
|
|
fetches: SessionHandler.FetchesType,
|
|
options: InferenceSession.RunOptions,
|
|
): Promise<SessionHandler.ReturnType> {
|
|
return new Promise((resolve, reject) => {
|
|
setImmediate(() => {
|
|
try {
|
|
resolve(this.#inferenceSession.run(feeds, fetches, options));
|
|
} catch (e) {
|
|
// reject if any error is thrown
|
|
reject(e);
|
|
}
|
|
});
|
|
});
|
|
}
|
|
}
|
|
|
|
class OnnxruntimeBackend implements Backend {
|
|
async init(): Promise<void> {
|
|
return Promise.resolve();
|
|
}
|
|
|
|
async createInferenceSessionHandler(
|
|
pathOrBuffer: string | Uint8Array,
|
|
options?: InferenceSession.SessionOptions,
|
|
): Promise<InferenceSessionHandler> {
|
|
return new Promise((resolve, reject) => {
|
|
setImmediate(() => {
|
|
try {
|
|
resolve(new OnnxruntimeSessionHandler(pathOrBuffer, options || {}));
|
|
} catch (e) {
|
|
// reject if any error is thrown
|
|
reject(e);
|
|
}
|
|
});
|
|
});
|
|
}
|
|
}
|
|
|
|
// Singleton backend instance to be registered with onnxruntime-common.
export const onnxruntimeBackend = new OnnxruntimeBackend();
// Forwarded from the native binding — presumably enumerates the execution
// providers available in this build; verify against the binding's declaration.
export const listSupportedBackends = binding.listSupportedBackends;