mirror of
https://github.com/saymrwulf/onnxruntime.git
synced 2026-05-16 21:00:14 +00:00
### Description This PR rewrites the backend resolve logic to support specifying multiple EPs. #### Backend The first version of ONNX Runtime Web carried over some existing code from [ONNX.js](https://github.com/microsoft/onnxjs), which includes the "backend" concept. The original "backend" in ONNX.js is designed with the assumption that only one backend from the user's backend hint list will be used. For example, in ONNX.js, if a user specifies a backend hint list `['webgl', 'wasm']`, ONNX.js will first try to use the WebGL backend - if it loads successfully (the browser supports WebGL), then the "webgl" backend will be used and "wasm" will be ignored; otherwise, "webgl" will be ignored and ONNX.js will try to load the "wasm" backend. In short: only one backend will be used when initializing a session. #### Execution Provider Execution Provider, or EP, in ONNX Runtime is a different concept. One of the differences is that users are allowed to specify multiple EPs, and if one does not support a particular kernel, the runtime can fall back to another EP. This is a very common case when using a GPU EP in ONNX Runtime. #### Current Status: Backend vs. EP Because of the historical reasons mentioned above, the current status is quite confusing. There are **real backend**s, which are distinct implementations in code; there are **backend hint**s, which are string names used as hints; and there are **EP**s, the ONNX Runtime concept. Currently there are only 2 **backend**s in our code base: the "onnxjs backend" and the "wasm backend". The "onnxjs backend" currently only powers the backend hint "webgl", which goes into the old ONNX.js code path. All other backend hints, including "wasm", "cpu" (an alias of wasm), "webgpu" and "webnn", are powered by the "wasm backend". And because ORT Web treats "backend" as an internal concept and wants to align with ONNX Runtime, those backend hint names are becoming EP names.
The following table shows today's status: | Execution Provider Name (public) / Backend Hint (internal) | Backend | EP in ORT | | -------- | ------- | ------- | | "wasm"/"cpu" | WasmBackend | CPU EP | | "webgl" | OnnxjsBackend | \* technically not an EP | | "webgpu" | WasmBackend | JSEP | | "webnn" | WasmBackend | WebNN EP | #### Problem While the API allows specifying multiple EPs, backend resolving only allows one backend. This causes issues when a user specifies multiple EP names in session options: the backend resolve behavior and the EP registration behavior are inconsistent. Specifically, in this issue: https://github.com/microsoft/onnxruntime/issues/15796#issuecomment-1925363908: the EP list `['webgpu', 'wasm']` on a browser without WebGPU support resolves to the 'wasm' backend, but the full EP list is still passed in session options, so JSEP is still enabled, causing the runtime error. #### Solution Since we still need the WebGL backend, we cannot totally remove the backend register/resolve system. In this PR I made the following changes: - initialize every backend from the EP list, instead of doing so only for the first successful one. - for the first resolved backend, keep only the EPs using that exact same backend; remove all EPs not using this backend from session options. - for every explicitly specified EP that gets removed, show a warning message in the console.
219 lines
7.6 KiB
TypeScript
219 lines
7.6 KiB
TypeScript
// Copyright (c) Microsoft Corporation. All rights reserved.
|
|
// Licensed under the MIT License.
|
|
|
|
import {env, InferenceSession} from 'onnxruntime-common';
|
|
|
|
import {OrtWasmMessage, SerializableInternalBuffer, SerializableSessionMetadata, SerializableTensorMetadata, TensorMetadata} from './proxy-messages';
|
|
import * as core from './wasm-core-impl';
|
|
import {initializeWebAssembly} from './wasm-factory';
|
|
|
|
// Whether calls should be proxied to a dedicated worker: requires the `env.wasm.proxy` flag
// and a `document` (i.e. running on the main browser thread, not already inside a worker).
const isProxy = (): boolean => {
  const proxyRequested = !!env.wasm.proxy;
  return proxyRequested && typeof document !== 'undefined';
};
|
|
// The proxy worker instance; created on first successful initialization when proxying is enabled.
let proxyWorker: Worker|undefined;
// True while an initializeWebAssemblyAndOrtRuntime() call is in flight.
let initializing = false;
// True once the wasm module and the ORT runtime have been initialized successfully.
let initialized = false;
// True after a failed initialization; subsequent initialization attempts are rejected.
let aborted = false;

// The [resolve, reject] pair of a pending Promise.
type PromiseCallbacks<T = void> = [resolve: (result: T) => void, reject: (reason: unknown) => void];
// Callbacks of the single pending 'init-wasm' request.
let initWasmCallbacks: PromiseCallbacks;
// Pending callbacks for in-flight proxy requests, keyed by message type.
// NOTE(review): the shift()-based consumption in onProxyWorkerMessage assumes the worker
// replies to same-type messages in FIFO order — confirm against the proxy worker implementation.
const queuedCallbacks: Map<OrtWasmMessage['type'], Array<PromiseCallbacks<unknown>>> = new Map();
|
|
|
|
const enqueueCallbacks = (type: OrtWasmMessage['type'], callbacks: PromiseCallbacks<unknown>): void => {
|
|
const queue = queuedCallbacks.get(type);
|
|
if (queue) {
|
|
queue.push(callbacks);
|
|
} else {
|
|
queuedCallbacks.set(type, [callbacks]);
|
|
}
|
|
};
|
|
|
|
const ensureWorker = (): void => {
|
|
if (initializing || !initialized || aborted || !proxyWorker) {
|
|
throw new Error('worker not ready');
|
|
}
|
|
};
|
|
|
|
const onProxyWorkerMessage = (ev: MessageEvent<OrtWasmMessage>): void => {
|
|
switch (ev.data.type) {
|
|
case 'init-wasm':
|
|
initializing = false;
|
|
if (ev.data.err) {
|
|
aborted = true;
|
|
initWasmCallbacks[1](ev.data.err);
|
|
} else {
|
|
initialized = true;
|
|
initWasmCallbacks[0]();
|
|
}
|
|
break;
|
|
case 'init-ep':
|
|
case 'copy-from':
|
|
case 'create':
|
|
case 'release':
|
|
case 'run':
|
|
case 'end-profiling': {
|
|
const callbacks = queuedCallbacks.get(ev.data.type)!;
|
|
if (ev.data.err) {
|
|
callbacks.shift();
|
|
} else {
|
|
callbacks.shift();
|
|
}
|
|
break;
|
|
}
|
|
default:
|
|
}
|
|
};
|
|
|
|
// URL of the currently executing script, used later to infer where the .wasm files live.
// `undefined` outside a browser document context or when there is no current script.
const scriptSrc = typeof document === 'undefined' ? undefined : (document?.currentScript as HTMLScriptElement)?.src;
|
|
|
|
export const initializeWebAssemblyAndOrtRuntime = async(): Promise<void> => {
|
|
if (initialized) {
|
|
return;
|
|
}
|
|
if (initializing) {
|
|
throw new Error('multiple calls to \'initWasm()\' detected.');
|
|
}
|
|
if (aborted) {
|
|
throw new Error('previous call to \'initWasm()\' failed.');
|
|
}
|
|
|
|
initializing = true;
|
|
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
// overwrite wasm filepaths
|
|
if (env.wasm.wasmPaths === undefined) {
|
|
if (scriptSrc && scriptSrc.indexOf('blob:') !== 0) {
|
|
env.wasm.wasmPaths = scriptSrc.substr(0, +(scriptSrc).lastIndexOf('/') + 1);
|
|
}
|
|
}
|
|
|
|
return new Promise<void>((resolve, reject) => {
|
|
proxyWorker?.terminate();
|
|
|
|
const workerUrl = URL.createObjectURL(new Blob(
|
|
[
|
|
// This require() function is handled by esbuild plugin to load file content as string.
|
|
// eslint-disable-next-line @typescript-eslint/no-require-imports
|
|
require('./proxy-worker/main')
|
|
],
|
|
{type: 'text/javascript'}));
|
|
proxyWorker = new Worker(workerUrl, {name: 'ort-wasm-proxy-worker'});
|
|
proxyWorker.onerror = (ev: ErrorEvent) => reject(ev);
|
|
proxyWorker.onmessage = onProxyWorkerMessage;
|
|
URL.revokeObjectURL(workerUrl);
|
|
initWasmCallbacks = [resolve, reject];
|
|
const message: OrtWasmMessage = {type: 'init-wasm', in : env};
|
|
proxyWorker.postMessage(message);
|
|
});
|
|
|
|
} else {
|
|
try {
|
|
await initializeWebAssembly(env.wasm);
|
|
await core.initRuntime(env);
|
|
initialized = true;
|
|
} catch (e) {
|
|
aborted = true;
|
|
throw e;
|
|
} finally {
|
|
initializing = false;
|
|
}
|
|
}
|
|
};
|
|
|
|
export const initializeOrtEp = async(epName: string): Promise<void> => {
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
ensureWorker();
|
|
return new Promise<void>((resolve, reject) => {
|
|
enqueueCallbacks('init-ep', [resolve, reject]);
|
|
const message: OrtWasmMessage = {type: 'init-ep', in : {epName, env}};
|
|
proxyWorker!.postMessage(message);
|
|
});
|
|
} else {
|
|
await core.initEp(env, epName);
|
|
}
|
|
};
|
|
|
|
export const copyFromExternalBuffer = async(buffer: Uint8Array): Promise<SerializableInternalBuffer> => {
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
ensureWorker();
|
|
return new Promise<SerializableInternalBuffer>((resolve, reject) => {
|
|
enqueueCallbacks('copy-from', [resolve, reject]);
|
|
const message: OrtWasmMessage = {type: 'copy-from', in : {buffer}};
|
|
proxyWorker!.postMessage(message, [buffer.buffer]);
|
|
});
|
|
} else {
|
|
return core.copyFromExternalBuffer(buffer);
|
|
}
|
|
};
|
|
|
|
export const createSession =
|
|
async(model: SerializableInternalBuffer|Uint8Array, options?: InferenceSession.SessionOptions):
|
|
Promise<SerializableSessionMetadata> => {
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
// check unsupported options
|
|
if (options?.preferredOutputLocation) {
|
|
throw new Error('session option "preferredOutputLocation" is not supported for proxy.');
|
|
}
|
|
ensureWorker();
|
|
return new Promise<SerializableSessionMetadata>((resolve, reject) => {
|
|
enqueueCallbacks('create', [resolve, reject]);
|
|
const message: OrtWasmMessage = {type: 'create', in : {model, options: {...options}}};
|
|
const transferable: Transferable[] = [];
|
|
if (model instanceof Uint8Array) {
|
|
transferable.push(model.buffer);
|
|
}
|
|
proxyWorker!.postMessage(message, transferable);
|
|
});
|
|
} else {
|
|
return core.createSession(model, options);
|
|
}
|
|
};
|
|
|
|
export const releaseSession = async(sessionId: number): Promise<void> => {
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
ensureWorker();
|
|
return new Promise<void>((resolve, reject) => {
|
|
enqueueCallbacks('release', [resolve, reject]);
|
|
const message: OrtWasmMessage = {type: 'release', in : sessionId};
|
|
proxyWorker!.postMessage(message);
|
|
});
|
|
} else {
|
|
core.releaseSession(sessionId);
|
|
}
|
|
};
|
|
|
|
export const run = async(
|
|
sessionId: number, inputIndices: number[], inputs: TensorMetadata[], outputIndices: number[],
|
|
outputs: Array<TensorMetadata|null>, options: InferenceSession.RunOptions): Promise<TensorMetadata[]> => {
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
// check inputs location
|
|
if (inputs.some(t => t[3] !== 'cpu')) {
|
|
throw new Error('input tensor on GPU is not supported for proxy.');
|
|
}
|
|
// check outputs location
|
|
if (outputs.some(t => t)) {
|
|
throw new Error('pre-allocated output tensor is not supported for proxy.');
|
|
}
|
|
ensureWorker();
|
|
return new Promise<SerializableTensorMetadata[]>((resolve, reject) => {
|
|
enqueueCallbacks('run', [resolve, reject]);
|
|
const serializableInputs = inputs as SerializableTensorMetadata[]; // every input is on CPU.
|
|
const message: OrtWasmMessage =
|
|
{type: 'run', in : {sessionId, inputIndices, inputs: serializableInputs, outputIndices, options}};
|
|
proxyWorker!.postMessage(message, core.extractTransferableBuffers(serializableInputs));
|
|
});
|
|
} else {
|
|
return core.run(sessionId, inputIndices, inputs, outputIndices, outputs, options);
|
|
}
|
|
};
|
|
|
|
export const endProfiling = async(sessionId: number): Promise<void> => {
|
|
if (!BUILD_DEFS.DISABLE_WASM_PROXY && isProxy()) {
|
|
ensureWorker();
|
|
return new Promise<void>((resolve, reject) => {
|
|
enqueueCallbacks('end-profiling', [resolve, reject]);
|
|
const message: OrtWasmMessage = {type: 'end-profiling', in : sessionId};
|
|
proxyWorker!.postMessage(message);
|
|
});
|
|
} else {
|
|
core.endProfiling(sessionId);
|
|
}
|
|
};
|