2021-04-16 08:33:10 +00:00
|
|
|
// Copyright (c) Microsoft Corporation. All rights reserved.
|
|
|
|
|
// Licensed under the MIT License.
|
|
|
|
|
|
|
|
|
|
export * from 'onnxruntime-common';
|
2023-08-25 23:57:06 +00:00
|
|
|
export {listSupportedBackends} from './backend';
|
2023-06-09 23:18:53 +00:00
|
|
|
import {registerBackend, env} from 'onnxruntime-common';
|
|
|
|
|
import {version} from './version';
|
2023-08-25 23:57:06 +00:00
|
|
|
import {onnxruntimeBackend, listSupportedBackends} from './backend';
|
2021-04-16 08:33:10 +00:00
|
|
|
|
2023-08-25 23:57:06 +00:00
|
|
|
// Register the native onnxruntime backend under every execution-provider
// name supported by this build, all at the same priority (100).
for (const { name } of listSupportedBackends()) {
  registerBackend(name, onnxruntimeBackend, 100);
}
|
2023-06-09 23:18:53 +00:00
|
|
|
|
[js/api] introducing IO binding for tensor (#16452)
[//]: # (## Work In Progress. Feedback is welcome!)
### Description
This PR adds a few properties, methods and factories to Tensor type to
support IO-binding feature. This will allow user to create tensor from
GPU/CPU bound data without a force transferring of data between CPU and
GPU.
This change is a way to resolve #15312
### Change Summary
1. Add properties to `Tensor` type:
a. `location`: indicates where the data resides. Valid values are
`cpu`, `cpu-pinned`, `texture`, `gpu-buffer`.
b. `texture`: sits alongside `data`; a readonly property of `WebGLTexture`
type, available only when `location === 'texture'`.
c. `gpuBuffer`: sits alongside `data`; a readonly property of `GPUBuffer`
type, available only when `location === 'gpu-buffer'`.
2. Add methods to `Tensor` type (usually dealing with inference
outputs):
- async function `getData()` allows user to download data from GPU to
CPU manually.
- function `dispose()` allows user to release GPU resources manually.
3. Add factories for creating `Tensor` instances:
a. `fromTexture()` to create a tensor backed by a WebGL texture
b. `fromGpuBuffer()` to create a tensor backed by a WebGPU `GPUBuffer`
c. `fromPinnedBuffer()` to create a tensor using a CPU pinned buffer
### Examples:
create tensors from texture and pass to inference session as inputs
```js
// when create session, specify we prefer 'image_output:0' to be stored on GPU as texture
const session = await InferenceSession.create('./my_model.onnx', {
executionProviders: [ 'webgl' ],
preferredOutputLocation: { 'image_output:0': 'texture' }
});
...
const myImageTexture = getTexture(); // user's function to get a texture
const myFeeds = { input0: Tensor.fromTexture(myImageTexture, { width: 224, height: 224 }) }; // shape [1, 224, 224, 4], RGBA format.
const results = await session.run(myFeeds);
const myOutputTexture = results['image_output:0'].texture;
```
2023-08-29 19:58:26 +00:00
|
|
|
// Publish this package's version as `env.versions.node` (enumerable so it
// shows up in inspection; non-writable by default, so callers cannot clobber it).
Object.defineProperty(env.versions, 'node', {enumerable: true, value: version});
|