[js/common] use TS type inference to eliminate unknown (#23012)

### Description

This change uses a TypeScript trick to infer global types in
onnxruntime-common. Thanks to the strong type system of TypeScript, we
are able to refer to types that may not be available in the context.

This helps keep onnxruntime-common from including dependencies like
"@webgpu/types", while still being able to use those types in the
declaration. See comments of `TryGetGlobalType` in `type-helper.ts`.
This commit is contained in:
Yulong Wang 2024-12-04 19:01:26 -08:00 committed by GitHub
parent f340b3cad3
commit 1c79a4c9dd
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 45 additions and 21 deletions

View file

@ -2,6 +2,7 @@
// Licensed under the MIT License.
import { env as envImpl } from './env-impl.js';
import { TryGetGlobalType } from './type-helper.js';
export declare namespace Env {
export type WasmPathPrefix = string;
@ -198,22 +199,16 @@ export declare namespace Env {
* value will be the GPU adapter that was created by the underlying WebGPU backend.
*
* When used with TypeScript, the type of this property is `GPUAdapter` defined in "@webgpu/types".
* Use `const adapter = env.webgpu.adapter as GPUAdapter;` in TypeScript to access this property with correct type.
*
* see comments on {@link Tensor.GpuBufferType}
*/
adapter: unknown;
adapter: TryGetGlobalType<'GPUAdapter'>;
/**
* Get the device for WebGPU.
*
* This property is only available after the first WebGPU inference session is created.
*
* When used with TypeScript, the type of this property is `GPUDevice` defined in "@webgpu/types".
* Use `const device = env.webgpu.device as GPUDevice;` in TypeScript to access this property with correct type.
*
* see comments on {@link Tensor.GpuBufferType} for more details about why not use types defined in "@webgpu/types".
*/
readonly device: unknown;
readonly device: TryGetGlobalType<'GPUDevice'>;
/**
* Set or get whether validate input content.
*

View file

@ -4,6 +4,7 @@
import { InferenceSession as InferenceSessionImpl } from './inference-session-impl.js';
import { OnnxModelOptions } from './onnx-model.js';
import { OnnxValue, OnnxValueDataLocation } from './onnx-value.js';
import { TryGetGlobalType } from './type-helper.js';
/* eslint-disable @typescript-eslint/no-redeclare */
@ -282,7 +283,7 @@ export declare namespace InferenceSession {
extends WebNNExecutionProviderName,
Omit<WebNNContextOptions, 'deviceType'>,
Required<Pick<WebNNContextOptions, 'deviceType'>> {
context: unknown /* MLContext */;
context: TryGetGlobalType<'MLContext'>;
}
/**
@ -291,8 +292,8 @@ export declare namespace InferenceSession {
* @see https://www.w3.org/TR/webnn/#dom-ml-createcontext-gpudevice
*/
export interface WebNNOptionsWebGpu extends WebNNExecutionProviderName {
context: unknown /* MLContext */;
gpuDevice: unknown /* GPUDevice */;
context: TryGetGlobalType<'MLContext'>;
gpuDevice: TryGetGlobalType<'GPUDevice'>;
}
/**

View file

@ -4,6 +4,7 @@
import { TensorFactory } from './tensor-factory.js';
import { Tensor as TensorImpl } from './tensor-impl.js';
import { TypedTensorUtils } from './tensor-utils.js';
import { TryGetGlobalType } from './type-helper.js';
/* eslint-disable @typescript-eslint/no-redeclare */
@ -131,24 +132,19 @@ export declare namespace Tensor {
*/
export type TextureDataTypes = 'float32';
type GpuBufferTypeFallback = { size: number; mapState: 'unmapped' | 'pending' | 'mapped' };
/**
* type alias for WebGPU buffer
*
* The reason why we don't use type "GPUBuffer" defined in webgpu.d.ts from @webgpu/types is because "@webgpu/types"
* requires "@types/dom-webcodecs" as a peer dependency when using TypeScript < v5.1, and its version needs to be chosen
* carefully according to the TypeScript version being used. This means so far there is no way to keep every
* TypeScript version happy. It turns out that we could easily break users on some TypeScript versions.
*
* for more info see https://github.com/gpuweb/types/issues/127
*/
export type GpuBufferType = { size: number; mapState: 'unmapped' | 'pending' | 'mapped' };
export type GpuBufferType = TryGetGlobalType<'GPUBuffer', GpuBufferTypeFallback>;
type MLTensorTypeFallback = { destroy(): void };
/**
* type alias for WebNN MLTensor
*
* The specification for WebNN's MLTensor is currently in flux.
*/
export type MLTensorType = unknown;
export type MLTensorType = TryGetGlobalType<'MLTensor', MLTensorTypeFallback>;
/**
* supported data types for constructing a tensor from a WebGPU buffer

View file

@ -0,0 +1,31 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
/**
* A helper type to get certain types if they are declared in global scope.
*
* For example, if you installed "@webgpu/types" as a dev dependency, then `TryGetGlobalType<'GPUDevice'>` will
* be type `GPUDevice`, otherwise it will be type `unknown`.
*
*
* We don't want to introduce "@webgpu/types" as a dependency of this package because:
*
* (1) For JavaScript users, it's not needed. For TypeScript users, they can install it as dev dependency themselves.
*
* (2) "@webgpu/types" requires "@types/dom-webcodecs" as a peer dependency when using TypeScript < v5.1, and its
* version needs to be chosen carefully according to the TypeScript version being used. This means so far there is no
* way to keep every TypeScript version happy. It turns out that we could easily break users on some TypeScript versions.
*
* for more info see https://github.com/gpuweb/types/issues/127
*
* Update (2024-08-07): The reason (2) may be no longer valid. Most people should be using TypeScript >= 5.1 by now.
* However, we are still not sure whether introducing "@webgpu/types" as direct dependency is a good idea. We find this
* type helper is useful for TypeScript users.
*
* @ignore
*/
// Look up `Name` on `globalThis`: if a matching constructor-like declaration with a
// `prototype` member exists in the global scope (e.g. supplied by "@webgpu/types"),
// infer and return its instance type `T`; otherwise resolve to `Fallback`
// (defaults to `unknown`). This is type-level only — nothing is evaluated at runtime.
export type TryGetGlobalType<Name extends string, Fallback = unknown> = typeof globalThis extends {
  // mapped key ties the lookup to the literal string `Name`
  [k in Name]: { prototype: infer T };
}
  ? T
  : Fallback;

View file

@ -1,6 +1,7 @@
{
"entryPoints": ["lib/index.ts"],
"excludeInternal": true,
"intentionallyNotExported": ["TryGetGlobalType"],
"name": "ONNX Runtime JavaScript API",
"readme": "none",
"cleanOutputDir": true

View file

@ -487,7 +487,7 @@ export const prepareInputOutputTensor = (
}
if (location === 'gpu-buffer') {
const gpuBuffer = tensor[2].gpuBuffer as GPUBuffer;
const gpuBuffer = tensor[2].gpuBuffer;
dataByteLength = calculateTensorSizeInBytes(tensorDataTypeStringToEnum(dataType), dims)!;
const registerBuffer = wasm.jsepRegisterBuffer;