Replace all numpy.bool with the Python builtin bool (#14014)

`numpy.bool` has been removed as of numpy 1.24.0.

It was previously an alias for Python's builtin `bool`.

Fixes https://github.com/huggingface/optimum/issues/610

### Motivation and Context

Numpy 1.24.0 breaks, for example, the IO binding helpers.
This commit is contained in:
fxmarty 2022-12-23 00:27:23 +01:00 committed by GitHub
parent 1b58331fb3
commit 4d2dc8bbbd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 8 additions and 8 deletions

View file

@@ -32,7 +32,7 @@ class TypeHelper:
"tensor(int32)": numpy.intc,
"tensor(float)": numpy.float32,
"tensor(float16)": numpy.float16,
"tensor(bool)": numpy.bool,
"tensor(bool)": bool,
}
if ort_type not in ort_type_to_numpy_type_map:
raise ValueError(f"{ort_type} not found in map")
@@ -61,7 +61,7 @@ class TypeHelper:
numpy.int32: torch.int32,
numpy.float32: torch.float32,
numpy.float16: torch.float16,
numpy.bool: torch.bool,
bool: torch.bool,
}
if numpy_type not in numpy_type_to_torch_type_map:
raise ValueError(f"{numpy_type} not found in map")
@@ -75,7 +75,7 @@ class TypeHelper:
torch.int32: numpy.intc,
torch.float32: numpy.float32,
torch.float16: numpy.float16,
torch.bool: numpy.bool,
torch.bool: bool,
}
if torch_type not in torch_type_to_numpy_type_map:
raise ValueError(f"{torch_type} not found in map")

View file

@@ -84,7 +84,7 @@ class ONNXReferenceImplementationTest(unittest.TestCase):
x = np.random.randn(3, 4).astype(np.int64)
w = np.random.randn(10, 3).astype(np.float32)
padding_idx = np.random.randint(3, size=1).astype(np.int64)
scale = np.array([1]).astype(np.bool)
scale = np.array([1]).astype(bool)
y = torch_embedding_reference_implementation(w, x, padding_idx, scale)
expect(
node,

View file

@@ -21,7 +21,7 @@ a_bias_initializer = numpy_helper.from_array(a_bias_np_vals, "encoder.t5_stack.b
dropout_np_vals = np.asarray([0.1], dtype=np.float32).reshape(())
dropout_initializer = numpy_helper.from_array(dropout_np_vals, "ratio")
dropout_mode_np_vals = np.array([False], dtype=np.bool).reshape(())
dropout_mode_np_vals = np.array([False], dtype=bool).reshape(())
dropout_mode_initializer = numpy_helper.from_array(dropout_mode_np_vals, "mode")
b_weight_np_vals = (0.01 * np.arange(hidden_size * weight_dim_to_split, dtype=np.float32)).reshape(

View file

@@ -66,7 +66,7 @@ where_shape_initializer = numpy_helper.from_array(
dropout_np_vals = np.asarray([0.1], dtype=np.float32).reshape(())
dropout_initializer = numpy_helper.from_array(dropout_np_vals, "ratio")
dropout_mode_np_vals = np.array([False], dtype=np.bool).reshape(())
dropout_mode_np_vals = np.array([False], dtype=bool).reshape(())
dropout_mode_initializer = numpy_helper.from_array(dropout_mode_np_vals, "mode")
shape_initializer3 = numpy_helper.from_array(

View file

@@ -233,7 +233,7 @@ def dtype_torch_to_numpy(torch_dtype):
elif torch_dtype == torch.int16 or torch_dtype == torch.short:
return np.int16
elif torch_dtype == torch.bool:
return np.bool
return bool
else:
raise Exception("Torch type to numpy type mapping unavailable for: " + str(torch_dtype))

View file

@@ -87,7 +87,7 @@ def process_trainabledropout(model):
)
index += 1
# add training_mode output
mode_scalar = np.asarray([True]).astype(np.bool).reshape(())
mode_scalar = np.asarray([True]).astype(bool).reshape(())
mode_value = numpy_helper.from_array(mode_scalar, "training_mode")
training_mode_node = add_const(
model, "dropout_training_mode_node_%d" % index, "dropout_training_mode_%d" % index, t_value=mode_value