Set black's target version (#11370)

Description: Set black's target version to be py37 - py310

Motivation and Context

Black by default targets its format for py3.10. Since our project supports Python 3.7, we need to set the target version to all the Python versions we support.

Re-ran black. 13 files reformatted.
This commit is contained in:
Justin Chu 2022-04-27 14:52:19 -07:00 committed by GitHub
parent f6526af23d
commit d64769c38e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
14 changed files with 26 additions and 25 deletions

View file

@ -79,7 +79,7 @@ class QLinearActivation(QuantOperatorBase):
qlinear_activation_inputs,
[qlinear_activation_output],
qlinear_activation_name,
**kwargs
**kwargs,
)
# Create an entry for this quantized value

View file

@ -55,7 +55,7 @@ class QLinearBinaryOp(QuantOperatorBase):
qlinear_binary_math_inputs,
[qlinear_binary_math_output],
qlinear_binary_math_name,
**kwargs
**kwargs,
)
nodes.append(qlinear_binary_math_node)

View file

@ -57,6 +57,6 @@ class QGlobalAveragePool(QuantOperatorBase):
],
[quantized_output_value.q_name],
qnode_name,
**kwargs
**kwargs,
)
self.quantizer.new_nodes += [qnode]

View file

@ -59,7 +59,7 @@ class QLinearPool(QuantOperatorBase):
],
[qlinear_output_name],
qlinear_node_name,
**kwargs
**kwargs,
)
# add all newly created nodes

View file

@ -51,7 +51,7 @@ def expect(
inputs,
outputs,
name,
**kwargs
**kwargs,
): # type: (...) -> None
present_inputs = [x for x in node.input if (x != "")]
present_outputs = [x for x in node.output if (x != "")]

View file

@ -374,7 +374,7 @@ class MNISTWrapper:
),
batch_size=args_batch_size,
shuffle=False,
**kwargs
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
@ -384,7 +384,7 @@ class MNISTWrapper:
),
batch_size=args_test_batch_size,
shuffle=False,
**kwargs
**kwargs,
)
return train_loader, test_loader

View file

@ -52,7 +52,7 @@ def gen(model_path, use_transpose_matmul, scale_input_0, scale_input_1, scale_ou
matmul_op,
"",
matmul_domain,
**matmul_attrs
**matmul_attrs,
)
)

View file

@ -96,7 +96,7 @@ def main():
),
batch_size=args.batch_size,
shuffle=True,
**kwargs
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
@ -106,7 +106,7 @@ def main():
),
batch_size=args.test_batch_size,
shuffle=True,
**kwargs
**kwargs,
)
# set device

View file

@ -97,7 +97,7 @@ def main():
),
batch_size=args.batch_size,
shuffle=True,
**kwargs
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
@ -107,7 +107,7 @@ def main():
),
batch_size=args.test_batch_size,
shuffle=True,
**kwargs
**kwargs,
)
device = torch.device("ort")

View file

@ -361,7 +361,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_mixed_precision_with_gradient_accumulation(self):
@ -378,7 +378,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_all(self):
@ -396,7 +396,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_list_input(self):
@ -410,7 +410,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_dict_input(self):
@ -424,7 +424,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_list_and_dict_input(self):
@ -438,7 +438,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_grad_accumulation_list_input(self):
@ -452,7 +452,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_grad_accumulation_dict_input(self):
@ -466,7 +466,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)
def test_for_pretraining_full_precision_grad_accumulation_list_and_dict_input(self):
@ -480,7 +480,7 @@ class BertModelTest(unittest.TestCase):
option_fp16,
option_allreduce_post_accumulation,
option_gradient_accumulation_steps,
option_split_batch
option_split_batch,
)

View file

@ -135,7 +135,7 @@ def main():
),
batch_size=args.batch_size,
shuffle=True,
**kwargs
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
@ -145,7 +145,7 @@ def main():
),
batch_size=args.test_batch_size,
shuffle=True,
**kwargs
**kwargs,
)
comm = MPI.COMM_WORLD

View file

@ -2,6 +2,7 @@
line-length = 120
# extend-exclude needs to be a regular expression
extend-exclude = "cmake|onnxruntime/core/flatbuffers/"
target-version = ["py37", "py38", "py39", "py310"]
[tool.isort]
profile = "black"

View file

@ -165,7 +165,7 @@ def main():
full_image_name,
"--file",
args.dockerfile,
args.context
args.context,
)
if use_container_registry:

View file

@ -18,7 +18,7 @@ def run(
shell=False,
env=None,
check=True,
quiet=False
quiet=False,
):
"""Runs a subprocess.