diff --git a/optimum/exporters/onnx/model_configs.py b/optimum/exporters/onnx/model_configs.py
index 392c34ca8fb..7c0dd3f8219 100644
--- a/optimum/exporters/onnx/model_configs.py
+++ b/optimum/exporters/onnx/model_configs.py
@@ -27,8 +27,8 @@
     BloomDummyPastKeyValuesGenerator,
     DummyAudioInputGenerator,
     DummyCodegenDecoderTextInputGenerator,
-    DummyDecoderTextInputGenerator,
     DummyDecisionTransformerInputGenerator,
+    DummyDecoderTextInputGenerator,
     DummyEncodecInputGenerator,
     DummyFluxTransformerTextInputGenerator,
     DummyFluxTransformerVisionInputGenerator,
@@ -265,19 +265,16 @@ class ImageGPTOnnxConfig(GPT2OnnxConfig):
 
 
 class DecisionTransformerOnnxConfig(GPT2OnnxConfig):
-    DUMMY_INPUT_GENERATOR_CLASSES = (
-        DummyDecisionTransformerInputGenerator,
-    )
+    DUMMY_INPUT_GENERATOR_CLASSES = (DummyDecisionTransformerInputGenerator,)
 
     @property
     def inputs(self) -> Dict[str, Dict[int, str]]:
         return {
-            'timesteps': {0: 'batch_size', 1: 'sequence_length'},
-            'returns_to_go': {0: 'batch_size', 1: 'sequence_length'},
-            'attention_mask': {0: 'batch_size', 1: 'sequence_length'},
-            'actions': {0: 'batch_size', 1: 'sequence_length', 2: 'act_dim'},
-            'states': {0: 'batch_size', 1: 'sequence_length', 2: 'state_dim'},
+            "timesteps": {0: "batch_size", 1: "sequence_length"},
+            "returns_to_go": {0: "batch_size", 1: "sequence_length"},
+            "attention_mask": {0: "batch_size", 1: "sequence_length"},
+            "actions": {0: "batch_size", 1: "sequence_length", 2: "act_dim"},
+            "states": {0: "batch_size", 1: "sequence_length", 2: "state_dim"},
         }
diff --git a/optimum/exporters/tasks.py b/optimum/exporters/tasks.py
index 08c5badebee..a4856f936ae 100644
--- a/optimum/exporters/tasks.py
+++ b/optimum/exporters/tasks.py
@@ -217,7 +217,9 @@ class TasksManager:
         "multiple-choice": "AutoModelForMultipleChoice",
         "object-detection": "AutoModelForObjectDetection",
         "question-answering": "AutoModelForQuestionAnswering",
-        "reinforcement-learning": ("AutoModel",),  # multiple auto model families can be used for reinforcement-learning
+        "reinforcement-learning": (
+            "AutoModel",
+        ),  # multiple auto model families can be used for reinforcement-learning
         "semantic-segmentation": "AutoModelForSemanticSegmentation",
         "text-to-audio": ("AutoModelForTextToSpectrogram", "AutoModelForTextToWaveform"),
         "text-generation": "AutoModelForCausalLM",
diff --git a/optimum/utils/input_generators.py b/optimum/utils/input_generators.py
index b9fe8b22ed6..a6ce07bab32 100644
--- a/optimum/utils/input_generators.py
+++ b/optimum/utils/input_generators.py
@@ -513,11 +513,11 @@ class DummyDecisionTransformerInputGenerator(DummyTextInputGenerator):
     """
 
     SUPPORTED_INPUT_NAMES = (
-        'actions',
-        'timesteps',
-        'attention_mask',
-        'returns_to_go',
-        'states',
+        "actions",
+        "timesteps",
+        "attention_mask",
+        "returns_to_go",
+        "states",
     )
 
     def __init__(self, *args, **kwargs):
@@ -531,15 +531,15 @@ def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int
             shape = [self.batch_size, self.sequence_length, self.state_dim]
         elif input_name == "actions":
            shape = [self.batch_size, self.sequence_length, self.act_dim]
-        elif input_name == 'returns_to_go':
+        elif input_name == "returns_to_go":
             shape = [self.batch_size, self.sequence_length, 1]
         elif input_name == "attention_mask":
             shape = [self.batch_size, self.sequence_length]
-        elif input_name == 'timesteps':
+        elif input_name == "timesteps":
             shape = [self.batch_size, self.sequence_length]
             return self.random_int_tensor(shape=shape, max_value=self.max_ep_len, framework=framework, dtype=int_dtype)
 
-        return self.random_float_tensor(shape, min_value=-2., max_value=2., framework=framework, dtype=float_dtype)
+        return self.random_float_tensor(shape, min_value=-2.0, max_value=2.0, framework=framework, dtype=float_dtype)
 
 
 class DummySeq2SeqDecoderTextInputGenerator(DummyDecoderTextInputGenerator):
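
Not part of the patch: a minimal sketch of how the touched code paths are typically exercised, assuming `main_export` is importable from `optimum.exporters.onnx` as in current Optimum releases; the checkpoint id and output directory are placeholders only.

    # Hypothetical usage sketch (not included in this diff): exporting a Decision Transformer
    # checkpoint under the "reinforcement-learning" task is expected to resolve the model class
    # through the TasksManager mapping shown above and to build dummy inputs with
    # DecisionTransformerOnnxConfig / DummyDecisionTransformerInputGenerator.
    from optimum.exporters.onnx import main_export

    main_export(
        "edbeeching/decision-transformer-gym-hopper-medium",  # placeholder model id
        output="decision_transformer_onnx",                   # placeholder output directory
        task="reinforcement-learning",
    )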