Skip to content

Commit

Permalink
fix low_gpu_mem_usage=True
Browse files Browse the repository at this point in the history
Signed-off-by: Kaihui-intel <[email protected]>
  • Loading branch information
Kaihui-intel committed Jul 26, 2024
1 parent 6e1b1da commit 1f7f696
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions neural_compressor/torch/quantization/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -905,7 +905,7 @@ def __init__(
enable_minmax_tuning: bool = True,
lr: float = None,
minmax_lr: float = None,
low_gpu_mem_usage: bool = True,
low_gpu_mem_usage: bool = False,
iters: int = 200,
seqlen: int = 2048,
nsamples: int = 128,
Expand Down Expand Up @@ -938,7 +938,7 @@ def __init__(
enable_minmax_tuning (bool): Whether to enable min-max tuning (default is True).
lr (float): The learning rate (default is 0.005).
minmax_lr (float): The learning rate for min-max tuning (default is None).
low_gpu_mem_usage (bool): Whether to use low GPU memory (default is True).
low_gpu_mem_usage (bool): Whether to use low GPU memory (default is False).
iters (int): Number of iterations (default is 200).
seqlen (int): Length of the sequence (default is 2048).
nsamples (int): Number of samples (default is 128).
Expand Down

0 comments on commit 1f7f696

Please sign in to comment.