fix and change all logger using get_logger (#1064)
* fix logger

* Merge the main branch for logger

* Fix some unchanged logging to logger
yinfan98 authored Jan 31, 2024
1 parent c82ba3f commit 789dcce
Showing 21 changed files with 87 additions and 97 deletions.
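
Nearly all of the per-file changes below follow one mechanical pattern: a module-level logger built with the standard library (or with transformers.utils.logging in the modeling files) is swapped for lmdeploy's get_logger helper; falcon.py simply drops an unused logger. A minimal before/after sketch of the pattern (the log message is illustrative, and get_logger is assumed to return a named logger managed by lmdeploy's own handlers and formatting):

# Before: stdlib logger; emits nothing unless the application configures logging.
import logging
logger = logging.getLogger(__name__)

# After: lmdeploy's helper returns a logger wired into the library's logging setup.
from lmdeploy.utils import get_logger
logger = get_logger(__name__)
logger.info('logger initialized for %s', __name__)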
benchmark/profile_hf_generation.py (3 changes: 2 additions & 1 deletion)

@@ -75,8 +75,9 @@
 from transformers import AutoModelForCausalLM, GenerationConfig

 from lmdeploy.pytorch.accel import LoadNoInit
+from lmdeploy.utils import get_logger

-logger = logging.getLogger(__file__)
+logger = get_logger(__file__)
 logger.setLevel(logging.DEBUG)
 info = logger.info
 warning = logger.warning
lmdeploy/legacy/pytorch/adapters/__init__.py (6 changes: 3 additions & 3 deletions)

@@ -1,14 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.

-import logging
-
 import torch.nn as nn

+from lmdeploy.utils import get_logger
+
 from .base import BasicAdapter, BasicAdapterFast
 from .internlm import InternLMAdapter
 from .llama2 import Llama2Adapter

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 def _get_default_adapter(tokenizer):
lmdeploy/legacy/pytorch/adapters/base.py (5 changes: 3 additions & 2 deletions)

@@ -1,13 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 """Basic adapter suitable for general HuggingFace models."""

-import logging
 import re

 from transformers import (PreTrainedTokenizer, PreTrainedTokenizerBase,
                           PreTrainedTokenizerFast)

-logger = logging.getLogger(__name__)
+from lmdeploy.utils import get_logger
+
+logger = get_logger(__name__)


 class BaseAdapter:
lmdeploy/legacy/pytorch/adapters/internlm.py (5 changes: 3 additions & 2 deletions)

@@ -1,14 +1,15 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import logging
 import re

 import torch
 from transformers import (PreTrainedTokenizerFast, StoppingCriteria,
                           StoppingCriteriaList)

+from lmdeploy.utils import get_logger
+
 from .base import BaseAdapter

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 class InternLMStoppingCriteria(StoppingCriteria):
lmdeploy/legacy/pytorch/adapters/llama2.py (5 changes: 3 additions & 2 deletions)

@@ -1,12 +1,13 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import logging
 import re

 from transformers import PreTrainedTokenizerFast

+from lmdeploy.utils import get_logger
+
 from .base import BasicAdapterFast

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 B_INST, E_INST = '[INST]', '[/INST]'
 B_SYS, E_SYS = '<<SYS>>\n', '\n<</SYS>>\n\n'
lmdeploy/legacy/pytorch/chat.py (26 changes: 15 additions & 11 deletions)

@@ -54,30 +54,34 @@
 import torch
 from transformers import GenerationConfig, PreTrainedModel

+from lmdeploy.utils import get_logger
+
 from .adapters import init_adapter
 from .dist import get_local_rank, get_rank, get_world_size
 from .model import accel_model, init_model
 from .session import BasicSessionManagerWithHistory
 from .utils import BasicStreamer, TerminalIO, control

-logger = logging.getLogger(__name__)
-

 def set_logging(log_file: str, debug: bool):
     torch.set_printoptions(linewidth=120)
     level = logging.DEBUG if debug else logging.INFO
     log_file = log_file or 'chat.log'
     if r := get_rank() != 0:
         log_file = log_file + f'.{r}'
-    logging.basicConfig(level=level,
-                        format=('%(filename)s: '
-                                '%(levelname)s: '
-                                '%(funcName)s(): '
-                                '%(lineno)d:\t'
-                                '%(message)s'),
-                        filename=log_file,
-                        filemode='w')
+    format = '%(filename)s: \
+%(levelname)s: \
+%(funcName)s(): \
+%(lineno)d:\t \
+%(message)s'
+
+    logger = get_logger(__name__,
+                        log_file=log_file,
+                        log_level=level,
+                        file_mode='w',
+                        log_formatter=format)
     print(f'Worker {get_rank()} logging to {log_file}')
+    return logger


 def main(
@@ -120,7 +124,7 @@ def main(
         based on `LlamaforCausalLM` class, this argument is required.
         Currently, only "llama1" is acceptable for llama1 models.
     """  # noqa: E501
-    set_logging(log_file, debug)
+    logger = set_logging(log_file, debug)

     # workers should sync in sampling
     torch.manual_seed(seed)
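The chat.py hunks above are the only non-mechanical change in the commit: set_logging no longer configures the root logger via logging.basicConfig; it builds a per-worker file logger through get_logger and returns it, so callers must capture the return value, as main() now does. A hedged usage sketch (argument values are illustrative):

# Hypothetical call site mirroring the refactored main(); the returned
# logger replaces the old module-level `logger` global.
logger = set_logging(log_file='chat.log', debug=True)
logger.info('Worker %d ready', get_rank())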
lmdeploy/legacy/pytorch/decode.py (5 changes: 3 additions & 2 deletions)

@@ -1,6 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import argparse
-import logging
 import queue
 import warnings
 from typing import List, Optional
@@ -12,6 +11,8 @@
 from transformers import (AutoTokenizer, PreTrainedModel,
                           PreTrainedTokenizerBase)

+from lmdeploy.utils import get_logger
+
 from .model import accel_model, init_model


@@ -371,7 +372,7 @@ def __del__(self):
 test_path = args.test_path
 prompts = args.prompts

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)
 # logging.basicConfig(level=logging.DEBUG)

 # Use test file preferentially
lmdeploy/legacy/pytorch/model.py (5 changes: 3 additions & 2 deletions)

@@ -1,15 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import logging
 import time
 import warnings
 from typing import Optional

 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer

+from lmdeploy.utils import get_logger
+
 from .dist import get_local_rank

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 class LoadWoInit:
lmdeploy/legacy/pytorch/session.py (5 changes: 3 additions & 2 deletions)

@@ -1,10 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import logging

 import torch
 from transformers.generation.utils import ModelOutput

-logger = logging.getLogger(__name__)
+from lmdeploy.utils import get_logger
+
+logger = get_logger(__name__)


 class BasicSessionManager:
lmdeploy/legacy/pytorch/utils.py (6 changes: 3 additions & 3 deletions)

@@ -1,17 +1,17 @@
 # Copyright (c) OpenMMLab. All rights reserved.

-import logging
-
 from transformers.generation.streamers import BaseStreamer

+from lmdeploy.utils import get_logger
+
 from .dist import get_rank, master_only, master_only_and_broadcast_general

 try:
     import readline  # To support command line history  # noqa: F401
 except ImportError:  # readline not available
     pass

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 class TerminalIO:
lmdeploy/pytorch/modeling/modeling_baichuan.py (4 changes: 2 additions & 2 deletions)

@@ -27,13 +27,13 @@
 from transformers.activations import ACT2FN
 from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                            CausalLMOutputWithPast)
-from transformers.utils import logging

 from lmdeploy.pytorch.modeling.convert_to_qmodules import convert_to_qmodules
+from lmdeploy.utils import get_logger

 from .configuration_baichuan import BaiChuanConfig

-logger = logging.get_logger(__name__)
+logger = get_logger(__name__)


 # Copied from transformers.models.bart.modeling_bart._make_causal_mask
lmdeploy/pytorch/modeling/modeling_internlm.py (5 changes: 3 additions & 2 deletions)

@@ -33,14 +33,15 @@
                                            SequenceClassifierOutputWithPast)
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import (add_start_docstrings,
-                                add_start_docstrings_to_model_forward, logging,
+                                add_start_docstrings_to_model_forward,
                                 replace_return_docstrings)

 from lmdeploy.pytorch.modeling.convert_to_qmodules import convert_to_qmodules
+from lmdeploy.utils import get_logger

 from .configuration_internlm import InternLMConfig

-logger = logging.get_logger(__name__)
+logger = get_logger(__name__)

 _CONFIG_FOR_DOC = 'InternLMConfig'
lmdeploy/pytorch/modeling/modeling_internlm2.py (5 changes: 3 additions & 2 deletions)

@@ -34,10 +34,11 @@
                                            SequenceClassifierOutputWithPast)
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import (add_start_docstrings,
-                                add_start_docstrings_to_model_forward, logging,
+                                add_start_docstrings_to_model_forward,
                                 replace_return_docstrings)

 from lmdeploy.pytorch.modeling.convert_to_qmodules import convert_to_qmodules
+from lmdeploy.utils import get_logger

 try:
     from transformers.generation.streamers import BaseStreamer
@@ -46,7 +47,7 @@

 from .configuration_internlm import InternLMConfig as InternLM2Config

-logger = logging.get_logger(__name__)
+logger = get_logger(__name__)

 _CONFIG_FOR_DOC = 'InternLM2Config'
lmdeploy/pytorch/modeling/modeling_llama.py (5 changes: 3 additions & 2 deletions)

@@ -32,12 +32,13 @@
 from transformers.modeling_utils import PreTrainedModel
 from transformers.models.llama.configuration_llama import LlamaConfig
 from transformers.utils import (add_start_docstrings,
-                                add_start_docstrings_to_model_forward, logging,
+                                add_start_docstrings_to_model_forward,
                                 replace_return_docstrings)

 from lmdeploy.pytorch.modeling.convert_to_qmodules import convert_to_qmodules
+from lmdeploy.utils import get_logger

-logger = logging.get_logger(__name__)
+logger = get_logger(__name__)

 _CONFIG_FOR_DOC = 'LlamaConfig'
lmdeploy/pytorch/models/falcon.py (3 changes: 0 additions & 3 deletions)

@@ -3,7 +3,6 @@
 # https://huggingface.co/tiiuae/falcon-7b-instruct
 # https://github.com/huggingface/transformers/blob/v4.33-release/src/transformers/models/falcon/modeling_falcon.py # noqa

-import logging
 from typing import Optional, Tuple, Union

 import torch
@@ -20,8 +19,6 @@
 from ..kernels import (alibi_paged_attention_fwd, fill_kv_cache,
                        paged_attention_fwd)

-logger = logging.getLogger()
-

 # rotary pos emb helpers
 # (torch.jit.script does not seem to support staticmethod...)
lmdeploy/serve/qos_engine/inner_group_schd.py (5 changes: 3 additions & 2 deletions)

@@ -1,8 +1,9 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import collections
-import logging

-logger = logging.getLogger(__name__)
+from lmdeploy.utils import get_logger
+
+logger = get_logger(__name__)


 class UserRequestQueue:
lmdeploy/serve/qos_engine/qos_engine.py (4 changes: 2 additions & 2 deletions)

@@ -1,7 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import asyncio
 import json
-import logging
 import threading
 import time
 from typing import List
@@ -11,8 +10,9 @@
                                             GenerateRequestQos)
 from lmdeploy.serve.qos_engine.inner_group_schd import UserRequestQueue
 from lmdeploy.serve.qos_engine.usage_stats import UsageStats
+from lmdeploy.utils import get_logger

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 class QosConfig:
[Diffs for the remaining 4 of the 21 changed files were not loaded.]
