From c15a5b15154798f9cd3dc05e2efb49407cafb274 Mon Sep 17 00:00:00 2001
From: HUSEIN ZOLKEPLI
Date: Sun, 1 Sep 2024 16:45:18 +0800
Subject: [PATCH] added smollm

---
 session/llama3/req-unsloth.txt |   0
 session/smollm/train.py        | 410 +++++++++++++++++++++++++++++++++
 session/smollm/train.sh        |  20 ++
 3 files changed, 430 insertions(+)
 create mode 100644 session/llama3/req-unsloth.txt
 create mode 100644 session/smollm/train.py
 create mode 100644 session/smollm/train.sh

diff --git a/session/llama3/req-unsloth.txt b/session/llama3/req-unsloth.txt
new file mode 100644
index 00000000..e69de29b
diff --git a/session/smollm/train.py b/session/smollm/train.py
new file mode 100644
index 00000000..731acd0a
--- /dev/null
+++ b/session/smollm/train.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
+
+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
+https://huggingface.co/models?filter=text-generation
+"""
+# You can also adapt this script on your own causal language modeling
+# task. Pointers for this are left as comments.
+
+import logging
+import math
+import os
+import sys
+import warnings
+from dataclasses import dataclass, field
+from itertools import chain
+from typing import Optional
+
+import datasets
+import torch
+from datasets import load_dataset
+
+import transformers
+from transformers import (
+    CONFIG_MAPPING,
+    MODEL_FOR_CAUSAL_LM_MAPPING,
+    AutoConfig,
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    HfArgumentParser,
+    Trainer,
+    TrainingArguments,
+    default_data_collator,
+    is_torch_tpu_available,
+    set_seed,
+)
+from transformers.testing_utils import CaptureLogger
+from transformers.trainer_utils import get_last_checkpoint
+from transformers.utils import check_min_version, send_example_telemetry
+from transformers.utils.versions import require_version
+from streaming.base.format.mds.encodings import Encoding, _encodings
+from streaming import LocalDataset
+from liger_kernel.transformers import AutoLigerKernelForCausalLM
+
+import numpy as np
+
+require_version(
+    "datasets>=1.8.0",
+    "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+
+logger = logging.getLogger(__name__)
+
+
+MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+    """
+
+    model_name_or_path: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
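+                # When --model_name_or_path is left unset, main() falls back to
+                # building a fresh config from --model_type and trains from scratch.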
+            )
+        },
+    )
+    model_type: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "If training from scratch, pass a model type from the list: " +
+            ", ".join(MODEL_TYPES)},
+    )
+    config_overrides: Optional[str] = field(
+        default=None, metadata={
+            "help": (
+                "Override some existing default config settings when a model is trained from scratch. Example: "
+                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")}, )
+    config_name: Optional[str] = field(
+        default=None, metadata={
+            "help": "Pretrained config name or path if not the same as model_name"})
+    tokenizer_name: Optional[str] = field(
+        default=None, metadata={
+            "help": "Pretrained tokenizer name or path if not the same as model_name"})
+    cache_dir: Optional[str] = field(
+        default=None, metadata={
+            "help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
+    use_fast_tokenizer: bool = field(
+        default=True, metadata={
+            "help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."}, )
+    model_revision: str = field(
+        default="main", metadata={
+            "help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
+    token: str = field(
+        default=None,
+        metadata={
+            "help": (
+                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
+                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
+            )
+        },
+    )
+    use_auth_token: bool = field(
+        default=None,
+        metadata={
+            "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
+        },
+    )
+    trust_remote_code: bool = field(
+        default=False, metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine.")}, )
+    torch_dtype: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
+                "dtype will be automatically derived from the model's weights."),
+            "choices": [
+                "auto",
+                "bfloat16",
+                "float16",
+                "float32"],
+        },
+    )
+    low_cpu_mem_usage: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
+                "Setting it to True will benefit LLM loading time and RAM consumption."
+            )
+        },
+    )
+
+    def __post_init__(self):
+        if self.config_overrides is not None and (
+                self.config_name is not None or self.model_name_or_path is not None):
+            raise ValueError(
+                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
+            )
+
+
+@dataclass
+class DataTrainingArguments:
+    """
+    Arguments pertaining to what data we are going to input our model for training and eval.
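+
+    Note that in this script `train_file` is expected to be a local
+    mosaicml-streaming (MDS) dataset directory of pre-tokenized samples
+    (see `DatasetFixed` in `main()`), not a raw text file.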
+ """ + + dataset_name: Optional[str] = field( + default=None, metadata={ + "help": "The name of the dataset to use (via the datasets library)."}) + dataset_config_name: Optional[str] = field( + default=None, metadata={ + "help": "The configuration name of the dataset to use (via the datasets library)."}) + train_file: Optional[str] = field( + default=None, metadata={ + "help": "The input training data file (a text file)."}) + validation_file: Optional[str] = field( + default=None, metadata={ + "help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) + max_train_samples: Optional[int] = field( + default=None, metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set.")}, ) + max_eval_samples: Optional[int] = field( + default=None, metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set.")}, ) + streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) + block_size: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Optional input sequence length after tokenization. " + "The training dataset will be truncated in block of this size for training. " + "Default to the model max input length for single sentence inputs (take into account special tokens)." + ) + }, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + validation_split_percentage: Optional[int] = field( + default=5, + metadata={ + "help": "The percentage of the train set used as validation set in case there's no validation split" + }, + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + keep_linebreaks: bool = field( + default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} + ) + + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_json_file( + json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + if model_args.use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v4.34.", + FutureWarning) + if model_args.token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + model_args.token = model_args.use_auth_token + + # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The + # information sent is the one passed as arguments along with your Python/PyTorch versions. 
+    send_example_telemetry("run_clm", model_args, data_args)
+
+    # Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+
+    if training_args.should_log:
+        # The default of training_args.log_level is passive, so we set log level
+        # at info here to have that default.
+        transformers.utils.logging.set_verbosity_info()
+
+    log_level = training_args.get_process_log_level()
+    logger.setLevel(log_level)
+    datasets.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " +
+        f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}")
+    logger.info(f"Training/evaluation parameters {training_args}")
+
+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(
+            training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+
+    # Set seed before initializing model.
+    set_seed(training_args.seed)
+
+    config_kwargs = {
+        "cache_dir": model_args.cache_dir,
+        "revision": model_args.model_revision,
+        "token": model_args.token,
+        "trust_remote_code": model_args.trust_remote_code,
+    }
+
+    if model_args.config_name:
+        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
+    elif model_args.model_name_or_path:
+        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
+    else:
+        config = CONFIG_MAPPING[model_args.model_type]()
+        logger.warning("You are instantiating a new config instance from scratch.")
+        if model_args.config_overrides is not None:
+            logger.info(f"Overriding config: {model_args.config_overrides}")
+            config.update_from_string(model_args.config_overrides)
+            logger.info(f"New config: {config}")
+
+    tokenizer_kwargs = {
+        "cache_dir": model_args.cache_dir,
+        "use_fast": model_args.use_fast_tokenizer,
+        "revision": model_args.model_revision,
+        "token": model_args.token,
+        "trust_remote_code": model_args.trust_remote_code,
+    }
+    if model_args.tokenizer_name:
+        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
+    elif model_args.model_name_or_path:
+        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
+    else:
+        raise ValueError(
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
+            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")
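+    # The MDS shards store token ids as raw uint32 bytes, so the codec below has
+    # to be registered with `streaming` before LocalDataset can decode samples.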
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name.") + + class UInt32(Encoding): + def encode(self, obj) -> bytes: + return obj.tobytes() + + def decode(self, data: bytes): + return np.frombuffer(data, np.uint32) + + _encodings['uint32'] = UInt32 + + class DatasetFixed(torch.utils.data.Dataset): + def __init__(self, local): + self.dataset = LocalDataset(local=local) + + def __getitem__(self, idx): + data = self.dataset[idx] + data['labels'] = data["input_ids"].copy() + data.pop('token_type_ids', None) + for k in data.keys(): + data[k] = data[k].astype(np.int64) + return data + + def __len__(self): + return len(self.dataset) + + train_dataset = DatasetFixed(local=data_args.train_file) + print(len(train_dataset)) + + if model_args.model_name_or_path: + torch_dtype = ( + model_args.torch_dtype + if model_args.torch_dtype in ["auto", None] + else getattr(torch, model_args.torch_dtype) + ) + + selected_model = AutoLigerKernelForCausalLM + + model = selected_model.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + token=model_args.token, + trust_remote_code=model_args.trust_remote_code, + torch_dtype=torch_dtype, + low_cpu_mem_usage=model_args.low_cpu_mem_usage, + use_flash_attention_2=True, + ) + else: + model = AutoModelForCausalLM.from_config( + config, trust_remote_code=model_args.trust_remote_code) + n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) + logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=None, + tokenizer=tokenizer, + # Data collator will default to DataCollatorWithPadding, so we change it. + data_collator=default_data_collator, + compute_metrics=None, + preprocess_logits_for_metrics=None, + ) + + print('len(trainer.train_dataset)', len(trainer.train_dataset)) + + checkpoint = None + if training_args.resume_from_checkpoint is not None: + checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint + try: + trainer.train(resume_from_checkpoint=checkpoint) + trainer.save_model() + trainer.save_state() + + except Exception as e: + e = str(e) + print(e) + if checkpoint and ('checkpoint' in e or 'central directory' in e): + os.system(f'mv {checkpoint} {checkpoint}-temp') + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/session/smollm/train.sh b/session/smollm/train.sh new file mode 100644 index 00000000..d4bef29e --- /dev/null +++ b/session/smollm/train.sh @@ -0,0 +1,20 @@ +WANDB_PROJECT="finetune-HuggingFaceTB-SmolLM-360M" \ +torchrun --nproc_per_node 4 \ +-m train \ +--model_name_or_path HuggingFaceTB/SmolLM-360M \ +--per_device_train_batch_size 8 \ +--gradient_accumulation_steps 1 \ +--output_dir finetune-SmolLM-360M \ +--bf16 \ +--do_train \ +--do_eval false \ +--num_train_epochs 2 \ +--train_file "combine-smollm" \ +--logging_steps 1 \ +--learning_rate 2e-5 \ +--block_size 4096 \ +--save_steps 200 \ +--save_total_limit 3 \ +--gradient_checkpointing true \ +--log_level "info" \ +--torch_dtype "bfloat16" \ No newline at end of file