# demo.py: LongLoRA / LongAlpaca long-context demo (forked from dvlab-research/LongLoRA)
import os
import sys
import math
import torch
import argparse
import textwrap
import transformers
from peft import PeftModel
from transformers import GenerationConfig, TextIteratorStreamer
from llama_attn_replace import replace_llama_attn
from threading import Thread
import gradio as gr


def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--base_model', type=str, default="/data1/pretrained-models/llama-7b-hf")
    parser.add_argument('--cache_dir', type=str, default="./cache")
    parser.add_argument('--context_size', type=int, default=-1, help='context size during fine-tuning')
    # note: argparse's type=bool treats any non-empty string (including "False") as True
    parser.add_argument('--flash_attn', type=bool, default=True, help='whether to use the flash-attention replacement')
    parser.add_argument('--temperature', type=float, default=0.6, help='sampling temperature')
    parser.add_argument('--top_p', type=float, default=0.9, help='nucleus sampling top-p')
    parser.add_argument('--max_gen_len', type=int, default=512, help='maximum number of generated tokens')
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8898)
    args = parser.parse_args()
    return args
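
# Example invocation (a sketch only; the model path and context size below are
# assumptions for illustration, not values shipped with the repo):
#
#   python demo.py \
#       --base_model /path/to/LongAlpaca-7B \
#       --context_size 32768 \
#       --max_gen_len 512 \
#       --port 8898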

title = "LongLoRA and LongAlpaca for Long-context LLMs"

description = """
<font size=4>
This is the online demo of LongLoRA. \n
If multiple users access it at the same time, requests are queued, which may add some delay. \n
**Inputs**: <br>
- **Input material txt** and **Question** are required. <br>
**Note**: <br>
- The demo model is **LongAlpaca-7B**. We use 4-bit quantization for low GPU memory inference, which may impair text-generation quality.<br>
- There are 10 book-related examples and 5 paper-related examples, 15 in total.<br>
- Only .txt files are currently supported.\n
**Example questions**: <br>
  Please summarize the book in one paragraph. <br>
  Please tell me what high-level idea the author wants to convey in this book. <br>
  Please describe the relationships among the characters in the book. <br>
  Please summarize the paper in one paragraph. <br>
  What is the main contribution of this paper? <br>
We hope you enjoy our work!
</font>
"""

# Gradio page footer
article = """
<p style='text-align: center'>
<a href='https://arxiv.org/abs/2308.00692' target='_blank'>Preprint Paper</a>
</p>
<p style='text-align: center'>
<a href='https://github.com/dvlab-research/LongLoRA' target='_blank'>Github Repo</a>
</p>
"""

PROMPT_DICT = {
    "prompt_no_input": (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Response:"
    ),
    "prompt_no_input_llama2": (
        "[INST] <<SYS>>\n"
        "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\n"
        "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n"
        "<</SYS>> \n\n {instruction} [/INST]"
    ),
}
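
# A minimal sketch of how the Llama-2 chat prompt is assembled downstream (the
# material and question below are made-up placeholders, not examples from the repo):
#
#   _material = "Chapter 1 ...\nChapter 2 ..."   # contents of the uploaded .txt
#   _question = "Please summarize the book in one paragraph."
#   _prompt = PROMPT_DICT["prompt_no_input_llama2"].format_map(
#       {"instruction": _material + "\n" + _question}
#   )
#   # _prompt == "[INST] <<SYS>>\n ... <</SYS>> \n\n Chapter 1 ...\n...\nPlease summarize ... [/INST]"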


def read_txt_file(material_txt):
    content = ""
    with open(material_txt) as f:
        for line in f.readlines():
            content += line
    return content


def build_generator(
    model, tokenizer, temperature=0.6, top_p=0.9, max_gen_len=4096, use_cache=True
):
    def response(material, question):
        # The demo only accepts plain-text uploads; yield the error so Gradio can
        # display it (a plain `return "..."` inside this generator would show nothing).
        if material is None:
            yield "Only .txt files are supported."
            return
        if material.name.split(".")[-1] != "txt":
            yield "Only .txt files are supported."
            return

        material = read_txt_file(material.name)
        prompt_no_input = PROMPT_DICT["prompt_no_input_llama2"]
        prompt = prompt_no_input.format_map({"instruction": material + "\n%s" % question})
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        if len(inputs['input_ids'][0]) > 32768:
            yield "This demo supports at most 32768 tokens, while the current input has %d. Please use material with fewer tokens." % len(inputs['input_ids'][0])
            return

        torch.cuda.empty_cache()

        # Stream tokens back to the UI while generation runs in a background thread.
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        generate_kwargs = dict(
            **inputs,
            max_new_tokens=max_gen_len,
            temperature=temperature,
            top_p=top_p,
            use_cache=use_cache,
            streamer=streamer,
        )
        t = Thread(target=model.generate, kwargs=generate_kwargs)
        t.start()

        generated_text = ""
        for new_text in streamer:
            generated_text += new_text
            yield generated_text

    return response


def main(args):
    if args.flash_attn:
        replace_llama_attn(inference=True)

    # Set RoPE scaling factor
    config = transformers.AutoConfig.from_pretrained(
        args.base_model,
        cache_dir=args.cache_dir,
    )

    orig_ctx_len = getattr(config, "max_position_embeddings", None)
    if orig_ctx_len and args.context_size > orig_ctx_len:
        scaling_factor = float(math.ceil(args.context_size / orig_ctx_len))
        config.rope_scaling = {"type": "linear", "factor": scaling_factor}
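
    # Worked example (assuming a Llama-2 style base with max_position_embeddings = 4096):
    # --context_size 32768 gives scaling_factor = ceil(32768 / 4096) = 8.0, so RoPE
    # positions are interpolated linearly by a factor of 8 to cover the longer context.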

    # Load model and tokenizer
    model = transformers.AutoModelForCausalLM.from_pretrained(
        args.base_model,
        config=config,
        cache_dir=args.cache_dir,
        torch_dtype=torch.float16,
        load_in_4bit=True,
        device_map="auto",
    )
    # The fine-tuned checkpoint uses one token beyond Llama's 32000-token vocabulary
    # (presumably a pad token added during fine-tuning), so the embedding table is
    # resized to match.
    model.resize_token_embeddings(32001)

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        args.base_model,
        cache_dir=args.cache_dir,
        model_max_length=args.context_size if args.context_size > orig_ctx_len else orig_ctx_len,
        padding_side="right",
        use_fast=False,
    )

    model.eval()
    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    respond = build_generator(
        model, tokenizer, temperature=args.temperature, top_p=args.top_p,
        max_gen_len=args.max_gen_len, use_cache=True,
    )

    demo = gr.Interface(
        respond,
        inputs=[
            gr.File(type="file", label="Input material txt"),
            gr.Textbox(lines=1, placeholder=None, label="Question"),
        ],
        outputs=[
            gr.Textbox(lines=1, placeholder=None, label="Text Output"),
        ],
        title=title,
        description=description,
        article=article,
        allow_flagging="auto",
    )
    # queue() is required for streaming (generator) outputs; share=True creates a
    # public Gradio link in addition to the local server.
    demo.queue()
    demo.launch(server_name=args.host, server_port=args.port, show_error=True, share=True)


if __name__ == "__main__":
    args = parse_config()
    main(args)