
feat: qwen demo code #111

Open · wants to merge 2 commits into master
48 changes: 48 additions & 0 deletions Season2.step_into_llm/17.Qwen/CLI_input_mock_qwen.py
@@ -0,0 +1,48 @@
import mindspore
import numpy as np
from mindspore import dtype as mstype
from mindspore import Tensor
from mindnlp.transformers import AutoTokenizer, AutoModelForCausalLM
import faulthandler

# Dump a Python traceback on hard crashes (useful when debugging native-code faults).
faulthandler.enable()

# Load the tokenizer and model from the ModelScope mirror, in half precision.
model_id = "Qwen/Qwen1.5-0.5B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id, mirror='modelscope')
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    ms_dtype=mindspore.float16,
    mirror='modelscope'
)

# A mocked chat: a system persona plus a single user turn.
messages = [
    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
    {"role": "user", "content": "Who are you?"},
]

# Render the chat through Qwen's chat template and tokenize to a MindSpore tensor.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="ms"
)
attention_mask = Tensor(np.ones(input_ids.shape), mstype.float32)

# Stop generation at either the model's EOS token or <|endoftext|>.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|endoftext|>")
]
outputs = model.generate(
    input_ids,
    attention_mask=attention_mask,
    max_new_tokens=20,
    eos_token_id=terminators,
    do_sample=False,  # greedy decoding; uncomment below to sample instead
    # do_sample=True,
    # temperature=0.6,
    # top_p=0.9,
)
# Strip the prompt tokens and decode only the newly generated text.
response = outputs[0][input_ids.shape[-1]:]
print(outputs)
print(tokenizer.decode(response, skip_special_tokens=True))
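
Since this file mocks the CLI input with a fixed message list, a natural next step is reading real terminal input. Below is a minimal sketch of an interactive loop, not part of this PR: it assumes the `tokenizer`, `model`, and `terminators` objects defined above, and the exit commands are hypothetical.

# Hypothetical interactive variant of the demo above (not part of this PR);
# reuses `tokenizer`, `model`, and `terminators` from CLI_input_mock_qwen.py.
history = [{"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"}]
while True:
    user_msg = input("You: ")
    if user_msg.strip().lower() in ("exit", "quit"):
        break
    history.append({"role": "user", "content": user_msg})
    input_ids = tokenizer.apply_chat_template(history, add_generation_prompt=True, return_tensors="ms")
    attention_mask = Tensor(np.ones(input_ids.shape), mstype.float32)
    outputs = model.generate(input_ids, attention_mask=attention_mask,
                             max_new_tokens=128, eos_token_id=terminators, do_sample=False)
    reply = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
    history.append({"role": "assistant", "content": reply})
    print("Qwen:", reply)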

61 changes: 61 additions & 0 deletions Season2.step_into_llm/17.Qwen/GUI_gradio-qwen1.5.py
@@ -0,0 +1,61 @@
import gradio as gr
import mindspore
from mindspore import dtype as mstype
import numpy as np
from mindnlp.transformers import AutoModelForCausalLM, AutoTokenizer
from mindnlp.transformers import TextIteratorStreamer
from threading import Thread

# Load the tokenizer and model (downloaded from the Hugging Face model hub by default).
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat", ms_dtype=mindspore.float16)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat", ms_dtype=mindspore.float16)

system_prompt = "You are a helpful and friendly chatbot"

def build_input_from_chat_history(chat_history, msg: str):
    # Rebuild the full message list: system prompt, prior turns, then the new user message.
    messages = [{'role': 'system', 'content': system_prompt}]
    for user_msg, ai_msg in chat_history:
        messages.append({'role': 'user', 'content': user_msg})
        messages.append({'role': 'assistant', 'content': ai_msg})
    messages.append({'role': 'user', 'content': msg})
    return messages

# Generate model predictions, streamed token by token.
def predict(message, history):
    # Format the input for the model.
    messages = build_input_from_chat_history(history, message)
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="ms",
        tokenize=True
    )
    attention_mask = mindspore.Tensor(np.ones(input_ids.shape), mstype.float32)
    streamer = TextIteratorStreamer(tokenizer, timeout=300, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.9,
        temperature=0.1,
        num_beams=1,
        attention_mask=attention_mask,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()  # Run generation in a background thread so tokens can stream out.
    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        if '</s>' in partial_message:  # Stop if a stop token is generated.
            break
        yield partial_message


# Set up the Gradio chat interface.
gr.ChatInterface(predict,
                 title="Qwen1.5-0.5B-Chat",
                 description="问几个问题",  # "Ask a few questions"
                 examples=['你是谁?', '介绍一下华为公司']  # "Who are you?", "Introduce Huawei"
                 ).launch(share=True, server_name='0.0.0.0', server_port=7860)  # Launch the web interface.
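
Because `predict` is a plain Python generator, the streaming path can be smoke-tested without launching the Gradio server. A minimal sketch, assuming the model and tokenizer above are already loaded; the chat history below is a hypothetical example, not part of this PR:

# Hypothetical smoke test for the streaming generator (no Gradio server needed).
history = [("你好", "你好!有什么可以帮你的吗?")]  # ("Hello", "Hello! How can I help?")
last = ""
for partial in predict("介绍一下华为公司", history):  # "Introduce Huawei"
    last = partial  # each yield is the full text generated so far
print(last)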
