This article covers how to merge a Qwen model after LoRA fine-tuning and how to test the merged model with a webui. Hopefully it provides a useful reference for developers working on the same problem; let's get started.
Merging the model after LoRA fine-tuning Qwen
qwen_lora_merge.py:
import os
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer


def save_model_and_tokenizer(path_to_adapter, new_model_directory):
    """Load the adapter model, merge the LoRA weights, save the merged model,
    then load and save the tokenizer."""
    # Validate the paths
    if not os.path.exists(path_to_adapter):
        raise FileNotFoundError(f"Path {path_to_adapter} does not exist.")
    if not os.path.exists(new_model_directory):
        os.makedirs(new_model_directory, exist_ok=True)

    try:
        # Load the adapter and merge the LoRA weights into the base model
        model = AutoPeftModelForCausalLM.from_pretrained(
            path_to_adapter,
            device_map="auto",
            trust_remote_code=True
        ).eval()
        merged_model = model.merge_and_unload()

        # Save the merged model
        merged_model.save_pretrained(
            new_model_directory,
            max_shard_size="2048MB",
            safe_serialization=True
        )

        # Load the tokenizer from the adapter path and save it
        tokenizer = AutoTokenizer.from_pretrained(
            path_to_adapter,
            trust_remote_code=True
        )
        # Assume we have a helper to save the tokenizer; shown here for clarity
        save_tokenizer(tokenizer, new_model_directory)
    except Exception as e:
        # Log the error and re-raise it
        print(f"Error while loading or saving: {e}")
        raise


def save_tokenizer(tokenizer, directory):
    """Save the tokenizer to the given directory."""
    tokenizer.save_pretrained(directory)


if __name__ == "__main__":
    lora_model_path = "/media//huggingface_cache/out_models/qwen1_8b_chat_lora/checkpoint-1200"
    new_model_directory = "/media/huggingface_cache/out_models/qwen1_8b_chat_lora/Qwen-1_8B-Chat_law_merge"
    save_model_and_tokenizer(lora_model_path, new_model_directory)
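Once the merge finishes, a quick way to confirm the output directory is self-contained is to load it with plain transformers (no peft dependency) and run a single chat round-trip. A minimal sketch; the prompt is only an illustration:

from transformers import AutoModelForCausalLM, AutoTokenizer

merged_dir = "/media/huggingface_cache/out_models/qwen1_8b_chat_lora/Qwen-1_8B-Chat_law_merge"

# The merged checkpoint should load without peft
tokenizer = AutoTokenizer.from_pretrained(merged_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    merged_dir,
    device_map="auto",
    trust_remote_code=True
).eval()

# Qwen's remote code provides a chat() helper; one round-trip verifies the weights
response, _ = model.chat(tokenizer, "Hello, who are you?", history=None)
print(response)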
Testing with the webui
web_ui.py:
import torch

# Quick environment check: CUDA availability and versions
print(torch.cuda.is_available())
print(torch.version.cuda)
print(torch.backends.cudnn.version())

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

MODEL_PATH = "/media/huggingface_cache/out_models/qwen1_8b_chat_lora/Qwen-1_8B-Chat_law_merge"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_PATH,
    trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    device_map="auto",
    trust_remote_code=True
).eval()

# Define Gradio interface components
title = "Transformer Chatbot"
description = "Enter your message and receive a response from the transformer-based language model."


def generate_answer_ui(query, history):
    # Forward the user's query to Qwen's chat() helper, carrying the conversation history
    response, history = model.chat(tokenizer, query, history=history)
    return f"> Question:\n{query}\n\n> Answer:\n{response}\n\n>"


def main():
    iface = gr.ChatInterface(
        fn=generate_answer_ui,
        title=title,
        description=description,
        examples=[
            ["Replace this with a question from your fine-tuning data"],
            ["What is your name?"],
        ],
    )
    iface.launch(inbrowser=True)


if __name__ == '__main__':
    main()
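The script imports GenerationConfig but never applies it. If you want to control sampling behavior while testing, you can load the generation settings shipped with the checkpoint and override a few fields before launching the UI. A minimal sketch with illustrative values (placed right after the model is created):

from transformers.generation import GenerationConfig

# Load the default generation settings shipped with the merged checkpoint;
# Qwen's chat() helper reads model.generation_config internally
model.generation_config = GenerationConfig.from_pretrained(
    MODEL_PATH,
    trust_remote_code=True
)
# Illustrative overrides; tune these for your fine-tuned data
model.generation_config.temperature = 0.7
model.generation_config.top_p = 0.9
model.generation_config.max_new_tokens = 512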
That wraps up this article on merging a Qwen model after LoRA fine-tuning and testing it with a webui. We hope it proves helpful to fellow developers!