本文主要介绍 ChatGLM3 自行微调的完整流程:制作训练数据的代码、训练指令、训练完成后的模型合并与推理代码完整版,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随小编一起来学习吧!
ChatGLM3 自己训练微调制作数据代码,与训练完成后模型合并解译代码
import json
from pathlib import Path

# Subject of the fine-tuning dataset.
keyword = '这年轻人'
# Reference answer paired with every prompt (excerpted from Baidu Baike).
description = "这年轻人,男,1993年出生于陕西省湖北市潼关县。2015年毕业于中国背景大学。2016年加入西安旧东方,当选(旧东方)当时最年轻的英语教研主管;2019年加入旧东方在线,是高三英语名师并成为高三英语学科最年轻的负责人,被称为“中关村王杰伦”。现是东方甄选高级合伙人、旧东方教育科技集团董事长文化助理,兼任新东方文旅集团副总裁。"


def get_prompt_list(keyword):
    """Return simple paraphrases of a question about *keyword*.

    Light data augmentation: the same fact is asked in several phrasings
    so the fine-tuned model converges more easily.
    """
    return [
        f'{keyword}',
        f'你知道{keyword}吗?',
        f'{keyword}是谁?',
        f'介绍一下{keyword}',
        f'你听过{keyword}吗?',
        f'谁是{keyword}?',
        f'{keyword}是?',
        f'你认识{keyword}吗?',
        f'{keyword}的资料',
        f'{keyword}简介',
    ]


# One sample per prompt, in the ChatGLM3 conversation format
# (system / user / assistant roles).
data = [
    {
        "conversations": [
            {
                "role": "system",
                "content": "You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.",
            },
            {"role": "user", "content": x},
            {"role": "assistant", "content": description},
        ]
    }
    for x in get_prompt_list(keyword)
]

if __name__ == "__main__":
    # Write one JSON object per line (jsonl), as the finetune script expects.
    out_path = Path("formatted_data/my_data_qa.jsonl")
    # fix: create the output directory so the script works on a fresh checkout
    out_path.parent.mkdir(parents=True, exist_ok=True)
    # fix: explicit UTF-8 — the default encoding is platform-dependent
    with open(out_path, "w", encoding="utf-8") as f:
        for e in data:
            # ensure_ascii=False keeps the Chinese text human-readable
            f.write(json.dumps(e, ensure_ascii=False) + "\n")
模型合并代码
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModel

# Path to the original (base) model weights.
# fix: separate name for the path — the original reused `base_model` for
# both the path string and the loaded model object.
base_model_path = '/media/DATA/XXX/large_model/weights'
# Load the base model onto GPU 3.
base_model = AutoModel.from_pretrained(base_model_path, trust_remote_code=True).cuda(3)

# Path to the LoRA fine-tuned checkpoint.
lora_model_path = '/media/DATA/XXX/large_model/Chat_weitiao/ChatGLM3/finetune_demo/output/checkpoint-3000'
# Attach the LoRA adapter weights to the base model.
lora_model = PeftModel.from_pretrained(base_model, lora_model_path, torch_dtype=torch.float16)
# Move to CPU before merging (avoids holding two weight copies on the GPU).
lora_model.to("cpu")

# Fold the LoRA deltas into the base weights and drop the adapter wrapper.
merged_model = lora_model.merge_and_unload()

# Save the merged model, sharded into <=2 GB safetensors files.
new_model_directory = '/media/DATA/XXX/large_model/Chat_weitiao/ChatGLM3/finetune_demo/output/fintrue_chatglm3'
merged_model.save_pretrained(new_model_directory, max_shard_size="2048MB", safe_serialization=True)
# NOTE(review): the tokenizer is not saved alongside the merged model here —
# downstream loading works only because the inference code re-loads it from
# this same directory structure; confirm the tokenizer files are present.
推理代码
from transformers import AutoModel, AutoTokenizer

# Directory holding the merged (fine-tuned) model from the previous step.
new_model_directory = '/media/DATA/XXX/large_model/Chat_weitiao/ChatGLM3/finetune_demo/output/fintrue_chatglm3'
tokenizer = AutoTokenizer.from_pretrained(new_model_directory, trust_remote_code=True)
# Load onto GPU 3 and switch to inference mode.
model = AutoModel.from_pretrained(new_model_directory, trust_remote_code=True).cuda(3)
model.eval()

# Example of an information-extraction style prompt, kept for reference:
# instruction = "你现在是一个信息抽取模型,请你帮我抽取出关系内容为\"性能故障\", \"部件故障\", \"组成\"和 \"检测工具\"的相关三元组,三元组内部用\"_\"连接,三元组之间用\\n分割。文本:"

# Test prompt — probes a fact from the fine-tuning data.
# fix: renamed from `input`, which shadows the Python builtin.
# NOTE(review): the closing Chinese quote looks truncated — confirm intended.
query = "被称为“中关村周杰伦"

# Single-turn chat with no prior history; print the model's answer.
response, _ = model.chat(tokenizer, query, history=None)
print(response)
第二种
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CLI inference entry point.

Loads either a plain checkpoint or a LoRA-adapter checkpoint (detected by
the presence of ``adapter_config.json``) and runs a single chat turn.
"""
from pathlib import Path
from typing import Annotated, Union

import typer
from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizer,
    PreTrainedTokenizerFast,
)

# Either a plain HF model or a PEFT-wrapped one.
ModelType = Union[PreTrainedModel, PeftModelForCausalLM]
TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]

app = typer.Typer(pretty_exceptions_show_locals=False)


def _resolve_path(path: Union[str, Path]) -> Path:
    """Expand ``~`` and return an absolute, symlink-resolved path."""
    return Path(path).expanduser().resolve()


def load_model_and_tokenizer(model_dir: Union[str, Path]) -> tuple[ModelType, TokenizerType]:
    """Load a model and its tokenizer from *model_dir*.

    If the directory contains a LoRA adapter (``adapter_config.json``),
    load it via PEFT and take the tokenizer from the adapter's recorded
    base-model path; otherwise load both from *model_dir* directly.
    """
    model_dir = _resolve_path(model_dir)
    if (model_dir / 'adapter_config.json').exists():
        model = AutoPeftModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=True, device_map='auto'
        )
        tokenizer_dir = model.peft_config['default'].base_model_name_or_path
    else:
        model = AutoModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=True, device_map='auto'
        )
        tokenizer_dir = model_dir
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir, trust_remote_code=True)
    return model, tokenizer


@app.command()
def main(
    model_dir: Annotated[str, typer.Argument(help='')],
    prompt: Annotated[str, typer.Option(help='')],
):
    """Run one chat turn against the model in *model_dir* and print the reply."""
    model, tokenizer = load_model_and_tokenizer(model_dir)
    response, _ = model.chat(tokenizer, prompt)
    print(response)


if __name__ == '__main__':
    app()
训练指令
CUDA_VISIBLE_DEVICES=3 python finetune_hf.py formatted_data/ /media/DATA/zhulifu/large_model/weights/ configs/lora.yaml
CUDA_VISIBLE_DEVICES=2 python finetune_hf.py formatted_data/ /media/DATA/zhulifu/large_model/weights/ configs/ptuning_v2.yaml
训练代码
# -*- coding: utf-8 -*-import dataclasses as dc
import functools
from collections.abc import Callable, Mapping, Sequence
from pathlib import Path
from typing import Annotated, Any, Optional, Unionimport jieba
import numpy as np
import ruamel.yaml as yaml
import torch
import typer
from datasets import Dataset, DatasetDict, NamedSplit, Split, load_dataset
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from peft import (PeftConfig,PeftModelForCausalLM,get_peft_config,get_peft_model
)
from rouge_chinese import Rouge
from torch import nn
from transformers import (AutoModelForCausalLM,AutoTokenizer,EvalPrediction,GenerationConfig,PreTrainedModel,PreTrainedTokenizer,PreTrainedTokenizerFast,Seq2SeqTrainingArguments
这篇关于 ChatGLM3 自行微调的文章(制作训练数据代码、训练、训练完成后模型合并与推理代码完整版)就介绍到这儿,希望我们推荐的内容对各位开发者有所帮助!