This article walks through an example of using faiss to store data vectorized with HuggingFaceBgeEmbeddings, then deserializing and loading it for later use. Hopefully it offers a useful reference for developers facing the same problem.
Stuck at home over the weekend with nothing to do, I experimented with using bge to vectorize Word documents, storing the result in faiss, and deserializing it later for reuse. The concrete implementation code follows.
1. Loading the Word documents and storing their vectorized content
import os
import docx
from tqdm import tqdm
from langchain.docstore.document import Document
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Read and process a .docx file, converting tables to Markdown format
def read_docx(file_path):
    doc = docx.Document(file_path)
    full_text = []
    for para in doc.paragraphs:
        full_text.append(para.text)
    for table in doc.tables:
        md_table = []
        for row in table.rows:
            md_row = "| " + " | ".join(cell.text.strip() for cell in row.cells) + " |"
            md_table.append(md_row)
        full_text.append("\n".join(md_table))
    return '\n'.join(full_text)

def load_wordfile(filepath):
    # Extract the file name (without extension); it doubles as the "country" metadata
    file_name = os.path.splitext(os.path.basename(filepath))[0]
    text = read_docx(filepath)
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=128)
    document = Document(page_content=text, metadata={"source": filepath, "country": file_name, "datayear": 2024})
    chunks = text_splitter.split_documents([document])
    print(f"Split document into {len(chunks)} chunks")
    return chunks

if __name__ == "__main__":
    # Document directory and vector store path
    docx_directory = 'e:/ai/doc/'
    vector_store_path = 'e:/ai/vector/data'
    # Read and process every .docx file in the directory
    documents = []
    file_list = [f for f in os.listdir(docx_directory) if f.endswith('.docx')]
    for file_name in tqdm(file_list, desc="Reading documents"):
        file_path = os.path.join(docx_directory, file_name)
        docs = load_wordfile(file_path)
        documents.extend(docs)
    model_name = "E:/ai/bge-small-zh-v1.5"
    model_kwargs = {"device": "cpu"}
    encode_kwargs = {"normalize_embeddings": True}
    hf = HuggingFaceBgeEmbeddings(model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs)
    # Build the vector store with FAISS
    vector_store = FAISS.from_documents(documents, hf)
    # Save the vector store to disk
    vector_store.save_local(vector_store_path)
My home computer has no GPU, so the embedding computation runs on the CPU; in a CUDA environment, just change "cpu" to "cuda" in the code above.
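Before moving on, it is worth checking that the index round-trips. Here is a minimal sketch (my addition, not from the original article) that reloads the saved store and runs a similarity search; the query string is an arbitrary placeholder, and recent langchain_community releases require allow_dangerous_deserialization=True because load_local unpickles the docstore.

from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS

hf = HuggingFaceBgeEmbeddings(
    model_name="E:/ai/bge-small-zh-v1.5",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)
# load_local unpickles the docstore, so newer langchain_community
# versions require explicitly opting in to deserialization
vector_store = FAISS.load_local("e:/ai/vector/data", hf,
                                allow_dangerous_deserialization=True)
# Placeholder query; any text works for a smoke test
for doc in vector_store.similarity_search("人口数据", k=3):
    print(doc.metadata.get("country"), doc.page_content[:80])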
2. Deserializing the store and serving a retrieval API
import os
import pickle
from fastapi import FastAPI, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Dict
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS

app = FastAPI()

# Allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],      # allow all origins
    allow_credentials=True,
    allow_methods=["*"],      # allow all HTTP methods
    allow_headers=["*"],      # allow all HTTP headers
)

# Lists of countries and regions worldwide
asian_countries = [
    # East Asia
    "中国", "日本", "韩国", "朝鲜", "蒙古",
    # Southeast Asia
    "印度尼西亚", "泰国", "马来西亚", "新加坡", "菲律宾", "越南", "缅甸", "柬埔寨", "老挝", "文莱", "东帝汶",
    # South Asia
    "印度", "巴基斯坦", "孟加拉国", "斯里兰卡", "尼泊尔", "不丹", "马尔代夫", "阿富汗",
    # Central Asia
    "哈萨克斯坦", "乌兹别克斯坦", "土库曼斯坦", "吉尔吉斯斯坦", "塔吉克斯坦",
    # West Asia (Middle East)
    "土耳其", "伊朗", "伊拉克", "叙利亚", "约旦", "黎巴嫩", "以色列", "巴勒斯坦", "沙特阿拉伯", "阿联酋", "卡塔尔", "科威特", "阿曼", "巴林", "也门", "乔治亚", "亚美尼亚", "阿塞拜疆",
    # North Asia
    "俄罗斯"
]
african_countries = [
    "阿尔及利亚", "安哥拉", "贝宁", "博茨瓦纳", "布基纳法索", "布隆迪", "佛得角", "喀麦隆", "中非共和国", "乍得",
    "科摩罗", "刚果(布)", "刚果(金)", "吉布提", "埃及", "赤道几内亚", "厄立特里亚", "斯威士兰", "埃塞俄比亚", "加蓬",
    "冈比亚", "加纳", "几内亚", "几内亚比绍", "科特迪瓦", "肯尼亚", "莱索托", "利比里亚", "利比亚", "马达加斯加",
    "马拉维", "马里", "毛里塔尼亚", "毛里求斯", "摩洛哥", "莫桑比克", "纳米比亚", "尼日尔", "尼日利亚", "卢旺达",
    "圣多美和普林西比", "塞内加尔", "塞舌尔", "塞拉利昂", "索马里", "南非", "南苏丹", "苏丹", "坦桑尼亚", "多哥",
    "突尼斯", "乌干达", "赞比亚", "津巴布韦"
]
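The snippet above breaks off before the actual retrieval endpoints, so what follows is only a minimal sketch of how the deserialized store could be served, continuing the script above; it is not the author's implementation. The /search route, its q and top_k parameters, and the port are my assumptions, and the model and index paths are reused from part one.

import uvicorn

# Load the embedding model and deserialize the FAISS index once at startup
hf = HuggingFaceBgeEmbeddings(
    model_name="E:/ai/bge-small-zh-v1.5",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)
vector_store = FAISS.load_local(
    "e:/ai/vector/data", hf,
    allow_dangerous_deserialization=True,  # opt-in required by newer langchain_community
)

@app.get("/search")
def search(q: str = Query(..., description="query text"), top_k: int = 3):
    # Return the top_k most similar chunks with their metadata and scores
    # (L2 distance with the default index, so lower means closer)
    results = vector_store.similarity_search_with_score(q, k=top_k)
    return [
        {"content": doc.page_content, "metadata": doc.metadata, "score": float(score)}
        for doc, score in results
    ]

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)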
That concludes this example of using faiss to store data vectorized with HuggingFaceBgeEmbeddings and loading it back by deserialization. I hope the article is of some help to fellow developers!