Data Preprocessing for Universal Information Extraction

2024-06-11 01:12

This article walks through data preprocessing for universal information extraction (UIE); hopefully it serves as a useful reference for developers tackling similar problems.

# Paths: raw DuUIE data, preprocessed output, and the predefined (seen) schemas
train_data = './datasets/duuie'
output_folder = './datasets/duuie_pre'
ignore_datasets = ["DUEE", "DUEE_FIN_LITE"]  # event datasets, preprocessed separately below
schema_folder = './datasets/seen_schema'

# Preprocess the CCKS 2022 competition data
import shutil

# Copy the raw data into the output folder (run once):
# shutil.copytree(train_data, output_folder)

import os

life_folder = os.path.join(output_folder, "DUIE_LIFE_SPO")
org_folder = os.path.join(output_folder, "DUIE_ORG_SPO")

print(life_folder,org_folder)

import json

def load_jsonlines_file(filename):
    """Read a JSON-Lines file into a list of dicts."""
    with open(filename, encoding="utf8") as reader:
        return [json.loads(line) for line in reader]

life_train_instances = load_jsonlines_file(f"{life_folder}/train.json")
org_train_instances = load_jsonlines_file(f"{org_folder}/train.json")

# Peek at a few training instances from each task
for i in range(27695, 27698):
    print(life_train_instances[i], '|', org_train_instances[i])

class RecordSchema:
    def __init__(self, type_list, role_list, type_role_dict):
        self.type_list = type_list
        self.role_list = role_list
        self.type_role_dict = type_role_dict

    def __repr__(self) -> str:
        repr_list = [f"Type: {self.type_list}", f"Role: {self.role_list}", f"Map: {self.type_role_dict}"]
        return "\n".join(repr_list)

    @staticmethod
    def get_empty_schema():
        return RecordSchema(type_list=list(), role_list=list(), type_role_dict=dict())

    @staticmethod
    def read_from_file(filename):
        lines = open(filename, encoding="utf8").readlines()
        type_list = json.loads(lines[0])       # types
        role_list = json.loads(lines[1])       # roles
        type_role_dict = json.loads(lines[2])  # type-to-role map
        return RecordSchema(type_list, role_list, type_role_dict)

    def write_to_file(self, filename):
        with open(filename, "w", encoding="utf8") as output:
            # json.dumps serializes a Python object to a JSON string;
            # ensure_ascii=False keeps non-ASCII characters (e.g. Chinese) unescaped.
            output.write(json.dumps(self.type_list, ensure_ascii=False) + "\n")
            output.write(json.dumps(self.role_list, ensure_ascii=False) + "\n")
            output.write(json.dumps(self.type_role_dict, ensure_ascii=False) + "\n")
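For orientation, a record.schema file is just three JSON lines: the type list, the role list, and the type-to-role map. Below is a minimal sketch with made-up values (not from the real dataset):

# A minimal record.schema sketch with hypothetical values
demo_schema = RecordSchema(
    type_list=["人物"],
    role_list=["毕业院校", "国籍"],
    type_role_dict={"人物": ["毕业院校", "国籍"]},
)
demo_schema.write_to_file("demo.schema")
# demo.schema now contains three JSON lines:
# ["人物"]
# ["毕业院校", "国籍"]
# {"人物": ["毕业院校", "国籍"]}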

RecordSchema.read_from_file(f"{life_folder}/record.schema")

life_relation = RecordSchema.read_from_file(f"{life_folder}/record.schema").role_list

org_relation = RecordSchema.read_from_file(f"{org_folder}/record.schema").role_list

from collections import defaultdict

instance_dict = defaultdict(list)

# Group LIFE and ORG training instances that share the same text
for instance in life_train_instances + org_train_instances:
    instance_dict[instance["text"]] += [instance]

# Texts annotated in both tasks: `a` holds the LIFE instances whose text also
# appears in ORG, and `b` the matching ORG instances.
a = [i for i in life_train_instances for j in org_train_instances if i['text'] == j['text']]

b = [i for i in org_train_instances for j in a if i['text'] == j['text']]

for i in range(3):
    print(a[i]['relation'],'|',b[i]['relation'])

dict_1 = {1: 2, 3: 4}
for i in dict_1:  # iterating a dict is equivalent to iterating dict_1.keys()
    print(i)

from typing import Tuple, List, Dict

def merge_instance(instance_list):
    def all_equal(_x):  # check that all elements are identical
        for __x in _x:
            if __x != _x[0]:
                return False
        return True
    def entity_key(_x):
        return (tuple(_x["offset"]), _x["type"])
    def relation_key(_x):
        return (
            tuple(_x["type"]),
            tuple(_x["args"][0]["offset"]),
            _x["args"][0]["type"],
            tuple(_x["args"][1]["offset"]),
            _x["args"][1]["type"],
        )

    def event_key(_x):
        return (tuple(_x["offset"]), _x["type"])
    assert all_equal([x["text"] for x in instance_list])  # merged instances must share the same text
    element_dict = {
        "entity": dict(),
        "relation": dict(),
        "event": dict(),
    }
    instance_id_list = list()
    for x in instance_list:
        instance_id_list += [x["id"]]
        for entity in x.get("entity", list()):
            element_dict["entity"][entity_key(entity)] = entity
        for relation in x.get("relation", list()):
            element_dict["relation"][relation_key(relation)] = relation
        for event in x.get("event", list()):
            element_dict["event"][event_key(event)] = event

    return {
        "id": "-".join(instance_id_list),
        "text": instance_list[0]["text"],
        "tokens": instance_list[0]["tokens"],
        "entity": list(element_dict["entity"].values()),
        "relation": list(element_dict["relation"].values()),
        "event": list(element_dict["event"].values()),
    }
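A quick sanity check of merge_instance on two made-up instances that share the same text (all field values below are illustrative, not real data): duplicate annotations collapse to one because they map to the same key.

demo_relation = [{"type": "母亲", "args": [
    {"offset": [0], "type": "人物", "text": "甲"},
    {"offset": [2], "type": "人物", "text": "乙"}]}]
demo_a = {"id": "a", "text": "甲之乙", "tokens": list("甲之乙"), "relation": demo_relation}
demo_b = {"id": "b", "text": "甲之乙", "tokens": list("甲之乙"), "relation": demo_relation}
merged = merge_instance([demo_a, demo_b])
print(merged["id"])             # a-b
print(len(merged["relation"]))  # 1 — the duplicate relation is collapsed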

for text in instance_dict:
    instance_dict[text] = merge_instance(instance_dict[text])

for i in range(800,802):
    print(list(instance_dict.values())[i]['relation'])

import copy

with open(f"{life_folder}/train.json", "w") as output:
    for instance in instance_dict.values():
        new_instance = copy.deepcopy(instance)
        new_instance["relation"] = list(filter(lambda x: x["type"] in life_relation, instance["relation"]))
        output.write(json.dumps(new_instance) + "\n")

 with open(f"{org_folder}/train.json", "w") as output:
    for instance in instance_dict.values():
        new_instance = copy.deepcopy(instance)
        new_instance["relation"] = list(filter(lambda x: x["type"] in org_relation, instance["relation"]))
        output.write(json.dumps(new_instance) + "\n")

a_instances = load_jsonlines_file(f"{life_folder}/train.json")
b_instances = load_jsonlines_file(f"{org_folder}/train.json")

print(len(a_instances),len(b_instances))

import yaml

def load_definition_schema_file(filename):
    """Load a schema definition written in YAML."""
    return yaml.load(open(filename, encoding="utf8"), Loader=yaml.FullLoader)
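The seen-schema YAML files group event types under a top-level 事件 key, and each event type lists its argument roles under 参数. Roughly (a hypothetical excerpt; the real files carry more detail):

# 事件:
#   夺冠:
#     参数:
#       冠军: ...
#       夺冠赛事: ...
#   退役:
#     参数:
#       退役者: ...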

aa = load_definition_schema_file(os.path.join(schema_folder,'体育竞赛.yaml'))

# Collect the union of all event argument names in the 体育竞赛 schema
mm = list()
for i in aa['事件'].values():
    mm += i["参数"]
mm = list(set(mm))

[x for x in aa['事件']]  # the event type names

aa['事件']['退役']["参数"].keys()  # argument names of the 退役 event

aaa = {1: 2, 3: 4}
for k, v in aaa.items():  # .items() yields (key, value) pairs
    print(k, v)

def dump_schema(output_folder, schema_dict):
    """Write each schema in `schema_dict` to `<output_folder>/<name>.schema`, one JSON line per element."""
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for schema_name, schema in schema_dict.items():
        schema_file = f"{output_folder}/{schema_name}.schema"
        with open(schema_file, "w", encoding="utf8") as output:
            for element in schema:
                output.write(json.dumps(element, ensure_ascii=False) + "\n")

def dump_event_schema(event_map, output_folder):
    role_list = list()
    for roles in event_map.values():
        role_list += roles["参数"]
    role_list = list(set(role_list))  # de-duplicate role names
    type_list = list(event_map.keys())
    type_role_map = {event_type: list(event_map[event_type]["参数"].keys()) for event_type in event_map}
    dump_schema(
        output_folder=output_folder,
        schema_dict={
            "entity": [[], [], {}],
            "relation": [[], [], {}],
            "event": [type_list, role_list, type_role_map],
            "record": [type_list, role_list, type_role_map],
        },
    )
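To see what dump_event_schema produces, here is a sketch run on a tiny hypothetical event map (structure only; role descriptions are elided as None):

demo_map = {"夺冠": {"参数": {"冠军": None, "夺冠赛事": None}}}
dump_event_schema(demo_map, "./demo_schema")
# ./demo_schema/event.schema and record.schema now hold three JSON lines:
# ["夺冠"]
# ["冠军", "夺冠赛事"]   (role order may vary, since duplicates are dropped via a set)
# {"夺冠": ["冠军", "夺冠赛事"]}
# entity.schema and relation.schema hold empty placeholders.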

def filter_event_in_instance(instances, required_event_types):
    """Filter the events in each instance, keeping only mentions whose type is in `required_event_types`."""
    new_instances = list()
    for instance in instances:
        new_instance = copy.deepcopy(instance)
        new_instance["event"] = list(filter(lambda x: x["type"] in required_event_types, new_instance["event"]))
        new_instances += [new_instance]
    return new_instances
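For example, filtering a made-up instance down to a single event type (values are illustrative):

demo = [{"text": "某文本", "event": [
    {"type": "夺冠", "offset": [0], "args": []},
    {"type": "退役", "offset": [2], "args": []}]}]
print(filter_event_in_instance(demo, {"夺冠"})[0]["event"])
# [{'type': '夺冠', 'offset': [0], 'args': []}] — only the requested type survives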

def dump_instances(instances, output_filename):
    with open(output_filename, "w", encoding="utf8") as output:
        for instance in instances:
            output.write(json.dumps(instance, ensure_ascii=False) + "\n")

def filter_event(data_folder, event_types, output_folder):
    """Keep only events with `event_types` from `data_folder` and save the result to `output_folder`."""
    dump_event_schema(event_types, output_folder)
    for split in ["train", "val"]:
        filename = os.path.join(data_folder, f"{split}.json")
        instances = [json.loads(line.strip()) for line in open(filename, encoding="utf8")]
        new_instances = filter_event_in_instance(instances, required_event_types=event_types)
        dump_instances(new_instances, os.path.join(output_folder, f"{split}.json"))

# Preprocess the event data: drop all event annotations except 灾害意外 and 体育竞赛
for schema_name in ["灾害意外", "体育竞赛"]:
    print(f"Building {schema_name} dataset ...")
    duee_folder = os.path.join(output_folder, "DUEE")
    schema_file = os.path.join(schema_folder, f"{schema_name}.yaml")
    output_folder2 = os.path.join(output_folder, schema_name)
    schema = load_definition_schema_file(schema_file)
    filter_event(
        duee_folder,
        schema["事件"],
        output_folder2,
    )

ty_instances = load_jsonlines_file(f"{output_folder}/体育竞赛/train.json")
zh_instances = load_jsonlines_file(f"{output_folder}/灾害意外/train.json")

print(len(ty_instances),len(zh_instances))

for i in range(11508,11608):
    print(ty_instances[i],'|',zh_instances[i])

bb=load_definition_schema_file(os.path.join(schema_folder, "金融信息.yaml"))

for i in bb['事件'].keys():
    print(i)

# Argument names of the 中标 event type
mm = list()
mm += bb['事件']['中标']["参数"]
mm = list(set(mm))

bb["事件"]['中标']["参数"].keys()

for schema in ["金融信息"]:
    print(f"Building {schema} dataset ...")
    duee_fin_folder = os.path.join(output_folder, "DUEE_FIN_LITE")
    schema_file = os.path.join(schema_folder, f"{schema}.yaml")
    output_folder2 = os.path.join(output_folder, schema)
    schema = load_definition_schema_file(schema_file)
    # 依据不同事件类别将多事件抽取分割成多个单事件类型抽取
    # Separate multi-type extraction to multiple single-type extraction
    for event_type in schema["事件"]:
        filter_event(
           duee_fin_folder,
           {event_type: schema["事件"][event_type]},
            output_folder2 + "_" + event_type,
        )

vv=load_jsonlines_file(f"{output_folder}/DUEE_FIN_LITE/train.json")

zb_instances = load_jsonlines_file(f"{output_folder}/金融信息_中标/train.json")
zy_instances = load_jsonlines_file(f"{output_folder}/金融信息_质押/train.json")

print(len(zb_instances),len(zy_instances))

for i in range(6985,7015):
    print(zb_instances[i],'|',zy_instances[i])

def annonote_graph(
    entities: List[Dict] = [],
    relations: List[Dict] = [],
    events: List[Dict] = []):
    """Convert entity/relation/event annotations into a spot-association (spot/asoc) graph."""
    spot_dict = dict()
    asoc_dict = defaultdict(list)
    def add_spot(spot):
        spot_key = (tuple(spot["offset"]), spot["type"])
        spot_dict[spot_key] = spot
    def add_asoc(spot, asoc, tail):
        spot_key = (tuple(spot["offset"]), spot["type"])
        asoc_dict[spot_key] += [(tuple(tail["offset"]), tail["text"], asoc)]
    for entity in entities:
        add_spot(spot=entity)
    for relation in relations:
        add_spot(spot=relation["args"][0])
        add_asoc(spot=relation["args"][0], asoc=relation["type"], tail=relation["args"][1])
    for event in events:
        add_spot(spot=event)
        for argument in event["args"]:
            add_asoc(spot=event, asoc=argument["type"], tail=argument)
    spot_asoc_instance = list()
    for spot_key in sorted(spot_dict.keys()):
        offset, label = spot_key
        if len(spot_dict[spot_key]["offset"]) == 0:  # skip spots without an offset
            continue
        spot_instance = {
            "span": spot_dict[spot_key]["text"],
            "label": label,
            "asoc": list(),
        }
        for tail_offset, tail_text, asoc in sorted(asoc_dict.get(spot_key, [])):
            if len(tail_offset) == 0:
                continue
            spot_instance["asoc"] += [(asoc, tail_text)]
        spot_asoc_instance += [spot_instance]
    spot_labels = set([label for _, label in spot_dict.keys()])
    asoc_labels = set()
    for _, asoc_list in asoc_dict.items():
        for _, _, asoc in asoc_list:
            asoc_labels.add(asoc)
    return spot_labels, asoc_labels, spot_asoc_instance
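A toy check of annonote_graph on a single made-up relation (values are illustrative): the head argument becomes a spot, and the relation plus tail argument become its asoc.

demo_rel = [{"type": "出生地", "args": [
    {"offset": [0, 1], "type": "人物", "text": "张三"},
    {"offset": [3, 4], "type": "地点", "text": "北京"}]}]
spots, asocs, graph = annonote_graph(relations=demo_rel)
print(spots)  # {'人物'}
print(asocs)  # {'出生地'}
print(graph)  # [{'span': '张三', 'label': '人物', 'asoc': [('出生地', '北京')]}]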

def add_spot_asoc_to_single_file(filename):
    instances = [json.loads(line) for line in open(filename, encoding="utf8")]
    print(f"Add spot asoc to {filename} ...")
    with open(filename, "w", encoding="utf8") as output:
        for instance in instances:
            spots, asocs, spot_asoc_instance = annonote_graph(
                entities=instance["entity"],      # entities
                relations=instance["relation"],   # relations
                events=instance["event"],         # events
            )
            # Attach the spot/asoc annotations to the instance
            instance["spot_asoc"] = spot_asoc_instance
            instance["spot"] = list(spots)
            instance["asoc"] = list(asocs)
            output.write(json.dumps(instance, ensure_ascii=False) + "\n")

ff = os.path.join(output_folder,'金融信息_企业破产',"train.json")

ff_instances = [json.loads(line) for line in open(ff, encoding="utf8")]

for i in range(1046,1050):
    print(ff_instances[i])

a, b, yyj = annonote_graph(
    entities=ff_instances[11000]["entity"],
    relations=ff_instances[11000]["relation"],
    events=ff_instances[11000]["event"],
)

data_folder=output_folder

def merge_schema(schema_list: List[RecordSchema]):
    """Merge per-task schemas into one RecordSchema via unions of types, roles, and type-role maps."""
    type_set = set()
    role_set = set()
    type_role_dict = defaultdict(list)
    for schema in schema_list:
        for type_name in schema.type_list:
            type_set.add(type_name)
        for role_name in schema.role_list:
            role_set.add(role_name)
        for type_name in schema.type_role_dict:
            type_role_dict[type_name] += schema.type_role_dict[type_name]
    for type_name in type_role_dict:
        type_role_dict[type_name] = list(set(type_role_dict[type_name]))
    return RecordSchema(
        type_list=list(type_set),
        role_list=list(role_set),
        type_role_dict=type_role_dict,
    )
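A quick demo of merge_schema with two made-up single-task schemas:

s1 = RecordSchema(["人物"], ["毕业院校"], {"人物": ["毕业院校"]})
s2 = RecordSchema(["人物"], ["国籍"], {"人物": ["国籍"]})
merged = merge_schema([s1, s2])
print(merged.type_list)              # ['人物']
print(sorted(merged.role_list))      # ['国籍', '毕业院校'] — union across tasks
print(merged.type_role_dict["人物"])  # both roles, de-duplicated (order may vary)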

def convert_duuie_to_spotasoc(data_folder, ignore_datasets):
    schema_list = list()
    for task_folder in os.listdir(data_folder):
        if task_folder in ignore_datasets:  # skip ignored datasets
            continue
        if not os.path.isdir(os.path.join(data_folder, task_folder)):  # skip non-directories
            continue
        print(f"Add spot asoc to {task_folder} ...")
        # Read the single-task schema
        task_schema_file = os.path.join(data_folder, task_folder, "record.schema")
        # Add spot/asoc annotations to the single-task data
        add_spot_asoc_to_single_file(os.path.join(data_folder, task_folder, "train.json"))
        add_spot_asoc_to_single_file(os.path.join(data_folder, task_folder, "val.json"))
        record_schema = RecordSchema.read_from_file(task_schema_file)
        schema_list += [record_schema]
    # Merge the schemas of all tasks
    multi_schema = merge_schema(schema_list)
    multi_schema.write_to_file(os.path.join(data_folder, "record.schema"))

convert_duuie_to_spotasoc(output_folder, ignore_datasets)

This concludes this walkthrough of data preprocessing for universal information extraction; hopefully it is helpful to other developers!


