This article walks through a from-scratch ("hand-written") implementation of beam search and then reads the corresponding code path in the transformers `generate()` method; hopefully it serves as a useful reference for developers working on decoding.
1. Contents
- Beam search from scratch
- A walkthrough of transformers `generate()`
2. Implementation
- Beam search from scratch
```python
import torch
import torch.nn.functional as F


def pred(input):
    # Stand-in for a real model: returns random logits over a vocab of size 10
    # for the next position only.
    batch, seq_len = input.shape
    generate = torch.randn(size=(batch, 1, 10))
    return generate


def beam_search(input_ids, max_length, num_beams):
    batch = input_ids.shape[0]
    # Expand the input: repeat each sample num_beams times.
    expand_size = num_beams
    expanded_return_idx = (
        torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
    )
    input_ids = input_ids.index_select(0, expanded_return_idx)
    print(input_ids)

    batch_beam_size, cur_len = input_ids.shape
    # Only the first beam of each sample starts at score 0; the rest start at -1e9
    # so the first step does not select num_beams copies of the same token.
    beam_scores = torch.zeros(size=(batch, num_beams), dtype=torch.float, device=input_ids.device)
    beam_scores[:, 1:] = -1e9
    beam_scores = beam_scores.view(size=(batch * num_beams,))
    next_tokens = torch.zeros(size=(batch, num_beams), dtype=torch.long, device=input_ids.device)
    next_indices = torch.zeros(size=(batch, num_beams), dtype=torch.long, device=input_ids.device)

    while cur_len < max_length:
        logits = pred(input_ids)                 # (batch*num_beams, seq_len, vocab)
        next_token_logits = logits[:, -1, :]     # logits of the current step
        # Normalize to log-probabilities.
        next_token_scores = F.log_softmax(next_token_logits, dim=-1)  # (batch*num_beams, vocab_size)
        # Accumulate: current log-prob + log-prob of the path so far.
        next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
        # Reshape for beam search: merge the beam and vocab dimensions.
        vocab_size = next_token_scores.shape[-1]
        next_token_scores = next_token_scores.view(batch, num_beams * vocab_size)
        # Scores and flat ids of the top num_beams candidates at this step.
        next_token_scores, next_tokens = torch.topk(next_token_scores, num_beams, dim=1, largest=True, sorted=True)
        next_indices = next_tokens // vocab_size  # which beam each candidate extends
        next_tokens = next_tokens % vocab_size    # token id within the vocab

        # Core of beam search: pick the surviving hypotheses.
        def process(input_ids, next_scores, next_tokens, next_indices):
            next_beam_scores = torch.zeros((batch, num_beams), dtype=next_scores.dtype)
            next_beam_tokens = torch.zeros((batch, num_beams), dtype=next_tokens.dtype)
            next_beam_indices = torch.zeros((batch, num_beams), dtype=next_indices.dtype)
            for batch_idx in range(batch):
                beam_idx = 0
                for beam_token_rank, (next_token, next_score, next_index) in enumerate(
                    zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
                ):
                    batch_beam_idx = batch_idx * num_beams + next_index
                    next_beam_scores[batch_idx, beam_idx] = next_score       # score of this path
                    next_beam_tokens[batch_idx, beam_idx] = next_token       # token chosen at this step
                    next_beam_indices[batch_idx, beam_idx] = batch_beam_idx  # index of the parent beam
                    beam_idx += 1
            return next_beam_scores.view(-1), next_beam_tokens.view(-1), next_beam_indices.view(-1)

        beam_scores, beam_next_tokens, beam_idx = process(input_ids, next_token_scores, next_tokens, next_indices)
        # Update the input: gather the parent beams by beam_idx and append the chosen
        # tokens, giving a new input of shape (batch*num_beams, seq_len+1).
        input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
        cur_len = cur_len + 1

    return input_ids, beam_scores


if __name__ == '__main__':
    input_ids = torch.randint(0, 100, size=(3, 1))
    print(input_ids)
    input_ids, beam_scores = beam_search(input_ids, max_length=10, num_beams=3)
    print(input_ids)
```
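The only non-obvious step above is the index arithmetic after `torch.topk`. Here is a minimal sketch (tensor values made up for illustration, not taken from the code above) of how a flat index over the merged `num_beams * vocab_size` axis is split back into a parent-beam id and a token id:

```python
import torch

num_beams, vocab_size = 2, 5
# One sample, scores flattened to num_beams * vocab_size.
scores = torch.tensor([[0.1, 0.3, 0.2, 0.0, 0.4,    # beam 0's vocab scores
                        0.9, 0.5, 0.6, 0.7, 0.8]])  # beam 1's vocab scores
top_scores, flat_ids = torch.topk(scores, num_beams, dim=1)
beam_ids = flat_ids // vocab_size   # which beam each candidate extends -> tensor([[1, 1]])
token_ids = flat_ids % vocab_size   # token id within the vocab        -> tensor([[0, 4]])
print(top_scores, beam_ids, token_ids)
```

Both surviving candidates here extend beam 1, which is exactly why the parent index has to be carried along: the next iteration may drop some beams entirely and duplicate others.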
Reference: the transformers `generate()` implementation.
- A walkthrough of transformers `generate()`
```python
@torch.no_grad()
def generate(  # entry point of generation
    self,
    inputs: Optional[torch.Tensor] = None,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
    synced_gpus: Optional[bool] = None,
    assistant_model: Optional["PreTrainedModel"] = None,
    streamer: Optional["BaseStreamer"] = None,
    negative_prompt_ids: Optional[torch.Tensor] = None,
    negative_prompt_attention_mask: Optional[torch.Tensor] = None,
    **kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
    ...  # earlier preparation steps omitted

    # 10. go into different generation modes
    # Decode according to the selected generation mode; beam search is used as the example here.
    if generation_mode == GenerationMode.ASSISTED_GENERATION:
        ...
    elif generation_mode == GenerationMode.BEAM_SEARCH:  # beam search decoding
        # 11. prepare beam search scorer: initialize the scorer's parameters
        beam_scorer = BeamSearchScorer(
            batch_size=batch_size,
            num_beams=generation_config.num_beams,
            device=inputs_tensor.device,
            length_penalty=generation_config.length_penalty,
            do_early_stopping=generation_config.early_stopping,
            num_beam_hyps_to_keep=generation_config.num_return_sequences,
            max_length=generation_config.max_length,
        )
        # 12. interleave input_ids with `num_beams` additional sequences per batch
        #     (expand each input num_beams times)
        input_ids, model_kwargs = self._expand_inputs_for_generation(
            input_ids=input_ids,
            expand_size=generation_config.num_beams,
            is_encoder_decoder=self.config.is_encoder_decoder,
            **model_kwargs,
        )
        # 13. run beam search: the core decoding loop
        result = self.beam_search(
            input_ids,
            beam_scorer,
            logits_processor=prepared_logits_processor,
            stopping_criteria=prepared_stopping_criteria,
            pad_token_id=generation_config.pad_token_id,
            eos_token_id=generation_config.eos_token_id,
            output_scores=generation_config.output_scores,
            output_logits=generation_config.output_logits,
            return_dict_in_generate=generation_config.return_dict_in_generate,
            synced_gpus=synced_gpus,
            sequential=generation_config.low_memory,
            **model_kwargs,
        )
```
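From the user's side, this branch is reached simply by asking `generate()` for more than one beam. A quick usage sketch (the model name is only an example; any seq2seq or causal LM checkpoint works the same way):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

inputs = tokenizer("translate English to German: Hello, world!", return_tensors="pt")
outputs = model.generate(
    **inputs,
    num_beams=3,              # num_beams > 1 with greedy scoring selects beam search
    max_length=32,
    num_return_sequences=3,   # keep all three finished hypotheses
    early_stopping=True,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```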
```python
def beam_search(
    self, input_ids, encoder_output, attention_mask, num_beams, max_length,
    pad_token_id: int, eos_token_id: int,
):
    batch_size = self.beam_scorer.batch_size     # batch size before expansion
    num_beams = self.beam_scorer.num_beams
    batch_beam_size, cur_len = input_ids.shape   # batch size after expansion
    assert (
        num_beams * batch_size == batch_beam_size
    ), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."

    # Only the first beam of each sample starts at score 0; the rest start at -1e9.
    beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
    beam_scores[:, 1:] = -1e9
    beam_scores = beam_scores.view((batch_size * num_beams,))
    next_tokens = torch.zeros((batch_size, num_beams), dtype=torch.long, device=input_ids.device)
    next_indices = torch.zeros((batch_size, num_beams), dtype=torch.long, device=input_ids.device)

    past: List[torch.Tensor] = []
    while cur_len < max_length:
        # Decoder forward pass for the current step.
        logits, past = self._decoder_forward(input_ids, encoder_output, attention_mask, past)
        next_token_logits = logits[:, -1, :]   # logits of the current step
        # Adjust the logits, e.g. for Bart when cur_len=1 or cur_len reaches max_length.
        next_token_logits = self.adjust_logits_during_generation(
            next_token_logits, cur_len=cur_len, max_length=max_length
        )
        # Normalize to log-probabilities.
        next_token_scores = F.log_softmax(next_token_logits, dim=-1)  # (batch_size * num_beams, vocab_size)
        # Pre-process the distribution (repetition penalties, banned words, ...).
        next_token_scores = self.logits_processor(input_ids, next_token_scores)
        # Accumulate: current log-prob + log-prob of the path so far.
        next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
        # Reshape for beam search: merge the beam and vocab dimensions.
        vocab_size = next_token_scores.shape[-1]
        next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
        # Keep the top 2*num_beams candidate paths.
        next_token_scores, next_tokens = torch.topk(
            next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
        )
        next_indices = next_tokens // vocab_size
        next_tokens = next_tokens % vocab_size
        # Select the surviving paths, their scores and parent ids; this is the core step
        # and the place where beam search variants differ.
        beam_scores, beam_next_tokens, beam_idx = self.beam_scorer.process(
            input_ids,
            next_token_scores,
            next_tokens,
            next_indices,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
        )
        # Update the input: gather the parent beams by beam_idx and append the chosen
        # tokens, giving a new input of shape (batch*num_beams, seq_len+1).
        input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
        cur_len = cur_len + 1
        if len(past) > 0:
            past = self._reorder_cache(past, beam_idx)
        if self.beam_scorer.is_done():
            break

    # Pick the best hypotheses and format the output.
    sequences, sequence_scores = self.beam_scorer.finalize(
        input_ids,
        beam_scores,
        next_tokens,
        next_indices,
        pad_token_id=pad_token_id,
        eos_token_id=eos_token_id,
    )
    return sequences
```
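Two details differ from the hand-written version. First, `topk` keeps `2 * num_beams` candidates rather than `num_beams`: if some of the top candidates end with `eos_token_id`, the scorer moves them into the pool of finished hypotheses, and the extra candidates guarantee there are still `num_beams` live beams to continue. Second, the cached key/value states (`past`) must be reordered with the same `beam_idx` used to reorder `input_ids`, because a surviving beam may have been extended from a different parent. A minimal sketch of that reordering (shapes and the helper name are assumptions for illustration, not the library's exact code):

```python
import torch

def reorder_cache(past, beam_idx):
    # Each cached tensor is laid out as (batch*num_beams, heads, seq_len, head_dim),
    # so re-gathering along dim 0 aligns the cache with the reordered beams.
    return [layer.index_select(0, beam_idx) for layer in past]

past = [torch.randn(6, 8, 4, 64)]             # e.g. batch=2, num_beams=3, one layer
beam_idx = torch.tensor([0, 0, 2, 3, 5, 5])   # parent beam of each surviving beam
past = reorder_cache(past, beam_idx)
print(past[0].shape)                          # torch.Size([6, 8, 4, 64])
```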
That concludes this walkthrough of a hand-written beam search; hopefully it is helpful in practice.