本文主要是介绍LSTM seq2seq 模型之英语到法语翻译,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
1. 数据集下载
Machine-Translation-eng-fra | Kaggle
2. 预处理的完整的代码
import os.path
import json

import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.text import Tokenizer
from keras.utils import pad_sequences
from sklearn.model_selection import train_test_split

batch_size = 36       # Batch size for training.
epochs = 50           # Number of epochs to train for.
latent_dim = 256      # Latent dimensionality of the encoding space.
num_samples = 100000  # Number of samples to train on.

# Path to the data txt file on disk.
# base_dir = "dataset"
base_dir = "C:/apps/ml_datasets/fra-eng"
dataset_path = os.path.join(base_dir, "fra.txt")

# Start/end-of-sequence marker words wrapped around every target sentence
# (spaces included so plain string concatenation keeps them as separate words).
START_ = "sostok "
END_ = " eostok"


def get_dataset():
    """Read the tab-separated eng→fra corpus and return (input_texts, target_texts).

    Each line of ``fra.txt`` is expected to hold at least three tab-separated
    fields: English text, French text, attribution.  Only the first
    ``num_samples`` lines are used.  Every target sentence is wrapped with the
    ``sostok``/``eostok`` marker words so the decoder knows where a sentence
    starts and stops.  Character-level vocabulary statistics are printed for
    informational purposes only (the actual tokenization downstream is
    word-level).
    """
    input_texts = []
    target_texts = []
    input_characters = set()
    target_characters = set()
    with open(dataset_path, "r", encoding="utf-8") as f:
        lines = f.read().split("\n")
    print("length of lines:" + str(len(lines)))
    for line in lines[: min(num_samples, len(lines) - 1)]:
        input_text, target_text, _ = line.split("\t")
        # Wrap the target with the start/end marker words used by the decoder.
        target_text = START_ + target_text + END_
        input_texts.append(input_text)
        target_texts.append(target_text)
        # set.add is already a no-op for duplicates; no membership check needed.
        for char in input_text:
            input_characters.add(char)
        for char in target_text:
            target_characters.add(char)

    # Character-level stats — printed for reference only.
    input_characters = sorted(list(input_characters))
    target_characters = sorted(list(target_characters))
    num_encoder_tokens = len(input_characters)
    num_decoder_tokens = len(target_characters)
    max_encoder_seq_length = max([len(txt) for txt in input_texts])
    max_decoder_seq_length = max([len(txt) for txt in target_texts])
    print("Number of samples:", len(input_texts))
    print("Number of unique input tokens:", num_encoder_tokens)
    print("Number of unique output tokens:", num_decoder_tokens)
    print("Max sequence length for inputs:", max_encoder_seq_length)
    print("Max sequence length for outputs:", max_decoder_seq_length)
    return input_texts, target_texts


def check_distribution(x_train, y_train):
    """Plot sentence-length histograms and print the fraction of short sentences.

    Used to pick the fixed ``max_text_len``/``max_summary_len`` padding sizes:
    it reports what share of source sentences fit in 6 words and of target
    sentences fit in 10 words.
    """
    x_count = [len(sent.split()) for sent in x_train]
    y_count = [len(sent.split()) for sent in y_train]
    graph_df = pd.DataFrame()
    graph_df['x_count'] = x_count
    graph_df['y_count'] = y_count
    import matplotlib.pyplot as plt
    graph_df.hist(bins=5)
    plt.show()
    # Fraction of source sentences with <= 6 words.
    cnt = 0
    for i in x_train:
        if len(i.split()) <= 6:
            cnt = cnt + 1
    print(cnt / len(x_train))
    # Fraction of target sentences with <= 10 words.
    cnt = 0
    for i in y_train:
        if len(i.split()) <= 10:
            cnt = cnt + 1
    print(cnt / len(y_train))


def get_tokenizers(x_train, y_train):
    """Fit word-level tokenizers, pad the sequences, and persist the tokenizers.

    Returns ``(input_tokenizer, target_tokenizer, input_sequences,
    target_sequences)`` where the sequence arrays are post-padded to the fixed
    lengths 6 (source) and 10 (target).  Both tokenizers are saved as JSON
    under ``base_dir`` so the training/inference script can reload them.
    """
    input_tokenizer = Tokenizer()
    input_tokenizer.fit_on_texts(x_train)
    target_tokenizer = Tokenizer()
    target_tokenizer.fit_on_texts(y_train)
    input_sequences = input_tokenizer.texts_to_sequences(x_train)
    target_sequences = target_tokenizer.texts_to_sequences(y_train)
    max_text_len = max(len(seq) for seq in input_sequences)
    max_summary_len = max(len(seq) for seq in target_sequences)
    print('max_text_len:', max_text_len)
    print('max_summary_len:', max_summary_len)
    # The observed maxima are printed above, then deliberately overridden with
    # fixed sizes (chosen via check_distribution) to keep the model small.
    max_text_len = 6
    max_summary_len = 10
    input_sequences = pad_sequences(input_sequences, maxlen=max_text_len, padding='post')
    target_sequences = pad_sequences(target_sequences, maxlen=max_summary_len, padding='post')
    print(max_text_len, input_sequences[0])
    print(max_summary_len, target_sequences[0])
    # +1 so index 0 stays reserved for padding.
    x_voc = len(input_tokenizer.word_counts) + 1
    print("Size of vocabulary in X = {}".format(x_voc))
    y_voc = len(target_tokenizer.word_counts) + 1
    print("Size of vocabulary in Y = {}".format(y_voc))
    # Save the target tokenizer to a file.
    y_tokenizer_json = target_tokenizer.to_json()
    y_tokenizer_file = os.path.join(base_dir, "y_tokenizer.json")
    with open(y_tokenizer_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(y_tokenizer_json, ensure_ascii=False))
    # Save the source tokenizer to a file.
    x_tokenizer_json = input_tokenizer.to_json()
    x_tokenizer_file = os.path.join(base_dir, "x_tokenizer.json")
    with open(x_tokenizer_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(x_tokenizer_json, ensure_ascii=False))
    return input_tokenizer, target_tokenizer, input_sequences, target_sequences


if __name__ == '__main__':
    x_train, y_train = get_dataset()
    # check_distribution(x_train, y_train)
    input_tokenizer, target_tokenizer, input_sequences, target_sequences = get_tokenizers(x_train, y_train)
    # Split the dataset into training and validation sets and persist as .npz.
    x_train, x_val, y_train, y_val = train_test_split(
        input_sequences, target_sequences, test_size=0.2, random_state=42
    )
    np.savez(os.path.join(base_dir, "train"), x_tr=x_train, y_tr=y_train, x_val=x_val, y_val=y_val)
    # for i in range(5):
    #     print("x = ", x_train[i])
    #     print("y = ", y_train[i])
3. 训练模型的完整代码
import json
import os

import numpy as np
import keras
from keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from keras.models import Model
from keras.callbacks import EarlyStopping
from keras.preprocessing.text import tokenizer_from_json

model_path = "model"      # SavedModel directory used by checkpointing/loading.
latent_dim = 200          # LSTM hidden size.
embedding_dim = 100       # Word-embedding size for both encoder and decoder.
max_text_len = 6          # Fixed source length chosen during preprocessing.
max_summary_len = 10      # Fixed target length chosen during preprocessing.
base_dir = "C:/apps/ml_datasets/fra-eng"
# Start/end marker words (no surrounding spaces here: used as tokenizer keys).
START_ = "sostok"
END_ = "eostok"


def get_dataset():
    """Load the padded train/val arrays and the fitted tokenizers from disk.

    Returns ``(x_tr, y_tr, x_val, y_val, x_tokenizer, y_tokenizer)`` as
    produced by the preprocessing script (``train.npz`` plus the two tokenizer
    JSON files under ``base_dir``).
    """
    train_set = np.load(os.path.join(base_dir, "train.npz"))
    x_tr = train_set['x_tr']
    y_tr = train_set['y_tr']
    x_val = train_set['x_val']
    y_val = train_set['y_val']
    print("X_train:", x_tr.shape)
    print("y_train:", y_tr.shape)
    print("X_test:", x_val.shape)
    print("y_test:", y_val.shape)
    with open(os.path.join(base_dir, "y_tokenizer.json")) as f:
        data = json.load(f)
    y_tokenizer = tokenizer_from_json(data)
    with open(os.path.join(base_dir, "x_tokenizer.json")) as f:
        data = json.load(f)
    x_tokenizer = tokenizer_from_json(data)
    return x_tr, y_tr, x_val, y_val, x_tokenizer, y_tokenizer


def get_model(max_text_len=100, x_voc=33288, y_voc=11572):
    """Build and compile the training-time seq2seq model.

    Encoder: embedding followed by three stacked LSTMs; the final LSTM's
    hidden/cell states initialise the decoder.  Decoder: embedding plus one
    LSTM followed by a softmax Dense over the target vocabulary.  Compiled
    with rmsprop and sparse categorical cross-entropy (integer targets).
    """
    # Encoder
    encoder_inputs = Input(shape=(max_text_len,), name="encoder_inputs")
    enc_emb = Embedding(x_voc, embedding_dim, trainable=True,
                        name="encoder_embedding")(encoder_inputs)
    # Encoder LSTM 1
    encoder_lstm1 = LSTM(latent_dim, return_sequences=True, return_state=True,
                         dropout=0.4, recurrent_dropout=0.4, name="encoder_lstm1")
    encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)
    # Encoder LSTM 2
    encoder_lstm2 = LSTM(latent_dim, return_sequences=True, return_state=True,
                         dropout=0.4, recurrent_dropout=0.4, name="encoder_lstm2")
    encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)
    # Encoder LSTM 3 — its final states seed the decoder.
    encoder_lstm3 = LSTM(latent_dim, return_state=True, return_sequences=True,
                         dropout=0.4, recurrent_dropout=0.4, name="encoder_lstm3")
    encoder_outputs, state_h, state_c = encoder_lstm3(encoder_output2)

    # Decoder, using the encoder's final states as its initial state.
    decoder_inputs = Input(shape=(None,), name="decoder_inputs")
    dec_emb_layer = Embedding(y_voc, embedding_dim, trainable=True,
                              name="decoder_embedding")
    dec_emb = dec_emb_layer(decoder_inputs)
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True,
                        dropout=0.4, recurrent_dropout=0.2, name="decoder_lstm")
    # The extra outputs are the decoder's final hidden/cell states
    # (unused during training, needed only for the inference graph).
    decoder_outputs, decoder_state_h, decoder_state_c = decoder_lstm(
        dec_emb, initial_state=[state_h, state_c])
    # Softmax over the target vocabulary at every decoding step.
    decoder_dense = Dense(y_voc, activation='softmax', name="decoder_outputs")
    decoder_outputs = decoder_dense(decoder_outputs)

    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    model.summary()
    model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
                  metrics=["accuracy"])
    return model


def train(model, x_tr, y_tr, x_val, y_val, epochs=50):
    """Train with teacher forcing and return the Keras ``History``.

    The decoder input is the target shifted left by one (drops the last
    token); the decoder label is the target shifted right by one (drops the
    ``sostok`` token), reshaped to (batch, steps, 1) for the sparse loss.
    Checkpoints the best model by ``val_loss`` and stops early after 2
    stagnant epochs; the final model is also saved to ``model_path``.
    """
    x_train_input = x_tr
    y_train_input = y_tr[:, :-1]
    y_train_output = y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1)[:, 1:]
    print('y_train_input[0]', y_train_input[0])
    print('y_train_output[0]', y_train_output[0])
    x_val_input = x_val
    y_val_input = y_val[:, :-1]
    y_val_output = y_val.reshape(y_val.shape[0], y_val.shape[1], 1)[:, 1:]
    early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=2)
    checkpoint = keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss',
                                                 save_best_only=True, mode='min',
                                                 verbose=1)
    callbacks = [early_stop, checkpoint]
    history = model.fit([x_train_input, y_train_input], y_train_output,
                        epochs=epochs, callbacks=callbacks, batch_size=128,
                        validation_data=([x_val_input, y_val_input], y_val_output))
    model.save(model_path)
    return history


def get_encoder_decoder_model1(model):
    """Split a trained model into separate inference encoder/decoder models.

    The encoder model maps source tokens to (outputs, state_h, state_c).
    The decoder model consumes one token plus the previous states and emits
    next-token probabilities plus updated states, enabling step-by-step
    greedy decoding.  Layers are located by name, so this works on a model
    reloaded from disk.  Returns ``(decoder_model, encoder_model)``.
    """
    for i, layer in enumerate(model.layers):
        print(f"Layer {i}: {layer.name} - {layer.__class__.__name__}")
    decoder_inputs = model.get_layer("decoder_inputs").input
    encoder_inputs = model.get_layer("encoder_inputs").input
    encoder_outputs, state_h_enc, state_c_enc = model.get_layer("encoder_lstm3").output
    decoder_lstm = model.get_layer("decoder_lstm")
    decoder_dense = model.get_layer("decoder_outputs")
    encoder_model = Model(inputs=encoder_inputs,
                          outputs=[encoder_outputs, state_h_enc, state_c_enc])
    decoder_state_input_h = Input(shape=(latent_dim,))
    decoder_state_input_c = Input(shape=(latent_dim,))
    dec_emb2 = model.get_layer("decoder_embedding")(decoder_inputs)
    decoder_outputs2, state_h2, state_c2 = decoder_lstm(
        dec_emb2, initial_state=[decoder_state_input_h, decoder_state_input_c])
    decoder_outputs2 = decoder_dense(decoder_outputs2)
    decoder_model = Model([decoder_inputs] + [decoder_state_input_h, decoder_state_input_c],
                          [decoder_outputs2] + [state_h2, state_c2])
    return decoder_model, encoder_model


def load_model():
    """Reload the saved model, derive inference models, and load tokenizers.

    Returns ``(model, encoder_model, decoder_model, x_tokenizer, y_tokenizer)``.
    """
    model = keras.models.load_model(model_path)
    decoder_model, encoder_model = get_encoder_decoder_model1(model)
    with open(os.path.join(base_dir, "y_tokenizer.json")) as f:
        data = json.load(f)
    y_tokenizer = tokenizer_from_json(data)
    with open(os.path.join(base_dir, "x_tokenizer.json")) as f:
        data = json.load(f)
    x_tokenizer = tokenizer_from_json(data)
    return model, encoder_model, decoder_model, x_tokenizer, y_tokenizer


def decode_sequence(input_seq, encoder_model, decoder_model, target_word_index,
                    reverse_target_word_index):
    """Greedy-decode one padded source sequence into a target-language string.

    Encodes the input once, then feeds the decoder one token at a time
    (starting from ``sostok``), always picking the argmax token, until
    ``eostok`` appears or ``max_summary_len - 1`` words were produced.
    """
    # Encode the input into the initial state vectors.
    _, e_h, e_c = encoder_model.predict(input_seq)
    # Target sequence of length 1, seeded with the start token.
    target_seq = np.zeros((1, 1))
    target_seq[0, 0] = target_word_index[START_]
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict([target_seq] + [e_h, e_c])
        # Greedy sampling: most probable token of the last step.
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        print(sampled_token_index)
        if sampled_token_index == 0:
            # Index 0 is padding and has no word-index entry; emit a
            # placeholder instead of raising KeyError.
            sampled_token = '0'
        else:
            sampled_token = reverse_target_word_index[sampled_token_index]
        if sampled_token != END_:
            decoded_sentence += ' ' + sampled_token
        # Exit condition: either hit max length or found the stop word.
        if (sampled_token == END_
                or len(decoded_sentence.split()) >= (max_summary_len - 1)):
            stop_condition = True
        # Next step consumes the token just produced plus the updated states.
        target_seq = np.zeros((1, 1))
        target_seq[0, 0] = sampled_token_index
        e_h, e_c = h, c
    return decoded_sentence


def seq2summary(input_seq, target_word_index, reverse_target_word_index):
    """Render a target id sequence as text, skipping padding and markers."""
    newString = ''
    for i in input_seq:
        if (i != 0 and i != target_word_index[START_]) and i != target_word_index[END_]:
            newString = newString + reverse_target_word_index[i] + ' '
    return newString


def seq2text(input_seq, reverse_source_word_index):
    """Render a source id sequence as text, skipping padding (id 0)."""
    newString = ''
    for i in input_seq:
        if i != 0:
            newString = newString + reverse_source_word_index[i] + ' '
    return newString


def show_matrix(history):
    """Placeholder for plotting train/val loss curves (currently disabled)."""
    # from matplotlib import pyplot
    # pyplot.plot(history.history['loss'], label='train')
    # pyplot.plot(history.history['val_loss'], label='test')
    # pyplot.legend()
    # pyplot.show()
    pass


def test(max_text_len=100):
    """Decode a handful of training samples and print original vs predicted.

    NOTE(review): the tokenizers returned by ``get_dataset`` are immediately
    overwritten by the ones ``load_model`` reloads from the same files — the
    double load is redundant but harmless.
    """
    x_tr, y_tr, x_val, y_val, x_tokenizer, y_tokenizer = get_dataset()
    model, encoder_model, decoder_model, x_tokenizer, y_tokenizer = load_model()
    reverse_target_word_index = y_tokenizer.index_word
    reverse_source_word_index = x_tokenizer.index_word
    target_word_index = y_tokenizer.word_index
    for i in range(5, 50):
        print("Review:", seq2text(x_tr[i], reverse_source_word_index))
        print("Original summary:", seq2summary(y_tr[i], target_word_index,
                                               reverse_target_word_index))
        print("Predicted summary:", decode_sequence(x_tr[i].reshape(1, max_text_len),
                                                    encoder_model, decoder_model,
                                                    target_word_index,
                                                    reverse_target_word_index))
        print("\n")


if __name__ == '__main__':
    # Vocabulary sizes as reported by the preprocessing run.
    x_voc = 8864
    y_voc = 19131
    epochs = 100
    x_tr, y_tr, x_val, y_val, x_tokenizer, y_tokenizer = get_dataset()
    print(x_tr[0])
    print(y_tr[0])
    model = get_model(max_text_len, x_voc, y_voc)
    history = train(model, x_tr, y_tr, x_val, y_val, epochs)
    show_matrix(history)
    test(max_text_len)
4.训练过程中数据
cpu 上训练时间大概是一整天
X_train: (80000, 6)
y_train: (80000, 10)
X_test: (20000, 6)
y_test: (20000, 10)
[ 30 266 29 12 178 0]
[ 1 12 33 355 62 74 112 2 0 0]
Model: "model"
__________________________________________________________________________________________________Layer (type) Output Shape Param # Connected to
==================================================================================================encoder_inputs (InputLayer) [(None, 6)] 0 []encoder_embedding (Embedding) (None, 6, 100) 886400 ['encoder_inputs[0][0]']encoder_lstm1 (LSTM) [(None, 6, 200), 240800 ['encoder_embedding[0][0]'](None, 200),(None, 200)]decoder_inputs (InputLayer) [(None, None)] 0 []encoder_lstm2 (LSTM) [(None, 6, 200), 320800 ['encoder_lstm1[0][0]'](None, 200),(None, 200)]decoder_embedding (Embedding) (None, None, 100) 1913100 ['decoder_inputs[0][0]']encoder_lstm3 (LSTM) [(None, 6, 200), 320800 ['encoder_lstm2[0][0]'](None, 200),(None, 200)]decoder_lstm (LSTM) [(None, None, 200), 240800 ['decoder_embedding[0][0]',(None, 200), 'encoder_lstm3[0][1]',(None, 200)] 'encoder_lstm3[0][2]']decoder_outputs (Dense) (None, None, 19131) 3845331 ['decoder_lstm[0][0]']==================================================================================================
Total params: 7,768,031
Trainable params: 7,768,031
Non-trainable params: 0
__________________________________________________________________________________________________
y_train_input[0] [ 1 12 33 355 62 74 112 2 0]
y_train_output[0] [[ 12][ 33][355][ 62][ 74][112][ 2][ 0][ 0]]
Epoch 1/100
625/625 [==============================] - ETA: 0s - loss: 3.8344 - accuracy: 0.4682
Epoch 1: val_loss improved from inf to 3.35198, saving model to model625/625 [==============================] - 523s 824ms/step - loss: 3.8344 - accuracy: 0.4682 - val_loss: 3.3520 - val_accuracy: 0.5184
Epoch 2/100
625/625 [==============================] - ETA: 0s - loss: 3.2284 - accuracy: 0.5280
Epoch 2: val_loss improved from 3.35198 to 3.08983, saving model to model625/625 [==============================] - 622s 996ms/step - loss: 3.2284 - accuracy: 0.5280 - val_loss: 3.0898 - val_accuracy: 0.5403
Epoch 3/100
625/625 [==============================] - ETA: 0s - loss: 3.0036 - accuracy: 0.5493
Epoch 3: val_loss improved from 3.08983 to 2.89494, saving model to model625/625 [==============================] - 753s 1s/step - loss: 3.0036 - accuracy: 0.5493 - val_loss: 2.8949 - val_accuracy: 0.5637
Epoch 4/100
625/625 [==============================] - ETA: 0s - loss: 2.8115 - accuracy: 0.5733
Epoch 4: val_loss improved from 2.89494 to 2.70395, saving model to model625/625 [==============================] - 1114s 2s/step - loss: 2.8115 - accuracy: 0.5733 - val_loss: 2.7039 - val_accuracy: 0.5897
Epoch 5/100
625/625 [==============================] - ETA: 0s - loss: 2.6470 - accuracy: 0.5930
Epoch 5: val_loss improved from 2.70395 to 2.57205, saving model to model625/625 [==============================] - 1075s 2s/step - loss: 2.6470 - accuracy: 0.5930 - val_loss: 2.5721 - val_accuracy: 0.6046
Epoch 6/100
625/625 [==============================] - ETA: 0s - loss: 2.5275 - accuracy: 0.6063
Epoch 6: val_loss improved from 2.57205 to 2.47006, saving model to model625/625 [==============================] - 1289s 2s/step - loss: 2.5275 - accuracy: 0.6063 - val_loss: 2.4701 - val_accuracy: 0.6164
Epoch 7/100
625/625 [==============================] - ETA: 0s - loss: 2.4297 - accuracy: 0.6172
Epoch 7: val_loss improved from 2.47006 to 2.37810, saving model to model625/625 [==============================] - 1403s 2s/step - loss: 2.4297 - accuracy: 0.6172 - val_loss: 2.3781 - val_accuracy: 0.6273
Epoch 8/100
625/625 [==============================] - ETA: 0s - loss: 2.3421 - accuracy: 0.6264
Epoch 8: val_loss improved from 2.37810 to 2.30006, saving model to model625/625 [==============================] - 1420s 2s/step - loss: 2.3421 - accuracy: 0.6264 - val_loss: 2.3001 - val_accuracy: 0.6363
Epoch 9/100
625/625 [==============================] - ETA: 0s - loss: 2.2645 - accuracy: 0.6353
Epoch 9: val_loss improved from 2.30006 to 2.23482, saving model to model625/625 [==============================] - 1315s 2s/step - loss: 2.2645 - accuracy: 0.6353 - val_loss: 2.2348 - val_accuracy: 0.6437
Epoch 10/100
625/625 [==============================] - ETA: 0s - loss: 2.1948 - accuracy: 0.6420
Epoch 10: val_loss improved from 2.23482 to 2.17029, saving model to model625/625 [==============================] - 1338s 2s/step - loss: 2.1948 - accuracy: 0.6420 - val_loss: 2.1703 - val_accuracy: 0.6510
Epoch 11/100
625/625 [==============================] - ETA: 0s - loss: 2.1300 - accuracy: 0.6484
Epoch 11: val_loss improved from 2.17029 to 2.11222, saving model to model625/625 [==============================] - 1345s 2s/step - loss: 2.1300 - accuracy: 0.6484 - val_loss: 2.1122 - val_accuracy: 0.6572
Epoch 12/100
625/625 [==============================] - ETA: 0s - loss: 2.0684 - accuracy: 0.6553
Epoch 12: val_loss improved from 2.11222 to 2.06084, saving model to model625/625 [==============================] - 1429s 2s/step - loss: 2.0684 - accuracy: 0.6553 - val_loss: 2.0608 - val_accuracy: 0.6621
Epoch 13/100
625/625 [==============================] - ETA: 0s - loss: 2.0104 - accuracy: 0.6612
Epoch 13: val_loss improved from 2.06084 to 2.00748, saving model to model625/625 [==============================] - 1236s 2s/step - loss: 2.0104 - accuracy: 0.6612 - val_loss: 2.0075 - val_accuracy: 0.6691
Epoch 14/100
625/625 [==============================] - ETA: 0s - loss: 1.9545 - accuracy: 0.6669
Epoch 14: val_loss improved from 2.00748 to 1.96069, saving model to model625/625 [==============================] - 1337s 2s/step - loss: 1.9545 - accuracy: 0.6669 - val_loss: 1.9607 - val_accuracy: 0.6737
Epoch 15/100
625/625 [==============================] - ETA: 0s - loss: 1.9030 - accuracy: 0.6721
Epoch 15: val_loss improved from 1.96069 to 1.91793, saving model to model625/625 [==============================] - 1329s 2s/step - loss: 1.9030 - accuracy: 0.6721 - val_loss: 1.9179 - val_accuracy: 0.6784
Epoch 16/100
625/625 [==============================] - ETA: 0s - loss: 1.8557 - accuracy: 0.6769
Epoch 16: val_loss improved from 1.91793 to 1.87738, saving model to model625/625 [==============================] - 1336s 2s/step - loss: 1.8557 - accuracy: 0.6769 - val_loss: 1.8774 - val_accuracy: 0.6831
Epoch 17/100
625/625 [==============================] - ETA: 0s - loss: 1.8106 - accuracy: 0.6820
Epoch 17: val_loss improved from 1.87738 to 1.83981, saving model to model625/625 [==============================] - 1482s 2s/step - loss: 1.8106 - accuracy: 0.6820 - val_loss: 1.8398 - val_accuracy: 0.6875
Epoch 18/100
625/625 [==============================] - ETA: 0s - loss: 1.7678 - accuracy: 0.6865
Epoch 18: val_loss improved from 1.83981 to 1.80432, saving model to model625/625 [==============================] - 1408s 2s/step - loss: 1.7678 - accuracy: 0.6865 - val_loss: 1.8043 - val_accuracy: 0.6913
Epoch 19/100
625/625 [==============================] - ETA: 0s - loss: 1.7265 - accuracy: 0.6909
Epoch 19: val_loss improved from 1.80432 to 1.77152, saving model to model625/625 [==============================] - 1336s 2s/step - loss: 1.7265 - accuracy: 0.6909 - val_loss: 1.7715 - val_accuracy: 0.6958
Epoch 20/100
625/625 [==============================] - ETA: 0s - loss: 1.6872 - accuracy: 0.6953
Epoch 20: val_loss improved from 1.77152 to 1.73842, saving model to model625/625 [==============================] - 1386s 2s/step - loss: 1.6872 - accuracy: 0.6953 - val_loss: 1.7384 - val_accuracy: 0.7000
Epoch 21/100
625/625 [==============================] - ETA: 0s - loss: 1.6492 - accuracy: 0.6996
Epoch 21: val_loss improved from 1.73842 to 1.70509, saving model to model625/625 [==============================] - 1506s 2s/step - loss: 1.6492 - accuracy: 0.6996 - val_loss: 1.7051 - val_accuracy: 0.7050
Epoch 22/100
625/625 [==============================] - ETA: 0s - loss: 1.6125 - accuracy: 0.7039
Epoch 22: val_loss improved from 1.70509 to 1.67770, saving model to model625/625 [==============================] - 1492s 2s/step - loss: 1.6125 - accuracy: 0.7039 - val_loss: 1.6777 - val_accuracy: 0.7076
Epoch 23/100
625/625 [==============================] - ETA: 0s - loss: 1.5773 - accuracy: 0.7079
Epoch 23: val_loss improved from 1.67770 to 1.64832, saving model to model625/625 [==============================] - 1471s 2s/step - loss: 1.5773 - accuracy: 0.7079 - val_loss: 1.6483 - val_accuracy: 0.7108
Epoch 24/100
625/625 [==============================] - ETA: 0s - loss: 1.5442 - accuracy: 0.7121
Epoch 24: val_loss improved from 1.64832 to 1.61894, saving model to model625/625 [==============================] - 1556s 2s/step - loss: 1.5442 - accuracy: 0.7121 - val_loss: 1.6189 - val_accuracy: 0.7145
Epoch 25/100
625/625 [==============================] - ETA: 0s - loss: 1.5112 - accuracy: 0.7160
Epoch 25: val_loss improved from 1.61894 to 1.59627, saving model to model625/625 [==============================] - 1467s 2s/step - loss: 1.5112 - accuracy: 0.7160 - val_loss: 1.5963 - val_accuracy: 0.7171
Epoch 26/100
625/625 [==============================] - ETA: 0s - loss: 1.4804 - accuracy: 0.7197
Epoch 26: val_loss improved from 1.59627 to 1.56844, saving model to model625/625 [==============================] - 1594s 3s/step - loss: 1.4804 - accuracy: 0.7197 - val_loss: 1.5684 - val_accuracy: 0.7220
Epoch 27/100
625/625 [==============================] - ETA: 0s - loss: 1.4507 - accuracy: 0.7234
Epoch 27: val_loss improved from 1.56844 to 1.54407, saving model to model625/625 [==============================] - 1550s 2s/step - loss: 1.4507 - accuracy: 0.7234 - val_loss: 1.5441 - val_accuracy: 0.7252
Epoch 28/100
625/625 [==============================] - ETA: 0s - loss: 1.4213 - accuracy: 0.7268
Epoch 28: val_loss improved from 1.54407 to 1.52123, saving model to model625/625 [==============================] - 1529s 2s/step - loss: 1.4213 - accuracy: 0.7268 - val_loss: 1.5212 - val_accuracy: 0.7284
Epoch 29/100
625/625 [==============================] - ETA: 0s - loss: 1.3941 - accuracy: 0.7302
Epoch 29: val_loss improved from 1.52123 to 1.50195, saving model to model625/625 [==============================] - 1482s 2s/step - loss: 1.3941 - accuracy: 0.7302 - val_loss: 1.5020 - val_accuracy: 0.7308
Epoch 30/100
625/625 [==============================] - ETA: 0s - loss: 1.3673 - accuracy: 0.7337
Epoch 30: val_loss improved from 1.50195 to 1.48183, saving model to model625/625 [==============================] - 1683s 3s/step - loss: 1.3673 - accuracy: 0.7337 - val_loss: 1.4818 - val_accuracy: 0.7339
Epoch 31/100
625/625 [==============================] - ETA: 0s - loss: 1.3423 - accuracy: 0.7369
Epoch 31: val_loss improved from 1.48183 to 1.46150, saving model to model625/625 [==============================] - 1621s 3s/step - loss: 1.3423 - accuracy: 0.7369 - val_loss: 1.4615 - val_accuracy: 0.7364
Epoch 32/100
625/625 [==============================] - ETA: 0s - loss: 1.3177 - accuracy: 0.7395
Epoch 32: val_loss improved from 1.46150 to 1.44290, saving model to model625/625 [==============================] - 1594s 3s/step - loss: 1.3177 - accuracy: 0.7395 - val_loss: 1.4429 - val_accuracy: 0.7392
Epoch 33/100
625/625 [==============================] - ETA: 0s - loss: 1.2939 - accuracy: 0.7428
Epoch 33: val_loss improved from 1.44290 to 1.42519, saving model to model625/625 [==============================] - 1564s 3s/step - loss: 1.2939 - accuracy: 0.7428 - val_loss: 1.4252 - val_accuracy: 0.7408
Epoch 34/100
625/625 [==============================] - ETA: 0s - loss: 1.2713 - accuracy: 0.7454
Epoch 34: val_loss improved from 1.42519 to 1.40973, saving model to model625/625 [==============================] - 1410s 2s/step - loss: 1.2713 - accuracy: 0.7454 - val_loss: 1.4097 - val_accuracy: 0.7436
Epoch 35/100
625/625 [==============================] - ETA: 0s - loss: 1.2492 - accuracy: 0.7486
Epoch 35: val_loss improved from 1.40973 to 1.39085, saving model to model625/625 [==============================] - 1407s 2s/step - loss: 1.2492 - accuracy: 0.7486 - val_loss: 1.3909 - val_accuracy: 0.7464
Epoch 36/100
625/625 [==============================] - ETA: 0s - loss: 1.2286 - accuracy: 0.7511
Epoch 36: val_loss improved from 1.39085 to 1.37574, saving model to model625/625 [==============================] - 1470s 2s/step - loss: 1.2286 - accuracy: 0.7511 - val_loss: 1.3757 - val_accuracy: 0.7485
Epoch 37/100
625/625 [==============================] - ETA: 0s - loss: 1.2086 - accuracy: 0.7537
Epoch 37: val_loss improved from 1.37574 to 1.36135, saving model to model625/625 [==============================] - 1561s 2s/step - loss: 1.2086 - accuracy: 0.7537 - val_loss: 1.3613 - val_accuracy: 0.7503
Epoch 38/100
625/625 [==============================] - ETA: 0s - loss: 1.1886 - accuracy: 0.7560
Epoch 38: val_loss improved from 1.36135 to 1.34700, saving model to model625/625 [==============================] - 1723s 3s/step - loss: 1.1886 - accuracy: 0.7560 - val_loss: 1.3470 - val_accuracy: 0.7519
Epoch 39/100
625/625 [==============================] - ETA: 0s - loss: 1.1690 - accuracy: 0.7591
Epoch 39: val_loss improved from 1.34700 to 1.33595, saving model to model625/625 [==============================] - 1796s 3s/step - loss: 1.1690 - accuracy: 0.7591 - val_loss: 1.3360 - val_accuracy: 0.7536
Epoch 40/100
625/625 [==============================] - ETA: 0s - loss: 1.1516 - accuracy: 0.7611
Epoch 40: val_loss improved from 1.33595 to 1.32075, saving model to model625/625 [==============================] - 1883s 3s/step - loss: 1.1516 - accuracy: 0.7611 - val_loss: 1.3208 - val_accuracy: 0.7567
Epoch 41/100
625/625 [==============================] - ETA: 0s - loss: 1.1329 - accuracy: 0.7641
Epoch 41: val_loss improved from 1.32075 to 1.30833, saving model to model625/625 [==============================] - 1924s 3s/step - loss: 1.1329 - accuracy: 0.7641 - val_loss: 1.3083 - val_accuracy: 0.7582
Epoch 42/100
625/625 [==============================] - ETA: 0s - loss: 1.1154 - accuracy: 0.7661
Epoch 42: val_loss improved from 1.30833 to 1.29805, saving model to model625/625 [==============================] - 1726s 3s/step - loss: 1.1154 - accuracy: 0.7661 - val_loss: 1.2980 - val_accuracy: 0.7590
Epoch 43/100
625/625 [==============================] - ETA: 0s - loss: 1.0988 - accuracy: 0.7688
Epoch 43: val_loss improved from 1.29805 to 1.28425, saving model to model625/625 [==============================] - 1588s 3s/step - loss: 1.0988 - accuracy: 0.7688 - val_loss: 1.2843 - val_accuracy: 0.7619
Epoch 44/100
625/625 [==============================] - ETA: 0s - loss: 1.0836 - accuracy: 0.7700
Epoch 44: val_loss improved from 1.28425 to 1.27422, saving model to model625/625 [==============================] - 1559s 2s/step - loss: 1.0836 - accuracy: 0.7700 - val_loss: 1.2742 - val_accuracy: 0.7623
Epoch 45/100
625/625 [==============================] - ETA: 0s - loss: 1.0658 - accuracy: 0.7731
Epoch 45: val_loss improved from 1.27422 to 1.26191, saving model to model625/625 [==============================] - 1663s 3s/step - loss: 1.0658 - accuracy: 0.7731 - val_loss: 1.2619 - val_accuracy: 0.7648
Epoch 46/100
625/625 [==============================] - ETA: 0s - loss: 1.0521 - accuracy: 0.7748
Epoch 46: val_loss improved from 1.26191 to 1.25429, saving model to model625/625 [==============================] - 1829s 3s/step - loss: 1.0521 - accuracy: 0.7748 - val_loss: 1.2543 - val_accuracy: 0.7654
Epoch 47/100
625/625 [==============================] - ETA: 0s - loss: 1.0367 - accuracy: 0.7768
Epoch 47: val_loss improved from 1.25429 to 1.24540, saving model to model625/625 [==============================] - 1825s 3s/step - loss: 1.0367 - accuracy: 0.7768 - val_loss: 1.2454 - val_accuracy: 0.7665
Epoch 48/100
625/625 [==============================] - ETA: 0s - loss: 1.0228 - accuracy: 0.7788
Epoch 48: val_loss improved from 1.24540 to 1.23448, saving model to model625/625 [==============================] - 1875s 3s/step - loss: 1.0228 - accuracy: 0.7788 - val_loss: 1.2345 - val_accuracy: 0.7683
Epoch 49/100
625/625 [==============================] - ETA: 0s - loss: 1.0085 - accuracy: 0.7808
Epoch 49: val_loss improved from 1.23448 to 1.22543, saving model to model625/625 [==============================] - 1919s 3s/step - loss: 1.0085 - accuracy: 0.7808 - val_loss: 1.2254 - val_accuracy: 0.7696
Epoch 50/100
625/625 [==============================] - ETA: 0s - loss: 0.9947 - accuracy: 0.7825
Epoch 50: val_loss improved from 1.22543 to 1.21777, saving model to model625/625 [==============================] - 1754s 3s/step - loss: 0.9947 - accuracy: 0.7825 - val_loss: 1.2178 - val_accuracy: 0.7700
Epoch 51/100
625/625 [==============================] - ETA: 0s - loss: 0.9813 - accuracy: 0.7848
Epoch 51: val_loss improved from 1.21777 to 1.20699, saving model to model625/625 [==============================] - 1753s 3s/step - loss: 0.9813 - accuracy: 0.7848 - val_loss: 1.2070 - val_accuracy: 0.7726
Epoch 52/100
625/625 [==============================] - ETA: 0s - loss: 0.9683 - accuracy: 0.7867
Epoch 52: val_loss improved from 1.20699 to 1.19979, saving model to model625/625 [==============================] - 1705s 3s/step - loss: 0.9683 - accuracy: 0.7867 - val_loss: 1.1998 - val_accuracy: 0.7730
Epoch 53/100
625/625 [==============================] - ETA: 0s - loss: 0.9562 - accuracy: 0.7881
Epoch 53: val_loss improved from 1.19979 to 1.19423, saving model to model625/625 [==============================] - 1669s 3s/step - loss: 0.9562 - accuracy: 0.7881 - val_loss: 1.1942 - val_accuracy: 0.7740
Epoch 54/100
625/625 [==============================] - ETA: 0s - loss: 0.9433 - accuracy: 0.7902
Epoch 54: val_loss improved from 1.19423 to 1.18470, saving model to model625/625 [==============================] - 1731s 3s/step - loss: 0.9433 - accuracy: 0.7902 - val_loss: 1.1847 - val_accuracy: 0.7759
Epoch 55/100
625/625 [==============================] - ETA: 0s - loss: 0.9318 - accuracy: 0.7916
Epoch 55: val_loss improved from 1.18470 to 1.17739, saving model to model625/625 [==============================] - 1711s 3s/step - loss: 0.9318 - accuracy: 0.7916 - val_loss: 1.1774 - val_accuracy: 0.7764
Epoch 56/100
625/625 [==============================] - ETA: 0s - loss: 0.9212 - accuracy: 0.7933
Epoch 56: val_loss improved from 1.17739 to 1.17282, saving model to model625/625 [==============================] - 1694s 3s/step - loss: 0.9212 - accuracy: 0.7933 - val_loss: 1.1728 - val_accuracy: 0.7773
Epoch 57/100
625/625 [==============================] - ETA: 0s - loss: 0.9108 - accuracy: 0.7944
Epoch 57: val_loss improved from 1.17282 to 1.16557, saving model to model625/625 [==============================] - 1747s 3s/step - loss: 0.9108 - accuracy: 0.7944 - val_loss: 1.1656 - val_accuracy: 0.7786
Epoch 58/100
625/625 [==============================] - ETA: 0s - loss: 0.8990 - accuracy: 0.7966
Epoch 58: val_loss improved from 1.16557 to 1.16054, saving model to model625/625 [==============================] - 1629s 3s/step - loss: 0.8990 - accuracy: 0.7966 - val_loss: 1.1605 - val_accuracy: 0.7795
Epoch 59/100
625/625 [==============================] - ETA: 0s - loss: 0.8879 - accuracy: 0.7978
Epoch 59: val_loss improved from 1.16054 to 1.15364, saving model to model625/625 [==============================] - 1554s 2s/step - loss: 0.8879 - accuracy: 0.7978 - val_loss: 1.1536 - val_accuracy: 0.7797
Epoch 60/100
625/625 [==============================] - ETA: 0s - loss: 0.8777 - accuracy: 0.7996
Epoch 60: val_loss improved from 1.15364 to 1.14653, saving model to model625/625 [==============================] - 1463s 2s/step - loss: 0.8777 - accuracy: 0.7996 - val_loss: 1.1465 - val_accuracy: 0.7813
Epoch 61/100
625/625 [==============================] - ETA: 0s - loss: 0.8680 - accuracy: 0.8008
Epoch 61: val_loss improved from 1.14653 to 1.14013, saving model to model625/625 [==============================] - 1493s 2s/step - loss: 0.8680 - accuracy: 0.8008 - val_loss: 1.1401 - val_accuracy: 0.7822
Epoch 62/100
625/625 [==============================] - ETA: 0s - loss: 0.8581 - accuracy: 0.8025
Epoch 62: val_loss improved from 1.14013 to 1.13445, saving model to model625/625 [==============================] - 1566s 3s/step - loss: 0.8581 - accuracy: 0.8025 - val_loss: 1.1345 - val_accuracy: 0.7835
Epoch 63/100
625/625 [==============================] - ETA: 0s - loss: 0.8482 - accuracy: 0.8041
Epoch 63: val_loss improved from 1.13445 to 1.13095, saving model to model625/625 [==============================] - 1547s 2s/step - loss: 0.8482 - accuracy: 0.8041 - val_loss: 1.1310 - val_accuracy: 0.7836
Epoch 64/100
625/625 [==============================] - ETA: 0s - loss: 0.8392 - accuracy: 0.8055
Epoch 64: val_loss improved from 1.13095 to 1.12212, saving model to model625/625 [==============================] - 1585s 3s/step - loss: 0.8392 - accuracy: 0.8055 - val_loss: 1.1221 - val_accuracy: 0.7854
Epoch 65/100
625/625 [==============================] - ETA: 0s - loss: 0.8291 - accuracy: 0.8071
Epoch 65: val_loss improved from 1.12212 to 1.11893, saving model to model625/625 [==============================] - 1555s 2s/step - loss: 0.8291 - accuracy: 0.8071 - val_loss: 1.1189 - val_accuracy: 0.7851
Epoch 66/100
625/625 [==============================] - ETA: 0s - loss: 0.8204 - accuracy: 0.8083
Epoch 66: val_loss improved from 1.11893 to 1.11534, saving model to model625/625 [==============================] - 1553s 2s/step - loss: 0.8204 - accuracy: 0.8083 - val_loss: 1.1153 - val_accuracy: 0.7855
Epoch 67/100
625/625 [==============================] - ETA: 0s - loss: 0.8119 - accuracy: 0.8095
Epoch 67: val_loss improved from 1.11534 to 1.11159, saving model to model625/625 [==============================] - 1587s 3s/step - loss: 0.8119 - accuracy: 0.8095 - val_loss: 1.1116 - val_accuracy: 0.7863
Epoch 68/100
625/625 [==============================] - ETA: 0s - loss: 0.8035 - accuracy: 0.8109
Epoch 68: val_loss improved from 1.11159 to 1.10478, saving model to model625/625 [==============================] - 1679s 3s/step - loss: 0.8035 - accuracy: 0.8109 - val_loss: 1.1048 - val_accuracy: 0.7874
Epoch 69/100
625/625 [==============================] - ETA: 0s - loss: 0.7956 - accuracy: 0.8119
Epoch 69: val_loss improved from 1.10478 to 1.10363, saving model to model625/625 [==============================] - 1656s 3s/step - loss: 0.7956 - accuracy: 0.8119 - val_loss: 1.1036 - val_accuracy: 0.7878
Epoch 70/100
625/625 [==============================] - ETA: 0s - loss: 0.7869 - accuracy: 0.8134
Epoch 70: val_loss improved from 1.10363 to 1.09615, saving model to model625/625 [==============================] - 1660s 3s/step - loss: 0.7869 - accuracy: 0.8134 - val_loss: 1.0962 - val_accuracy: 0.7889
Epoch 71/100
625/625 [==============================] - ETA: 0s - loss: 0.7790 - accuracy: 0.8148
Epoch 71: val_loss improved from 1.09615 to 1.09203, saving model to model625/625 [==============================] - 1667s 3s/step - loss: 0.7790 - accuracy: 0.8148 - val_loss: 1.0920 - val_accuracy: 0.7894
Epoch 72/100
625/625 [==============================] - ETA: 0s - loss: 0.7707 - accuracy: 0.8164
Epoch 72: val_loss improved from 1.09203 to 1.08892, saving model to model625/625 [==============================] - 1664s 3s/step - loss: 0.7707 - accuracy: 0.8164 - val_loss: 1.0889 - val_accuracy: 0.7903
Epoch 73/100
625/625 [==============================] - ETA: 0s - loss: 0.7640 - accuracy: 0.8170
Epoch 73: val_loss improved from 1.08892 to 1.08577, saving model to model625/625 [==============================] - 1701s 3s/step - loss: 0.7640 - accuracy: 0.8170 - val_loss: 1.0858 - val_accuracy: 0.7909
Epoch 74/100
625/625 [==============================] - ETA: 0s - loss: 0.7567 - accuracy: 0.8182
Epoch 74: val_loss improved from 1.08577 to 1.08194, saving model to model625/625 [==============================] - 1632s 3s/step - loss: 0.7567 - accuracy: 0.8182 - val_loss: 1.0819 - val_accuracy: 0.7915
Epoch 75/100
625/625 [==============================] - ETA: 0s - loss: 0.7482 - accuracy: 0.8195
Epoch 75: val_loss improved from 1.08194 to 1.07837, saving model to model625/625 [==============================] - 1812s 3s/step - loss: 0.7482 - accuracy: 0.8195 - val_loss: 1.0784 - val_accuracy: 0.7919
Epoch 76/100
625/625 [==============================] - ETA: 0s - loss: 0.7413 - accuracy: 0.8208
Epoch 76: val_loss improved from 1.07837 to 1.07431, saving model to model625/625 [==============================] - 1680s 3s/step - loss: 0.7413 - accuracy: 0.8208 - val_loss: 1.0743 - val_accuracy: 0.7919
Epoch 77/100
625/625 [==============================] - ETA: 0s - loss: 0.7348 - accuracy: 0.8217
Epoch 77: val_loss improved from 1.07431 to 1.07171, saving model to model625/625 [==============================] - 1841s 3s/step - loss: 0.7348 - accuracy: 0.8217 - val_loss: 1.0717 - val_accuracy: 0.7922
Epoch 78/100
625/625 [==============================] - ETA: 0s - loss: 0.7276 - accuracy: 0.8231
Epoch 78: val_loss improved from 1.07171 to 1.06717, saving model to model625/625 [==============================] - 1713s 3s/step - loss: 0.7276 - accuracy: 0.8231 - val_loss: 1.0672 - val_accuracy: 0.7936
Epoch 79/100
625/625 [==============================] - ETA: 0s - loss: 0.7214 - accuracy: 0.8237
Epoch 79: val_loss improved from 1.06717 to 1.06485, saving model to model625/625 [==============================] - 1678s 3s/step - loss: 0.7214 - accuracy: 0.8237 - val_loss: 1.0648 - val_accuracy: 0.7945
Epoch 80/100
625/625 [==============================] - ETA: 0s - loss: 0.7152 - accuracy: 0.8250
Epoch 80: val_loss improved from 1.06485 to 1.06066, saving model to model625/625 [==============================] - 1692s 3s/step - loss: 0.7152 - accuracy: 0.8250 - val_loss: 1.0607 - val_accuracy: 0.7942
Epoch 81/100
625/625 [==============================] - ETA: 0s - loss: 0.7088 - accuracy: 0.8258
Epoch 81: val_loss improved from 1.06066 to 1.05814, saving model to model625/625 [==============================] - 1620s 3s/step - loss: 0.7088 - accuracy: 0.8258 - val_loss: 1.0581 - val_accuracy: 0.7955
Epoch 82/100
625/625 [==============================] - ETA: 0s - loss: 0.7011 - accuracy: 0.8271
Epoch 82: val_loss improved from 1.05814 to 1.05651, saving model to model625/625 [==============================] - 1587s 3s/step - loss: 0.7011 - accuracy: 0.8271 - val_loss: 1.0565 - val_accuracy: 0.7957
Epoch 83/100
625/625 [==============================] - ETA: 0s - loss: 0.6960 - accuracy: 0.8278
Epoch 83: val_loss improved from 1.05651 to 1.05408, saving model to model625/625 [==============================] - 1705s 3s/step - loss: 0.6960 - accuracy: 0.8278 - val_loss: 1.0541 - val_accuracy: 0.7956
Epoch 84/100
625/625 [==============================] - ETA: 0s - loss: 0.6901 - accuracy: 0.8287
Epoch 84: val_loss improved from 1.05408 to 1.04978, saving model to model625/625 [==============================] - 1608s 3s/step - loss: 0.6901 - accuracy: 0.8287 - val_loss: 1.0498 - val_accuracy: 0.7963
Epoch 85/100
625/625 [==============================] - ETA: 0s - loss: 0.6834 - accuracy: 0.8302
Epoch 85: val_loss improved from 1.04978 to 1.04747, saving model to model625/625 [==============================] - 1574s 3s/step - loss: 0.6834 - accuracy: 0.8302 - val_loss: 1.0475 - val_accuracy: 0.7970
Epoch 86/100
625/625 [==============================] - ETA: 0s - loss: 0.6773 - accuracy: 0.8313
Epoch 86: val_loss improved from 1.04747 to 1.04691, saving model to model625/625 [==============================] - 1609s 3s/step - loss: 0.6773 - accuracy: 0.8313 - val_loss: 1.0469 - val_accuracy: 0.7963
Epoch 87/100
625/625 [==============================] - ETA: 0s - loss: 0.6718 - accuracy: 0.8319
Epoch 87: val_loss improved from 1.04691 to 1.04322, saving model to model625/625 [==============================] - 1656s 3s/step - loss: 0.6718 - accuracy: 0.8319 - val_loss: 1.0432 - val_accuracy: 0.7971
Epoch 88/100
625/625 [==============================] - ETA: 0s - loss: 0.6663 - accuracy: 0.8327
Epoch 88: val_loss improved from 1.04322 to 1.04013, saving model to model625/625 [==============================] - 1749s 3s/step - loss: 0.6663 - accuracy: 0.8327 - val_loss: 1.0401 - val_accuracy: 0.7982
Epoch 89/100
625/625 [==============================] - ETA: 0s - loss: 0.6603 - accuracy: 0.8335
Epoch 89: val_loss improved from 1.04013 to 1.03667, saving model to model625/625 [==============================] - 1793s 3s/step - loss: 0.6603 - accuracy: 0.8335 - val_loss: 1.0367 - val_accuracy: 0.7981
Epoch 90/100
625/625 [==============================] - ETA: 0s - loss: 0.6550 - accuracy: 0.8348
Epoch 90: val_loss improved from 1.03667 to 1.03568, saving model to model625/625 [==============================] - 1884s 3s/step - loss: 0.6550 - accuracy: 0.8348 - val_loss: 1.0357 - val_accuracy: 0.7992
Epoch 91/100
104/625 [===>..........................] - ETA: 23:58 - loss: 0.6348 - accuracy: 0.8396
5. 测试
English: that's another issue
Original French: c'est une autre affaire
Predicted French: c'est une autre affaire

English: we should cut our losses
Original French: nous devrions compenser nos pertes
Predicted French: nous devrions nous arrêter de nos oreilles

English: jump across
Original French: saute de l'autre côté
Predicted French: fais de l'autre côté

English: i thought i heard a voice
Original French: j'ai cru entendre une voix
Predicted French: je pensais entendre une voix

English: let's not go
Original French: n'y allons pas
Predicted French: ne partons pas

English: does this ring a bell
Original French: ça vous évoque quelque chose
Predicted French: cela t'évoque quelque chose

English: tom is presumptuous
Original French: tom est présomptueux
Predicted French: tom est sensible

English: for here or to go
Original French: c'est pour ici ou à emporter
Predicted French: sur place ou à emporter

English: do what you think is best
Original French: fais ce que tu penses être le mieux
Predicted French: fais ce que tu penses que c'est mieux

English: i was dazzled
Original French: j'étais éblouie
Predicted French: j'étais en train de nettoyer

English: i doubt it
Original French: j'en doute
Predicted French: j'en doute

English: he was sentenced to death
Original French: il fut condamné à mort
Predicted French: il a été condamné à mort

English: i'm totally confused
Original French: je suis complètement perdue
Predicted French: je suis complètement confus

English: that wasn't my question
Original French: ce n'était pas ma question
Predicted French: ce n'était pas ma question

English: what is that smell
Original French: c’est quoi cette odeur
Predicted French: qu'est ce que ça a t il de l'importance

English: we need an answer
Original French: il nous faut une réponse
Predicted French: il nous faut une réponse

English: she adores him
Original French: elle le vénère
Predicted French: elle l'aime

English: they must be fake
Original French: ils doivent être faux
Predicted French: ils doivent être faux

English: he is mentally handicapped
Original French: il est handicapé mental
Predicted French: il est américain

English: i'm here by choice
Original French: je suis ici de mon plein gré
Predicted French: je suis là tout seul

English: tom looks determined
Original French: tom semble déterminé
Predicted French: tom a l'air déterminé

English: this pen has run dry
Original French: ce stylo n'a plus d'encre
Predicted French: ce stylo a cessé de lait

English: how did you get hurt
Original French: comment vous êtes vous blessé
Predicted French: comment t'es tu blessé

English: i'm no dummy
Original French: je ne suis pas abrutie
Predicted French: je ne suis pas idiot

English: we all cried a lot
Original French: nous avons toutes beaucoup pleuré
Predicted French: nous avons toutes beaucoup pleuré

English: i've found a better way
Original French: j'ai trouvé un meilleur moyen
Predicted French: j'ai trouvé un meilleur moyen

English: sorry we're full today
Original French: désolé nous sommes complets aujourd'hui
Predicted French: désolé nous sommes très occupés aujourd'hui

English: did they say anything
Original French: ont ils dit quelque chose
Predicted French: ont elles dit quoi que ce soit

English: have you ever been in love
Original French: avez vous jamais été amoureux
Predicted French: as tu jamais été amoureux

English: here take it
Original French: tiens prends le
Predicted French: voilà le

English: the wind blew all day
Original French: le vent a soufflé toute la journée
Predicted French: le vent s'est éclairci toute la journée

English: don't make me choose
Original French: ne m'obligez pas à choisir
Predicted French: ne m'obligez pas à faire de mes amis

English: we walked to my room
Original French: nous avons marché jusqu'à ma chambre
Predicted French: nous sommes restés chez moi

English: give it some thought
Original French: réfléchis y un peu
Predicted French: donne le à le croire

English: i opened the window
Original French: j'ouvris la fenêtre
Predicted French: j'ai fermé la fenêtre

English: tom is awkward
Original French: tom est maladroit
Predicted French: tom est blessé

English: i can't wait till summer
Original French: je ne peux pas attendre jusqu'à l'été
Predicted French: je ne peux pas attendre jusqu'à l'été

English: i'm not lazy
Original French: je ne suis pas paresseux
Predicted French: je ne suis pas paresseux

English: it's no big deal
Original French: ça n’est pas grave
Predicted French: ce n'est pas un grand

English: i never was shy
Original French: je n'ai jamais été timide
Predicted French: je n'ai jamais été timide

English: i need to impress tom
Original French: je dois impressionner tom
Predicted French: j'ai besoin de tom

English: i want to see your boss
Original French: je veux voir ton patron
Predicted French: je veux voir ton patron

English: the wind blew all day
Original French: le vent souffla tout le jour
Predicted French: le vent s'est éclairci toute la journée

English: that umbrella is tom's
Original French: ce parapluie est à tom
Predicted French: ce parapluie est tom

English: do you mean what you say
Original French: tu dis ça sérieusement
Predicted French: tu comprends ce que je veux dire
这篇关于LSTM seq2seq 模型之英语到法语翻译的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!