In [1]:
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
Using TensorFlow backend.
In [2]:
batch_size = 64            # Batch size for training.
epochs = 100               # Number of epochs to train for.
latent_dim = 256           # Latent dimensionality of the encoding space.
num_samples = 10000        # Number of samples to train on.
data_path = 'data/fra.txt' # Path to the data txt file on disk.
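
The data file is expected to contain one tab-separated pair per line (English sentence, then its French translation), which the vectorization cell below relies on. A quick optional sanity check, assuming the file exists at `data_path`:

In [ ]:
# Peek at the first few lines of the data file (assumed format: "english<TAB>french").
with open(data_path, encoding='utf-8') as f:
    for _ in range(3):
        print(repr(f.readline().rstrip('\n')))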
In [6]:
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, encoding='utf-8') as f:
    lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        input_characters.add(char)
    for char in target_text:
        target_characters.add(char)
In [7]:
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])

print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
Number of samples: 10000
Number of unique input tokens: 71
Number of unique output tokens: 93
Max sequence length for inputs: 16
Max sequence length for outputs: 59
In [8]:
# Dictionaries mapping each character to its index.
input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])
In [9]:
# One-hot encode the data into 3D arrays of shape (num_samples, max_seq_length, num_tokens).
encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')

for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep.
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.
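
As an optional sanity check, the one-hot arrays can be decoded back to text; a minimal sketch using the index dictionary built above (rows of all zeros are padding):

In [ ]:
# Decode the first encoded input back to text to confirm the one-hot encoding round-trips.
idx_to_char = {i: ch for ch, i in input_token_index.items()}
recovered = ''.join(idx_to_char[int(step.argmax())]
                    for step in encoder_input_data[0] if step.any())
print(repr(input_texts[0]), '->', repr(recovered))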

Encoder

In [23]:
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
print(type(encoder_states), encoder_states[0].shape, encoder_states[1].shape)
<class 'list'> (?, 256) (?, 256)

Decoder

In [24]:
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))

# We set up our decoder to return full output sequences,
# and to return its internal states as well. We don't use the
# returned states in the training model, but we will use them for inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

Model, Training

In [25]:
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()
____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_2 (InputLayer)             (None, None, 71)      0                                            
____________________________________________________________________________________________________
input_3 (InputLayer)             (None, None, 93)      0                                            
____________________________________________________________________________________________________
lstm_2 (LSTM)                    [(None, 256), (None,  335872      input_2[0][0]                    
____________________________________________________________________________________________________
lstm_3 (LSTM)                    [(None, None, 256), ( 358400      input_3[0][0]                    
                                                                   lstm_2[0][1]                     
                                                                   lstm_2[0][2]                     
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, None, 93)      23901       lstm_3[0][0]                     
====================================================================================================
Total params: 718,173
Trainable params: 718,173
Non-trainable params: 0
____________________________________________________________________________________________________
In [26]:
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
Train on 8000 samples, validate on 2000 samples
Epoch 1/100
8000/8000 [==============================] - 33s - loss: 0.9345 - val_loss: 0.9842
Epoch 2/100
8000/8000 [==============================] - 29s - loss: 0.7492 - val_loss: 0.8140
Epoch 3/100
8000/8000 [==============================] - 29s - loss: 0.6325 - val_loss: 0.7250
Epoch 4/100
8000/8000 [==============================] - 29s - loss: 0.5742 - val_loss: 0.6719
Epoch 5/100
8000/8000 [==============================] - 29s - loss: 0.5349 - val_loss: 0.6453
Epoch 6/100
8000/8000 [==============================] - 29s - loss: 0.5018 - val_loss: 0.6156
Epoch 7/100
8000/8000 [==============================] - 29s - loss: 0.4743 - val_loss: 0.5954
Epoch 8/100
8000/8000 [==============================] - 30s - loss: 0.4504 - val_loss: 0.5686
Epoch 9/100
8000/8000 [==============================] - 29s - loss: 0.4297 - val_loss: 0.5522
Epoch 10/100
8000/8000 [==============================] - 29s - loss: 0.4111 - val_loss: 0.5413
Epoch 11/100
8000/8000 [==============================] - 29s - loss: 0.3948 - val_loss: 0.5305
Epoch 12/100
8000/8000 [==============================] - 29s - loss: 0.3801 - val_loss: 0.5235
Epoch 13/100
8000/8000 [==============================] - 29s - loss: 0.3662 - val_loss: 0.5116
Epoch 14/100
8000/8000 [==============================] - 29s - loss: 0.3533 - val_loss: 0.5082
Epoch 15/100
8000/8000 [==============================] - 29s - loss: 0.3413 - val_loss: 0.4983
Epoch 16/100
8000/8000 [==============================] - 29s - loss: 0.3299 - val_loss: 0.4982
Epoch 17/100
8000/8000 [==============================] - 29s - loss: 0.3191 - val_loss: 0.4935
Epoch 18/100
8000/8000 [==============================] - 29s - loss: 0.3085 - val_loss: 0.4934
Epoch 19/100
8000/8000 [==============================] - 29s - loss: 0.2987 - val_loss: 0.4877
Epoch 20/100
8000/8000 [==============================] - 29s - loss: 0.2897 - val_loss: 0.4876
Epoch 21/100
8000/8000 [==============================] - 29s - loss: 0.2806 - val_loss: 0.4895
Epoch 22/100
8000/8000 [==============================] - 29s - loss: 0.2720 - val_loss: 0.4837
Epoch 23/100
8000/8000 [==============================] - 29s - loss: 0.2640 - val_loss: 0.4867
Epoch 24/100
8000/8000 [==============================] - 29s - loss: 0.2562 - val_loss: 0.4894
Epoch 25/100
8000/8000 [==============================] - 29s - loss: 0.2488 - val_loss: 0.4859
Epoch 26/100
8000/8000 [==============================] - 29s - loss: 0.2412 - val_loss: 0.4887
Epoch 27/100
8000/8000 [==============================] - 29s - loss: 0.2348 - val_loss: 0.4895
Epoch 28/100
8000/8000 [==============================] - 29s - loss: 0.2277 - val_loss: 0.4940
Epoch 29/100
8000/8000 [==============================] - 29s - loss: 0.2214 - val_loss: 0.4936
Epoch 30/100
8000/8000 [==============================] - 29s - loss: 0.2151 - val_loss: 0.4959
Epoch 31/100
8000/8000 [==============================] - 29s - loss: 0.2093 - val_loss: 0.5039
Epoch 32/100
8000/8000 [==============================] - 29s - loss: 0.2037 - val_loss: 0.5078
Epoch 33/100
8000/8000 [==============================] - 29s - loss: 0.1983 - val_loss: 0.5038
Epoch 34/100
8000/8000 [==============================] - 29s - loss: 0.1928 - val_loss: 0.5069
Epoch 35/100
8000/8000 [==============================] - 29s - loss: 0.1879 - val_loss: 0.5095
Epoch 36/100
8000/8000 [==============================] - 29s - loss: 0.1833 - val_loss: 0.5201
Epoch 37/100
8000/8000 [==============================] - 29s - loss: 0.1783 - val_loss: 0.5229
Epoch 38/100
8000/8000 [==============================] - 29s - loss: 0.1738 - val_loss: 0.5261
Epoch 39/100
8000/8000 [==============================] - 29s - loss: 0.1697 - val_loss: 0.5274
Epoch 40/100
8000/8000 [==============================] - 29s - loss: 0.1657 - val_loss: 0.5333
Epoch 41/100
8000/8000 [==============================] - 29s - loss: 0.1618 - val_loss: 0.5373
Epoch 42/100
8000/8000 [==============================] - 29s - loss: 0.1578 - val_loss: 0.5416
Epoch 43/100
8000/8000 [==============================] - 29s - loss: 0.1545 - val_loss: 0.5402
Epoch 44/100
8000/8000 [==============================] - 29s - loss: 0.1507 - val_loss: 0.5503
Epoch 45/100
8000/8000 [==============================] - 29s - loss: 0.1472 - val_loss: 0.5566
Epoch 46/100
8000/8000 [==============================] - 29s - loss: 0.1440 - val_loss: 0.5573
Epoch 47/100
8000/8000 [==============================] - 29s - loss: 0.1411 - val_loss: 0.5614
Epoch 48/100
8000/8000 [==============================] - 29s - loss: 0.1382 - val_loss: 0.5716
Epoch 49/100
8000/8000 [==============================] - 29s - loss: 0.1348 - val_loss: 0.5737
Epoch 50/100
8000/8000 [==============================] - 29s - loss: 0.1325 - val_loss: 0.5716
Epoch 51/100
8000/8000 [==============================] - 29s - loss: 0.1294 - val_loss: 0.5807
Epoch 52/100
8000/8000 [==============================] - 29s - loss: 0.1270 - val_loss: 0.5847
Epoch 53/100
8000/8000 [==============================] - 29s - loss: 0.1243 - val_loss: 0.5931
Epoch 54/100
8000/8000 [==============================] - 29s - loss: 0.1220 - val_loss: 0.5968
Epoch 55/100
8000/8000 [==============================] - 29s - loss: 0.1196 - val_loss: 0.5999
Epoch 56/100
8000/8000 [==============================] - 29s - loss: 0.1175 - val_loss: 0.6018
Epoch 57/100
8000/8000 [==============================] - 29s - loss: 0.1150 - val_loss: 0.6070
Epoch 58/100
8000/8000 [==============================] - 29s - loss: 0.1130 - val_loss: 0.6137
Epoch 59/100
8000/8000 [==============================] - 29s - loss: 0.1113 - val_loss: 0.6131
Epoch 60/100
8000/8000 [==============================] - 29s - loss: 0.1090 - val_loss: 0.6189
Epoch 61/100
8000/8000 [==============================] - 29s - loss: 0.1070 - val_loss: 0.6239
Epoch 62/100
8000/8000 [==============================] - 29s - loss: 0.1052 - val_loss: 0.6241
Epoch 63/100
8000/8000 [==============================] - 29s - loss: 0.1034 - val_loss: 0.6316
Epoch 64/100
8000/8000 [==============================] - 29s - loss: 0.1015 - val_loss: 0.6366
Epoch 65/100
8000/8000 [==============================] - 29s - loss: 0.0999 - val_loss: 0.6386
Epoch 66/100
8000/8000 [==============================] - 29s - loss: 0.0991 - val_loss: 0.6431
Epoch 67/100
8000/8000 [==============================] - 29s - loss: 0.0971 - val_loss: 0.6442
Epoch 68/100
8000/8000 [==============================] - 29s - loss: 0.0952 - val_loss: 0.6514
Epoch 69/100
8000/8000 [==============================] - 29s - loss: 0.0940 - val_loss: 0.6549
Epoch 70/100
8000/8000 [==============================] - 29s - loss: 0.0922 - val_loss: 0.6540
Epoch 71/100
8000/8000 [==============================] - 29s - loss: 0.0909 - val_loss: 0.6659
Epoch 72/100
8000/8000 [==============================] - 29s - loss: 0.0895 - val_loss: 0.6647
Epoch 73/100
8000/8000 [==============================] - 29s - loss: 0.0885 - val_loss: 0.6666
Epoch 74/100
8000/8000 [==============================] - 29s - loss: 0.0866 - val_loss: 0.6711
Epoch 75/100
8000/8000 [==============================] - 29s - loss: 0.0857 - val_loss: 0.6764
Epoch 76/100
8000/8000 [==============================] - 29s - loss: 0.0841 - val_loss: 0.6848
Epoch 77/100
8000/8000 [==============================] - 29s - loss: 0.0829 - val_loss: 0.6842
Epoch 78/100
8000/8000 [==============================] - 29s - loss: 0.0816 - val_loss: 0.6834
Epoch 79/100
8000/8000 [==============================] - 29s - loss: 0.0805 - val_loss: 0.6891
Epoch 80/100
8000/8000 [==============================] - 29s - loss: 0.0795 - val_loss: 0.6950
Epoch 81/100
8000/8000 [==============================] - 29s - loss: 0.0784 - val_loss: 0.6972
Epoch 82/100
8000/8000 [==============================] - 29s - loss: 0.0775 - val_loss: 0.7030
Epoch 83/100
8000/8000 [==============================] - 29s - loss: 0.0761 - val_loss: 0.7025
Epoch 84/100
8000/8000 [==============================] - 29s - loss: 0.0750 - val_loss: 0.7103
Epoch 85/100
8000/8000 [==============================] - 29s - loss: 0.0743 - val_loss: 0.7055
Epoch 86/100
8000/8000 [==============================] - 29s - loss: 0.0729 - val_loss: 0.7151
Epoch 87/100
8000/8000 [==============================] - 29s - loss: 0.0723 - val_loss: 0.7207
Epoch 88/100
8000/8000 [==============================] - 29s - loss: 0.0712 - val_loss: 0.7202
Epoch 89/100
8000/8000 [==============================] - 29s - loss: 0.0702 - val_loss: 0.7313
Epoch 90/100
8000/8000 [==============================] - 29s - loss: 0.0690 - val_loss: 0.7294
Epoch 91/100
8000/8000 [==============================] - 29s - loss: 0.0683 - val_loss: 0.7306
Epoch 92/100
8000/8000 [==============================] - 29s - loss: 0.0672 - val_loss: 0.7339
Epoch 93/100
8000/8000 [==============================] - 29s - loss: 0.0665 - val_loss: 0.7320
Epoch 94/100
8000/8000 [==============================] - 29s - loss: 0.0657 - val_loss: 0.7403
Epoch 95/100
8000/8000 [==============================] - 29s - loss: 0.0648 - val_loss: 0.7403
Epoch 96/100
8000/8000 [==============================] - 29s - loss: 0.0640 - val_loss: 0.7495
Epoch 97/100
8000/8000 [==============================] - 29s - loss: 0.0632 - val_loss: 0.7544
Epoch 98/100
8000/8000 [==============================] - 29s - loss: 0.0621 - val_loss: 0.7492
Epoch 99/100
8000/8000 [==============================] - 29s - loss: 0.0615 - val_loss: 0.7503
Epoch 100/100
8000/8000 [==============================] - 29s - loss: 0.0608 - val_loss: 0.7570
Out[26]:
<keras.callbacks.History at 0x22c1d73cf98>
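
The `Out[26]` value above is the `History` object returned by `model.fit`. If it is captured in a variable (say `history`, a hypothetical name not used above), the per-epoch losses stored in `history.history` can be plotted; a minimal sketch assuming matplotlib is available:

In [ ]:
# Hypothetical: assumes `history = model.fit(...)` was captured in the training cell.
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('epoch')
plt.legend()
plt.show()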
In [28]:
# Save model
model.save('s2s.h5')
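
In a later session, the saved training model can be restored with `load_model`; note that the separate encoder/decoder inference models defined below are built from this notebook's layer objects, so those cells still need to be rerun. A minimal sketch:

In [ ]:
# Reload the trained seq2seq model from disk (e.g. after restarting the kernel).
from keras.models import load_model
model = load_model('s2s.h5')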

Inference

Encoding Model (sampling)

In [29]:
# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)
encoder_model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, None, 71)          0         
_________________________________________________________________
lstm_2 (LSTM)                [(None, 256), (None, 256) 335872    
=================================================================
Total params: 335,872
Trainable params: 335,872
Non-trainable params: 0
_________________________________________________________________

Decoding Model

In [30]:
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)

decoder_model.summary()
____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_3 (InputLayer)             (None, None, 93)      0                                            
____________________________________________________________________________________________________
input_4 (InputLayer)             (None, 256)           0                                            
____________________________________________________________________________________________________
input_5 (InputLayer)             (None, 256)           0                                            
____________________________________________________________________________________________________
lstm_3 (LSTM)                    [(None, None, 256), ( 358400      input_3[0][0]                    
                                                                   input_4[0][0]                    
                                                                   input_5[0][0]                    
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, None, 93)      23901       lstm_3[1][0]                     
====================================================================================================
Total params: 382,301
Trainable params: 382,301
Non-trainable params: 0
____________________________________________________________________________________________________
In [31]:
# Reverse-lookup token index to decode sequences back to something readable.
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
In [32]:
def decode_sequence(input_seq):
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)

    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.

    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)

        # Sample a token
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char

        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
           len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True

        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.

        # Update states
        states_value = [h, c]

    return decoded_sentence
In [33]:
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
    print('Decoded sentence:', decoded_sentence)
-
Input sentence: Go.
Decoded sentence: Va !

-
Input sentence: Run!
Decoded sentence: Cours !

-
Input sentence: Run!
Decoded sentence: Cours !

-
Input sentence: Wow!
Decoded sentence: Ça alors !

-
Input sentence: Fire!
Decoded sentence: Au feu !

-
Input sentence: Help!
Decoded sentence: À l'aide !

-
Input sentence: Jump.
Decoded sentence: Saute.

-
Input sentence: Stop!
Decoded sentence: Ça suffit !

-
Input sentence: Stop!
Decoded sentence: Ça suffit !

-
Input sentence: Stop!
Decoded sentence: Ça suffit !

-
Input sentence: Wait!
Decoded sentence: Attends !

-
Input sentence: Wait!
Decoded sentence: Attends !

-
Input sentence: I see.
Decoded sentence: Je creci dus.

-
Input sentence: I try.
Decoded sentence: J'essaye.

-
Input sentence: I won!
Decoded sentence: J'ai pris de temps.

-
Input sentence: I won!
Decoded sentence: J'ai pris de temps.

-
Input sentence: Oh no!
Decoded sentence: Oh non !

-
Input sentence: Attack!
Decoded sentence: Attaquez !

-
Input sentence: Attack!
Decoded sentence: Attaquez !

-
Input sentence: Cheers!
Decoded sentence: Merci !

-
Input sentence: Cheers!
Decoded sentence: Merci !

-
Input sentence: Cheers!
Decoded sentence: Merci !

-
Input sentence: Cheers!
Decoded sentence: Merci !

-
Input sentence: Get up.
Decoded sentence: Lève-toi.

-
Input sentence: Got it!
Decoded sentence: Compris !

-
Input sentence: Got it!
Decoded sentence: Compris !

-
Input sentence: Got it?
Decoded sentence: Compris ?

-
Input sentence: Got it?
Decoded sentence: Compris ?

-
Input sentence: Got it?
Decoded sentence: Compris ?

-
Input sentence: Hop in.
Decoded sentence: Monte.

-
Input sentence: Hop in.
Decoded sentence: Monte.

-
Input sentence: Hug me.
Decoded sentence: Serre-moi dans tes bras !

-
Input sentence: Hug me.
Decoded sentence: Serre-moi dans tes bras !

-
Input sentence: I fell.
Decoded sentence: Je suis tombée.

-
Input sentence: I fell.
Decoded sentence: Je suis tombée.

-
Input sentence: I know.
Decoded sentence: Je la connais.

-
Input sentence: I left.
Decoded sentence: Je suis partie.

-
Input sentence: I left.
Decoded sentence: Je suis partie.

-
Input sentence: I lost.
Decoded sentence: J'ai perdu.

-
Input sentence: I'm 19.
Decoded sentence: J'ai la froupse.

-
Input sentence: I'm OK.
Decoded sentence: Je suis en train de disciter.

-
Input sentence: I'm OK.
Decoded sentence: Je suis en train de disciter.

-
Input sentence: Listen.
Decoded sentence: Écoutez !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: No way!
Decoded sentence: C'est hors de question !

-
Input sentence: Really?
Decoded sentence: Vrai ?

-
Input sentence: Really?
Decoded sentence: Vrai ?

-
Input sentence: Really?
Decoded sentence: Vrai ?

-
Input sentence: Thanks.
Decoded sentence: Merci !

-
Input sentence: We try.
Decoded sentence: On essaye.

-
Input sentence: We won.
Decoded sentence: Nous l'emportâmes.

-
Input sentence: We won.
Decoded sentence: Nous l'emportâmes.

-
Input sentence: We won.
Decoded sentence: Nous l'emportâmes.

-
Input sentence: We won.
Decoded sentence: Nous l'emportâmes.

-
Input sentence: Ask Tom.
Decoded sentence: Demandez à Tom.

-
Input sentence: Awesome!
Decoded sentence: Fantastique !

-
Input sentence: Be calm.
Decoded sentence: Soyez calme !

-
Input sentence: Be calm.
Decoded sentence: Soyez calme !

-
Input sentence: Be calm.
Decoded sentence: Soyez calme !

-
Input sentence: Be cool.
Decoded sentence: Sois détendu !

-
Input sentence: Be fair.
Decoded sentence: Soyez juste !

-
Input sentence: Be fair.
Decoded sentence: Soyez juste !

-
Input sentence: Be fair.
Decoded sentence: Soyez juste !

-
Input sentence: Be fair.
Decoded sentence: Soyez juste !

-
Input sentence: Be fair.
Decoded sentence: Soyez juste !

-
Input sentence: Be fair.
Decoded sentence: Soyez juste !

-
Input sentence: Be kind.
Decoded sentence: Sois gentil.

-
Input sentence: Be nice.
Decoded sentence: Soyez gentille !

-
Input sentence: Be nice.
Decoded sentence: Soyez gentille !

-
Input sentence: Be nice.
Decoded sentence: Soyez gentille !

-
Input sentence: Be nice.
Decoded sentence: Soyez gentille !

-
Input sentence: Be nice.
Decoded sentence: Soyez gentille !

-
Input sentence: Be nice.
Decoded sentence: Soyez gentille !

-
Input sentence: Beat it.
Decoded sentence: Dégage !

-
Input sentence: Call me.
Decoded sentence: Appelle-moi !

-
Input sentence: Call me.
Decoded sentence: Appelle-moi !

-
Input sentence: Call us.
Decoded sentence: Appelle-nous !

-
Input sentence: Call us.
Decoded sentence: Appelle-nous !

-
Input sentence: Come in.
Decoded sentence: Entrez !

-
Input sentence: Come in.
Decoded sentence: Entrez !

-
Input sentence: Come in.
Decoded sentence: Entrez !

-
Input sentence: Come in.
Decoded sentence: Entrez !

-
Input sentence: Come on!
Decoded sentence: Allez !

-
Input sentence: Come on.
Decoded sentence: Viens !

-
Input sentence: Come on.
Decoded sentence: Viens !

-
Input sentence: Come on.
Decoded sentence: Viens !

-
Input sentence: Drop it!
Decoded sentence: Laisse-le tomber !

-
Input sentence: Drop it!
Decoded sentence: Laisse-le tomber !

-
Input sentence: Drop it!
Decoded sentence: Laisse-le tomber !

-
Input sentence: Drop it!
Decoded sentence: Laisse-le tomber !

-
Input sentence: Get out!
Decoded sentence: Sortez !

-
Input sentence: Get out!
Decoded sentence: Sortez !

-
Input sentence: Get out!
Decoded sentence: Sortez !

-
Input sentence: Get out.
Decoded sentence: Casse-toi.

-
Input sentence: Get out.
Decoded sentence: Casse-toi.
