diff --git a/examples/text_classification/rnn/deploy/python/predict.py b/examples/text_classification/rnn/deploy/python/predict.py
index eeb4119b0eda4..61fc735c5a867 100644
--- a/examples/text_classification/rnn/deploy/python/predict.py
+++ b/examples/text_classification/rnn/deploy/python/predict.py
@@ -16,7 +16,6 @@
 import numpy as np
 import paddle
-from paddle import inference
 from paddlenlp.data import JiebaTokenizer, Stack, Tuple, Pad, Vocab
 from scipy.special import softmax
diff --git a/examples/text_classification/rnn/export_model.py b/examples/text_classification/rnn/export_model.py
index 477fd1a72143f..d61f82ba868ae 100644
--- a/examples/text_classification/rnn/export_model.py
+++ b/examples/text_classification/rnn/export_model.py
@@ -15,9 +15,10 @@
 import argparse
 import paddle
-import paddlenlp as ppnlp
 from paddlenlp.data import Vocab
+from model import BoWModel, BiLSTMAttentionModel, CNNModel, LSTMModel, GRUModel, RNNModel, SelfInteractiveAttention
+
 # yapf: disable
 parser = argparse.ArgumentParser(__doc__)
 parser.add_argument("--vocab_path", type=str, default="./senta_word_dict.txt", help="The path to vocabulary.")
@@ -56,7 +57,7 @@ def main():
         padding_idx=pad_token_id)
     elif network == 'bilstm_attn':
         lstm_hidden_size = 196
-        attention = SelfInteractiveAttention(hidden_size=2 * stm_hidden_size)
+        attention = SelfInteractiveAttention(hidden_size=2 * lstm_hidden_size)
         model = BiLSTMAttentionModel(
             attention_layer=attention,
             vocab_size=vocab_size,
diff --git a/examples/text_classification/rnn/predict.py b/examples/text_classification/rnn/predict.py
index ed8d8f361c307..f2a0e85e538d5 100644
--- a/examples/text_classification/rnn/predict.py
+++ b/examples/text_classification/rnn/predict.py
@@ -15,7 +15,6 @@
 import paddle
 import paddle.nn.functional as F
-import paddlenlp as ppnlp
 from paddlenlp.data import JiebaTokenizer, Stack, Tuple, Pad, Vocab
 from model import BoWModel, BiLSTMAttentionModel, CNNModel, LSTMModel, GRUModel, RNNModel, SelfInteractiveAttention
@@ -102,7 +101,7 @@ def predict(model, data, label_map, batch_size=1, pad_token_id=0):
         padding_idx=pad_token_id)
     elif network == 'bilstm_attn':
         lstm_hidden_size = 196
-        attention = SelfInteractiveAttention(hidden_size=2 * stm_hidden_size)
+        attention = SelfInteractiveAttention(hidden_size=2 * lstm_hidden_size)
         model = BiLSTMAttentionModel(
             attention_layer=attention,
             vocab_size=vocab_size,
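
For reference, a minimal sketch of the corrected `bilstm_attn` branch in isolation. The diff truncates the `BiLSTMAttentionModel(...)` call after `vocab_size`, so the `lstm_hidden_size`, `num_classes`, and `padding_idx` keyword arguments and the placeholder values below are assumptions, not part of the patch itself.

```python
# Hypothetical sketch: exercising the corrected bilstm_attn construction.
# Assumes model.py from examples/text_classification/rnn is importable and that
# BiLSTMAttentionModel accepts the keyword arguments shown; values are placeholders.
from model import BiLSTMAttentionModel, SelfInteractiveAttention

vocab_size = 100   # placeholder; the real value comes from the loaded vocabulary
num_classes = 2    # placeholder label count
pad_token_id = 0   # placeholder padding index

lstm_hidden_size = 196
# The fix: the bidirectional LSTM feeds 2 * lstm_hidden_size features into the
# attention layer; the old code referenced the undefined name `stm_hidden_size`.
attention = SelfInteractiveAttention(hidden_size=2 * lstm_hidden_size)
model = BiLSTMAttentionModel(
    attention_layer=attention,
    vocab_size=vocab_size,
    lstm_hidden_size=lstm_hidden_size,
    num_classes=num_classes,
    padding_idx=pad_token_id)
```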