import string

import numpy as np
import tensorflow as tf
from tensorflow import keras
|
| |
|
| |
|
| |
|
| |
|
# Path of the trained IMDB sentiment model saved in the native Keras format.
model_path = "my_rnn_model_imdb.keras"

try:
    # Keep the try body minimal: only the call that can actually fail.
    loaded_model = keras.models.load_model(model_path)
except Exception as e:
    # Top-level boundary of the script: report the problem and abort with a
    # non-zero status. `raise SystemExit` is preferred over the `exit()`
    # builtin, which is provided by the `site` module and not guaranteed
    # to exist in every interpreter configuration.
    print(f"Error while loading the model: {e}")
    raise SystemExit(1)
else:
    print(f"Successfully loaded model '{model_path}'.")
|
| |
|
| |
|
# Build the word -> integer-id lookup used to encode new reviews.
# The raw IMDB index starts at 1, so every id is shifted up by 3 to make
# room for the four special tokens reserved by the dataset convention.
_raw_index = keras.datasets.imdb.get_word_index()
word_index = {word: idx + 3 for word, idx in _raw_index.items()}
word_index.update({
    "<pad>": 0,
    "<start>": 1,
    "<unk>": 2,
    "<unused>": 3,
})
|
| |
|
| |
|
| |
|
| |
|
# Fixed sequence length; assumed to match the length the model was trained
# with — TODO confirm against the training script.
MAX_LEN = 256

# Translation table that deletes ASCII punctuation in one C-level pass.
_PUNCT_TABLE = str.maketrans("", "", string.punctuation)


def preprocess_text(text, max_len=MAX_LEN):
    """Encode a raw review string into a padded index sequence for the model.

    Args:
        text: Raw review text in English.
        max_len: Output sequence length; sequences are pre-padded and
            pre-truncated to this length (defaults to ``MAX_LEN``).

    Returns:
        A NumPy array of shape ``(1, max_len)`` of word indices.
    """
    # Strip punctuation before splitting so that e.g. "great!" matches the
    # vocabulary entry "great"; otherwise such tokens all collapse to <unk>.
    cleaned = text.lower().translate(_PUNCT_TABLE)

    # Words absent from the vocabulary map to the <unk> index.
    unk = word_index["<unk>"]
    tokens = [word_index.get(word, unk) for word in cleaned.split()]

    # Prepend the <start> marker, mirroring the encoded IMDB training data.
    tokens = [word_index["<start>"]] + tokens

    # Pre-padding/truncation is the Keras IMDB convention; presumably the
    # model was trained with the same setting — verify against training code.
    padded_sequence = keras.preprocessing.sequence.pad_sequences(
        [tokens], maxlen=max_len, padding='pre'
    )

    return padded_sequence
|
| |
|
| |
|
| |
|
| |
|
print("\nMovie review sentiment analyzer (type 'exit' to quit)")
print("-" * 50)

while True:
    review_text = input("Enter a review: ")

    # Tolerate surrounding whitespace around the exit command.
    if review_text.strip().lower() == 'exit':
        print("Exiting the program.")
        break

    if not review_text.strip():
        print("Empty input. Please try again.")
        continue

    processed_input = preprocess_text(review_text)

    # verbose=0 suppresses the per-call Keras progress bar, which would
    # otherwise clutter the console on every single prediction.
    prediction = loaded_model.predict(processed_input, verbose=0)

    # Single sigmoid output in [0, 1]; cast the NumPy scalar to a plain
    # float for clean formatting.
    score = float(prediction[0][0])
    sentiment = "Positive" if score > 0.5 else "Negative"

    print(f"Result: {sentiment} (prediction score: {score:.4f})")
    print("-" * 50)