Update app.py
app.py CHANGED
@@ -22,25 +22,20 @@ nltk.download('wordnet')
 
 
 
+with open("sentiment_analysis_model.pkl", "rb") as file:
+    pipe2 = pickle.load(file)
 
+def preprocess_nltk(text):
+    lemmatizer = WordNetLemmatizer()
+    tokens = word_tokenize(text.lower()) # Tokenization
+    stop_words = set(stopwords.words("english"))
+    filtered_tokens = [lemmatizer.lemmatize(token) for token in tokens if token.isalnum() and token not in stop_words]
+    return " ".join(filtered_tokens)
 
 
 
 
 def prediction(text):
-
-
-    def preprocess_nltk(text):
-
-
-        lemmatizer = WordNetLemmatizer()
-        tokens = word_tokenize(text.lower()) # Tokenization
-        stop_words = set(stopwords.words("english"))
-        filtered_tokens = [lemmatizer.lemmatize(token) for token in tokens if token.isalnum() and token not in stop_words]
-        return " ".join(filtered_tokens)
-
-    with open("sentiment_analysis_model.pkl", "rb") as file:
-        pipe2 = pickle.load(file)
     text_processed=(preprocess_nltk(text))
     ans=pipe2.predict([text_processed])
    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
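For context on what this commit changes: before, prediction() re-opened sentiment_analysis_model.pkl and re-defined preprocess_nltk on every call; after, both sit at module level, so the model is unpickled once when app.py is imported. The hunk ends at the classes list, so the rest of prediction() is not shown. The sketch below is a hedged reconstruction of how the reorganized module might be completed and called; the return statement, the index-to-label mapping, the extra nltk.download calls, and the example call are assumptions, not part of the commit.

# Sketch only: assumes the pickled object is a scikit-learn Pipeline whose
# predict() returns one label index per input string; the real app.py may
# differ beyond the lines shown in the diff.
import pickle

import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# Only nltk.download('wordnet') is visible in the hunk header; 'punkt' and
# 'stopwords' are assumed here because word_tokenize and stopwords.words
# need their data.
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')

# Loaded once at import time (the point of the reorganization), not per call.
with open("sentiment_analysis_model.pkl", "rb") as file:
    pipe2 = pickle.load(file)

def preprocess_nltk(text):
    lemmatizer = WordNetLemmatizer()
    tokens = word_tokenize(text.lower())          # tokenize lower-cased text
    stop_words = set(stopwords.words("english"))  # English stop-word list
    filtered_tokens = [lemmatizer.lemmatize(token)
                       for token in tokens
                       if token.isalnum() and token not in stop_words]
    return " ".join(filtered_tokens)

def prediction(text):
    text_processed = preprocess_nltk(text)
    ans = pipe2.predict([text_processed])
    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
    # Assumed ending: map the predicted index back to its label.
    return classes[int(ans[0])]

print(prediction("I really enjoyed the new update"))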