Update app.py
app.py CHANGED
@@ -29,19 +29,23 @@ nltk.download('wordnet')

 def prediction(text):

+
     def preprocess_nltk(text):
+
+
         lemmatizer = WordNetLemmatizer()
         tokens = word_tokenize(text.lower())  # Tokenization
         stop_words = set(stopwords.words("english"))
         filtered_tokens = [lemmatizer.lemmatize(token) for token in tokens if token.isalnum() and token not in stop_words]
         return " ".join(filtered_tokens)
+
     with open("sentiment_analysis_model.pkl", "rb") as file:
         pipe2 = pickle.load(file)
-
-
-
-
-
+    text_processed = preprocess_nltk(text)
+    ans = pipe2.predict([text_processed])
+    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
+    predicted_label = ans[0]
+    return f"The above text is: {classes[predicted_label]}"

 pre = gr.Interface(
     fn=prediction,
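
For context, the updated function can be read as the self-contained sketch below. Only lines 29-51 of app.py are visible in this hunk, so the imports, the NLTK downloads, and the `inputs`/`outputs`/`launch()` wiring of the Gradio interface are assumptions; the label encoding (an integer index into `classes`) is inferred from how `ans[0]` is used.

import pickle

import gradio as gr
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# One-time NLTK resources. The hunk header shows nltk.download('wordnet')
# near line 29, so the file presumably runs downloads like these earlier.
nltk.download("punkt")
nltk.download("stopwords")
nltk.download("wordnet")


def prediction(text):

    def preprocess_nltk(text):
        # Lowercase, tokenize, keep alphanumeric non-stopword tokens, lemmatize.
        lemmatizer = WordNetLemmatizer()
        tokens = word_tokenize(text.lower())
        stop_words = set(stopwords.words("english"))
        filtered = [lemmatizer.lemmatize(t) for t in tokens
                    if t.isalnum() and t not in stop_words]
        return " ".join(filtered)

    # The committed code reloads the pickled pipeline on every call;
    # loading it once at module level would avoid the repeated disk read.
    with open("sentiment_analysis_model.pkl", "rb") as file:
        pipe2 = pickle.load(file)

    text_processed = preprocess_nltk(text)
    ans = pipe2.predict([text_processed])  # the pipeline expects a list of strings
    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
    predicted_label = ans[0]  # assumed: an integer class index
    return f"The above text is: {classes[predicted_label]}"


pre = gr.Interface(
    fn=prediction,
    inputs="text",   # assumed: the widget arguments are outside the visible hunk
    outputs="text",
)

if __name__ == "__main__":
    pre.launch()

With this wiring, entering a string such as "great product" in the textbox would route it through preprocess_nltk, the pickled pipeline, and the classes lookup, returning something like "The above text is: Positive" depending on the model's prediction.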