Update app.py
app.py CHANGED
@@ -32,10 +32,36 @@ from sklearn.feature_extraction.text import TfidfVectorizer
 # Load environment variables
 load_dotenv()
 
-#
-
-#
-
+# Inside your main script (e.g., near the top after imports)
+import nltk
+import ssl  # Sometimes needed for NLTK downloads
+
+def ensure_nltk_resources():
+    try:
+        # Try to find a resource to see if download is needed
+        # Using punkt as an example; you might check others too
+        nltk.data.find('tokenizers/punkt')
+        nltk.data.find('corpora/stopwords')
+        # Add checks for wordnet, words, punkt_tab as needed
+    except LookupError:
+        print("NLTK resources not found. Downloading...")
+        try:
+            # Handle potential SSL issues (common on some systems)
+            _create_unverified_https_context = ssl._create_unverified_context
+        except AttributeError:
+            pass
+        else:
+            ssl._create_default_https_context = _create_unverified_https_context
+
+        nltk.download(['stopwords', 'wordnet', 'words'])
+        nltk.download('punkt')
+        nltk.download('punkt_tab')
+        print("NLTK resources downloaded successfully.")
+
+# Call the function at the start of your script
+ensure_nltk_resources()
+
+
 
 # Initialize Groq client
 groq_api_key = os.getenv("GROQ_API_KEY")
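For reference, the same startup check could probe each resource individually and fetch only the packages that are actually missing, instead of downloading the full set whenever any single lookup fails. The sketch below is an assumption-based refinement, not code from app.py; the resource paths and package names simply mirror the ones referenced in the diff above.

import nltk

# Hypothetical variant of ensure_nltk_resources(): the (data path, package)
# pairs below are assumptions drawn from the resources used in the diff.
NLTK_RESOURCES = {
    "tokenizers/punkt": "punkt",
    "tokenizers/punkt_tab": "punkt_tab",
    "corpora/stopwords": "stopwords",
    "corpora/wordnet": "wordnet",
    "corpora/words": "words",
}

def ensure_nltk_resources():
    missing = []
    for path, package in NLTK_RESOURCES.items():
        try:
            nltk.data.find(path)  # raises LookupError if the resource is absent
        except LookupError:
            missing.append(package)
    if missing:
        print(f"Downloading missing NLTK packages: {missing}")
        nltk.download(missing)  # nltk.download accepts a list of package ids

ensure_nltk_resources()

Downloading only the missing packages keeps cold starts on the Space faster once the resources are cached, while still self-healing if the cache is wiped.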