Alessandro Piana committed

Commit 8a2f7f4 · 1 Parent(s): 4a89ec3

Final clean upload of the Life Coach project
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ data/life_coach_model/*.safetensors filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,18 @@
+ # Use an official Python image
+ FROM python:3.10-slim
+
+ # Set the working directory in the image
+ WORKDIR /app
+
+ # Copy the dependency file and install the dependencies
+ COPY requirements.txt requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the project
+ COPY . .
+
+ # Expose port 8085 (the one used in app.py)
+ EXPOSE 8085
+
+ # Command to start the app
+ CMD ["gunicorn", "--bind", "0.0.0.0:8085", "--workers", "1", "--threads", "8", "--timeout", "0", "app:app"]
app.py ADDED
@@ -0,0 +1,206 @@
+ #!/usr/bin/env python3
+ """
+ Life Coach Web Application
+ Flask-based web interface for the Phi-4 Life Coach model
+ """
+
+ import os
+ import threading
+ from datetime import datetime
+ from flask import Flask, render_template, redirect, url_for, flash
+ from flask_login import LoginManager, current_user
+ import logging
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Initialize Flask app
+ app = Flask(__name__)
+ app.config['SECRET_KEY'] = 'your-secret-key-change-this-in-production'
+
+ # Disable caching for static files in debug mode
+ app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
+
+ # Use absolute path for database
+ basedir = os.path.abspath(os.path.dirname(__file__))
+ app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{os.path.join(basedir, "data", "lifecoach.db")}'
+ app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
+
+ # Import db from models and initialize it
+ from models import db
+ db.init_app(app)
+
+ # Initialize login manager
+ login_manager = LoginManager(app)
+ login_manager.login_view = 'auth.login'
+ login_manager.login_message = 'Please log in to access this page.'
+
+ # Global model instance and lock for thread-safe access
+ model_instance = None
+ model_lock = threading.Lock()
+
+
+ def get_life_coach_model():
+     """
+     Get or initialize the Life Coach model (singleton pattern).
+     Thread-safe model loading with automatic path detection.
+     """
+     global model_instance
+
+     if model_instance is None:
+         with model_lock:
+             # Double-check locking pattern
+             if model_instance is None:
+                 logger.info("Loading Life Coach model...")
+                 from life_coach_v1 import LifeCoachModel
+                 from pathlib import Path
+
+                 # Detect where the model is actually saved (same logic as life_coach_v1.py)
+                 preferred_path = "/data/life_coach_model"
+                 fallback_path = "./data/life_coach_model"
+
+                 # Check if model exists in preferred location
+                 if Path(preferred_path).exists() and (Path(preferred_path) / "adapter_model.safetensors").exists():
+                     model_path = preferred_path
+                     logger.info(f"Found model in preferred location: {model_path}")
+                 # Check fallback location
+                 elif Path(fallback_path).exists() and (Path(fallback_path) / "adapter_model.safetensors").exists():
+                     model_path = fallback_path
+                     logger.info(f"Found model in fallback location: {model_path}")
+                 else:
+                     # Default to preferred path (will use fallback logic in LifeCoachModel if needed)
+                     model_path = preferred_path
+                     logger.info(f"Model not found, will attempt to use: {model_path}")
+
+                 model_instance = LifeCoachModel(
+                     model_name="microsoft/Phi-4",
+                     model_save_path=model_path,
+                     train_file="mixed_lifecoach_dataset_100000.jsonl.gz"  # Updated to match new training data
+                 )
+
+                 # Load tokenizer and model
+                 model_instance.load_tokenizer()
+                 model_instance.load_model(fine_tuned=True)
+
+                 logger.info("Life Coach model loaded successfully!")
+
+     return model_instance
+
+
+ def generate_response_threadsafe(prompt: str, conversation_history: list) -> str:
+     """
+     Generate a response using the model with thread-safe access.
+
+     Args:
+         prompt: User's input message
+         conversation_history: List of previous messages
+
+     Returns:
+         Generated response
+     """
+     logger.info(f"=== GENERATE_RESPONSE_THREADSAFE CALLED ===")
+     logger.info(f"Prompt: {prompt}")
+     logger.info(f"History length: {len(conversation_history) if conversation_history else 0}")
+
+     model = get_life_coach_model()
+
+     logger.info(f"Model instance: {model}")
+     logger.info(f"Model type: {type(model)}")
+
+     # Use lock to ensure only one inference at a time (GPU limitation)
+     with model_lock:
+         logger.info("Calling model.generate_response()...")
+         response = model.generate_response(
+             prompt=prompt,
+             max_new_tokens=256,  # Increased for more complete responses
+             conversation_history=conversation_history
+         )
+         logger.info(f"Response received: {response[:100]}...")
+
+     return response
+
+
+ # Import models after db is initialized
+ from models import User, Conversation, Message
+
+ # User loader for Flask-Login
+ @login_manager.user_loader
+ def load_user(user_id):
+     return db.session.get(User, int(user_id))
+
+
+ # Register blueprints
+ from auth import auth_bp
+ from chat import chat_bp
+
+ app.register_blueprint(auth_bp, url_prefix='/auth')
+ app.register_blueprint(chat_bp, url_prefix='/chat')
+
+
+ @app.after_request
+ def add_header(response):
+     """Add headers to prevent caching of static files."""
+     if 'Cache-Control' not in response.headers:
+         response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
+         response.headers['Pragma'] = 'no-cache'
+         response.headers['Expires'] = '-1'
+     return response
+
+
+ @app.route('/')
+ def index():
+     """Home page - redirect to chat if logged in, otherwise to login."""
+     if current_user.is_authenticated:
+         return redirect(url_for('chat.chat_interface'))
+     return redirect(url_for('auth.login'))
+
+
+ def initialize_database():
+     """Initialize database and create tables."""
+     # Create data directory if it doesn't exist (BEFORE app context)
+     data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
+     os.makedirs(data_dir, exist_ok=True)
+
+     # Set proper permissions for data directory (ensure it's writable)
+     try:
+         os.chmod(data_dir, 0o755)  # rwxr-xr-x
+         logger.info(f"Data directory ready: {data_dir}")
+     except Exception as e:
+         logger.warning(f"Could not set permissions on data directory: {e}")
+
+     # Create all database tables
+     with app.app_context():
+         db.create_all()
+         logger.info("Database initialized")
+
+     # Ensure database file has proper permissions
+     db_path = os.path.join(data_dir, 'lifecoach.db')
+     if os.path.exists(db_path):
+         try:
+             os.chmod(db_path, 0o644)  # rw-r--r--
+             logger.info(f"Database file permissions set: {db_path}")
+         except Exception as e:
+             logger.warning(f"Could not set permissions on database file: {e}")
+
+
+ if __name__ == '__main__':
+     logger.info("=" * 80)
+     logger.info("LIFE COACH WEB APPLICATION")
+     logger.info("=" * 80)
+     logger.info("Starting Flask server on http://0.0.0.0:8085")
+     logger.info("=" * 80)
+
+     # Initialize database
+     initialize_database()
+
+     # Run Flask app
+     app.run(
+         host='0.0.0.0',
+         port=8085,
+         debug=True,
+         threaded=True  # Enable multi-threading
+     )
auth.py ADDED
@@ -0,0 +1,117 @@
+ """
+ Authentication Blueprint
+ Handles user registration, login, and logout
+ """
+
+ from flask import Blueprint, render_template, redirect, url_for, request, flash
+ from flask_login import login_user, logout_user, login_required, current_user
+ from models import db, User
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ auth_bp = Blueprint('auth', __name__)
+
+
+ @auth_bp.route('/register', methods=['GET', 'POST'])
+ def register():
+     """User registration page."""
+     if current_user.is_authenticated:
+         return redirect(url_for('chat.chat_interface'))
+
+     if request.method == 'POST':
+         username = request.form.get('username', '').strip()
+         email = request.form.get('email', '').strip()
+         password = request.form.get('password', '')
+         password_confirm = request.form.get('password_confirm', '')
+
+         # Validation
+         errors = []
+
+         if not username or len(username) < 3:
+             errors.append('Username must be at least 3 characters long.')
+
+         if not email or '@' not in email:
+             errors.append('Please enter a valid email address.')
+
+         if not password or len(password) < 6:
+             errors.append('Password must be at least 6 characters long.')
+
+         if password != password_confirm:
+             errors.append('Passwords do not match.')
+
+         # Check if user already exists
+         if User.query.filter_by(username=username).first():
+             errors.append('Username already exists.')
+
+         if User.query.filter_by(email=email).first():
+             errors.append('Email already registered.')
+
+         if errors:
+             for error in errors:
+                 flash(error, 'danger')
+             return render_template('register.html', username=username, email=email)
+
+         # Create new user
+         try:
+             user = User(username=username, email=email)
+             user.set_password(password)
+
+             db.session.add(user)
+             db.session.commit()
+
+             logger.info(f"New user registered: {username}")
+             flash('Registration successful! Please log in.', 'success')
+             return redirect(url_for('auth.login'))
+
+         except Exception as e:
+             db.session.rollback()
+             logger.error(f"Error during registration: {e}")
+             flash('An error occurred during registration. Please try again.', 'danger')
+
+     return render_template('register.html')
+
+
+ @auth_bp.route('/login', methods=['GET', 'POST'])
+ def login():
+     """User login page."""
+     if current_user.is_authenticated:
+         return redirect(url_for('chat.chat_interface'))
+
+     if request.method == 'POST':
+         username = request.form.get('username', '').strip()
+         password = request.form.get('password', '')
+         remember = request.form.get('remember', False) == 'on'
+
+         if not username or not password:
+             flash('Please enter both username and password.', 'warning')
+             return render_template('login.html', username=username)
+
+         # Find user
+         user = User.query.filter_by(username=username).first()
+
+         if user and user.check_password(password):
+             login_user(user, remember=remember)
+             logger.info(f"User logged in: {username}")
+
+             # Redirect to next page or chat
+             next_page = request.args.get('next')
+             if next_page and next_page.startswith('/'):
+                 return redirect(next_page)
+             return redirect(url_for('chat.chat_interface'))
+         else:
+             flash('Invalid username or password.', 'danger')
+             return render_template('login.html', username=username)
+
+     return render_template('login.html')
+
+
+ @auth_bp.route('/logout')
+ @login_required
+ def logout():
+     """User logout."""
+     username = current_user.username
+     logout_user()
+     logger.info(f"User logged out: {username}")
+     flash('You have been logged out successfully.', 'info')
+     return redirect(url_for('auth.login'))
chat.py ADDED
@@ -0,0 +1,294 @@
+ """
+ Chat Blueprint
+ Handles chat interface and conversation management
+ """
+
+ from flask import Blueprint, render_template, request, jsonify, send_file
+ from flask_login import login_required, current_user
+ from models import db, Conversation, Message
+ from gtts import gTTS
+ import logging
+ import os
+ import tempfile
+ import hashlib
+
+ logger = logging.getLogger(__name__)
+
+ chat_bp = Blueprint('chat', __name__)
+
+
+ @chat_bp.route('/')
+ @login_required
+ def chat_interface():
+     """Main chat interface page."""
+     # Get user's conversations
+     conversations = Conversation.query.filter_by(user_id=current_user.id).order_by(Conversation.updated_at.desc()).all()
+
+     # Get current conversation (most recent) or create new one
+     current_conversation = None
+     if conversations:
+         current_conversation = conversations[0]
+     else:
+         # Create first conversation
+         current_conversation = Conversation(
+             user_id=current_user.id,
+             title='Welcome Chat'
+         )
+         db.session.add(current_conversation)
+         db.session.commit()
+         conversations = [current_conversation]
+
+     return render_template(
+         'chat.html',
+         conversations=conversations,
+         current_conversation=current_conversation
+     )
+
+
+ @chat_bp.route('/api/send', methods=['POST'])
+ @login_required
+ def send_message():
+     """
+     API endpoint to send a message and get a response.
+
+     Expected JSON:
+     {
+         "message": "user message",
+         "conversation_id": 123 (optional)
+     }
+
+     Returns JSON:
+     {
+         "success": true,
+         "user_message": {...},
+         "assistant_message": {...},
+         "conversation_id": 123
+     }
+     """
+     try:
+         data = request.get_json()
+         user_message_text = data.get('message', '').strip()
+         conversation_id = data.get('conversation_id')
+
+         if not user_message_text:
+             return jsonify({'success': False, 'error': 'Message cannot be empty'}), 400
+
+         # Get or create conversation
+         if conversation_id:
+             conversation = Conversation.query.filter_by(
+                 id=conversation_id,
+                 user_id=current_user.id
+             ).first()
+
+             if not conversation:
+                 return jsonify({'success': False, 'error': 'Conversation not found'}), 404
+         else:
+             # Create new conversation
+             conversation = Conversation(
+                 user_id=current_user.id,
+                 title=user_message_text[:50] + ('...' if len(user_message_text) > 50 else '')
+             )
+             db.session.add(conversation)
+             db.session.commit()
+
+         # Save user message
+         user_message = Message(
+             conversation_id=conversation.id,
+             role='user',
+             content=user_message_text
+         )
+         db.session.add(user_message)
+         db.session.commit()
+
+         # Get conversation history (excluding the current message for now)
+         history = conversation.get_message_history()[:-1]  # Exclude the message we just added
+
+         # Generate response using thread-safe model access
+         # Import here to avoid circular import
+         from app import generate_response_threadsafe
+
+         logger.info(f"Generating response for user {current_user.username}")
+         assistant_response_text = generate_response_threadsafe(
+             prompt=user_message_text,
+             conversation_history=history
+         )
+
+         # Save assistant message
+         assistant_message = Message(
+             conversation_id=conversation.id,
+             role='assistant',
+             content=assistant_response_text
+         )
+         db.session.add(assistant_message)
+         db.session.commit()
+
+         logger.info(f"Response generated successfully for user {current_user.username}")
+
+         return jsonify({
+             'success': True,
+             'user_message': user_message.to_dict(),
+             'assistant_message': assistant_message.to_dict(),
+             'conversation_id': conversation.id
+         })
+
+     except Exception as e:
+         logger.error(f"Error in send_message: {e}", exc_info=True)
+         return jsonify({
+             'success': False,
+             'error': 'An error occurred while processing your message'
+         }), 500
+
+
+ @chat_bp.route('/api/conversations')
+ @login_required
+ def get_conversations():
+     """Get all conversations for the current user."""
+     conversations = Conversation.query.filter_by(user_id=current_user.id).order_by(Conversation.updated_at.desc()).all()
+
+     return jsonify({
+         'success': True,
+         'conversations': [
+             {
+                 'id': c.id,
+                 'title': c.title,
+                 'created_at': c.created_at.isoformat(),
+                 'updated_at': c.updated_at.isoformat(),
+                 'message_count': len(c.messages)
+             }
+             for c in conversations
+         ]
+     })
+
+
+ @chat_bp.route('/api/conversation/<int:conversation_id>')
+ @login_required
+ def get_conversation(conversation_id):
+     """Get a specific conversation with all messages."""
+     conversation = Conversation.query.filter_by(
+         id=conversation_id,
+         user_id=current_user.id
+     ).first()
+
+     if not conversation:
+         return jsonify({'success': False, 'error': 'Conversation not found'}), 404
+
+     return jsonify({
+         'success': True,
+         'conversation': {
+             'id': conversation.id,
+             'title': conversation.title,
+             'created_at': conversation.created_at.isoformat(),
+             'updated_at': conversation.updated_at.isoformat(),
+             'messages': [msg.to_dict() for msg in conversation.messages]
+         }
+     })
+
+
+ @chat_bp.route('/api/conversation/new', methods=['POST'])
+ @login_required
+ def new_conversation():
+     """Create a new conversation."""
+     try:
+         conversation = Conversation(
+             user_id=current_user.id,
+             title='New Conversation'
+         )
+         db.session.add(conversation)
+         db.session.commit()
+
+         return jsonify({
+             'success': True,
+             'conversation': {
+                 'id': conversation.id,
+                 'title': conversation.title,
+                 'created_at': conversation.created_at.isoformat(),
+                 'updated_at': conversation.updated_at.isoformat()
+             }
+         })
+
+     except Exception as e:
+         logger.error(f"Error creating new conversation: {e}")
+         return jsonify({'success': False, 'error': 'Failed to create conversation'}), 500
+
+
+ @chat_bp.route('/api/conversation/<int:conversation_id>/delete', methods=['POST'])
+ @login_required
+ def delete_conversation(conversation_id):
+     """Delete a conversation."""
+     try:
+         conversation = Conversation.query.filter_by(
+             id=conversation_id,
+             user_id=current_user.id
+         ).first()
+
+         if not conversation:
+             return jsonify({'success': False, 'error': 'Conversation not found'}), 404
+
+         db.session.delete(conversation)
+         db.session.commit()
+
+         return jsonify({'success': True})
+
+     except Exception as e:
+         logger.error(f"Error deleting conversation: {e}")
+         return jsonify({'success': False, 'error': 'Failed to delete conversation'}), 500
+
+
+ @chat_bp.route('/api/tts/generate', methods=['POST'])
+ @login_required
+ def generate_tts():
+     """
+     Generate text-to-speech audio from text.
+
+     Expected JSON:
+     {
+         "text": "text to convert to speech"
+     }
+
+     Returns:
+         Audio file (MP3) for client-side playback
+     """
+     try:
+         data = request.get_json()
+         text = data.get('text', '').strip()
+
+         if not text:
+             return jsonify({'success': False, 'error': 'Text cannot be empty'}), 400
+
+         # Create a unique filename based on text hash (for caching)
+         text_hash = hashlib.md5(text.encode()).hexdigest()
+
+         # Create audio directory if it doesn't exist
+         audio_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', 'audio')
+         os.makedirs(audio_dir, exist_ok=True)
+
+         # Path for cached audio file
+         audio_filename = f"tts_{text_hash}.mp3"
+         audio_path = os.path.join(audio_dir, audio_filename)
+
+         # Generate TTS if not already cached
+         if not os.path.exists(audio_path):
+             logger.info(f"Generating TTS for text (hash: {text_hash})")
+
+             # Generate speech using Google TTS
+             # lang='en' for English, slow=False for normal speed
+             tts = gTTS(text=text, lang='en', slow=False)
+
+             # Save to file
+             tts.save(audio_path)
+             logger.info(f"TTS saved to: {audio_path}")
+         else:
+             logger.info(f"Using cached TTS (hash: {text_hash})")
+
+         # Return the audio file URL
+         return jsonify({
+             'success': True,
+             'audio_url': f'/static/audio/{audio_filename}'
+         })
+
+     except Exception as e:
+         logger.error(f"Error generating TTS: {e}", exc_info=True)
+         return jsonify({
+             'success': False,
+             'error': 'Failed to generate speech'
+         }), 500
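
A minimal client-side sketch (not part of the commit) of how the `/auth` and `/chat` endpoints above could be exercised. It assumes the app is running locally on port 8085 and that a user already exists; the username and password are placeholders.

```python
import requests

BASE = "http://localhost:8085"
session = requests.Session()

# Log in through the form-based /auth/login route so the session cookie is set.
session.post(f"{BASE}/auth/login", data={"username": "demo", "password": "secret1"})

# Send a chat message; omitting conversation_id makes the server create a new conversation.
resp = session.post(f"{BASE}/chat/api/send", json={"message": "I feel stuck at work lately."})
payload = resp.json()
print(payload["assistant_message"]["content"])

# Ask for a TTS rendering of the assistant reply and print the cached audio URL.
tts = session.post(f"{BASE}/chat/api/tts/generate",
                   json={"text": payload["assistant_message"]["content"]})
print(tts.json().get("audio_url"))
```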
data/life_coach_model/README.md ADDED
@@ -0,0 +1,207 @@
+ ---
+ base_model: microsoft/Phi-4
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:microsoft/Phi-4
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.16.0
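
The model card's "How to Get Started" section is still empty, so here is a minimal inference sketch (an assumption, not taken from the repository) for loading this LoRA adapter on top of microsoft/Phi-4. It assumes access to the base weights, sufficient GPU memory, and the `data/life_coach_model` directory layout shown in this commit.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model and the tokenizer shipped next to the adapter files.
base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4", torch_dtype=torch.bfloat16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("data/life_coach_model")

# Attach the LoRA adapter (adapter_model.safetensors) stored in this repo.
model = PeftModel.from_pretrained(base, "data/life_coach_model")
model.eval()

messages = [{"role": "user", "content": "Help me set a realistic goal for this week."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
with torch.no_grad():
    out = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```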
data/life_coach_model/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/Phi-4",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "qalora_group_size": 16,
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "k_proj",
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
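
For reference, this adapter_config.json corresponds to roughly the following `peft.LoraConfig` (a sketch reconstructed from the JSON above, not copied from the training script): rank 16, alpha 32, dropout 0.1, with LoRA applied to the attention projection matrices.

```python
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,
    lora_alpha=32,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
```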
data/life_coach_model/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3303b16ba168f5bbb77a44ceec334fa03207a0fb22f515237feef4a9d6db245c
+ size 26225152
data/life_coach_model/chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {% for message in messages %}{% if (message['role'] == 'system') %}{{'<|im_start|>system<|im_sep|>' + message['content'] + '<|im_end|>'}}{% elif (message['role'] == 'user') %}{{'<|im_start|>user<|im_sep|>' + message['content'] + '<|im_end|>'}}{% elif (message['role'] == 'assistant') %}{{'<|im_start|>assistant<|im_sep|>' + message['content'] + '<|im_end|>'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant<|im_sep|>' }}{% endif %}
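
A small sketch (assumption, not part of the commit) of what this Jinja chat template produces when applied through the tokenizer saved alongside it:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("data/life_coach_model")
messages = [
    {"role": "system", "content": "You are a supportive life coach."},
    {"role": "user", "content": "I keep procrastinating."},
]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Expected shape of the output (one line, no newlines between turns):
# <|im_start|>system<|im_sep|>You are a supportive life coach.<|im_end|><|im_start|>user<|im_sep|>I keep procrastinating.<|im_end|><|im_start|>assistant<|im_sep|>
```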
data/life_coach_model/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/life_coach_model/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|dummy_85|>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   },
+   "unk_token": "<|endoftext|>"
+ }
data/life_coach_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
data/life_coach_model/tokenizer_config.json ADDED
@@ -0,0 +1,781 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "100256": {
5
+ "content": "<|dummy_0|>",
6
+ "lstrip": true,
7
+ "normalized": false,
8
+ "rstrip": true,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "100257": {
13
+ "content": "<|endoftext|>",
14
+ "lstrip": true,
15
+ "normalized": false,
16
+ "rstrip": true,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "100258": {
21
+ "content": "<|fim_prefix|>",
22
+ "lstrip": true,
23
+ "normalized": false,
24
+ "rstrip": true,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "100259": {
29
+ "content": "<|fim_middle|>",
30
+ "lstrip": true,
31
+ "normalized": false,
32
+ "rstrip": true,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "100260": {
37
+ "content": "<|fim_suffix|>",
38
+ "lstrip": true,
39
+ "normalized": false,
40
+ "rstrip": true,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "100261": {
45
+ "content": "<|dummy_1|>",
46
+ "lstrip": true,
47
+ "normalized": false,
48
+ "rstrip": true,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "100262": {
53
+ "content": "<|dummy_2|>",
54
+ "lstrip": true,
55
+ "normalized": false,
56
+ "rstrip": true,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "100263": {
61
+ "content": "<|dummy_3|>",
62
+ "lstrip": true,
63
+ "normalized": false,
64
+ "rstrip": true,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "100264": {
69
+ "content": "<|im_start|>",
70
+ "lstrip": true,
71
+ "normalized": false,
72
+ "rstrip": true,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "100265": {
77
+ "content": "<|im_end|>",
78
+ "lstrip": true,
79
+ "normalized": false,
80
+ "rstrip": true,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "100266": {
85
+ "content": "<|im_sep|>",
86
+ "lstrip": true,
87
+ "normalized": false,
88
+ "rstrip": true,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "100267": {
93
+ "content": "<|dummy_4|>",
94
+ "lstrip": true,
95
+ "normalized": false,
96
+ "rstrip": true,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "100268": {
101
+ "content": "<|dummy_5|>",
102
+ "lstrip": true,
103
+ "normalized": false,
104
+ "rstrip": true,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "100269": {
109
+ "content": "<|dummy_6|>",
110
+ "lstrip": true,
111
+ "normalized": false,
112
+ "rstrip": true,
113
+ "single_word": false,
114
+ "special": true
115
+ },
116
+ "100270": {
117
+ "content": "<|dummy_7|>",
118
+ "lstrip": true,
119
+ "normalized": false,
120
+ "rstrip": true,
121
+ "single_word": false,
122
+ "special": true
123
+ },
124
+ "100271": {
125
+ "content": "<|dummy_8|>",
126
+ "lstrip": true,
127
+ "normalized": false,
128
+ "rstrip": true,
129
+ "single_word": false,
130
+ "special": true
131
+ },
132
+ "100272": {
133
+ "content": "<|dummy_9|>",
134
+ "lstrip": true,
135
+ "normalized": false,
136
+ "rstrip": true,
137
+ "single_word": false,
138
+ "special": true
139
+ },
140
+ "100273": {
141
+ "content": "<|dummy_10|>",
142
+ "lstrip": true,
143
+ "normalized": false,
144
+ "rstrip": true,
145
+ "single_word": false,
146
+ "special": true
147
+ },
148
+ "100274": {
149
+ "content": "<|dummy_11|>",
150
+ "lstrip": true,
151
+ "normalized": false,
152
+ "rstrip": true,
153
+ "single_word": false,
154
+ "special": true
155
+ },
156
+ "100275": {
157
+ "content": "<|dummy_12|>",
158
+ "lstrip": true,
159
+ "normalized": false,
160
+ "rstrip": true,
161
+ "single_word": false,
162
+ "special": true
163
+ },
164
+ "100276": {
165
+ "content": "<|endofprompt|>",
166
+ "lstrip": true,
167
+ "normalized": false,
168
+ "rstrip": true,
169
+ "single_word": false,
170
+ "special": true
171
+ },
172
+ "100277": {
173
+ "content": "<|dummy_13|>",
174
+ "lstrip": true,
175
+ "normalized": false,
176
+ "rstrip": true,
177
+ "single_word": false,
178
+ "special": true
179
+ },
180
+ "100278": {
181
+ "content": "<|dummy_14|>",
182
+ "lstrip": true,
183
+ "normalized": false,
184
+ "rstrip": true,
185
+ "single_word": false,
186
+ "special": true
187
+ },
188
+ "100279": {
189
+ "content": "<|dummy_15|>",
190
+ "lstrip": true,
191
+ "normalized": false,
192
+ "rstrip": true,
193
+ "single_word": false,
194
+ "special": true
195
+ },
196
+ "100280": {
197
+ "content": "<|dummy_16|>",
198
+ "lstrip": true,
199
+ "normalized": false,
200
+ "rstrip": true,
201
+ "single_word": false,
202
+ "special": true
203
+ },
204
+ "100281": {
205
+ "content": "<|dummy_17|>",
206
+ "lstrip": true,
207
+ "normalized": false,
208
+ "rstrip": true,
209
+ "single_word": false,
210
+ "special": true
211
+ },
212
+ "100282": {
213
+ "content": "<|dummy_18|>",
214
+ "lstrip": true,
215
+ "normalized": false,
216
+ "rstrip": true,
217
+ "single_word": false,
218
+ "special": true
219
+ },
220
+ "100283": {
221
+ "content": "<|dummy_19|>",
222
+ "lstrip": true,
223
+ "normalized": false,
224
+ "rstrip": true,
225
+ "single_word": false,
226
+ "special": true
227
+ },
228
+ "100284": {
229
+ "content": "<|dummy_20|>",
230
+ "lstrip": true,
231
+ "normalized": false,
232
+ "rstrip": true,
233
+ "single_word": false,
234
+ "special": true
235
+ },
236
+ "100285": {
237
+ "content": "<|dummy_21|>",
238
+ "lstrip": true,
239
+ "normalized": false,
240
+ "rstrip": true,
241
+ "single_word": false,
242
+ "special": true
243
+ },
244
+ "100286": {
245
+ "content": "<|dummy_22|>",
246
+ "lstrip": true,
247
+ "normalized": false,
248
+ "rstrip": true,
249
+ "single_word": false,
250
+ "special": true
251
+ },
252
+ "100287": {
253
+ "content": "<|dummy_23|>",
254
+ "lstrip": true,
255
+ "normalized": false,
256
+ "rstrip": true,
257
+ "single_word": false,
258
+ "special": true
259
+ },
260
+ "100288": {
261
+ "content": "<|dummy_24|>",
262
+ "lstrip": true,
263
+ "normalized": false,
264
+ "rstrip": true,
265
+ "single_word": false,
266
+ "special": true
267
+ },
268
+ "100289": {
269
+ "content": "<|dummy_25|>",
270
+ "lstrip": true,
271
+ "normalized": false,
272
+ "rstrip": true,
273
+ "single_word": false,
274
+ "special": true
275
+ },
276
+ "100290": {
277
+ "content": "<|dummy_26|>",
278
+ "lstrip": true,
279
+ "normalized": false,
280
+ "rstrip": true,
281
+ "single_word": false,
282
+ "special": true
283
+ },
284
+ "100291": {
285
+ "content": "<|dummy_27|>",
286
+ "lstrip": true,
287
+ "normalized": false,
288
+ "rstrip": true,
289
+ "single_word": false,
290
+ "special": true
291
+ },
292
+ "100292": {
293
+ "content": "<|dummy_28|>",
294
+ "lstrip": true,
295
+ "normalized": false,
296
+ "rstrip": true,
297
+ "single_word": false,
298
+ "special": true
299
+ },
300
+ "100293": {
301
+ "content": "<|dummy_29|>",
302
+ "lstrip": true,
303
+ "normalized": false,
304
+ "rstrip": true,
305
+ "single_word": false,
306
+ "special": true
307
+ },
308
+ "100294": {
309
+ "content": "<|dummy_30|>",
310
+ "lstrip": true,
311
+ "normalized": false,
312
+ "rstrip": true,
313
+ "single_word": false,
314
+ "special": true
315
+ },
316
+ "100295": {
317
+ "content": "<|dummy_31|>",
318
+ "lstrip": true,
319
+ "normalized": false,
320
+ "rstrip": true,
321
+ "single_word": false,
322
+ "special": true
323
+ },
324
+ "100296": {
325
+ "content": "<|dummy_32|>",
326
+ "lstrip": true,
327
+ "normalized": false,
328
+ "rstrip": true,
329
+ "single_word": false,
330
+ "special": true
331
+ },
332
+ "100297": {
333
+ "content": "<|dummy_33|>",
334
+ "lstrip": true,
335
+ "normalized": false,
336
+ "rstrip": true,
337
+ "single_word": false,
338
+ "special": true
339
+ },
340
+ "100298": {
341
+ "content": "<|dummy_34|>",
342
+ "lstrip": true,
343
+ "normalized": false,
344
+ "rstrip": true,
345
+ "single_word": false,
346
+ "special": true
347
+ },
348
+ "100299": {
349
+ "content": "<|dummy_35|>",
350
+ "lstrip": true,
351
+ "normalized": false,
352
+ "rstrip": true,
353
+ "single_word": false,
354
+ "special": true
355
+ },
356
+ "100300": {
357
+ "content": "<|dummy_36|>",
358
+ "lstrip": true,
359
+ "normalized": false,
360
+ "rstrip": true,
361
+ "single_word": false,
362
+ "special": true
363
+ },
364
+ "100301": {
365
+ "content": "<|dummy_37|>",
366
+ "lstrip": true,
367
+ "normalized": false,
368
+ "rstrip": true,
369
+ "single_word": false,
370
+ "special": true
371
+ },
372
+ "100302": {
373
+ "content": "<|dummy_38|>",
374
+ "lstrip": true,
375
+ "normalized": false,
376
+ "rstrip": true,
377
+ "single_word": false,
378
+ "special": true
379
+ },
380
+ "100303": {
381
+ "content": "<|dummy_39|>",
382
+ "lstrip": true,
383
+ "normalized": false,
384
+ "rstrip": true,
385
+ "single_word": false,
386
+ "special": true
387
+ },
388
+ "100304": {
389
+ "content": "<|dummy_40|>",
390
+ "lstrip": true,
391
+ "normalized": false,
392
+ "rstrip": true,
393
+ "single_word": false,
394
+ "special": true
395
+ },
396
+ "100305": {
397
+ "content": "<|dummy_41|>",
398
+ "lstrip": true,
399
+ "normalized": false,
400
+ "rstrip": true,
401
+ "single_word": false,
402
+ "special": true
403
+ },
404
+ "100306": {
405
+ "content": "<|dummy_42|>",
406
+ "lstrip": true,
407
+ "normalized": false,
408
+ "rstrip": true,
409
+ "single_word": false,
410
+ "special": true
411
+ },
412
+ "100307": {
413
+ "content": "<|dummy_43|>",
414
+ "lstrip": true,
415
+ "normalized": false,
416
+ "rstrip": true,
417
+ "single_word": false,
418
+ "special": true
419
+ },
420
+ "100308": {
421
+ "content": "<|dummy_44|>",
422
+ "lstrip": true,
423
+ "normalized": false,
424
+ "rstrip": true,
425
+ "single_word": false,
426
+ "special": true
427
+ },
428
+ "100309": {
429
+ "content": "<|dummy_45|>",
430
+ "lstrip": true,
431
+ "normalized": false,
432
+ "rstrip": true,
433
+ "single_word": false,
434
+ "special": true
435
+ },
436
+ "100310": {
437
+ "content": "<|dummy_46|>",
438
+ "lstrip": true,
439
+ "normalized": false,
440
+ "rstrip": true,
441
+ "single_word": false,
442
+ "special": true
443
+ },
444
+ "100311": {
445
+ "content": "<|dummy_47|>",
446
+ "lstrip": true,
447
+ "normalized": false,
448
+ "rstrip": true,
449
+ "single_word": false,
450
+ "special": true
451
+ },
452
+ "100312": {
453
+ "content": "<|dummy_48|>",
454
+ "lstrip": true,
455
+ "normalized": false,
456
+ "rstrip": true,
457
+ "single_word": false,
458
+ "special": true
459
+ },
460
+ "100313": {
461
+ "content": "<|dummy_49|>",
462
+ "lstrip": true,
463
+ "normalized": false,
464
+ "rstrip": true,
465
+ "single_word": false,
466
+ "special": true
467
+ },
468
+ "100314": {
469
+ "content": "<|dummy_50|>",
470
+ "lstrip": true,
471
+ "normalized": false,
472
+ "rstrip": true,
473
+ "single_word": false,
474
+ "special": true
475
+ },
476
+ "100315": {
477
+ "content": "<|dummy_51|>",
478
+ "lstrip": true,
479
+ "normalized": false,
480
+ "rstrip": true,
481
+ "single_word": false,
482
+ "special": true
483
+ },
484
+ "100316": {
485
+ "content": "<|dummy_52|>",
486
+ "lstrip": true,
487
+ "normalized": false,
488
+ "rstrip": true,
489
+ "single_word": false,
490
+ "special": true
491
+ },
492
+ "100317": {
493
+ "content": "<|dummy_53|>",
494
+ "lstrip": true,
495
+ "normalized": false,
496
+ "rstrip": true,
497
+ "single_word": false,
498
+ "special": true
499
+ },
500
+ "100318": {
501
+ "content": "<|dummy_54|>",
502
+ "lstrip": true,
503
+ "normalized": false,
504
+ "rstrip": true,
505
+ "single_word": false,
506
+ "special": true
507
+ },
508
+ "100319": {
509
+ "content": "<|dummy_55|>",
510
+ "lstrip": true,
511
+ "normalized": false,
512
+ "rstrip": true,
513
+ "single_word": false,
514
+ "special": true
515
+ },
516
+ "100320": {
517
+ "content": "<|dummy_56|>",
518
+ "lstrip": true,
519
+ "normalized": false,
520
+ "rstrip": true,
521
+ "single_word": false,
522
+ "special": true
523
+ },
524
+ "100321": {
525
+ "content": "<|dummy_57|>",
526
+ "lstrip": true,
527
+ "normalized": false,
528
+ "rstrip": true,
529
+ "single_word": false,
530
+ "special": true
531
+ },
532
+ "100322": {
533
+ "content": "<|dummy_58|>",
534
+ "lstrip": true,
535
+ "normalized": false,
536
+ "rstrip": true,
537
+ "single_word": false,
538
+ "special": true
539
+ },
540
+ "100323": {
541
+ "content": "<|dummy_59|>",
542
+ "lstrip": true,
543
+ "normalized": false,
544
+ "rstrip": true,
545
+ "single_word": false,
546
+ "special": true
547
+ },
548
+ "100324": {
549
+ "content": "<|dummy_60|>",
550
+ "lstrip": true,
551
+ "normalized": false,
552
+ "rstrip": true,
553
+ "single_word": false,
554
+ "special": true
555
+ },
556
+ "100325": {
557
+ "content": "<|dummy_61|>",
558
+ "lstrip": true,
559
+ "normalized": false,
560
+ "rstrip": true,
561
+ "single_word": false,
562
+ "special": true
563
+ },
564
+ "100326": {
565
+ "content": "<|dummy_62|>",
566
+ "lstrip": true,
567
+ "normalized": false,
568
+ "rstrip": true,
569
+ "single_word": false,
570
+ "special": true
571
+ },
572
+ "100327": {
573
+ "content": "<|dummy_63|>",
574
+ "lstrip": true,
575
+ "normalized": false,
576
+ "rstrip": true,
577
+ "single_word": false,
578
+ "special": true
579
+ },
580
+ "100328": {
581
+ "content": "<|dummy_64|>",
582
+ "lstrip": true,
583
+ "normalized": false,
584
+ "rstrip": true,
585
+ "single_word": false,
586
+ "special": true
587
+ },
588
+ "100329": {
589
+ "content": "<|dummy_65|>",
590
+ "lstrip": true,
591
+ "normalized": false,
592
+ "rstrip": true,
593
+ "single_word": false,
594
+ "special": true
595
+ },
596
+ "100330": {
597
+ "content": "<|dummy_66|>",
598
+ "lstrip": true,
599
+ "normalized": false,
600
+ "rstrip": true,
601
+ "single_word": false,
602
+ "special": true
603
+ },
604
+ "100331": {
605
+ "content": "<|dummy_67|>",
606
+ "lstrip": true,
607
+ "normalized": false,
608
+ "rstrip": true,
609
+ "single_word": false,
610
+ "special": true
611
+ },
612
+ "100332": {
613
+ "content": "<|dummy_68|>",
614
+ "lstrip": true,
615
+ "normalized": false,
616
+ "rstrip": true,
617
+ "single_word": false,
618
+ "special": true
619
+ },
620
+ "100333": {
621
+ "content": "<|dummy_69|>",
622
+ "lstrip": true,
623
+ "normalized": false,
624
+ "rstrip": true,
625
+ "single_word": false,
626
+ "special": true
627
+ },
628
+ "100334": {
629
+ "content": "<|dummy_70|>",
630
+ "lstrip": true,
631
+ "normalized": false,
632
+ "rstrip": true,
633
+ "single_word": false,
634
+ "special": true
635
+ },
636
+ "100335": {
637
+ "content": "<|dummy_71|>",
638
+ "lstrip": true,
639
+ "normalized": false,
640
+ "rstrip": true,
641
+ "single_word": false,
642
+ "special": true
643
+ },
644
+ "100336": {
645
+ "content": "<|dummy_72|>",
646
+ "lstrip": true,
647
+ "normalized": false,
648
+ "rstrip": true,
649
+ "single_word": false,
650
+ "special": true
651
+ },
652
+ "100337": {
653
+ "content": "<|dummy_73|>",
654
+ "lstrip": true,
655
+ "normalized": false,
656
+ "rstrip": true,
657
+ "single_word": false,
658
+ "special": true
659
+ },
660
+ "100338": {
661
+ "content": "<|dummy_74|>",
662
+ "lstrip": true,
663
+ "normalized": false,
664
+ "rstrip": true,
665
+ "single_word": false,
666
+ "special": true
667
+ },
668
+ "100339": {
669
+ "content": "<|dummy_75|>",
670
+ "lstrip": true,
671
+ "normalized": false,
672
+ "rstrip": true,
673
+ "single_word": false,
674
+ "special": true
675
+ },
676
+ "100340": {
677
+ "content": "<|dummy_76|>",
678
+ "lstrip": true,
679
+ "normalized": false,
680
+ "rstrip": true,
681
+ "single_word": false,
682
+ "special": true
683
+ },
684
+ "100341": {
685
+ "content": "<|dummy_77|>",
686
+ "lstrip": true,
687
+ "normalized": false,
688
+ "rstrip": true,
689
+ "single_word": false,
690
+ "special": true
691
+ },
692
+ "100342": {
693
+ "content": "<|dummy_78|>",
694
+ "lstrip": true,
695
+ "normalized": false,
696
+ "rstrip": true,
697
+ "single_word": false,
698
+ "special": true
699
+ },
700
+ "100343": {
701
+ "content": "<|dummy_79|>",
702
+ "lstrip": true,
703
+ "normalized": false,
704
+ "rstrip": true,
705
+ "single_word": false,
706
+ "special": true
707
+ },
708
+ "100344": {
709
+ "content": "<|dummy_80|>",
710
+ "lstrip": true,
711
+ "normalized": false,
712
+ "rstrip": true,
713
+ "single_word": false,
714
+ "special": true
715
+ },
716
+ "100345": {
717
+ "content": "<|dummy_81|>",
718
+ "lstrip": true,
719
+ "normalized": false,
720
+ "rstrip": true,
721
+ "single_word": false,
722
+ "special": true
723
+ },
724
+ "100346": {
725
+ "content": "<|dummy_82|>",
726
+ "lstrip": true,
727
+ "normalized": false,
728
+ "rstrip": true,
729
+ "single_word": false,
730
+ "special": true
731
+ },
732
+ "100347": {
733
+ "content": "<|dummy_83|>",
734
+ "lstrip": true,
735
+ "normalized": false,
736
+ "rstrip": true,
737
+ "single_word": false,
738
+ "special": true
739
+ },
740
+ "100348": {
741
+ "content": "<|dummy_84|>",
742
+ "lstrip": true,
743
+ "normalized": false,
744
+ "rstrip": true,
745
+ "single_word": false,
746
+ "special": true
747
+ },
748
+ "100349": {
749
+ "content": "<|dummy_85|>",
750
+ "lstrip": true,
751
+ "normalized": false,
752
+ "rstrip": true,
753
+ "single_word": false,
754
+ "special": true
755
+ },
756
+ "100350": {
757
+ "content": "<|dummy_86|>",
758
+ "lstrip": true,
759
+ "normalized": false,
760
+ "rstrip": true,
761
+ "single_word": false,
762
+ "special": true
763
+ },
764
+ "100351": {
765
+ "content": "<|dummy_87|>",
766
+ "lstrip": true,
767
+ "normalized": false,
768
+ "rstrip": true,
769
+ "single_word": false,
770
+ "special": true
771
+ }
772
+ },
773
+ "bos_token": "<|endoftext|>",
774
+ "clean_up_tokenization_spaces": false,
775
+ "eos_token": "<|im_end|>",
776
+ "extra_special_tokens": {},
777
+ "model_max_length": 16384,
778
+ "pad_token": "<|dummy_85|>",
779
+ "tokenizer_class": "GPT2Tokenizer",
780
+ "unk_token": "<|endoftext|>"
781
+ }
data/life_coach_model/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
data/lifecoach.db ADDED
Binary file (41 kB). View file
 
life_coach_v1.py ADDED
@@ -0,0 +1,1210 @@
+ #!/usr/bin/env python3
+ """
+ Life Coach v1 - Phi-4 Fine-tuned Life Coaching Assistant
+
+ A simple command-line life coaching assistant using Microsoft's Phi-4 model.
+ Fine-tunes on life coaching conversations and provides interactive chat sessions.
+ """
+
+ import torch
+ import json
+ import os
+ import gc
+ import argparse
+ from pathlib import Path
+ from typing import Optional
+ from tqdm import tqdm
+
+ # Set PyTorch CUDA memory allocation config to reduce fragmentation
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
+
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForCausalLM,
+     TrainingArguments,
+     Trainer,
+     DataCollatorForSeq2Seq
+ )
+ from datasets import Dataset, load_dataset, concatenate_datasets
+ from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, TaskType
+ import logging
+ import random
+ import shutil
+ import gzip
+ from typing import List, Dict
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+
+ def cleanup_gpu_memory():
+     """
+     Clean up GPU memory before starting the program.
+     Clears PyTorch cache and runs garbage collection.
+     """
+     logger.info("=" * 80)
+     logger.info("GPU MEMORY CLEANUP")
+     logger.info("=" * 80)
+
+     if torch.cuda.is_available():
+         # Clear PyTorch CUDA cache
+         torch.cuda.empty_cache()
+
+         # Run garbage collection
+         gc.collect()
+
+         # Get GPU memory stats
+         for i in range(torch.cuda.device_count()):
+             total = torch.cuda.get_device_properties(i).total_memory / 1024**3
+             reserved = torch.cuda.memory_reserved(i) / 1024**3
+             allocated = torch.cuda.memory_allocated(i) / 1024**3
+             free = total - reserved
+
+             logger.info(f"GPU {i}: {torch.cuda.get_device_name(i)}")
+             logger.info(f"  Total memory: {total:.2f} GB")
+             logger.info(f"  Reserved: {reserved:.2f} GB")
+             logger.info(f"  Allocated: {allocated:.2f} GB")
+             logger.info(f"  Free: {free:.2f} GB")
+
+             if reserved > 1.0:  # More than 1GB reserved
+                 logger.warning(f"  ⚠️ GPU {i} has {reserved:.2f} GB reserved!")
+                 logger.warning(f"  ⚠️ This might be from a previous run.")
+                 logger.warning(f"  ⚠️ If you encounter OOM errors, kill other processes using:")
+                 logger.warning(f"  ⚠️ nvidia-smi | grep python")
+     else:
+         logger.warning("No CUDA GPUs available! Running on CPU (very slow).")
+
+     logger.info("=" * 80)
+
+
+ def clear_hf_cache():
+     """Clear Hugging Face datasets cache to save disk space."""
+     try:
+         from datasets import config
+         cache_dir = config.HF_DATASETS_CACHE
+         if os.path.exists(cache_dir):
+             # Get size before clearing
+             size_mb = sum(os.path.getsize(os.path.join(dirpath, filename))
+                           for dirpath, _, filenames in os.walk(cache_dir)
+                           for filename in filenames) / (1024 * 1024)
+
+             logger.info(f"Clearing HF cache ({size_mb:.1f} MB)...")
+             shutil.rmtree(cache_dir, ignore_errors=True)
+             os.makedirs(cache_dir, exist_ok=True)
+             logger.info("✓ Cache cleared")
+     except Exception as e:
+         logger.warning(f"Failed to clear cache: {e}")
+
+
+ def load_mental_health_counseling() -> List[Dict]:
+     """Load Amod/mental_health_counseling_conversations dataset - ALL samples."""
+     logger.info(f"Loading mental health counseling dataset...")
+     try:
+         dataset = load_dataset("Amod/mental_health_counseling_conversations", split="train")
+         logger.info(f"  Dataset has {len(dataset)} samples available")
+
+         conversations = []
+         for item in dataset:
+             # Format: Context (user) -> Response (assistant)
+             conversations.append({
+                 "messages": [
+                     {"role": "user", "content": item.get("Context", "").strip()},
+                     {"role": "assistant", "content": item.get("Response", "").strip()}
+                 ]
+             })
+
+         logger.info(f"✓ Loaded {len(conversations)} mental health counseling conversations")
+         return conversations
+     except Exception as e:
+         logger.warning(f"Failed to load mental health counseling dataset: {e}")
+         return []
+
+
+ def load_counsel_chat() -> List[Dict]:
+     """Load nbertagnolli/counsel-chat dataset - ALL samples."""
+     logger.info(f"Loading CounselChat (nbertagnolli) dataset...")
+     try:
+         dataset = load_dataset("nbertagnolli/counsel-chat", split="train")
+         logger.info(f"  Dataset has {len(dataset)} samples available")
+
+         conversations = []
+         for item in dataset:
+             # Try different possible field names
+             question = None
+             answer = None
+
+             # Common field patterns
+             for q_field in ["questionText", "question", "query", "input", "user_message"]:
+                 if q_field in item and item.get(q_field):
+                     question = item[q_field].strip()
+                     break
+
+             for a_field in ["answerText", "answer", "response", "output", "counselor_message"]:
+                 if a_field in item and item.get(a_field):
+                     answer = item[a_field].strip()
+                     break
+
+             if question and answer:
+                 conversations.append({
+                     "messages": [
+                         {"role": "user", "content": question},
+                         {"role": "assistant", "content": answer}
+                     ]
+                 })
+
+         logger.info(f"✓ Loaded {len(conversations)} CounselChat conversations")
+         return conversations
+     except Exception as e:
+         logger.warning(f"Failed to load CounselChat dataset: {e}")
+         return []
+
+
+ def load_cbt_cognitive_distortions() -> List[Dict]:
+     """Load epsilon3/cbt-cognitive-distortions-analysis dataset - ALL samples."""
+     logger.info(f"Loading CBT Cognitive Distortions dataset...")
+     try:
+         dataset = load_dataset("epsilon3/cbt-cognitive-distortions-analysis", split="train")
+         logger.info(f"  Dataset has {len(dataset)} samples available")
+
+         conversations = []
+         for item in dataset:
+             # Try different field patterns
+             user_msg = None
+             assistant_msg = None
+
+             for u_field in ["input", "text", "thought", "statement", "user_input"]:
+                 if u_field in item and item.get(u_field):
+                     user_msg = item[u_field].strip()
+                     break
+
+             for a_field in ["output", "analysis", "reframe", "response", "cbt_response"]:
+                 if a_field in item and item.get(a_field):
+                     assistant_msg = item[a_field].strip()
+                     break
+
+             if user_msg and assistant_msg:
+                 conversations.append({
+                     "messages": [
+                         {"role": "user", "content": user_msg},
+                         {"role": "assistant", "content": assistant_msg}
+                     ]
+                 })
+
+         logger.info(f"✓ Loaded {len(conversations)} CBT Cognitive Distortions conversations")
198
+ return conversations
199
+ except Exception as e:
200
+ logger.warning(f"Failed to load CBT Cognitive Distortions dataset: {e}")
201
+ return []
202
+
203
+
204
+ def load_peer_counseling_reflections() -> List[Dict]:
205
+ """Load emoneil/reflections-in-peer-counseling dataset - ALL samples."""
206
+ logger.info(f"Loading Peer Counseling Reflections dataset...")
207
+ try:
208
+ dataset = load_dataset("emoneil/reflections-in-peer-counseling", split="train")
209
+ logger.info(f" Dataset has {len(dataset)} samples available")
210
+
211
+ conversations = []
212
+ for item in dataset:
213
+ # Try different field patterns
214
+ user_msg = None
215
+ assistant_msg = None
216
+
217
+ for u_field in ["question", "statement", "input", "user_message", "counselee"]:
218
+ if u_field in item and item.get(u_field):
219
+ user_msg = item[u_field].strip()
220
+ break
221
+
222
+ for a_field in ["reflection", "response", "output", "counselor_response", "counselor"]:
223
+ if a_field in item and item.get(a_field):
224
+ assistant_msg = item[a_field].strip()
225
+ break
226
+
227
+ if user_msg and assistant_msg:
228
+ conversations.append({
229
+ "messages": [
230
+ {"role": "user", "content": user_msg},
231
+ {"role": "assistant", "content": assistant_msg}
232
+ ]
233
+ })
234
+
235
+ logger.info(f"✓ Loaded {len(conversations)} Peer Counseling Reflections conversations")
236
+ return conversations
237
+ except Exception as e:
238
+ logger.warning(f"Failed to load Peer Counseling Reflections dataset: {e}")
239
+ return []
240
+
241
+
242
+ def load_dolly_dataset() -> List[Dict]:
243
+ """Load databricks-dolly-15k dataset (instruction-following) - ALL relevant samples."""
244
+ logger.info(f"Loading Dolly instruction dataset...")
245
+ try:
246
+ dataset = load_dataset("databricks/databricks-dolly-15k", split="train")
247
+ logger.info(f" Dataset has {len(dataset)} samples available")
248
+
249
+ # Filter for relevant categories (brainstorming, open_qa, creative_writing)
250
+ relevant_categories = {"brainstorming", "open_qa", "creative_writing", "general_qa"}
251
+
252
+ conversations = []
253
+ for item in dataset:
254
+ if item.get("category", "") in relevant_categories:
255
+ instruction = item.get("instruction", "").strip()
256
+ context = item.get("context", "").strip()
257
+ response = item.get("response", "").strip()
258
+
259
+ # Combine instruction and context if both exist
260
+ user_message = f"{instruction}\n\n{context}" if context else instruction
261
+
262
+ if user_message and response:
263
+ conversations.append({
264
+ "messages": [
265
+ {"role": "user", "content": user_message},
266
+ {"role": "assistant", "content": response}
267
+ ]
268
+ })
269
+
270
+ logger.info(f"✓ Loaded {len(conversations)} Dolly instruction conversations (filtered from {len(dataset)} total)")
271
+ return conversations
272
+ except Exception as e:
273
+ logger.warning(f"Failed to load Dolly dataset: {e}")
274
+ return []
275
+
276
+
277
+ def load_mentalchat16k() -> List[Dict]:
278
+ """Load ShenLab/MentalChat16K dataset - ALL samples."""
279
+ logger.info(f"Loading MentalChat16K dataset...")
280
+ try:
281
+ dataset = load_dataset("ShenLab/MentalChat16K", split="train")
282
+ logger.info(f" Dataset has {len(dataset)} samples available")
283
+
284
+ conversations = []
285
+ for item in dataset:
286
+ # Try different possible field names
287
+ user_msg = None
288
+ assistant_msg = None
289
+
290
+ # Common field name patterns
291
+ for user_field in ["query", "question", "input", "user", "prompt", "instruction"]:
292
+ if user_field in item and item.get(user_field):
293
+ user_msg = item[user_field].strip()
294
+ break
295
+
296
+ for assistant_field in ["response", "answer", "output", "assistant", "reply"]:
297
+ if assistant_field in item and item.get(assistant_field):
298
+ assistant_msg = item[assistant_field].strip()
299
+ break
300
+
301
+ if user_msg and assistant_msg:
302
+ conversations.append({
303
+ "messages": [
304
+ {"role": "user", "content": user_msg},
305
+ {"role": "assistant", "content": assistant_msg}
306
+ ]
307
+ })
308
+
309
+ logger.info(f"✓ Loaded {len(conversations)} MentalChat16K conversations")
310
+ return conversations
311
+ except Exception as e:
312
+ logger.warning(f"Failed to load MentalChat16K dataset: {e}")
313
+ return []
314
+
315
+
316
+ def load_additional_mental_health_datasets() -> List[Dict]:
317
+ """Load additional mental health datasets - ALL samples."""
318
+ logger.info(f"Loading additional mental health datasets...")
319
+
320
+ all_conversations = []
321
+
322
+ # List of additional datasets to try
323
+ additional_datasets = [
324
+ ("heliosbrahma/mental_health_chatbot_dataset", ["prompt", "question"], ["response", "answer"]),
325
+ ("mpingale/mental-health-chat-dataset", ["question", "query"], ["answer", "response"]),
326
+ ("sauravjoshi23/psychology-dataset", ["input", "question"], ["output", "answer"]),
327
+ ]
328
+
329
+ for dataset_name, user_fields, assistant_fields in additional_datasets:
330
+ try:
331
+ logger.info(f" Loading {dataset_name}...")
332
+ dataset = load_dataset(dataset_name, split="train")
333
+ logger.info(f" Has {len(dataset)} samples available")
334
+
335
+ for item in dataset:
336
+ # Try different field names
337
+ user_msg = None
338
+ assistant_msg = None
339
+
340
+ for field in user_fields:
341
+ if field in item and item.get(field):
342
+ user_msg = item[field].strip()
343
+ break
344
+
345
+ for field in assistant_fields:
346
+ if field in item and item.get(field):
347
+ assistant_msg = item[field].strip()
348
+ break
349
+
350
+ if user_msg and assistant_msg:
351
+ all_conversations.append({
352
+ "messages": [
353
+ {"role": "user", "content": user_msg},
354
+ {"role": "assistant", "content": assistant_msg}
355
+ ]
356
+ })
357
+
358
+ logger.info(f" ✓ Loaded {len([c for c in all_conversations if c])} from this dataset")
359
+
360
+ except Exception as e:
361
+ logger.warning(f" Failed: {e}")
362
+ continue
363
+
364
+ logger.info(f"✓ Loaded {len(all_conversations)} additional mental health conversations total")
365
+ return all_conversations
366
+
367
+
368
+ def quality_filter_conversation(conv: Dict, min_response_length: int = 50, max_total_length: int = 2048) -> bool:
369
+ """Filter conversation based on quality criteria."""
370
+ try:
371
+ messages = conv.get("messages", [])
372
+ if len(messages) < 2:
373
+ return False
374
+
375
+ # Check response length
376
+ assistant_msg = [m for m in messages if m.get("role") == "assistant"]
377
+ if not assistant_msg:
378
+ return False
379
+
380
+ response = assistant_msg[0].get("content", "")
381
+ if len(response) < min_response_length:
382
+ return False
383
+
384
+ # Check total length
385
+ total_length = sum(len(m.get("content", "")) for m in messages)
386
+ if total_length > max_total_length:
387
+ return False
388
+
389
+ # Check for empty messages
390
+ if any(not m.get("content", "").strip() for m in messages):
391
+ return False
392
+
393
+ return True
394
+ except Exception:
395
+ return False
396
+
397
+
398
+ def load_mixed_dataset(
399
+ total_samples: int = 100000,
400
+ cache_file: str = "mixed_lifecoach_dataset_100k.jsonl.gz", # Now compressed by default
401
+ use_cache: bool = True
402
+ ) -> List[Dict]:
403
+ """
404
+ Load and mix multiple datasets for comprehensive life coaching training.
405
+ Saves compressed cache to save disk space.
406
+
407
+ Datasets loaded (ALL available samples):
408
+ 1. Mental Health Counseling (Amod/mental_health_counseling_conversations)
409
+ 2. CounselChat (nbertagnolli/counsel-chat)
410
+ 3. CBT Cognitive Distortions (epsilon3/cbt-cognitive-distortions-analysis)
411
+ 4. Peer Counseling Reflections (emoneil/reflections-in-peer-counseling)
412
+ 5. MentalChat16K (ShenLab/MentalChat16K)
413
+ 6. Dolly Instructions (databricks/databricks-dolly-15k - filtered categories)
414
+ 7-8. Additional mental health datasets (heliosbrahma, mpingale, sauravjoshi23)
415
+ """
416
+ cache_path = Path(cache_file)
417
+ cache_path_uncompressed = Path(cache_file.replace('.gz', ''))
418
+
419
+ # Try to load from compressed cache first
420
+ if use_cache and cache_path.exists():
421
+ logger.info(f"Loading cached dataset from {cache_file} (compressed)...")
422
+ try:
423
+ conversations = []
424
+ with gzip.open(cache_path, 'rt', encoding='utf-8') as f:
425
+ for line in f:
426
+ conversations.append(json.loads(line.strip()))
427
+ logger.info(f"✓ Loaded {len(conversations)} conversations from compressed cache")
428
+ return conversations
429
+ except Exception as e:
430
+ logger.warning(f"Failed to load compressed cache: {e}. Trying uncompressed...")
431
+
432
+ # Try uncompressed cache (backward compatibility)
433
+ if use_cache and cache_path_uncompressed.exists():
434
+ logger.info(f"Loading cached dataset from {cache_path_uncompressed} (uncompressed)...")
435
+ try:
436
+ conversations = []
437
+ with open(cache_path_uncompressed, 'r', encoding='utf-8') as f:
438
+ for line in f:
439
+ conversations.append(json.loads(line.strip()))
440
+ logger.info(f"✓ Loaded {len(conversations)} conversations from uncompressed cache")
441
+ return conversations
442
+ except Exception as e:
443
+ logger.warning(f"Failed to load cache: {e}. Rebuilding dataset...")
444
+
445
+ # Load ALL available samples from each dataset
446
+ logger.info("=" * 80)
447
+ logger.info(f"LOADING MIXED DATASET (Target: ~{total_samples} samples)")
448
+ logger.info("Loading ALL available samples from each dataset")
449
+ logger.info("=" * 80)
450
+
451
+ all_conversations = []
452
+
453
+ # Load each dataset ONE AT A TIME and clear cache after each
454
+ # This saves disk space by not keeping all downloads simultaneously
455
+
456
+ logger.info("Dataset 1/8: Mental Health Counseling (Amod)")
457
+ all_conversations.extend(load_mental_health_counseling())
458
+ logger.info(f" Running total: {len(all_conversations)} conversations")
459
+ clear_hf_cache()
460
+ gc.collect()
461
+
462
+ # Stop early if we've reached target
463
+ if len(all_conversations) >= total_samples:
464
+ logger.info(f"✓ Reached target of {total_samples} samples, stopping dataset loading")
465
+ else:
466
+ logger.info("Dataset 2/8: CounselChat (nbertagnolli)")
467
+ all_conversations.extend(load_counsel_chat())
468
+ logger.info(f" Running total: {len(all_conversations)} conversations")
469
+ clear_hf_cache()
470
+ gc.collect()
471
+
472
+ if len(all_conversations) >= total_samples:
473
+ logger.info(f"✓ Reached target of {total_samples} samples, stopping dataset loading")
474
+ else:
475
+ logger.info("Dataset 3/8: CBT Cognitive Distortions (epsilon3)")
476
+ all_conversations.extend(load_cbt_cognitive_distortions())
477
+ logger.info(f" Running total: {len(all_conversations)} conversations")
478
+ clear_hf_cache()
479
+ gc.collect()
480
+
481
+ if len(all_conversations) >= total_samples:
482
+ logger.info(f"✓ Reached target of {total_samples} samples, stopping dataset loading")
483
+ else:
484
+ logger.info("Dataset 4/8: Peer Counseling Reflections (emoneil)")
485
+ all_conversations.extend(load_peer_counseling_reflections())
486
+ logger.info(f" Running total: {len(all_conversations)} conversations")
487
+ clear_hf_cache()
488
+ gc.collect()
489
+
490
+ if len(all_conversations) >= total_samples:
491
+ logger.info(f"✓ Reached target of {total_samples} samples, stopping dataset loading")
492
+ else:
493
+ logger.info("Dataset 5/8: MentalChat16K (ShenLab)")
494
+ all_conversations.extend(load_mentalchat16k())
495
+ logger.info(f" Running total: {len(all_conversations)} conversations")
496
+ clear_hf_cache()
497
+ gc.collect()
498
+
499
+ if len(all_conversations) >= total_samples:
500
+ logger.info(f"✓ Reached target of {total_samples} samples, stopping dataset loading")
501
+ else:
502
+ logger.info("Dataset 6/8: Dolly Instructions (databricks)")
503
+ all_conversations.extend(load_dolly_dataset())
504
+ logger.info(f" Running total: {len(all_conversations)} conversations")
505
+ clear_hf_cache()
506
+ gc.collect()
507
+
508
+ if len(all_conversations) >= total_samples:
509
+ logger.info(f"✓ Reached target of {total_samples} samples, stopping dataset loading")
510
+ else:
511
+ logger.info("Datasets 7-8: Additional Mental Health Datasets")
512
+ all_conversations.extend(load_additional_mental_health_datasets())
513
+ logger.info(f" Running total: {len(all_conversations)} conversations")
514
+ clear_hf_cache()
515
+ gc.collect()
516
+
517
+ logger.info("=" * 80)
518
+ logger.info(f"Total conversations loaded: {len(all_conversations)}")
519
+
520
+ # Apply quality filtering
521
+ logger.info("Applying quality filters...")
522
+ filtered_conversations = [conv for conv in all_conversations if quality_filter_conversation(conv)]
523
+ logger.info(f"✓ After filtering: {len(filtered_conversations)} conversations")
524
+
525
+ # Shuffle to mix datasets
526
+ random.shuffle(filtered_conversations)
527
+
528
+ # Trim to target size
529
+ if len(filtered_conversations) > total_samples:
530
+ filtered_conversations = filtered_conversations[:total_samples]
531
+
532
+ logger.info(f"Final dataset size: {len(filtered_conversations)} conversations")
533
+
534
+ # Save compressed cache to save disk space
535
+ if use_cache:
536
+ logger.info(f"Saving compressed cache to {cache_file}...")
537
+ try:
538
+ with gzip.open(cache_path, 'wt', encoding='utf-8') as f:
539
+ for conv in filtered_conversations:
540
+ f.write(json.dumps(conv, ensure_ascii=False) + '\n')
541
+
542
+ # Get file sizes for comparison
543
+ compressed_size_mb = cache_path.stat().st_size / (1024 * 1024)
544
+ logger.info(f"✓ Compressed cache saved successfully ({compressed_size_mb:.1f} MB)")
545
+ except Exception as e:
546
+ logger.warning(f"Failed to save compressed cache: {e}")
547
+
548
+ logger.info("=" * 80)
549
+ return filtered_conversations
550
+
551
+
552
+ class LifeCoachModel:
553
+ """Life coaching assistant using Phi-4 model."""
554
+
555
+ def __init__(
556
+ self,
557
+ model_name: str = "microsoft/Phi-4",
558
+ model_save_path: str = "/data/life_coach_model",
559
+ train_file: str = "cbt_life_coach_improved_50000.jsonl",
560
+ max_length: int = 2048
561
+ ):
562
+ """
563
+ Initialize the Life Coach model.
564
+
565
+ Args:
566
+ model_name: Hugging Face model identifier
567
+ model_save_path: Path to save/load fine-tuned model
568
+ train_file: Path to training data file (JSONL format)
569
+ max_length: Maximum sequence length for training
570
+ """
571
+ self.model_name = model_name
572
+
573
+ # Check if /data is writable, otherwise use local directory
574
+ save_path = Path(model_save_path)
575
+ if str(save_path).startswith("/data"):
576
+ try:
577
+ Path("/data").mkdir(parents=True, exist_ok=True)
578
+ # Test write permissions
579
+ test_file = Path("/data/.test_write")
580
+ test_file.touch()
581
+ test_file.unlink()
582
+ self.model_save_path = save_path
583
+ logger.info(f"Using /data directory for model storage: {save_path}")
584
+ except (PermissionError, OSError) as e:
585
+ # Fall back to local directory
586
+ local_path = Path("./data/life_coach_model")
587
+ logger.warning(f"/data directory not writable ({e}), using local directory: {local_path}")
588
+ self.model_save_path = local_path
589
+ else:
590
+ self.model_save_path = save_path
591
+
592
+ self.train_file = Path(train_file)
593
+ self.max_length = max_length
594
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
595
+
596
+ logger.info(f"Device: {self.device}")
597
+ logger.info(f"Model: {model_name}")
598
+ logger.info(f"Save path: {self.model_save_path}")
599
+ logger.info(f"Training file: {self.train_file}")
600
+
601
+ self.tokenizer = None
602
+ self.model = None
603
+
604
+ def load_tokenizer(self):
605
+ """Load the tokenizer."""
606
+ logger.info("Loading tokenizer...")
607
+ self.tokenizer = AutoTokenizer.from_pretrained(
608
+ self.model_name,
609
+ trust_remote_code=True
610
+ )
611
+
612
+ if self.tokenizer.pad_token is None:
613
+ self.tokenizer.pad_token = self.tokenizer.eos_token
614
+
615
+ logger.info("Tokenizer loaded successfully")
616
+
617
+ def load_model(self, fine_tuned: bool = False):
618
+ """
619
+ Load the model (base or fine-tuned).
620
+
621
+ Args:
622
+ fine_tuned: If True, load fine-tuned model from disk
623
+ """
624
+ if fine_tuned and self.model_save_path.exists():
625
+ logger.info(f"Loading fine-tuned model from {self.model_save_path}")
626
+ self.model = AutoModelForCausalLM.from_pretrained(
627
+ str(self.model_save_path),
628
+ torch_dtype=torch.float16,
629
+ device_map="auto",
630
+ trust_remote_code=True
631
+ )
632
+ else:
633
+ logger.info(f"Loading base model: {self.model_name}")
634
+
635
+ # Load model with 8-bit quantization (using quantum server approach)
636
+ # This is the deprecated API but works better with gradient checkpointing
637
+ # Force GPU 0 usage to avoid multi-GPU issues
638
+ self.model = AutoModelForCausalLM.from_pretrained(
639
+ self.model_name,
640
+ torch_dtype=torch.float16,
641
+ trust_remote_code=True,
642
+ device_map={"": 0}, # Force all layers on GPU 0
643
+ load_in_8bit=True # Use old API like quantum server
644
+ )
645
+
646
+ logger.info("Model loaded successfully")
647
+
648
+ def load_training_data(self, num_samples: Optional[int] = None) -> Dataset:
649
+ """
650
+ Load training data from mixed datasets or JSONL file.
651
+
652
+ Args:
653
+ num_samples: Number of samples to load (None for 100,000 default)
654
+
655
+ Returns:
656
+ Dataset object
657
+ """
658
+ # Try to load from mixed datasets first (new method)
659
+ # If train_file doesn't exist or is the old one, use mixed datasets
660
+ use_mixed_datasets = True
661
+
662
+ if self.train_file.exists():
663
+ # Check if it's the old single dataset file
664
+ if "cbt_life_coach" in str(self.train_file):
665
+ logger.info("Found old training file. Using new mixed datasets instead...")
666
+ use_mixed_datasets = True
667
+ else:
668
+ # It might be a cached mixed dataset
669
+ logger.info(f"Found training file at {self.train_file}")
670
+ use_mixed_datasets = False
671
+
672
+ if use_mixed_datasets:
673
+ # Load mixed datasets from Hugging Face
674
+ logger.info("Loading mixed datasets from Hugging Face...")
675
+ if num_samples is None:
676
+ num_samples = 100000 # Default to 100k samples
677
+
678
+ # Load mixed dataset (will use cache if available)
679
+ cache_file = f"mixed_lifecoach_dataset_{num_samples}.jsonl.gz" # Compressed format
680
+ data = load_mixed_dataset(
681
+ total_samples=num_samples,
682
+ cache_file=cache_file,
683
+ use_cache=True
684
+ )
685
+ else:
686
+ # Fall back to loading from JSONL file
687
+ logger.info(f"Loading training data from {self.train_file}")
688
+ data = []
689
+ with open(self.train_file, 'r', encoding='utf-8') as f:
690
+ for i, line in enumerate(f):
691
+ if num_samples and i >= num_samples:
692
+ break
693
+ try:
694
+ data.append(json.loads(line.strip()))
695
+ except json.JSONDecodeError:
696
+ logger.warning(f"Skipping invalid JSON at line {i+1}")
697
+
698
+ logger.info(f"Loaded {len(data)} training examples")
699
+
700
+ # Convert to Hugging Face Dataset
701
+ dataset = Dataset.from_list(data)
702
+
703
+ # Preprocess for Phi-4 format
704
+ logger.info("Preprocessing data for Phi-4 format...")
705
+ dataset = dataset.map(
706
+ self._preprocess_function,
707
+ batched=True,
708
+ remove_columns=dataset.column_names,
709
+ desc="Tokenizing"
710
+ )
711
+
712
+ return dataset
713
+
714
+ def _preprocess_function(self, examples):
715
+ """
716
+ Preprocess data into Phi-4 chat format.
717
+
718
+ Phi-4 uses:
719
+ <|system|>
720
+ {system message}<|end|>
721
+ <|user|>
722
+ {user message}<|end|>
723
+ <|assistant|>
724
+ {assistant response}<|end|>
725
+ """
726
+ texts = []
727
+
728
+ # Handle both 'conversations' (our format) and 'messages' (standard format)
729
+ conversations_key = 'conversations' if 'conversations' in examples else 'messages'
730
+
731
+ for conversation in examples[conversations_key]:
732
+ text = ""
733
+ for message in conversation:
734
+ # Handle both 'from'/'value' and 'role'/'content' formats
735
+ if 'from' in message:
736
+ role = message['from']
737
+ content = message['value']
738
+ else:
739
+ role = message['role']
740
+ content = message['content']
741
+
742
+ # Convert to Phi-4 format
743
+ if role == 'system':
744
+ text += f"<|system|>\n{content}<|end|>\n"
745
+ elif role == 'user':
746
+ text += f"<|user|>\n{content}<|end|>\n"
747
+ elif role == 'assistant':
748
+ text += f"<|assistant|>\n{content}<|end|>\n"
749
+
750
+ texts.append(text)
751
+
752
+ # Tokenize with dynamic padding (like quantum server)
753
+ # Don't pad here - let DataCollatorForSeq2Seq handle it dynamically per batch
754
+ model_inputs = self.tokenizer(
755
+ texts,
756
+ max_length=self.max_length,
757
+ truncation=True,
758
+ padding=False, # Dynamic padding - saves massive memory!
759
+ return_tensors=None # Don't convert to tensors yet
760
+ )
761
+
762
+ # Set labels (for causal language modeling, labels = input_ids)
763
+ # Note: .copy() instead of .clone() since we're not using tensors yet
764
+ model_inputs["labels"] = model_inputs["input_ids"].copy()
765
+
766
+ return model_inputs
767
+
768
+ def setup_lora(self):
769
+ """Setup LoRA (Low-Rank Adaptation) for efficient fine-tuning."""
770
+ logger.info("Setting up LoRA adapters...")
771
+
772
+ # Prepare model for k-bit training (critical for load_in_8bit=True)
773
+ logger.info("Preparing model for 8-bit training...")
774
+ self.model = prepare_model_for_kbit_training(self.model)
775
+
776
+ # Enable gradient checkpointing to save GPU memory
777
+ # This reduces memory usage by 20-30 GB with minimal performance impact
778
+ if hasattr(self.model, 'gradient_checkpointing_enable'):
779
+ self.model.gradient_checkpointing_enable()
780
+ logger.info("✓ Gradient checkpointing enabled (saves 20-30 GB GPU memory)")
781
+
782
+ # LoRA configuration
783
+ lora_config = LoraConfig(
784
+ task_type=TaskType.CAUSAL_LM,
785
+ r=16, # Rank
786
+ lora_alpha=32,
787
+ lora_dropout=0.1,
788
+ bias="none",
789
+ target_modules=["q_proj", "k_proj", "v_proj", "o_proj"] # Attention layers
790
+ )
791
+
792
+ # Apply LoRA
793
+ self.model = get_peft_model(self.model, lora_config)
794
+
795
+ # Print trainable parameters
796
+ trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
797
+ total_params = sum(p.numel() for p in self.model.parameters())
798
+
799
+ logger.info(f"Trainable parameters: {trainable_params:,} / {total_params:,} "
800
+ f"({100 * trainable_params / total_params:.2f}%)")
801
+
802
+ def fine_tune(
803
+ self,
804
+ num_samples: Optional[int] = 5000,
805
+ epochs: int = 3,
806
+ batch_size: int = 8,
807
+ learning_rate: float = 5e-5,
808
+ gradient_accumulation_steps: int = 2
809
+ ):
810
+ """
811
+ Fine-tune the model on life coaching data.
812
+
813
+ Args:
814
+ num_samples: Number of training samples (None for all)
815
+ epochs: Number of training epochs
816
+ batch_size: Training batch size
817
+ learning_rate: Learning rate
818
+ gradient_accumulation_steps: Gradient accumulation steps (for memory efficiency)
819
+ """
820
+ logger.info("=" * 80)
821
+ logger.info("STARTING FINE-TUNING")
822
+ logger.info("=" * 80)
823
+
824
+ # Load data
825
+ dataset = self.load_training_data(num_samples)
826
+
827
+ # Setup LoRA
828
+ self.setup_lora()
829
+
830
+ # Training arguments
831
+ training_args = TrainingArguments(
832
+ output_dir="./training_output",
833
+ num_train_epochs=epochs,
834
+ per_device_train_batch_size=batch_size,
835
+ gradient_accumulation_steps=gradient_accumulation_steps,
836
+ learning_rate=learning_rate,
837
+ fp16=True, # Mixed precision training
838
+ logging_steps=10,
839
+ save_strategy="epoch",
840
+ save_total_limit=2,
841
+ warmup_steps=100,
842
+ weight_decay=0.01,
843
+ report_to="none", # Disable wandb/tensorboard
844
+ )
845
+
846
+ # Data collator
847
+ data_collator = DataCollatorForSeq2Seq(
848
+ tokenizer=self.tokenizer,
849
+ model=self.model,
850
+ padding=True
851
+ )
852
+
853
+ # Trainer
854
+ trainer = Trainer(
855
+ model=self.model,
856
+ args=training_args,
857
+ train_dataset=dataset,
858
+ data_collator=data_collator,
859
+ )
860
+
861
+ # Train
862
+ logger.info("Training started...")
863
+ trainer.train()
864
+
865
+ logger.info("=" * 80)
866
+ logger.info("TRAINING COMPLETED")
867
+ logger.info("=" * 80)
868
+
869
+ # Save model
870
+ self.save_model()
871
+
872
+ def save_model(self):
873
+ """Save the fine-tuned model to disk."""
874
+ logger.info(f"Saving model to {self.model_save_path}")
875
+
876
+ self.model_save_path.mkdir(parents=True, exist_ok=True)
877
+
878
+ # Save model and tokenizer
879
+ self.model.save_pretrained(str(self.model_save_path))
880
+ self.tokenizer.save_pretrained(str(self.model_save_path))
881
+
882
+ logger.info("Model saved successfully")
883
+
884
+ def generate_response(self, prompt: str, max_new_tokens: int = 128, conversation_history: list = None) -> str:
885
+ """
886
+ Generate a response to a user prompt.
887
+
888
+ Args:
889
+ prompt: User's input message
890
+ max_new_tokens: Maximum tokens to generate
891
+ conversation_history: List of previous messages for context
892
+
893
+ Returns:
894
+ Generated response
895
+ """
896
+ # Build full conversation context with system prompt
897
+ formatted_prompt = ""
898
+
899
+ # Add system prompt to guide the model's behavior
900
+ system_prompt = """You are Robert, a friendly and experienced life coach. Here's your background:
901
+
902
+ About You:
903
+ - Name: Robert (Bob to friends)
904
+ - Age: 42 years old
905
+ - Experience: 15 years as a certified life coach and motivational speaker
906
+ - Education: Master's degree in Psychology from UC Berkeley
907
+ - Specialties: Personal growth, career transitions, work-life balance, goal setting, stress management
908
+ - Personal: Married with two kids, enjoy hiking and meditation in your free time
909
+ - Approach: Warm, empathetic, practical, and solution-focused
910
+
911
+ Your Coaching Style:
912
+ - Respond ONLY to what the user actually tells you - never make assumptions about their problems
913
+ - Start conversations in a welcoming, open manner
914
+ - Ask clarifying questions to understand their situation better
915
+ - Provide practical, actionable advice based on what they share
916
+ - Be encouraging and positive, but also honest and realistic
917
+ - Keep responses concise and focused (2-4 sentences usually)
918
+ - Share brief personal insights when relevant, but keep the focus on the client
919
+
920
+ Important: Never assume clients have problems they haven't mentioned. Let them guide the conversation and share what's on their mind."""
921
+
922
+ formatted_prompt += f"<|system|>\n{system_prompt}<|end|>\n"
923
+
924
+ # Add conversation history if provided
925
+ if conversation_history:
926
+ for msg in conversation_history:
927
+ if msg["role"] == "user":
928
+ formatted_prompt += f"<|user|>\n{msg['content']}<|end|>\n"
929
+ elif msg["role"] == "assistant":
930
+ formatted_prompt += f"<|assistant|>\n{msg['content']}<|end|>\n"
931
+
932
+ # Add current prompt
933
+ formatted_prompt += f"<|user|>\n{prompt}<|end|>\n<|assistant|>\n"
934
+
935
+ # DEBUG: Print the full prompt being sent to the model
936
+ logger.info("=" * 80)
937
+ logger.info("FULL PROMPT SENT TO MODEL:")
938
+ logger.info(formatted_prompt)
939
+ logger.info("=" * 80)
940
+
941
+ # Tokenize
942
+ inputs = self.tokenizer(
943
+ formatted_prompt,
944
+ return_tensors="pt",
945
+ truncation=True,
946
+ max_length=self.max_length
947
+ ).to(self.device)
948
+
949
+ # Get input length to extract only new tokens
950
+ input_length = inputs['input_ids'].shape[1]
951
+
952
+ # Get the token ID for <|end|> to use as a stopping token
953
+ end_token_id = self.tokenizer.convert_tokens_to_ids("<|end|>")
954
+
955
+ # Build list of EOS token IDs (stop generation at <|end|> or EOS)
956
+ eos_token_ids = [self.tokenizer.eos_token_id]
957
+ if end_token_id is not None and end_token_id != self.tokenizer.unk_token_id:
958
+ eos_token_ids.append(end_token_id)
959
+
960
+ # Generate
961
+ with torch.no_grad():
962
+ outputs = self.model.generate(
963
+ **inputs,
964
+ max_new_tokens=max_new_tokens,
965
+ temperature=0.7, # Balanced - coherent but still creative
966
+ top_p=0.9, # Standard setting for focused responses
967
+ top_k=50, # Add top-k sampling
968
+ do_sample=True,
969
+ pad_token_id=self.tokenizer.pad_token_id,
970
+ eos_token_id=eos_token_ids, # Stop at <|end|> or EOS
971
+ repetition_penalty=1.15 # Stronger penalty to prevent repetition
972
+ )
973
+
974
+ # Decode ONLY the newly generated tokens (not the input)
975
+ generated_tokens = outputs[0][input_length:]
976
+
977
+ # Decode without skipping special tokens first to find the end marker
978
+ response_with_tokens = self.tokenizer.decode(generated_tokens, skip_special_tokens=False)
979
+
980
+ # Extract only up to the first <|end|> token (model may generate multi-turn conversations)
981
+ if "<|end|>" in response_with_tokens:
982
+ response_text = response_with_tokens.split("<|end|>")[0]
983
+ else:
984
+ response_text = response_with_tokens
985
+
986
+ # Clean up any remaining special tokens
987
+ response_text = response_text.replace("<|assistant|>", "").replace("<|user|>", "").replace("<|system|>", "")
988
+
989
+ # Remove any remaining special tokens using the tokenizer
990
+ response_text = response_text.strip()
991
+
992
+ return response_text
993
+
994
+ def interactive_chat(self):
995
+ """Start an interactive chat session."""
996
+ logger.info("=" * 80)
997
+ logger.info("LIFE COACH V1 - Interactive Chat Session")
998
+ logger.info("=" * 80)
999
+ print("\nWelcome to Life Coach v1!")
1000
+ print("I'm here to help you with life coaching, goal setting, motivation, and personal growth.")
1001
+ print("\nCommands:")
1002
+ print(" - Type your question or concern to get coaching advice")
1003
+ print(" - Type 'quit' or 'exit' to end the session")
1004
+ print(" - Type 'clear' to clear conversation history")
1005
+ print("=" * 80)
1006
+ print()
1007
+
1008
+ conversation_history = []
1009
+
1010
+ while True:
1011
+ try:
1012
+ # Get user input
1013
+ user_input = input("\n🧑 You: ").strip()
1014
+
1015
+ if not user_input:
1016
+ continue
1017
+
1018
+ # Check for exit commands
1019
+ if user_input.lower() in ['quit', 'exit', 'q']:
1020
+ print("\n👋 Thank you for using Life Coach v1. Take care!")
1021
+ break
1022
+
1023
+ # Check for clear command
1024
+ if user_input.lower() == 'clear':
1025
+ conversation_history = []
1026
+ print("✅ Conversation history cleared.")
1027
+ continue
1028
+
1029
+ # Generate response with conversation context
1030
+ print("\n🤖 Life Coach: ", end="", flush=True)
1031
+ response = self.generate_response(user_input, conversation_history=conversation_history)
1032
+ print(response)
1033
+
1034
+ # Update conversation history
1035
+ conversation_history.append({
1036
+ "role": "user",
1037
+ "content": user_input
1038
+ })
1039
+ conversation_history.append({
1040
+ "role": "assistant",
1041
+ "content": response
1042
+ })
1043
+
1044
+ except KeyboardInterrupt:
1045
+ print("\n\n👋 Session interrupted. Goodbye!")
1046
+ break
1047
+ except Exception as e:
1048
+ logger.error(f"Error during chat: {e}")
1049
+ print(f"\n❌ Error: {e}")
1050
+
1051
+
1052
+ def main():
1053
+ """Main entry point."""
1054
+ parser = argparse.ArgumentParser(
1055
+ description="Life Coach v1 - Phi-4 based life coaching assistant"
1056
+ )
1057
+
1058
+ parser.add_argument(
1059
+ "--mode",
1060
+ type=str,
1061
+ choices=["train", "chat", "both"],
1062
+ default="both",
1063
+ help="Mode: train (fine-tune only), chat (chat only), both (train then chat)"
1064
+ )
1065
+
1066
+ parser.add_argument(
1067
+ "--model-name",
1068
+ type=str,
1069
+ default="microsoft/Phi-4",
1070
+ help="Hugging Face model name"
1071
+ )
1072
+
1073
+ parser.add_argument(
1074
+ "--model-path",
1075
+ type=str,
1076
+ default="/data/life_coach_model",
1077
+ help="Path to save/load fine-tuned model"
1078
+ )
1079
+
1080
+ parser.add_argument(
1081
+ "--train-file",
1082
+ type=str,
1083
+ default="cbt_life_coach_improved_50000.jsonl",
1084
+ help="Path to training data file (JSONL format)"
1085
+ )
1086
+
1087
+ parser.add_argument(
1088
+ "--num-samples",
1089
+ type=int,
1090
+ default=-1,
1091
+ help="Number of training samples (default: -1 for all 100,000 from mixed datasets)"
1092
+ )
1093
+
1094
+ parser.add_argument(
1095
+ "--epochs",
1096
+ type=int,
1097
+ default=3,
1098
+ help="Number of training epochs"
1099
+ )
1100
+
1101
+ parser.add_argument(
1102
+ "--batch-size",
1103
+ type=int,
1104
+ default=4,
1105
+ help="Training batch size (default: 4 for memory safety)"
1106
+ )
1107
+
1108
+ parser.add_argument(
1109
+ "--learning-rate",
1110
+ type=float,
1111
+ default=5e-5,
1112
+ help="Learning rate (default: 5e-5, matching quantum server)"
1113
+ )
1114
+
1115
+ parser.add_argument(
1116
+ "--gradient-accumulation",
1117
+ type=int,
1118
+ default=4,
1119
+ help="Gradient accumulation steps (default: 4, effective batch=16)"
1120
+ )
1121
+
1122
+ parser.add_argument(
1123
+ "--force-retrain",
1124
+ action="store_true",
1125
+ help="Force retraining even if fine-tuned model exists"
1126
+ )
1127
+
1128
+ args = parser.parse_args()
1129
+
1130
+ # Clean up GPU memory before starting
1131
+ cleanup_gpu_memory()
1132
+
1133
+ # Initialize model
1134
+ coach = LifeCoachModel(
1135
+ model_name=args.model_name,
1136
+ model_save_path=args.model_path,
1137
+ train_file=args.train_file
1138
+ )
1139
+
1140
+ # Load tokenizer
1141
+ coach.load_tokenizer()
1142
+
1143
+ # Check if fine-tuned model already exists
1144
+ model_exists = coach.model_save_path.exists() and (coach.model_save_path / "adapter_model.safetensors").exists()
1145
+
1146
+ # Training mode
1147
+ if args.mode in ["train", "both"]:
1148
+ # Check if we should skip training
1149
+ if model_exists and not args.force_retrain:
1150
+ logger.info("=" * 80)
1151
+ logger.info("FINE-TUNED MODEL ALREADY EXISTS")
1152
+ logger.info("=" * 80)
1153
+ logger.info(f"Found existing model at: {coach.model_save_path}")
1154
+ logger.info("Skipping training. Loading existing model...")
1155
+ logger.info("(Use --force-retrain to retrain from scratch)")
1156
+ logger.info("=" * 80)
1157
+
1158
+ # Load the existing fine-tuned model
1159
+ coach.load_model(fine_tuned=True)
1160
+ else:
1161
+ if args.force_retrain and model_exists:
1162
+ logger.info("=" * 80)
1163
+ logger.info("FORCING RETRAINING (--force-retrain flag set)")
1164
+ logger.info("=" * 80)
1165
+
1166
+ # Load base model for training
1167
+ coach.load_model(fine_tuned=False)
1168
+
1169
+ # Fine-tune
1170
+ num_samples = None if args.num_samples == -1 else args.num_samples
1171
+ coach.fine_tune(
1172
+ num_samples=num_samples,
1173
+ epochs=args.epochs,
1174
+ batch_size=args.batch_size,
1175
+ learning_rate=args.learning_rate,
1176
+ gradient_accumulation_steps=args.gradient_accumulation
1177
+ )
1178
+
1179
+ # For "both" mode, reload the fine-tuned model for chat
1180
+ if args.mode == "both":
1181
+ logger.info("Reloading fine-tuned model for chat...")
1182
+ coach.load_model(fine_tuned=True)
1183
+
1184
+ # If only training mode, exit
1185
+ if args.mode == "train":
1186
+ logger.info("Training complete. Use --mode chat to start chatting.")
1187
+ return
1188
+
1189
+ # Chat mode
1190
+ elif args.mode == "chat":
1191
+ if not model_exists:
1192
+ logger.error("=" * 80)
1193
+ logger.error("ERROR: No fine-tuned model found!")
1194
+ logger.error("=" * 80)
1195
+ logger.error(f"Expected location: {coach.model_save_path}")
1196
+ logger.error("Please train the model first using:")
1197
+ logger.error(" python3 life_coach_v1.py --mode train")
1198
+ logger.error("=" * 80)
1199
+ return
1200
+
1201
+ # Load fine-tuned model
1202
+ logger.info(f"Loading fine-tuned model from {coach.model_save_path}")
1203
+ coach.load_model(fine_tuned=True)
1204
+
1205
+ # Start interactive chat
1206
+ coach.interactive_chat()
1207
+
1208
+
1209
+ if __name__ == "__main__":
1210
+ main()
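
For reference, a minimal sketch of driving the LifeCoachModel class above from Python rather than the CLI. It assumes a fine-tuned LoRA adapter has already been saved under ./data/life_coach_model (the local fallback path used by the class); the user prompt is purely illustrative.

from life_coach_v1 import LifeCoachModel

# Point the class at the local fallback path; adjust if /data is writable on your machine.
coach = LifeCoachModel(
    model_name="microsoft/Phi-4",
    model_save_path="./data/life_coach_model",
)
coach.load_tokenizer()
coach.load_model(fine_tuned=True)   # load the previously saved fine-tuned weights

# Each call formats the system prompt, the optional history, and the new user turn
# into the <|system|>/<|user|>/<|assistant|> template before generating.
history = []
reply = coach.generate_response(
    "I keep procrastinating on a project that matters to me.",  # illustrative prompt
    max_new_tokens=128,
    conversation_history=history,
)
print(reply)
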
models.py ADDED
@@ -0,0 +1,89 @@
1
+ """
2
+ Database models for Life Coach Web Application
3
+ """
4
+
5
+ from datetime import datetime
6
+ from flask_login import UserMixin
7
+ from flask_sqlalchemy import SQLAlchemy
8
+ from werkzeug.security import generate_password_hash, check_password_hash
9
+
10
+ # This will be set by app.py
11
+ db = SQLAlchemy()
12
+
13
+
14
+ class User(UserMixin, db.Model):
15
+ """User model for authentication and profile."""
16
+
17
+ __tablename__ = 'users'
18
+
19
+ id = db.Column(db.Integer, primary_key=True)
20
+ username = db.Column(db.String(80), unique=True, nullable=False, index=True)
21
+ email = db.Column(db.String(120), unique=True, nullable=False, index=True)
22
+ password_hash = db.Column(db.String(255), nullable=False)
23
+ created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
24
+
25
+ # Relationship with conversations
26
+ conversations = db.relationship('Conversation', backref='user', lazy=True, cascade='all, delete-orphan')
27
+
28
+ def set_password(self, password):
29
+ """Hash and set the user's password."""
30
+ self.password_hash = generate_password_hash(password)
31
+
32
+ def check_password(self, password):
33
+ """Verify the user's password."""
34
+ return check_password_hash(self.password_hash, password)
35
+
36
+ def __repr__(self):
37
+ return f'<User {self.username}>'
38
+
39
+
40
+ class Conversation(db.Model):
41
+ """Conversation model - represents a chat session."""
42
+
43
+ __tablename__ = 'conversations'
44
+
45
+ id = db.Column(db.Integer, primary_key=True)
46
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False, index=True)
47
+ title = db.Column(db.String(200), default='New Conversation')
48
+ created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
49
+ updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
50
+
51
+ # Relationship with messages
52
+ messages = db.relationship('Message', backref='conversation', lazy=True, cascade='all, delete-orphan', order_by='Message.timestamp')
53
+
54
+ def get_message_history(self):
55
+ """Get conversation history formatted for the model."""
56
+ return [
57
+ {
58
+ 'role': msg.role,
59
+ 'content': msg.content
60
+ }
61
+ for msg in self.messages
62
+ ]
63
+
64
+ def __repr__(self):
65
+ return f'<Conversation {self.id}: {self.title}>'
66
+
67
+
68
+ class Message(db.Model):
69
+ """Message model - individual messages in a conversation."""
70
+
71
+ __tablename__ = 'messages'
72
+
73
+ id = db.Column(db.Integer, primary_key=True)
74
+ conversation_id = db.Column(db.Integer, db.ForeignKey('conversations.id'), nullable=False, index=True)
75
+ role = db.Column(db.String(20), nullable=False) # 'user' or 'assistant'
76
+ content = db.Column(db.Text, nullable=False)
77
+ timestamp = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
78
+
79
+ def to_dict(self):
80
+ """Convert message to dictionary for JSON serialization."""
81
+ return {
82
+ 'id': self.id,
83
+ 'role': self.role,
84
+ 'content': self.content,
85
+ 'timestamp': self.timestamp.isoformat()
86
+ }
87
+
88
+ def __repr__(self):
89
+ return f'<Message {self.id}: {self.role}>'
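
A minimal sketch of exercising these models on their own; the throwaway Flask app and the in-memory SQLite URI below are illustrative placeholders, not the project's real configuration.

from flask import Flask
from models import db, User, Conversation, Message

app = Flask(__name__)                                           # throwaway app just for this sketch
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'    # placeholder URI for illustration
db.init_app(app)

with app.app_context():
    db.create_all()

    user = User(username='demo', email='demo@example.com')      # example credentials, not real ones
    user.set_password('change-me')                               # stored as a hash, never plaintext
    db.session.add(user)
    db.session.commit()

    conv = Conversation(user_id=user.id, title='First session')
    db.session.add(conv)
    db.session.commit()

    db.session.add(Message(conversation_id=conv.id, role='user', content='Hello, coach!'))
    db.session.commit()

    # get_message_history() returns the role/content dicts expected by the model wrapper
    print(conv.get_message_history())
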
requirements.txt ADDED
@@ -0,0 +1,13 @@
1
+ Flask
2
+ Flask-Login
3
+ Flask-SQLAlchemy
4
+ gTTS
5
+ transformers
6
+ torch
7
+ peft
8
+ datasets
9
+ accelerate
10
+ bitsandbytes
11
+ sentencepiece
12
+ Jinja2
13
+ gunicorn
static/css/backup/style-bootstrap.css ADDED
@@ -0,0 +1,292 @@
1
+ /* Life Coach - Bootstrap 5 Styles */
2
+
3
+ :root {
4
+ --primary-pink: #FF6B9D;
5
+ --primary-coral: #FFA07A;
6
+ --secondary-mint: #98D8C8;
7
+ --secondary-gold: #FFD700;
8
+ --accent-purple: #9C27B0;
9
+ --accent-orange: #FF5722;
10
+ --chat-bg: #F5F5F5;
11
+ --white: #FFFFFF;
12
+ --text-dark: #333333;
13
+ --text-light: #666666;
14
+ --shadow: rgba(0, 0, 0, 0.1);
15
+ }
16
+
17
+ /* Global */
18
+ html, body {
19
+ height: 100%;
20
+ margin: 0;
21
+ padding: 0;
22
+ overflow: hidden;
23
+ }
24
+
25
+ body {
26
+ background: linear-gradient(135deg, #E0F7FA 0%, #FFF9C4 100%);
27
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
28
+ }
29
+
30
+ /* Navbar */
31
+ .navbar {
32
+ background: linear-gradient(90deg, var(--primary-pink) 0%, var(--primary-coral) 100%) !important;
33
+ box-shadow: 0 2px 8px var(--shadow);
34
+ }
35
+
36
+ /* Chat Section */
37
+ .chat-section {
38
+ height: calc(100vh - 56px);
39
+ overflow: hidden;
40
+ }
41
+
42
+ .chat-section .container-fluid {
43
+ height: 100%;
44
+ }
45
+
46
+ .chat-section .row {
47
+ height: 100%;
48
+ }
49
+
50
+ /* Sidebar */
51
+ .chat-sidebar {
52
+ background: linear-gradient(180deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
53
+ color: var(--white);
54
+ display: flex;
55
+ flex-direction: column;
56
+ height: 100%;
57
+ overflow: hidden;
58
+ }
59
+
60
+ .sidebar-header {
61
+ padding: 1.5rem;
62
+ border-bottom: 1px solid rgba(255, 255, 255, 0.2);
63
+ flex-shrink: 0;
64
+ }
65
+
66
+ .sidebar-content {
67
+ flex: 1;
68
+ overflow-y: auto;
69
+ overflow-x: hidden;
70
+ padding: 0.5rem;
71
+ }
72
+
73
+ .conversation-item {
74
+ padding: 1rem;
75
+ margin-bottom: 0.5rem;
76
+ border-radius: 10px;
77
+ cursor: pointer;
78
+ transition: all 0.2s;
79
+ background: rgba(255, 255, 255, 0.1);
80
+ position: relative;
81
+ }
82
+
83
+ .conversation-item:hover {
84
+ background: rgba(255, 255, 255, 0.2);
85
+ transform: translateX(5px);
86
+ }
87
+
88
+ .conversation-item.active {
89
+ background: var(--white);
90
+ color: var(--primary-pink);
91
+ box-shadow: 0 4px 12px var(--shadow);
92
+ }
93
+
94
+ .conversation-title {
95
+ font-weight: bold;
96
+ margin-bottom: 0.25rem;
97
+ font-size: 0.95rem;
98
+ }
99
+
100
+ .conversation-meta {
101
+ font-size: 0.85rem;
102
+ opacity: 0.8;
103
+ }
104
+
105
+ .conversation-delete {
106
+ position: absolute;
107
+ top: 0.5rem;
108
+ right: 0.5rem;
109
+ background: rgba(255, 82, 82, 0.8);
110
+ color: white;
111
+ border: none;
112
+ border-radius: 50%;
113
+ width: 24px;
114
+ height: 24px;
115
+ display: none;
116
+ align-items: center;
117
+ justify-content: center;
118
+ cursor: pointer;
119
+ font-size: 0.75rem;
120
+ }
121
+
122
+ .conversation-item:hover .conversation-delete {
123
+ display: flex;
124
+ }
125
+
126
+ /* Chat Main */
127
+ .chat-main {
128
+ background: var(--chat-bg);
129
+ height: 100%;
130
+ overflow: hidden;
131
+ }
132
+
133
+ .chat-header {
134
+ background: linear-gradient(90deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
135
+ padding: 1.5rem;
136
+ border-bottom: 3px solid var(--accent-orange);
137
+ flex-shrink: 0;
138
+ display: flex;
139
+ justify-content: space-between;
140
+ align-items: center;
141
+ }
142
+
143
+ .chat-messages {
144
+ overflow-y: auto;
145
+ overflow-x: hidden;
146
+ padding: 2rem;
147
+ background: var(--chat-bg);
148
+ }
149
+
150
+ .chat-input-container {
151
+ background: var(--white);
152
+ padding: 1.5rem;
153
+ border-top: 2px solid var(--secondary-mint);
154
+ flex-shrink: 0;
155
+ }
156
+
157
+ /* Messages */
158
+ .message-bubble {
159
+ display: flex;
160
+ margin-bottom: 1.5rem;
161
+ animation: slideIn 0.3s ease-out;
162
+ }
163
+
164
+ @keyframes slideIn {
165
+ from {
166
+ opacity: 0;
167
+ transform: translateX(-20px);
168
+ }
169
+ to {
170
+ opacity: 1;
171
+ transform: translateX(0);
172
+ }
173
+ }
174
+
175
+ .message-bubble.user {
176
+ flex-direction: row-reverse;
177
+ }
178
+
179
+ .message-avatar {
180
+ width: 40px;
181
+ height: 40px;
182
+ border-radius: 50%;
183
+ display: flex;
184
+ align-items: center;
185
+ justify-content: center;
186
+ font-size: 1.2rem;
187
+ flex-shrink: 0;
188
+ }
189
+
190
+ .message-bubble.user .message-avatar {
191
+ background: linear-gradient(135deg, var(--accent-purple) 0%, var(--primary-pink) 100%);
192
+ color: var(--white);
193
+ margin-left: 1rem;
194
+ }
195
+
196
+ .message-bubble.assistant .message-avatar {
197
+ background: linear-gradient(135deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
198
+ color: var(--text-dark);
199
+ margin-right: 1rem;
200
+ }
201
+
202
+ .message-content {
203
+ max-width: 70%;
204
+ }
205
+
206
+ .message-text {
207
+ padding: 1rem 1.25rem;
208
+ border-radius: 18px;
209
+ word-wrap: break-word;
210
+ white-space: pre-wrap;
211
+ }
212
+
213
+ .message-bubble.user .message-text {
214
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
215
+ color: var(--white);
216
+ }
217
+
218
+ .message-bubble.assistant .message-text {
219
+ background: var(--white);
220
+ color: var(--text-dark);
221
+ box-shadow: 0 2px 8px var(--shadow);
222
+ }
223
+
224
+ .message-time {
225
+ font-size: 0.75rem;
226
+ color: var(--text-light);
227
+ margin-top: 0.25rem;
228
+ padding: 0 0.5rem;
229
+ }
230
+
231
+ .message-bubble.user .message-time {
232
+ text-align: right;
233
+ }
234
+
235
+ /* Input */
236
+ .chat-input {
237
+ border: 2px solid var(--secondary-mint);
238
+ border-radius: 25px;
239
+ padding: 0.75rem 1.5rem;
240
+ }
241
+
242
+ .chat-input:focus {
243
+ border-color: var(--primary-pink);
244
+ box-shadow: 0 0 0 0.2rem rgba(255, 107, 157, 0.25);
245
+ }
246
+
247
+ .send-button {
248
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--primary-coral) 100%);
249
+ border: none;
250
+ border-radius: 25px;
251
+ padding: 0.75rem 1.5rem;
252
+ color: white;
253
+ font-weight: bold;
254
+ white-space: nowrap;
255
+ }
256
+
257
+ .send-button:hover {
258
+ transform: translateY(-2px);
259
+ box-shadow: 0 4px 12px var(--shadow);
260
+ }
261
+
262
+ /* Scrollbar */
263
+ .chat-messages::-webkit-scrollbar,
264
+ .sidebar-content::-webkit-scrollbar {
265
+ width: 8px;
266
+ }
267
+
268
+ .chat-messages::-webkit-scrollbar-track,
269
+ .sidebar-content::-webkit-scrollbar-track {
270
+ background: rgba(0, 0, 0, 0.05);
271
+ }
272
+
273
+ .chat-messages::-webkit-scrollbar-thumb {
274
+ background: var(--primary-pink);
275
+ border-radius: 10px;
276
+ }
277
+
278
+ .sidebar-content::-webkit-scrollbar-thumb {
279
+ background: rgba(255, 255, 255, 0.3);
280
+ border-radius: 10px;
281
+ }
282
+
283
+ /* Responsive */
284
+ @media (max-width: 768px) {
285
+ .chat-sidebar {
286
+ display: none;
287
+ }
288
+
289
+ .message-content {
290
+ max-width: 85%;
291
+ }
292
+ }
static/css/backup/style-clean.css ADDED
@@ -0,0 +1,469 @@
1
+ /* Life Coach - Clean CSS (NO Framework) */
2
+
3
+ /* ===== RESET & BASICS ===== */
4
+ * {
5
+ margin: 0;
6
+ padding: 0;
7
+ box-sizing: border-box;
8
+ }
9
+
10
+ body {
11
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
12
+ background: linear-gradient(135deg, #E0F7FA 0%, #FFF9C4 100%);
13
+ min-height: 100vh;
14
+ color: #333;
15
+ }
16
+
17
+ /* ===== COLOR VARIABLES ===== */
18
+ :root {
19
+ --primary-pink: #FF6B9D;
20
+ --primary-coral: #FFA07A;
21
+ --secondary-mint: #98D8C8;
22
+ --secondary-gold: #FFD700;
23
+ --accent-purple: #9C27B0;
24
+ --accent-orange: #FF5722;
25
+ --bg-gradient-start: #E0F7FA;
26
+ --bg-gradient-end: #FFF9C4;
27
+ --chat-bg: #F5F5F5;
28
+ --white: #FFFFFF;
29
+ --text-dark: #333333;
30
+ --text-light: #666666;
31
+ --shadow: rgba(0, 0, 0, 0.1);
32
+ }
33
+
34
+ /* ===== NAVBAR ===== */
35
+ .navbar {
36
+ background: linear-gradient(90deg, var(--primary-pink) 0%, var(--primary-coral) 100%);
37
+ padding: 1rem 2rem;
38
+ display: flex;
39
+ justify-content: space-between;
40
+ align-items: center;
41
+ box-shadow: 0 2px 8px var(--shadow);
42
+ }
43
+
44
+ .navbar-brand {
45
+ color: var(--white);
46
+ font-size: 1.5rem;
47
+ font-weight: bold;
48
+ text-decoration: none;
49
+ }
50
+
51
+ .navbar-menu {
52
+ display: flex;
53
+ gap: 1rem;
54
+ align-items: center;
55
+ }
56
+
57
+ .navbar-item {
58
+ color: var(--white);
59
+ text-decoration: none;
60
+ padding: 0.5rem 1rem;
61
+ border-radius: 5px;
62
+ transition: background 0.2s;
63
+ }
64
+
65
+ .navbar-item:hover {
66
+ background: rgba(255, 255, 255, 0.2);
67
+ }
68
+
69
+ /* ===== BUTTONS ===== */
70
+ .button {
71
+ padding: 0.5rem 1rem;
72
+ border: none;
73
+ border-radius: 5px;
74
+ cursor: pointer;
75
+ font-size: 1rem;
76
+ transition: all 0.2s;
77
+ }
78
+
79
+ .button-primary {
80
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--primary-coral) 100%);
81
+ color: white;
82
+ }
83
+
84
+ .button-primary:hover {
85
+ transform: translateY(-2px);
86
+ box-shadow: 0 4px 12px var(--shadow);
87
+ }
88
+
89
+ /* ===== AUTH PAGES ===== */
90
+ .auth-container {
91
+ min-height: calc(100vh - 200px);
92
+ display: flex;
93
+ align-items: center;
94
+ justify-content: center;
95
+ padding: 2rem;
96
+ }
97
+
98
+ .auth-box {
99
+ background: var(--white);
100
+ padding: 2.5rem;
101
+ border-radius: 15px;
102
+ box-shadow: 0 8px 24px var(--shadow);
103
+ width: 100%;
104
+ max-width: 400px;
105
+ }
106
+
107
+ .auth-box h1 {
108
+ text-align: center;
109
+ margin-bottom: 2rem;
110
+ color: var(--primary-pink);
111
+ }
112
+
113
+ .form-group {
114
+ margin-bottom: 1.5rem;
115
+ }
116
+
117
+ .form-group label {
118
+ display: block;
119
+ margin-bottom: 0.5rem;
120
+ font-weight: 600;
121
+ color: var(--text-dark);
122
+ }
123
+
124
+ .form-group input {
125
+ width: 100%;
126
+ padding: 0.75rem;
127
+ border: 2px solid #ddd;
128
+ border-radius: 5px;
129
+ font-size: 1rem;
130
+ }
131
+
132
+ .form-group input:focus {
133
+ outline: none;
134
+ border-color: var(--primary-pink);
135
+ }
136
+
137
+ .button-full {
138
+ width: 100%;
139
+ padding: 0.75rem;
140
+ }
141
+
142
+ /* ===== CHAT LAYOUT ===== */
143
+ .chat-section {
144
+ height: calc(100vh - 60px);
145
+ padding: 0;
146
+ overflow: hidden;
147
+ }
148
+
149
+ .chat-container {
150
+ height: 100%;
151
+ display: flex;
152
+ background: var(--white);
153
+ box-shadow: 0 8px 32px var(--shadow);
154
+ }
155
+
156
+ /* ===== SIDEBAR ===== */
157
+ .sidebar {
158
+ width: 300px;
159
+ flex-shrink: 0;
160
+ background: linear-gradient(180deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
161
+ color: var(--white);
162
+ display: flex;
163
+ flex-direction: column;
164
+ }
165
+
166
+ .sidebar-header {
167
+ padding: 1.5rem;
168
+ border-bottom: 1px solid rgba(255, 255, 255, 0.2);
169
+ }
170
+
171
+ .sidebar-header h2 {
172
+ margin-bottom: 1rem;
173
+ font-size: 1.2rem;
174
+ }
175
+
176
+ .sidebar-content {
177
+ flex: 1;
178
+ overflow-y: auto;
179
+ padding: 0.5rem;
180
+ }
181
+
182
+ .conversation-item {
183
+ padding: 1rem;
184
+ margin-bottom: 0.5rem;
185
+ border-radius: 10px;
186
+ cursor: pointer;
187
+ transition: all 0.2s;
188
+ background: rgba(255, 255, 255, 0.1);
189
+ position: relative;
190
+ }
191
+
192
+ .conversation-item:hover {
193
+ background: rgba(255, 255, 255, 0.2);
194
+ transform: translateX(5px);
195
+ }
196
+
197
+ .conversation-item.is-active {
198
+ background: var(--white);
199
+ color: var(--primary-pink);
200
+ }
201
+
202
+ .conversation-title {
203
+ font-weight: bold;
204
+ margin-bottom: 0.25rem;
205
+ }
206
+
207
+ .conversation-meta {
208
+ font-size: 0.85rem;
209
+ opacity: 0.8;
210
+ }
211
+
212
+ .conversation-delete {
213
+ position: absolute;
214
+ top: 0.5rem;
215
+ right: 0.5rem;
216
+ background: rgba(255, 82, 82, 0.8);
217
+ color: white;
218
+ border: none;
219
+ border-radius: 50%;
220
+ width: 24px;
221
+ height: 24px;
222
+ display: none;
223
+ align-items: center;
224
+ justify-content: center;
225
+ cursor: pointer;
226
+ font-size: 0.75rem;
227
+ }
228
+
229
+ .conversation-item:hover .conversation-delete {
230
+ display: flex;
231
+ }
232
+
233
+ /* ===== CHAT MAIN AREA ===== */
234
+ .chat-main {
235
+ flex: 1;
236
+ display: flex;
237
+ flex-direction: column;
238
+ background: var(--chat-bg);
239
+ height: 100%;
240
+ overflow: hidden;
241
+ }
242
+
243
+ .chat-header {
244
+ flex-shrink: 0;
245
+ background: linear-gradient(90deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
246
+ padding: 1.5rem;
247
+ border-bottom: 3px solid var(--accent-orange);
248
+ display: flex;
249
+ justify-content: space-between;
250
+ align-items: center;
251
+ flex-wrap: wrap;
252
+ gap: 1rem;
253
+ }
254
+
255
+ .chat-header h1 {
256
+ margin: 0;
257
+ font-size: 1.5rem;
258
+ }
259
+
260
+ .chat-header p {
261
+ margin: 0.25rem 0 0 0;
262
+ font-size: 0.9rem;
263
+ color: var(--text-light);
264
+ }
265
+
266
+ .tts-toggle {
267
+ background: rgba(255, 255, 255, 0.7);
268
+ padding: 0.5rem 1rem;
269
+ border-radius: 20px;
270
+ cursor: pointer;
271
+ display: flex;
272
+ align-items: center;
273
+ gap: 0.5rem;
274
+ }
275
+
276
+ .tts-toggle:hover {
277
+ background: rgba(255, 255, 255, 0.9);
278
+ }
279
+
280
+ .tts-toggle input[type="checkbox"]:checked ~ .fa-volume-up {
281
+ color: var(--primary-pink);
282
+ }
283
+
284
+ /* ===== MESSAGES AREA ===== */
285
+ .chat-messages {
286
+ flex: 1;
287
+ overflow-y: auto;
288
+ overflow-x: hidden;
289
+ padding: 2rem;
290
+ min-height: 0;
291
+ }
292
+
293
+ .message-bubble {
294
+ display: flex;
295
+ margin-bottom: 1.5rem;
296
+ animation: slideIn 0.3s ease-out;
297
+ }
298
+
299
+ @keyframes slideIn {
300
+ from { opacity: 0; transform: translateX(-20px); }
301
+ to { opacity: 1; transform: translateX(0); }
302
+ }
303
+
304
+ .message-bubble.user {
305
+ flex-direction: row-reverse;
306
+ }
307
+
308
+ .message-avatar {
309
+ width: 40px;
310
+ height: 40px;
311
+ border-radius: 50%;
312
+ display: flex;
313
+ align-items: center;
314
+ justify-content: center;
315
+ font-size: 1.2rem;
316
+ flex-shrink: 0;
317
+ }
318
+
319
+ .message-bubble.user .message-avatar {
320
+ background: linear-gradient(135deg, var(--accent-purple) 0%, var(--primary-pink) 100%);
321
+ color: var(--white);
322
+ margin-left: 1rem;
323
+ }
324
+
325
+ .message-bubble.assistant .message-avatar {
326
+ background: linear-gradient(135deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
327
+ color: var(--text-dark);
328
+ margin-right: 1rem;
329
+ }
330
+
331
+ .message-content {
332
+ max-width: 70%;
333
+ }
334
+
335
+ .message-text {
336
+ padding: 1rem 1.25rem;
337
+ border-radius: 18px;
338
+ word-wrap: break-word;
339
+ white-space: pre-wrap;
340
+ }
341
+
342
+ .message-bubble.user .message-text {
343
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
344
+ color: var(--white);
345
+ }
346
+
347
+ .message-bubble.assistant .message-text {
348
+ background: var(--white);
349
+ color: var(--text-dark);
350
+ box-shadow: 0 2px 8px var(--shadow);
351
+ }
352
+
353
+ .message-time {
354
+ font-size: 0.75rem;
355
+ color: var(--text-light);
356
+ margin-top: 0.25rem;
357
+ padding: 0 0.5rem;
358
+ }
359
+
360
+ .message-bubble.user .message-time {
361
+ text-align: right;
362
+ }
363
+
364
+ /* ===== INPUT AREA ===== */
365
+ .chat-input-container {
366
+ flex-shrink: 0;
367
+ background: var(--white);
368
+ padding: 1.5rem;
369
+ border-top: 2px solid var(--secondary-mint);
370
+ }
371
+
372
+ .chat-form {
373
+ display: flex;
374
+ gap: 0.5rem;
375
+ }
376
+
377
+ .chat-input {
378
+ flex: 1;
379
+ padding: 0.75rem 1.5rem;
380
+ border: 2px solid var(--secondary-mint);
381
+ border-radius: 25px;
382
+ font-size: 1rem;
383
+ }
384
+
385
+ .chat-input:focus {
386
+ outline: none;
387
+ border-color: var(--primary-pink);
388
+ }
389
+
390
+ .send-button {
391
+ padding: 0.75rem 1.5rem;
392
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--primary-coral) 100%);
393
+ color: white;
394
+ border: none;
395
+ border-radius: 25px;
396
+ cursor: pointer;
397
+ font-weight: bold;
398
+ display: flex;
399
+ align-items: center;
400
+ gap: 0.5rem;
401
+ }
402
+
403
+ .send-button:hover {
404
+ transform: translateY(-2px);
405
+ box-shadow: 0 4px 12px var(--shadow);
406
+ }
407
+
408
+ /* ===== LOADING ===== */
409
+ .loading-indicator {
410
+ text-align: center;
411
+ margin-top: 1rem;
412
+ display: none;
413
+ }
414
+
415
+ .loader-wrapper {
416
+ display: inline-flex;
417
+ align-items: center;
418
+ gap: 1rem;
419
+ padding: 0.75rem 1.5rem;
420
+ background: var(--white);
421
+ border-radius: 25px;
422
+ box-shadow: 0 4px 12px var(--shadow);
423
+ }
424
+
425
+ .loader {
426
+ border: 3px solid rgba(255, 107, 157, 0.2);
427
+ border-top: 3px solid var(--primary-pink);
428
+ border-radius: 50%;
429
+ width: 24px;
430
+ height: 24px;
431
+ animation: spin 1s linear infinite;
432
+ }
433
+
434
+ @keyframes spin {
435
+ 0% { transform: rotate(0deg); }
436
+ 100% { transform: rotate(360deg); }
437
+ }
438
+
439
+ /* ===== SCROLLBAR ===== */
440
+ .chat-messages::-webkit-scrollbar,
441
+ .sidebar-content::-webkit-scrollbar {
442
+ width: 8px;
443
+ }
444
+
445
+ .chat-messages::-webkit-scrollbar-track,
446
+ .sidebar-content::-webkit-scrollbar-track {
447
+ background: rgba(0, 0, 0, 0.05);
448
+ }
449
+
450
+ .chat-messages::-webkit-scrollbar-thumb {
451
+ background: var(--primary-pink);
452
+ border-radius: 10px;
453
+ }
454
+
455
+ .sidebar-content::-webkit-scrollbar-thumb {
456
+ background: rgba(255, 255, 255, 0.3);
457
+ border-radius: 10px;
458
+ }
459
+
460
+ /* ===== RESPONSIVE ===== */
461
+ @media screen and (max-width: 768px) {
462
+ .sidebar {
463
+ display: none;
464
+ }
465
+
466
+ .message-content {
467
+ max-width: 85%;
468
+ }
469
+ }
static/css/style.css ADDED
@@ -0,0 +1,433 @@
1
+ /* Life Coach - Bright & Playful CSS Styles */
2
+
3
+ :root {
4
+ --primary-pink: #FF6B9D;
5
+ --primary-coral: #FFA07A;
6
+ --secondary-mint: #98D8C8;
7
+ --secondary-gold: #FFD700;
8
+ --accent-purple: #9C27B0;
9
+ --accent-orange: #FF5722;
10
+ --bg-gradient-start: #E0F7FA;
11
+ --bg-gradient-end: #FFF9C4;
12
+ --chat-bg: #F5F5F5;
13
+ --white: #FFFFFF;
14
+ --text-dark: #333333;
15
+ --text-light: #666666;
16
+ --shadow: rgba(0, 0, 0, 0.1);
17
+ }
18
+
19
+ /* Global */
20
+ body {
21
+ background: linear-gradient(135deg, var(--bg-gradient-start) 0%, var(--bg-gradient-end) 100%);
22
+ min-height: 100vh;
23
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
24
+ margin: 0;
25
+ padding: 0;
26
+ }
27
+
28
+ /* Navbar */
29
+ .navbar.is-primary {
30
+ background: linear-gradient(90deg, var(--primary-pink) 0%, var(--primary-coral) 100%) !important;
31
+ }
32
+
33
+ .button.is-primary {
34
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--primary-coral) 100%);
35
+ border: none;
36
+ transition: transform 0.2s, box-shadow 0.2s;
37
+ }
38
+
39
+ .button.is-primary:hover {
40
+ background: linear-gradient(135deg, var(--primary-coral) 0%, var(--primary-pink) 100%);
41
+ transform: translateY(-2px);
42
+ box-shadow: 0 4px 12px var(--shadow);
43
+ }
44
+
45
+ .button.is-link {
46
+ background: linear-gradient(135deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
47
+ border: none;
48
+ color: var(--text-dark);
49
+ font-weight: bold;
50
+ }
51
+
52
+ .button.is-link:hover {
53
+ background: linear-gradient(135deg, var(--secondary-gold) 0%, var(--secondary-mint) 100%);
54
+ transform: translateY(-2px);
55
+ box-shadow: 0 4px 12px var(--shadow);
56
+ }
57
+
58
+ /* Auth Pages */
59
+ .auth-box {
60
+ border-radius: 15px;
61
+ box-shadow: 0 8px 24px var(--shadow);
62
+ background: var(--white);
63
+ padding: 2.5rem;
64
+ }
65
+
66
+ .auth-box .input:focus {
67
+ border-color: var(--primary-pink);
68
+ box-shadow: 0 0 0 0.125em rgba(255, 107, 157, 0.25);
69
+ }
70
+
71
+ /* === FIX LAYOUT: Full Height Chat === */
72
+ .chat-section {
73
+ padding: 0 !important;
74
+ margin: 0 !important;
75
+ height: calc(100vh - 52px);
76
+ }
77
+
78
+ .chat-container {
79
+ height: 100%;
80
+ background: var(--white);
81
+ border-radius: 10px;
82
+ overflow: hidden;
83
+ box-shadow: 0 8px 32px var(--shadow);
84
+ padding: 0 !important;
85
+ margin: 0 !important;
86
+ }
87
+
88
+ .chat-layout {
89
+ display: flex;
90
+ width: 100%;
91
+ height: 100%;
92
+ }
93
+
94
+ .chat-layout .sidebar {
95
+ width: 300px;
96
+ flex-shrink: 0;
97
+ background: linear-gradient(180deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
98
+ color: var(--white);
99
+ display: flex;
100
+ flex-direction: column;
101
+ border-right: 1px solid rgba(0, 0, 0, 0.1);
102
+ }
103
+
104
+ .chat-layout .chat-main {
105
+ flex: 1;
106
+ display: flex;
107
+ flex-direction: column;
108
+ background: var(--chat-bg);
109
+ min-width: 0;
110
+ }
111
+
112
+ /* Sidebar */
113
+ .sidebar-header {
114
+ padding: 1.5rem;
115
+ border-bottom: 1px solid rgba(255, 255, 255, 0.2);
116
+ }
117
+
118
+ .sidebar-header .title {
119
+ color: var(--white);
120
+ margin-bottom: 1rem;
121
+ }
122
+
123
+ .sidebar-content {
124
+ flex: 1;
125
+ overflow-y: auto;
126
+ padding: 0.5rem;
127
+ }
128
+
129
+ .conversation-item {
130
+ padding: 1rem;
131
+ margin-bottom: 0.5rem;
132
+ border-radius: 10px;
133
+ cursor: pointer;
134
+ transition: all 0.2s;
135
+ background: rgba(255, 255, 255, 0.1);
136
+ position: relative;
137
+ display: flex;
138
+ flex-direction: column;
139
+ }
140
+
141
+ .conversation-item:hover {
142
+ background: rgba(255, 255, 255, 0.2);
143
+ transform: translateX(5px);
144
+ }
145
+
146
+ .conversation-item.is-active {
147
+ background: var(--white);
148
+ color: var(--primary-pink);
149
+ box-shadow: 0 4px 12px var(--shadow);
150
+ }
151
+
152
+ .conversation-title {
153
+ font-weight: bold;
154
+ margin-bottom: 0.25rem;
155
+ }
156
+
157
+ .conversation-meta {
158
+ font-size: 0.85rem;
159
+ opacity: 0.8;
160
+ }
161
+
162
+ .conversation-delete {
163
+ position: absolute;
164
+ top: 0.5rem;
165
+ right: 0.5rem;
166
+ background: rgba(255, 82, 82, 0.8);
167
+ color: white;
168
+ border: none;
169
+ border-radius: 50%;
170
+ width: 24px;
171
+ height: 24px;
172
+ display: none;
173
+ align-items: center;
174
+ justify-content: center;
175
+ cursor: pointer;
176
+ font-size: 0.75rem;
177
+ }
178
+
179
+ .conversation-item:hover .conversation-delete {
180
+ display: flex;
181
+ }
182
+
183
+ .conversation-delete:hover {
184
+ background: rgba(255, 20, 20, 1);
185
+ transform: scale(1.1);
186
+ }
187
+
188
+ /* Chat Header */
189
+ .chat-header {
190
+ background: linear-gradient(90deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
191
+ padding: 1.5rem;
192
+ border-bottom: 3px solid var(--accent-orange);
193
+ box-shadow: 0 2px 8px var(--shadow);
194
+ flex-shrink: 0;
195
+ min-height: 80px;
196
+ }
197
+
198
+ .chat-header > div {
199
+ display: flex;
200
+ justify-content: space-between;
201
+ align-items: center;
202
+ flex-wrap: wrap;
203
+ gap: 1rem;
204
+ }
205
+
206
+ .chat-header .checkbox {
207
+ background: rgba(255, 255, 255, 0.7);
208
+ padding: 0.5rem 1rem;
209
+ border-radius: 20px;
210
+ transition: all 0.2s;
211
+ cursor: pointer;
212
+ white-space: nowrap;
213
+ }
214
+
215
+ .chat-header .checkbox:hover {
216
+ background: rgba(255, 255, 255, 0.9);
217
+ transform: scale(1.05);
218
+ }
219
+
220
+ /* Messages Area */
221
+ .chat-messages {
222
+ flex: 1;
223
+ overflow-y: auto;
224
+ overflow-x: hidden;
225
+ padding: 2rem;
226
+ scroll-behavior: smooth;
227
+ min-height: 0;
228
+ display: flex;
229
+ flex-direction: column;
230
+ justify-content: center;
231
+ align-items: center;
232
+ }
233
+
234
+ /* Disable centering when there are messages */
235
+ .chat-messages:has(.message-bubble),
236
+ .chat-messages.has-messages {
237
+ justify-content: flex-start !important;
238
+ align-items: flex-start !important;
239
+ }
240
+
241
+ .welcome-message {
242
+ text-align: center;
243
+ padding: 3rem 2rem;
244
+ animation: fadeIn 0.5s ease-in;
245
+ width: 100%;
246
+ max-width: 600px;
247
+ }
248
+
249
+ @keyframes fadeIn {
250
+ from { opacity: 0; transform: translateY(20px); }
251
+ to { opacity: 1; transform: translateY(0); }
252
+ }
253
+
254
+ /* Message Bubbles */
255
+ .message-bubble {
256
+ display: flex;
257
+ margin-bottom: 1.5rem;
258
+ animation: slideIn 0.3s ease-out;
259
+ width: 100%;
260
+ max-width: 100%;
261
+ }
262
+
263
+ @keyframes slideIn {
264
+ from { opacity: 0; transform: translateX(-20px); }
265
+ to { opacity: 1; transform: translateX(0); }
266
+ }
267
+
268
+ .message-bubble.user {
269
+ flex-direction: row-reverse;
270
+ }
271
+
272
+ .message-avatar {
273
+ width: 40px;
274
+ height: 40px;
275
+ border-radius: 50%;
276
+ display: flex;
277
+ align-items: center;
278
+ justify-content: center;
279
+ font-size: 1.2rem;
280
+ flex-shrink: 0;
281
+ }
282
+
283
+ .message-bubble.user .message-avatar {
284
+ background: linear-gradient(135deg, var(--accent-purple) 0%, var(--primary-pink) 100%);
285
+ color: var(--white);
286
+ margin-left: 1rem;
287
+ }
288
+
289
+ .message-bubble.assistant .message-avatar {
290
+ background: linear-gradient(135deg, var(--secondary-mint) 0%, var(--secondary-gold) 100%);
291
+ color: var(--text-dark);
292
+ margin-right: 1rem;
293
+ }
294
+
295
+ .message-content {
296
+ max-width: 70%;
297
+ width: fit-content;
298
+ }
299
+
300
+ .message-text {
301
+ padding: 1rem 1.25rem;
302
+ border-radius: 18px;
303
+ word-wrap: break-word;
304
+ white-space: pre-wrap;
305
+ }
306
+
307
+ .message-bubble.user .message-text {
308
+ background: linear-gradient(135deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
309
+ color: var(--white);
310
+ border-bottom-right-radius: 4px;
311
+ }
312
+
313
+ .message-bubble.assistant .message-text {
314
+ background: var(--white);
315
+ color: var(--text-dark);
316
+ border-bottom-left-radius: 4px;
317
+ box-shadow: 0 2px 8px var(--shadow);
318
+ }
319
+
320
+ .message-time {
321
+ font-size: 0.75rem;
322
+ color: var(--text-light);
323
+ margin-top: 0.25rem;
324
+ padding: 0 0.5rem;
325
+ }
326
+
327
+ .message-bubble.user .message-time {
328
+ text-align: right;
329
+ }
330
+
331
+ /* Input */
332
+ .chat-input-container {
333
+ background: var(--white);
334
+ padding: 1.5rem;
335
+ border-top: 2px solid var(--secondary-mint);
336
+ box-shadow: 0 -2px 8px var(--shadow);
337
+ flex-shrink: 0;
338
+ min-height: 100px;
339
+ }
340
+
341
+ .chat-input-container .input {
342
+ border-radius: 25px;
343
+ border: 2px solid var(--secondary-mint);
344
+ padding-left: 1.5rem;
345
+ }
346
+
347
+ .chat-input-container .input:focus {
348
+ border-color: var(--primary-pink);
349
+ box-shadow: 0 0 0 0.125em rgba(255, 107, 157, 0.25);
350
+ }
351
+
352
+ .chat-input-container .button {
353
+ border-radius: 25px;
354
+ font-weight: bold;
355
+ }
356
+
357
+ /* Loading */
358
+ .loading-indicator {
359
+ text-align: center;
360
+ margin-top: 1rem;
361
+ }
362
+
363
+ .loader-wrapper {
364
+ display: inline-flex;
365
+ align-items: center;
366
+ gap: 1rem;
367
+ padding: 0.75rem 1.5rem;
368
+ background: var(--white);
369
+ border-radius: 25px;
370
+ box-shadow: 0 4px 12px var(--shadow);
371
+ }
372
+
373
+ .loader {
374
+ border: 3px solid rgba(255, 107, 157, 0.2);
375
+ border-top: 3px solid var(--primary-pink);
376
+ border-radius: 50%;
377
+ width: 24px;
378
+ height: 24px;
379
+ animation: spin 1s linear infinite;
380
+ }
381
+
382
+ @keyframes spin {
383
+ to { transform: rotate(360deg); }
384
+ }
385
+
386
+ /* Scrollbar */
387
+ .chat-messages::-webkit-scrollbar,
388
+ .sidebar-content::-webkit-scrollbar {
389
+ width: 8px;
390
+ }
391
+
392
+ .chat-messages::-webkit-scrollbar-track,
393
+ .sidebar-content::-webkit-scrollbar-track {
394
+ background: rgba(0, 0, 0, 0.05);
395
+ }
396
+
397
+ .chat-messages::-webkit-scrollbar-thumb {
398
+ background: var(--primary-pink);
399
+ border-radius: 10px;
400
+ }
401
+
402
+ .sidebar-content::-webkit-scrollbar-thumb {
403
+ background: rgba(255, 255, 255, 0.3);
404
+ border-radius: 10px;
405
+ }
406
+
407
+ /* Mobile */
408
+ @media (max-width: 768px) {
409
+ .chat-layout .sidebar {
410
+ display: none;
411
+ }
412
+ .chat-header {
413
+ padding: 1rem;
414
+ }
415
+ .chat-header > div {
416
+ flex-direction: column;
417
+ gap: 0.5rem;
418
+ }
419
+ .message-content {
420
+ max-width: 85%;
421
+ }
422
+ }
423
+
424
+ /* Footer */
425
+ .footer {
426
+ background: linear-gradient(90deg, var(--primary-pink) 0%, var(--accent-purple) 100%);
427
+ color: var(--white);
428
+ padding: 2rem;
429
+ }
430
+
431
+ .footer strong {
432
+ color: var(--white);
433
+ }
static/js/chat.js ADDED
@@ -0,0 +1,560 @@
1
+ /**
2
+ * Life Coach Chat Interface JavaScript
3
+ * Handles AJAX communication with the Flask backend
4
+ */
5
+
6
+ // Global state
7
+ let currentConversationId = null;
8
+ let ttsEnabled = false;
9
+ let currentAudio = null;
10
+
11
+ // Initialize when DOM is loaded
12
+ document.addEventListener('DOMContentLoaded', () => {
13
+ // Get current conversation ID from hidden input
14
+ const conversationIdInput = document.getElementById('currentConversationId');
15
+ if (conversationIdInput) {
16
+ currentConversationId = parseInt(conversationIdInput.value);
17
+ }
18
+
19
+ // Setup event listeners
20
+ setupEventListeners();
21
+
22
+ // Load current conversation messages
23
+ if (currentConversationId) {
24
+ loadConversation(currentConversationId);
25
+ }
26
+
27
+ // Load TTS preference from localStorage
28
+ const ttsToggle = document.getElementById('ttsToggle');
29
+ if (ttsToggle) {
30
+ const savedTtsPreference = localStorage.getItem('ttsEnabled');
31
+ if (savedTtsPreference === 'true') {
32
+ ttsToggle.checked = true;
33
+ ttsEnabled = true;
34
+ }
35
+ }
36
+ });
37
+
38
+
39
+ /**
40
+ * Setup all event listeners
41
+ */
42
+ function setupEventListeners() {
43
+ // Chat form submission
44
+ const chatForm = document.getElementById('chatForm');
45
+ if (chatForm) {
46
+ chatForm.addEventListener('submit', handleSendMessage);
47
+ }
48
+
49
+ // New conversation button
50
+ const newConvBtn = document.getElementById('newConversationBtn');
51
+ if (newConvBtn) {
52
+ newConvBtn.addEventListener('click', handleNewConversation);
53
+ }
54
+
55
+ // Conversation item clicks
56
+ const conversationItems = document.querySelectorAll('.conversation-item');
57
+ conversationItems.forEach(item => {
58
+ item.addEventListener('click', (e) => {
59
+ // Don't switch conversation if clicking delete button
60
+ if (e.target.closest('.conversation-delete')) {
61
+ return;
62
+ }
63
+ const convId = parseInt(item.dataset.conversationId);
64
+ switchConversation(convId);
65
+ });
66
+ });
67
+
68
+ // Delete conversation buttons
69
+ const deleteButtons = document.querySelectorAll('.conversation-delete');
70
+ deleteButtons.forEach(btn => {
71
+ btn.addEventListener('click', (e) => {
72
+ e.stopPropagation(); // Prevent triggering conversation switch
73
+ const convId = parseInt(btn.dataset.conversationId);
74
+ handleDeleteConversation(convId);
75
+ });
76
+ });
77
+
78
+ // Enter key in input (already handled by form submit, but for custom behavior)
79
+ const messageInput = document.getElementById('messageInput');
80
+ if (messageInput) {
81
+ messageInput.addEventListener('keydown', (e) => {
82
+ if (e.key === 'Enter' && !e.shiftKey) {
83
+ e.preventDefault();
84
+ chatForm.dispatchEvent(new Event('submit'));
85
+ }
86
+ });
87
+ }
88
+
89
+ // TTS toggle
90
+ const ttsToggle = document.getElementById('ttsToggle');
91
+ if (ttsToggle) {
92
+ ttsToggle.addEventListener('change', (e) => {
93
+ ttsEnabled = e.target.checked;
94
+ // Save preference to localStorage
95
+ localStorage.setItem('ttsEnabled', ttsEnabled);
96
+
97
+ // Stop any currently playing audio when disabling
98
+ if (!ttsEnabled && currentAudio) {
99
+ currentAudio.pause();
100
+ currentAudio = null;
101
+ }
102
+ });
103
+ }
104
+ }
105
+
106
+
107
+ /**
108
+ * Handle sending a message
109
+ */
110
+ async function handleSendMessage(e) {
111
+ e.preventDefault();
112
+
113
+ const messageInput = document.getElementById('messageInput');
114
+ const sendBtn = document.getElementById('sendBtn');
115
+ const loadingIndicator = document.getElementById('loadingIndicator');
116
+
117
+ const message = messageInput.value.trim();
118
+ if (!message) return;
119
+
120
+ // Disable input and show loading
121
+ messageInput.disabled = true;
122
+ sendBtn.disabled = true;
123
+ loadingIndicator.style.display = 'block';
124
+
125
+ // Clear input immediately for better UX
126
+ messageInput.value = '';
127
+
128
+ // Add user message to UI immediately
129
+ addMessageToUI({
130
+ role: 'user',
131
+ content: message,
132
+ timestamp: new Date().toISOString()
133
+ });
134
+
135
+ try {
136
+ // Send message to backend
137
+ const response = await fetch('/chat/api/send', {
138
+ method: 'POST',
139
+ headers: {
140
+ 'Content-Type': 'application/json'
141
+ },
142
+ body: JSON.stringify({
143
+ message: message,
144
+ conversation_id: currentConversationId
145
+ })
146
+ });
147
+
148
+ const data = await response.json();
149
+
150
+ if (data.success) {
151
+ // Update current conversation ID if it was a new conversation
152
+ if (!currentConversationId) {
153
+ currentConversationId = data.conversation_id;
154
+ document.getElementById('currentConversationId').value = currentConversationId;
155
+ }
156
+
157
+ // Add assistant message to UI
158
+ addMessageToUI(data.assistant_message);
159
+
160
+ // Generate and play TTS if enabled
161
+ if (ttsEnabled) {
162
+ playTextToSpeech(data.assistant_message.content);
163
+ }
164
+
165
+ // Update conversation list if needed
166
+ refreshConversationList();
167
+
168
+ } else {
169
+ showError(data.error || 'Failed to send message');
170
+ // Remove the user message we added optimistically
171
+ const lastMessage = document.querySelector('.message-bubble:last-child');
172
+ if (lastMessage && lastMessage.classList.contains('user')) {
173
+ lastMessage.remove();
174
+ }
175
+ }
176
+
177
+ } catch (error) {
178
+ console.error('Error sending message:', error);
179
+ showError('Network error. Please try again.');
180
+ // Remove the user message we added optimistically
181
+ const lastMessage = document.querySelector('.message-bubble:last-child');
182
+ if (lastMessage && lastMessage.classList.contains('user')) {
183
+ lastMessage.remove();
184
+ }
185
+ } finally {
186
+ // Re-enable input and hide loading
187
+ messageInput.disabled = false;
188
+ sendBtn.disabled = false;
189
+ loadingIndicator.style.display = 'none';
190
+ messageInput.focus();
191
+ }
192
+ }
193
+
194
+
195
+ /**
196
+ * Add a message to the UI
197
+ */
198
+ function addMessageToUI(message) {
199
+ const chatMessages = document.getElementById('chatMessages');
200
+
201
+ // Remove welcome message if it exists
202
+ const welcomeMessage = chatMessages.querySelector('.welcome-message');
203
+ if (welcomeMessage) {
204
+ welcomeMessage.remove();
205
+ }
206
+
207
+ // Create message bubble
208
+ const messageBubble = document.createElement('div');
209
+ messageBubble.className = `message-bubble ${message.role}`;
210
+
211
+ // Format timestamp
212
+ const timestamp = new Date(message.timestamp);
213
+ const timeString = timestamp.toLocaleTimeString('en-US', {
214
+ hour: 'numeric',
215
+ minute: '2-digit',
216
+ hour12: true
217
+ });
218
+
219
+ // Create message HTML
220
+ messageBubble.innerHTML = `
221
+ <div class="message-avatar">
222
+ ${message.role === 'user'
223
+ ? '<i class="fas fa-user"></i>'
224
+ : '<i class="fas fa-robot"></i>'}
225
+ </div>
226
+ <div class="message-content">
227
+ <div class="message-text">${escapeHtml(message.content)}</div>
228
+ <div class="message-time">${timeString}</div>
229
+ </div>
230
+ `;
231
+
232
+ chatMessages.appendChild(messageBubble);
233
+
234
+ // Scroll to bottom
235
+ scrollToBottom();
236
+ }
237
+
238
+
239
+ /**
240
+ * Load a conversation and its messages
241
+ */
242
+ async function loadConversation(conversationId) {
243
+ try {
244
+ const response = await fetch(`/chat/api/conversation/${conversationId}`);
245
+ const data = await response.json();
246
+
247
+ if (data.success) {
248
+ const conversation = data.conversation;
249
+
250
+ // Update UI
251
+ document.getElementById('chatTitle').textContent = conversation.title;
252
+ currentConversationId = conversation.id;
253
+ document.getElementById('currentConversationId').value = currentConversationId;
254
+
255
+ // Clear and reload messages
256
+ const chatMessages = document.getElementById('chatMessages');
257
+ chatMessages.innerHTML = '';
258
+
259
+ if (conversation.messages.length === 0) {
260
+ // Show welcome message
261
+ chatMessages.innerHTML = `
262
+ <div class="welcome-message">
263
+ <div class="has-text-centered">
264
+ <i class="fas fa-heart fa-4x has-text-primary mb-4"></i>
265
+ <h2 class="title is-3">Welcome to Life Coach!</h2>
266
+ <p class="subtitle is-5">I'm here to help you with personal growth, goal setting, and motivation.</p>
267
+ <p class="has-text-grey">Start by telling me what's on your mind or what you'd like to work on today.</p>
268
+ </div>
269
+ </div>
270
+ `;
271
+ } else {
272
+ // Add all messages
273
+ conversation.messages.forEach(msg => addMessageToUI(msg));
274
+ // Scroll to bottom after loading all messages
275
+ scrollToBottom();
276
+ }
277
+
278
+ } else {
279
+ showError('Failed to load conversation');
280
+ }
281
+
282
+ } catch (error) {
283
+ console.error('Error loading conversation:', error);
284
+ showError('Failed to load conversation');
285
+ }
286
+ }
287
+
288
+
289
+ /**
290
+ * Switch to a different conversation
291
+ */
292
+ function switchConversation(conversationId) {
293
+ // Update active state in sidebar
294
+ document.querySelectorAll('.conversation-item').forEach(item => {
295
+ item.classList.remove('is-active');
296
+ if (parseInt(item.dataset.conversationId) === conversationId) {
297
+ item.classList.add('is-active');
298
+ }
299
+ });
300
+
301
+ // Load the conversation
302
+ loadConversation(conversationId);
303
+ }
304
+
305
+
306
+ /**
307
+ * Create a new conversation
308
+ */
309
+ async function handleNewConversation() {
310
+ try {
311
+ const response = await fetch('/chat/api/conversation/new', {
312
+ method: 'POST',
313
+ headers: {
314
+ 'Content-Type': 'application/json'
315
+ }
316
+ });
317
+
318
+ const data = await response.json();
319
+
320
+ if (data.success) {
321
+ // Refresh conversation list
322
+ await refreshConversationList();
323
+
324
+ // Switch to new conversation
325
+ switchConversation(data.conversation.id);
326
+
327
+ } else {
328
+ showError('Failed to create new conversation');
329
+ }
330
+
331
+ } catch (error) {
332
+ console.error('Error creating conversation:', error);
333
+ showError('Failed to create new conversation');
334
+ }
335
+ }
336
+
337
+
338
+ /**
339
+ * Refresh the conversation list
340
+ */
341
+ async function refreshConversationList() {
342
+ try {
343
+ const response = await fetch('/chat/api/conversations');
344
+ const data = await response.json();
345
+
346
+ if (data.success) {
347
+ const conversationList = document.getElementById('conversationList');
348
+ conversationList.innerHTML = '';
349
+
350
+ data.conversations.forEach(conv => {
351
+ const item = document.createElement('div');
352
+ item.className = 'conversation-item';
353
+ if (conv.id === currentConversationId) {
354
+ item.classList.add('is-active');
355
+ }
356
+ item.dataset.conversationId = conv.id;
357
+
358
+ item.innerHTML = `
359
+ <button class="conversation-delete" data-conversation-id="${conv.id}" title="Delete conversation">
360
+ <i class="fas fa-times"></i>
361
+ </button>
362
+ <div class="conversation-title">
363
+ <i class="fas fa-message mr-2"></i>
364
+ ${escapeHtml(conv.title)}
365
+ </div>
366
+ <div class="conversation-meta">
367
+ ${conv.message_count} messages
368
+ </div>
369
+ `;
370
+
371
+ // Add click handler for conversation item
372
+ item.addEventListener('click', (e) => {
373
+ // Don't switch conversation if clicking delete button
374
+ if (e.target.closest('.conversation-delete')) {
375
+ return;
376
+ }
377
+ switchConversation(conv.id);
378
+ });
379
+
380
+ // Add click handler for delete button
381
+ const deleteBtn = item.querySelector('.conversation-delete');
382
+ deleteBtn.addEventListener('click', (e) => {
383
+ e.stopPropagation();
384
+ handleDeleteConversation(conv.id);
385
+ });
386
+
387
+ conversationList.appendChild(item);
388
+ });
389
+ }
390
+
391
+ } catch (error) {
392
+ console.error('Error refreshing conversation list:', error);
393
+ }
394
+ }
395
+
396
+
397
+ /**
398
+ * Delete a conversation
399
+ */
400
+ async function handleDeleteConversation(conversationId) {
401
+ // Confirm deletion
402
+ if (!confirm('Are you sure you want to delete this conversation? This cannot be undone.')) {
403
+ return;
404
+ }
405
+
406
+ try {
407
+ const response = await fetch(`/chat/api/conversation/${conversationId}/delete`, {
408
+ method: 'POST',
409
+ headers: {
410
+ 'Content-Type': 'application/json'
411
+ }
412
+ });
413
+
414
+ const data = await response.json();
415
+
416
+ if (data.success) {
417
+ // If we deleted the current conversation, switch to a different one
418
+ if (conversationId === currentConversationId) {
419
+ // Refresh conversation list first
420
+ await refreshConversationList();
421
+
422
+ // Get the first conversation or create a new one
423
+ const conversationList = document.getElementById('conversationList');
424
+ const firstConversation = conversationList.querySelector('.conversation-item');
425
+
426
+ if (firstConversation) {
427
+ const firstConvId = parseInt(firstConversation.dataset.conversationId);
428
+ switchConversation(firstConvId);
429
+ } else {
430
+ // No conversations left, create a new one
431
+ await handleNewConversation();
432
+ }
433
+ } else {
434
+ // Just refresh the list
435
+ await refreshConversationList();
436
+ }
437
+ } else {
438
+ showError(data.error || 'Failed to delete conversation');
439
+ }
440
+
441
+ } catch (error) {
442
+ console.error('Error deleting conversation:', error);
443
+ showError('Failed to delete conversation');
444
+ }
445
+ }
446
+
447
+
448
+ /**
449
+ * Scroll chat messages to bottom
450
+ */
451
+ function scrollToBottom() {
452
+ const chatMessages = document.getElementById('chatMessages');
453
+ // Use setTimeout to ensure DOM has updated
454
+ setTimeout(() => {
455
+ chatMessages.scrollTop = chatMessages.scrollHeight;
456
+ }, 100);
457
+ }
458
+
459
+
460
+ /**
461
+ * Show error notification
462
+ */
463
+ function showError(message) {
464
+ // Create notification
465
+ const notification = document.createElement('div');
466
+ notification.className = 'notification is-danger is-light';
467
+ notification.innerHTML = `
468
+ <button class="delete"></button>
469
+ ${escapeHtml(message)}
470
+ `;
471
+
472
+ // Add to page
473
+ const container = document.querySelector('.container') || document.body;
474
+ container.insertBefore(notification, container.firstChild);
475
+
476
+ // Add delete functionality
477
+ notification.querySelector('.delete').addEventListener('click', () => {
478
+ notification.remove();
479
+ });
480
+
481
+ // Auto-dismiss after 5 seconds
482
+ setTimeout(() => {
483
+ notification.remove();
484
+ }, 5000);
485
+ }
486
+
487
+
488
+ /**
489
+ * Escape HTML to prevent XSS
490
+ */
491
+ function escapeHtml(text) {
492
+ const div = document.createElement('div');
493
+ div.textContent = text;
494
+ return div.innerHTML;
495
+ }
496
+
497
+
498
+ /**
499
+ * Format timestamp to readable string
500
+ */
501
+ function formatTimestamp(isoString) {
502
+ const date = new Date(isoString);
503
+ return date.toLocaleString('en-US', {
504
+ month: 'short',
505
+ day: 'numeric',
506
+ hour: 'numeric',
507
+ minute: '2-digit',
508
+ hour12: true
509
+ });
510
+ }
511
+
512
+
513
+ /**
514
+ * Generate and play text-to-speech audio
515
+ */
516
+ async function playTextToSpeech(text) {
517
+ try {
518
+ // Stop any currently playing audio
519
+ if (currentAudio) {
520
+ currentAudio.pause();
521
+ currentAudio = null;
522
+ }
523
+
524
+ // Request TTS generation from server
525
+ const response = await fetch('/chat/api/tts/generate', {
526
+ method: 'POST',
527
+ headers: {
528
+ 'Content-Type': 'application/json'
529
+ },
530
+ body: JSON.stringify({ text: text })
531
+ });
532
+
533
+ const data = await response.json();
534
+
535
+ if (data.success && data.audio_url) {
536
+ // Create and play audio element
537
+ currentAudio = new Audio(data.audio_url);
538
+
539
+ // Add event listener for when audio ends
540
+ currentAudio.addEventListener('ended', () => {
541
+ currentAudio = null;
542
+ });
543
+
544
+ // Add error handler
545
+ currentAudio.addEventListener('error', (e) => {
546
+ console.error('Audio playback error:', e);
547
+ currentAudio = null;
548
+ });
549
+
550
+ // Play the audio
551
+ await currentAudio.play();
552
+
553
+ } else {
554
+ console.error('TTS generation failed:', data.error);
555
+ }
556
+
557
+ } catch (error) {
558
+ console.error('Error playing TTS:', error);
559
+ }
560
+ }
templates/base.html ADDED
@@ -0,0 +1,128 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>{% block title %}Life Coach{% endblock %} - AI Life Coaching Assistant</title>
7
+
8
+ <!-- Bulma CSS -->
9
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bulma@0.9.4/css/bulma.min.css">
10
+
11
+ <!-- Font Awesome Icons -->
12
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
13
+
14
+ <!-- Custom CSS -->
15
+ <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}?v={{ range(1, 10000) | random }}">
16
+
17
+ {% block extra_head %}{% endblock %}
18
+ </head>
19
+ <body>
20
+ <!-- Navbar -->
21
+ <nav class="navbar is-primary" role="navigation" aria-label="main navigation">
22
+ <div class="navbar-brand">
23
+ <a class="navbar-item" href="{{ url_for('index') }}">
24
+ <i class="fas fa-heart mr-2"></i>
25
+ <strong>Life Coach</strong>
26
+ </a>
27
+
28
+ <a role="button" class="navbar-burger" aria-label="menu" aria-expanded="false" data-target="navbarMenu">
29
+ <span aria-hidden="true"></span>
30
+ <span aria-hidden="true"></span>
31
+ <span aria-hidden="true"></span>
32
+ </a>
33
+ </div>
34
+
35
+ <div id="navbarMenu" class="navbar-menu">
36
+ <div class="navbar-end">
37
+ {% if current_user.is_authenticated %}
38
+ <div class="navbar-item has-dropdown is-hoverable">
39
+ <a class="navbar-link">
40
+ <i class="fas fa-user mr-2"></i>
41
+ {{ current_user.username }}
42
+ </a>
43
+
44
+ <div class="navbar-dropdown">
45
+ <a class="navbar-item" href="{{ url_for('chat.chat_interface') }}">
46
+ <i class="fas fa-comments mr-2"></i>
47
+ Chat
48
+ </a>
49
+ <hr class="navbar-divider">
50
+ <a class="navbar-item" href="{{ url_for('auth.logout') }}">
51
+ <i class="fas fa-sign-out-alt mr-2"></i>
52
+ Logout
53
+ </a>
54
+ </div>
55
+ </div>
56
+ {% else %}
57
+ <a class="navbar-item" href="{{ url_for('auth.login') }}">
58
+ <i class="fas fa-sign-in-alt mr-2"></i>
59
+ Login
60
+ </a>
61
+ <a class="navbar-item" href="{{ url_for('auth.register') }}">
62
+ <i class="fas fa-user-plus mr-2"></i>
63
+ Register
64
+ </a>
65
+ {% endif %}
66
+ </div>
67
+ </div>
68
+ </nav>
69
+
70
+ <!-- Flash Messages -->
71
+ {% with messages = get_flashed_messages(with_categories=true) %}
72
+ {% if messages %}
73
+ <div class="container mt-4">
74
+ {% for category, message in messages %}
75
+ <div class="notification is-{{ category }} is-light">
76
+ <button class="delete"></button>
77
+ {{ message }}
78
+ </div>
79
+ {% endfor %}
80
+ </div>
81
+ {% endif %}
82
+ {% endwith %}
83
+
84
+ <!-- Main Content -->
85
+ <main>
86
+ {% block content %}{% endblock %}
87
+ </main>
88
+
89
+ <!-- Footer -->
90
+ <footer class="footer mt-6">
91
+ <div class="content has-text-centered">
92
+ <p>
93
+ <strong>Life Coach</strong> - AI-powered life coaching assistant
94
+ <br>
95
+ Powered by Microsoft Phi-4 🤖
96
+ </p>
97
+ </div>
98
+ </footer>
99
+
100
+ <!-- Scripts -->
101
+ <script>
102
+ // Mobile navbar toggle
103
+ document.addEventListener('DOMContentLoaded', () => {
104
+ const $navbarBurgers = Array.prototype.slice.call(document.querySelectorAll('.navbar-burger'), 0);
105
+
106
+ $navbarBurgers.forEach( el => {
107
+ el.addEventListener('click', () => {
108
+ const target = el.dataset.target;
109
+ const $target = document.getElementById(target);
110
+ el.classList.toggle('is-active');
111
+ $target.classList.toggle('is-active');
112
+ });
113
+ });
114
+
115
+ // Auto-dismiss notifications
116
+ const $notifications = document.querySelectorAll('.notification .delete');
117
+ $notifications.forEach(($delete) => {
118
+ const $notification = $delete.parentNode;
119
+ $delete.addEventListener('click', () => {
120
+ $notification.parentNode.removeChild($notification);
121
+ });
122
+ });
123
+ });
124
+ </script>
125
+
126
+ {% block extra_scripts %}{% endblock %}
127
+ </body>
128
+ </html>
templates/chat-clean.html ADDED
@@ -0,0 +1,107 @@
1
+ {% extends "base.html" %}
2
+
3
+ {% block title %}Chat{% endblock %}
4
+
5
+ {% block content %}
6
+ <div class="chat-section">
7
+ <div class="container-fluid h-100 p-0">
8
+ <div class="row g-0 h-100">
9
+ <!-- Sidebar Column -->
10
+ <div class="col-md-3 chat-sidebar">
11
+ <div class="sidebar-header">
12
+ <h5 class="text-white mb-3">
13
+ <i class="fas fa-comments me-2"></i>Conversations
14
+ </h5>
15
+ <button class="btn btn-light w-100" id="newConversationBtn">
16
+ <i class="fas fa-plus me-2"></i>New Chat
17
+ </button>
18
+ </div>
19
+
20
+ <div class="sidebar-content" id="conversationList">
21
+ {% for conv in conversations %}
22
+ <div class="conversation-item {% if conv.id == current_conversation.id %}active{% endif %}"
23
+ data-conversation-id="{{ conv.id }}">
24
+ <button class="conversation-delete" data-conversation-id="{{ conv.id }}" title="Delete conversation">
25
+ <i class="fas fa-times"></i>
26
+ </button>
27
+ <div class="conversation-title">
28
+ <i class="fas fa-message me-2"></i>{{ conv.title }}
29
+ </div>
30
+ <div class="conversation-meta">
31
+ {{ conv.messages|length }} messages
32
+ </div>
33
+ </div>
34
+ {% endfor %}
35
+ </div>
36
+ </div>
37
+
38
+ <!-- Main Chat Column -->
39
+ <div class="col-md-9 d-flex flex-column chat-main">
40
+ <!-- Chat Header -->
41
+ <div class="chat-header">
42
+ <div>
43
+ <h4 class="mb-1" id="chatTitle">{{ current_conversation.title }}</h4>
44
+ <p class="mb-0 text-muted small">
45
+ <i class="fas fa-user-tie me-1"></i>AI Life Coach - Here to help you grow
46
+ </p>
47
+ </div>
48
+ <div class="form-check form-switch">
49
+ <input class="form-check-input" type="checkbox" id="ttsToggle">
50
+ <label class="form-check-label" for="ttsToggle">
51
+ <i class="fas fa-volume-up me-1"></i>Read Aloud
52
+ </label>
53
+ </div>
54
+ </div>
55
+
56
+ <!-- Chat Messages -->
57
+ <div class="chat-messages flex-grow-1" id="chatMessages">
58
+ {% if current_conversation.messages|length == 0 %}
59
+ <div class="text-center py-5">
60
+ <i class="fas fa-heart fa-4x mb-3" style="color: var(--primary-pink);"></i>
61
+ <h2>Welcome to Life Coach!</h2>
62
+ <p class="text-muted">I'm here to help you with personal growth and motivation.</p>
63
+ </div>
64
+ {% endif %}
65
+
66
+ {% for message in current_conversation.messages %}
67
+ <div class="message-bubble {{ message.role }}">
68
+ <div class="message-avatar">
69
+ <i class="fas fa-{{ 'user' if message.role == 'user' else 'user-tie' }}"></i>
70
+ </div>
71
+ <div class="message-content">
72
+ <div class="message-text">{{ message.content }}</div>
73
+ <div class="message-time">{{ message.timestamp.strftime('%I:%M %p') }}</div>
74
+ </div>
75
+ </div>
76
+ {% endfor %}
77
+ </div>
78
+
79
+ <!-- Chat Input -->
80
+ <div class="chat-input-container">
81
+ <form id="chatForm" class="d-flex gap-2">
82
+ <input type="text" class="form-control chat-input" id="messageInput"
83
+ placeholder="Type your message here..."
84
+ autocomplete="off" required>
85
+ <button type="submit" class="btn btn-primary send-button" id="sendBtn">
86
+ <i class="fas fa-paper-plane me-2"></i>Send
87
+ </button>
88
+ </form>
89
+
90
+ <div class="loading-indicator text-center mt-3" id="loadingIndicator" style="display: none;">
91
+ <div class="d-inline-flex align-items-center gap-2 bg-white px-4 py-2 rounded-pill shadow">
92
+ <div class="spinner-border spinner-border-sm text-primary" role="status"></div>
93
+ <span>Thinking...</span>
94
+ </div>
95
+ </div>
96
+ </div>
97
+ </div>
98
+ </div>
99
+ </div>
100
+ </div>
101
+
102
+ <input type="hidden" id="currentConversationId" value="{{ current_conversation.id }}">
103
+ {% endblock %}
104
+
105
+ {% block extra_scripts %}
106
+ <script src="{{ url_for('static', filename='js/chat.js') }}"></script>
107
+ {% endblock %}
templates/chat.html ADDED
@@ -0,0 +1,138 @@
1
+ {% extends "base.html" %}
2
+
3
+ {% block title %}Chat{% endblock %}
4
+
5
+ {% block content %}
6
+ <section class="section chat-section">
7
+ <div class="chat-container">
8
+ <!-- Custom flexible layout (without Bulma's .columns) -->
9
+ <div class="chat-layout">
10
+
11
+ <!-- Sidebar -->
12
+ <aside class="sidebar">
13
+ <div class="sidebar-header">
14
+ <h2 class="title is-5">
15
+ <i class="fas fa-comments mr-2"></i>
16
+ Conversations
17
+ </h2>
18
+ <button class="button is-primary is-small" id="newConversationBtn">
19
+ <span class="icon">
20
+ <i class="fas fa-plus"></i>
21
+ </span>
22
+ <span>New Chat</span>
23
+ </button>
24
+ </div>
25
+
26
+ <div class="sidebar-content" id="conversationList">
27
+ {% for conv in conversations %}
28
+ <div class="conversation-item {% if conv.id == current_conversation.id %}is-active{% endif %}"
29
+ data-conversation-id="{{ conv.id }}">
30
+ <button class="conversation-delete" data-conversation-id="{{ conv.id }}" title="Delete conversation">
31
+ <i class="fas fa-times"></i>
32
+ </button>
33
+ <div class="conversation-title">
34
+ <i class="fas fa-message mr-2"></i>
35
+ {{ conv.title }}
36
+ </div>
37
+ <div class="conversation-meta">
38
+ {{ conv.messages|length }} messages
39
+ </div>
40
+ </div>
41
+ {% endfor %}
42
+ </div>
43
+ </aside>
44
+
45
+ <!-- Main Chat Area -->
46
+ <main class="chat-main">
47
+ <!-- Header -->
48
+ <header class="chat-header">
49
+ <div>
50
+ <div>
51
+ <h1 class="title is-4 mb-0" id="chatTitle">
52
+ {{ current_conversation.title }}
53
+ </h1>
54
+ <p class="subtitle is-7 has-text-grey">
55
+ <i class="fas fa-robot mr-1"></i>
56
+ AI Life Coach - Here to help you grow
57
+ </p>
58
+ </div>
59
+ <div class="field">
60
+ <label class="checkbox" title="Enable text-to-speech for assistant responses">
61
+ <input type="checkbox" id="ttsToggle">
62
+ <i class="fas fa-volume-up ml-2"></i>
63
+ <span class="ml-1">Read Aloud</span>
64
+ </label>
65
+ </div>
66
+ </div>
67
+ </header>
68
+
69
+ <!-- Messages Area -->
70
+ <div class="chat-messages" id="chatMessages">
71
+ <!-- Welcome Message (solo se non ci sono messaggi) -->
72
+ <div class="welcome-message">
73
+ <div class="has-text-centered">
74
+ <i class="fas fa-heart fa-4x has-text-primary mb-4"></i>
75
+ <h2 class="title is-3">Welcome to Life Coach!</h2>
76
+ <p class="subtitle is-5">I'm here to help you with personal growth, goal setting, and motivation.</p>
77
+ <p class="has-text-grey">Start by telling me what's on your mind or what you'd like to work on today.</p>
78
+ </div>
79
+ </div>
80
+
81
+ <!-- Messaggi esistenti -->
82
+ {% for message in current_conversation.messages %}
83
+ <div class="message-bubble {{ message.role }}">
84
+ <div class="message-avatar">
85
+ {% if message.role == 'user' %}
86
+ <i class="fas fa-user"></i>
87
+ {% else %}
88
+ <i class="fas fa-robot"></i>
89
+ {% endif %}
90
+ </div>
91
+ <div class="message-content">
92
+ <div class="message-text">{{ message.content }}</div>
93
+ <div class="message-time">{{ message.timestamp.strftime('%I:%M %p') }}</div>
94
+ </div>
95
+ </div>
96
+ {% endfor %}
97
+ </div>
98
+
99
+ <!-- Input Area -->
100
+ <div class="chat-input-container">
101
+ <form id="chatForm">
102
+ <div class="field has-addons">
103
+ <div class="control is-expanded">
104
+ <input class="input is-medium" type="text" id="messageInput"
105
+ placeholder="Type your message here..."
106
+ autocomplete="off" required>
107
+ </div>
108
+ <div class="control">
109
+ <button class="button is-primary is-medium" type="submit" id="sendBtn">
110
+ <span class="icon">
111
+ <i class="fas fa-paper-plane"></i>
112
+ </span>
113
+ <span>Send</span>
114
+ </button>
115
+ </div>
116
+ </div>
117
+ </form>
118
+
119
+ <!-- Loading Indicator -->
120
+ <div class="loading-indicator" id="loadingIndicator" style="display: none;">
121
+ <div class="loader-wrapper">
122
+ <div class="loader"></div>
123
+ <span>Thinking...</span>
124
+ </div>
125
+ </div>
126
+ </div>
127
+ </main>
128
+ </div>
129
+ </div>
130
+ </section>
131
+
132
+ <!-- Current conversation ID for the JS -->
133
+ <input type="hidden" id="currentConversationId" value="{{ current_conversation.id }}">
134
+ {% endblock %}
135
+
136
+ {% block extra_scripts %}
137
+ <script src="{{ url_for('static', filename='js/chat.js') }}?v={{ range(1, 10000) | random }}"></script>
138
+ {% endblock %}
templates/login.html ADDED
@@ -0,0 +1,82 @@
1
+ {% extends "base.html" %}
2
+
3
+ {% block title %}Login{% endblock %}
4
+
5
+ {% block content %}
6
+ <section class="hero is-fullheight-with-navbar">
7
+ <div class="hero-body">
8
+ <div class="container">
9
+ <div class="columns is-centered">
10
+ <div class="column is-5-tablet is-4-desktop is-3-widescreen">
11
+ <div class="box auth-box">
12
+ <div class="has-text-centered mb-5">
13
+ <i class="fas fa-heart fa-3x has-text-primary"></i>
14
+ <h1 class="title is-3 mt-3">Welcome Back!</h1>
15
+ <p class="subtitle is-6">Login to continue your journey</p>
16
+ </div>
17
+
18
+ <form method="POST" action="{{ url_for('auth.login') }}">
19
+ <!-- Username -->
20
+ <div class="field">
21
+ <label class="label">Username</label>
22
+ <div class="control has-icons-left">
23
+ <input class="input" type="text" name="username"
24
+ placeholder="Enter your username"
25
+ value="{{ username or '' }}" required autofocus>
26
+ <span class="icon is-small is-left">
27
+ <i class="fas fa-user"></i>
28
+ </span>
29
+ </div>
30
+ </div>
31
+
32
+ <!-- Password -->
33
+ <div class="field">
34
+ <label class="label">Password</label>
35
+ <div class="control has-icons-left">
36
+ <input class="input" type="password" name="password"
37
+ placeholder="Enter your password" required>
38
+ <span class="icon is-small is-left">
39
+ <i class="fas fa-lock"></i>
40
+ </span>
41
+ </div>
42
+ </div>
43
+
44
+ <!-- Remember Me -->
45
+ <div class="field">
46
+ <label class="checkbox">
47
+ <input type="checkbox" name="remember">
48
+ Remember me
49
+ </label>
50
+ </div>
51
+
52
+ <!-- Submit Button -->
53
+ <div class="field">
54
+ <div class="control">
55
+ <button class="button is-primary is-fullwidth" type="submit">
56
+ <span class="icon">
57
+ <i class="fas fa-sign-in-alt"></i>
58
+ </span>
59
+ <span>Login</span>
60
+ </button>
61
+ </div>
62
+ </div>
63
+ </form>
64
+
65
+ <hr>
66
+
67
+ <div class="has-text-centered">
68
+ <p>Don't have an account?</p>
69
+ <a href="{{ url_for('auth.register') }}" class="button is-link is-light mt-2">
70
+ <span class="icon">
71
+ <i class="fas fa-user-plus"></i>
72
+ </span>
73
+ <span>Register Now</span>
74
+ </a>
75
+ </div>
76
+ </div>
77
+ </div>
78
+ </div>
79
+ </div>
80
+ </div>
81
+ </section>
82
+ {% endblock %}
templates/register.html ADDED
@@ -0,0 +1,101 @@
1
+ {% extends "base.html" %}
2
+
3
+ {% block title %}Register{% endblock %}
4
+
5
+ {% block content %}
6
+ <section class="hero is-fullheight-with-navbar">
7
+ <div class="hero-body">
8
+ <div class="container">
9
+ <div class="columns is-centered">
10
+ <div class="column is-5-tablet is-4-desktop is-3-widescreen">
11
+ <div class="box auth-box">
12
+ <div class="has-text-centered mb-5">
13
+ <i class="fas fa-user-plus fa-3x has-text-link"></i>
14
+ <h1 class="title is-3 mt-3">Join Us!</h1>
15
+ <p class="subtitle is-6">Start your life coaching journey</p>
16
+ </div>
17
+
18
+ <form method="POST" action="{{ url_for('auth.register') }}">
19
+ <!-- Username -->
20
+ <div class="field">
21
+ <label class="label">Username</label>
22
+ <div class="control has-icons-left">
23
+ <input class="input" type="text" name="username"
24
+ placeholder="Choose a username"
25
+ value="{{ username or '' }}" required autofocus>
26
+ <span class="icon is-small is-left">
27
+ <i class="fas fa-user"></i>
28
+ </span>
29
+ </div>
30
+ <p class="help">At least 3 characters</p>
31
+ </div>
32
+
33
+ <!-- Email -->
34
+ <div class="field">
35
+ <label class="label">Email</label>
36
+ <div class="control has-icons-left">
37
+ <input class="input" type="email" name="email"
38
+ placeholder="your.email@example.com"
39
+ value="{{ email or '' }}" required>
40
+ <span class="icon is-small is-left">
41
+ <i class="fas fa-envelope"></i>
42
+ </span>
43
+ </div>
44
+ </div>
45
+
46
+ <!-- Password -->
47
+ <div class="field">
48
+ <label class="label">Password</label>
49
+ <div class="control has-icons-left">
50
+ <input class="input" type="password" name="password"
51
+ placeholder="Create a strong password" required>
52
+ <span class="icon is-small is-left">
53
+ <i class="fas fa-lock"></i>
54
+ </span>
55
+ </div>
56
+ <p class="help">At least 6 characters</p>
57
+ </div>
58
+
59
+ <!-- Confirm Password -->
60
+ <div class="field">
61
+ <label class="label">Confirm Password</label>
62
+ <div class="control has-icons-left">
63
+ <input class="input" type="password" name="password_confirm"
64
+ placeholder="Confirm your password" required>
65
+ <span class="icon is-small is-left">
66
+ <i class="fas fa-lock"></i>
67
+ </span>
68
+ </div>
69
+ </div>
70
+
71
+ <!-- Submit Button -->
72
+ <div class="field">
73
+ <div class="control">
74
+ <button class="button is-link is-fullwidth" type="submit">
75
+ <span class="icon">
76
+ <i class="fas fa-user-plus"></i>
77
+ </span>
78
+ <span>Create Account</span>
79
+ </button>
80
+ </div>
81
+ </div>
82
+ </form>
83
+
84
+ <hr>
85
+
86
+ <div class="has-text-centered">
87
+ <p>Already have an account?</p>
88
+ <a href="{{ url_for('auth.login') }}" class="button is-primary is-light mt-2">
89
+ <span class="icon">
90
+ <i class="fas fa-sign-in-alt"></i>
91
+ </span>
92
+ <span>Login</span>
93
+ </a>
94
+ </div>
95
+ </div>
96
+ </div>
97
+ </div>
98
+ </div>
99
+ </div>
100
+ </section>
101
+ {% endblock %}