#!/usr/bin/env python3
"""
🍽️ Production-Ready AI Food Recognition API
===========================================

FastAPI backend optimized for Hugging Face Spaces deployment.
- Multi-model Food-101 ensemble: SigLIP, DeiT, ViT, Swin + nateraw/food baseline (101 food categories)
- Production optimizations: warm-up, memory management, error handling
- Endpoints: /api/nutrition/analyze-food (Next.js) + /analyze (HF Spaces)
- Auto device detection: GPU → MPS → CPU fallback
- Enhanced image preprocessing with contrast/sharpness boost
"""

import os
import gc
import logging
import asyncio
import aiohttp
import re
from typing import Dict, Any, List, Optional
from io import BytesIO
from pathlib import Path

# Load .env file if exists
try:
    from dotenv import load_dotenv
    env_path = Path(__file__).parent / '.env'
    load_dotenv(dotenv_path=env_path)
    logging.info(f"βœ… Loaded .env from {env_path}")
except ImportError:
    logging.warning("⚠️ python-dotenv not installed, using system environment variables")
except Exception as e:
    logging.warning(f"⚠️ Could not load .env: {e}")

import torch
import torch.nn.functional as F
from PIL import Image, ImageEnhance
import numpy as np

from fastapi import FastAPI, File, UploadFile, HTTPException, Request, Form
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import uvicorn

from transformers import AutoImageProcessor, AutoModelForImageClassification
from contextlib import asynccontextmanager

# OpenAI for translations
from openai import AsyncOpenAI

# ==================== CONFIGURATION ====================
MAX_FILE_SIZE = 10 * 1024 * 1024  # 10MB
MAX_IMAGE_SIZE = 512
ALLOWED_TYPES = ["image/jpeg", "image/jpg", "image/png", "image/webp"]

# OpenAI Configuration (client is initialized in the lifespan startup, after the logger)
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")  # read from environment/.env; never hardcode secrets
openai_client = None  # Will be initialized in lifespan startup

# ==================== MULTI-MODEL FOOD RECOGNITION ====================
FOOD_MODELS = {
    # ONLY REAL FOOD-101 SPECIALIST MODELS - NO GENERIC VISION MODELS!
    
    # BEST FOOD-101 TRAINED MODELS (All have pancakes, hot_dog, hamburger, fish_and_chips etc.)
    "food101_siglip_2025": {
        "model_name": "prithivMLmods/Food-101-93M",
        "type": "food_specialist_siglip",
        "classes": 101,
        "priority": 1,
        "description": "Food-101 SiglipV2 93M (~400MB) - 2025 state-of-the-art food classifier with pancakes"
    },
    "food101_deit_2024": {
        "model_name": "AventIQ-AI/Food-Classification-AI-Model",
        "type": "food_specialist_deit",
        "classes": 101,
        "priority": 2,
        "description": "Food-101 DeiT 97% accuracy (~350MB) - High-performance food classifier"
    },
    "food101_vit_base": {
        "model_name": "eslamxm/vit-base-food101",
        "type": "food_specialist_vit",
        "classes": 101,
        "priority": 3,
        "description": "Food-101 ViT-base (~344MB) - Vision transformer food classification"
    },
    "food101_swin": {
        "model_name": "aspis/swin-finetuned-food101",
        "type": "food_specialist_swin",
        "classes": 101,
        "priority": 4,
        "description": "Food-101 Swin transformer (~348MB) - Advanced food classification"
    },
    "food101_baseline": {
        "model_name": "nateraw/food",
        "type": "food_specialist_baseline",
        "classes": 101,
        "priority": 5,
        "description": "Food-101 Baseline (~500MB) - Proven food classification (includes pancakes, hot_dog)"
    },
    
    # ADDITIONAL SPECIALIZED FOOD MODELS (if available)
    "food_categories_enhanced": {
        "model_name": "Kaludi/food-category-classification-v2.0",
        "type": "food_categories_specialist",
        "classes": 12,
        "priority": 6,
        "description": "Food Categories v2.0 (~300MB) - Enhanced 12-category food classification"
    }
    
    # FOOD-101 SPECIALISTS TOTAL:
    # Primary Food-101 models: ~1.94GB (5 models with 101 specific dishes each)
    # Enhanced categories: ~300MB
    # TOTAL: ~2.24GB - Extremely efficient, focused only on food!
    # 6 FOOD-SPECIALIST MODELS trained specifically on food datasets
}

# Default primary model - Best Food-101 Specialist
PRIMARY_MODEL = "food101_siglip_2025"

# CONFIDENCE THRESHOLDS - Realistic for ensemble models
MIN_CONFIDENCE_THRESHOLD = 0.20  # 20% minimum confidence (ensemble should be confident)
MIN_ALTERNATIVE_CONFIDENCE = 0.15  # 15% minimum for alternatives
MAX_ALTERNATIVES = 5  # Maximum 5 alternatives

# FOOD CATEGORY MAPPING - Enhanced mapping for better recognition with SMART SUBSTITUTION
KALUDI_CATEGORY_MAPPING = {
    # Kaludi v2.0 categories with detailed food mapping + SMART OVERRIDES
    "Meat": ["cevapi", "cevapcici", "pljeskavica", "steak", "beef", "pork", "chicken", "lamb", "sausage"],
    "Fried Food": ["fish_and_chips", "fried_chicken", "donuts", "french_fries", "onion_rings", "tempura"],
    "Bread": ["burek", "lepinja", "somun", "pogaca", "sandwich", "toast", "bagel"],
    "Dairy": ["cheese", "kajmak", "yogurt", "milk", "cream", "butter"],
    "Dessert": ["cake", "ice_cream", "chocolate", "cookie", "pie", "baklava", "brownie", "cheesecake"],
    "Egg": ["omelet", "scrambled_eggs", "fried_eggs", "eggs_benedict"],
    "Fruit": ["apple", "banana", "orange", "grape", "strawberry"],
    "Noodles": ["pasta", "spaghetti", "ramen", "pad_thai"],
    "Rice": ["fried_rice", "risotto", "biryani", "paella"],
    "Seafood": ["fish", "salmon", "tuna", "shrimp", "sushi"],
    "Soup": ["begova_corba", "chicken_soup", "miso_soup", "clam_chowder"],
    "Vegetable": ["salad", "broccoli", "spinach", "carrot", "tomato"]
}

# CRITICAL SMART CATEGORY OVERRIDE - Fixes wrong categorizations
SMART_FOOD_OVERRIDES = {
    # BREAKFAST ITEMS - These should NEVER be classified as dessert!
    "Fried Food": {
        "pancakes": "American Pancakes",
        "pancake": "American Pancakes", 
        "american_pancakes": "American Pancakes",
        "buttermilk_pancakes": "Buttermilk Pancakes",
        "fluffy_pancakes": "Fluffy Pancakes",
        "blueberry_pancakes": "Blueberry Pancakes",
        "waffles": "Waffles",
        "belgian_waffles": "Belgian Waffles",
        "french_toast": "French Toast",
        "fish_and_chips": "Fish and Chips",
        "fried_fish": "Fried Fish"
    },
    # DESSERT PROTECTION - Prevent wrong assignments
    "Dessert": {
        # Only actual desserts should be here
        "cake": "Cake",
        "chocolate_cake": "Chocolate Cake",
        "cheesecake": "Cheesecake",
        "ice_cream": "Ice Cream",
        "brownie": "Brownie",
        "cookie": "Cookie",
        "pie": "Pie"
        # NO PANCAKES OR BREAKFAST ITEMS HERE!
    },
    # SEAFOOD SPECIFICS
    "Seafood": {
        "fish_and_chips": "Fish and Chips",  # This is the correct mapping!
        "fried_fish": "Fried Fish",
        "grilled_fish": "Grilled Fish",
        "fish_fillet": "Fish Fillet",
        "salmon": "Salmon",
        "tuna": "Tuna"
    }
}
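
# Illustrative lookup pattern for the overrides above (the consuming code sits
# outside this excerpt; `category` and `raw_label` are hypothetical names):
#
#   display = SMART_FOOD_OVERRIDES.get(category, {}).get(raw_label, raw_label)
#   # e.g. SMART_FOOD_OVERRIDES["Fried Food"]["pancakes"] -> "American Pancakes"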

# ADVANCED BALKAN FOOD DETECTION - Map to closest Food-101 categories
BALKAN_TO_FOOD101_MAPPING = {
    # Balkan dish → Closest Food-101 equivalent (ENHANCED for better recognition)
    "cevapi": "hot_dog",          # Closest grilled meat in Food-101
    "cevapcici": "hot_dog",       # Same as ćevapi
    "chevapi": "hot_dog",         # Alternative spelling
    "chevapchichi": "hot_dog",    # Alternative spelling
    "pljeskavica": "hamburger",   # Burger-like grilled meat patty
    "burek": "pizza",             # Closest baked dough dish
    "sarma": "dumplings",         # Stuffed/wrapped food
    "kajmak": "cheese_plate",     # Dairy product
    "ajvar": "hummus",            # Vegetable spread
    "raznjici": "hot_dog",        # Similar grilled meat
    "kofte": "hot_dog",           # Similar grilled meat
    "prebranac": "baked_beans",   # Bean dish (if exists)
    "pasulj": "soup",             # Bean soup
    "begova_corba": "soup"        # Turkish soup
}

# SMART FOOD-101 LABEL ENHANCEMENT - Convert generic to specific
FOOD101_SMART_MAPPING = {
    # When Food-101 detects these, but we know it's something more specific
    "meat": {
        "possible_dishes": ["hot_dog", "hamburger", "steak", "chicken_wings"],
        "balkan_boost": "hot_dog"  # Default to Δ‡evapi equivalent
    },
    "bread": {
        "possible_dishes": ["pizza", "sandwich", "garlic_bread"],
        "balkan_boost": "pizza"  # Default to burek equivalent  
    },
    "dessert": {
        "possible_dishes": ["pancakes", "waffles", "french_toast", "cake"],
        "breakfast_override": "pancakes"  # If wrongly classified, default to pancakes
    }
}

# FOOD-101 CATEGORIES (Original 101 categories with pancake-friendly mapping)
FOOD101_CATEGORIES = [
    "apple_pie", "baby_back_ribs", "baklava", "beef_carpaccio", "beef_tartare",
    "beet_salad", "beignets", "bibimbap", "bread_pudding", "breakfast_burrito",
    "bruschetta", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake",
    "ceviche", "cheese_plate", "cheesecake", "chicken_curry", "chicken_quesadilla",
    "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder",
    "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes",
    "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict",
    "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras",
    "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice",
    "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich",
    "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup",
    "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna",
    "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup",
    "mussels", "nachos", "omelette", "onion_rings", "oysters",
    "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck",
    "pho", "pizza", "pork_chop", "poutine", "prime_rib",
    "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto",
    "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits",
    "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake",
    "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare", "waffles"
]
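
# Sanity check: the list above enumerates exactly the 101 Food-101 labels.
assert len(FOOD101_CATEGORIES) == 101, "Food-101 label list should have 101 entries"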

# COMPREHENSIVE FOOD NAME SET - several hundred food items (duplicate literals collapse in the set)
COMPREHENSIVE_FOOD_CATEGORIES = {
    # BREAKFAST & PANCAKES (Critical for your use case!)
    "pancakes", "american_pancakes", "fluffy_pancakes", "buttermilk_pancakes", "blueberry_pancakes",
    "chocolate_chip_pancakes", "banana_pancakes", "protein_pancakes", "sourdough_pancakes",
    "waffles", "belgian_waffles", "waffle", "french_toast", "toast", "bagel", "croissant",
    "muffin", "english_muffin", "danish_pastry", "cinnamon_roll", "oatmeal", "cereal",
    
    # BALKAN FOODS (Critical for ćevapi!)
    "cevapi", "cevapcici", "chevapi", "chevapchichi", "kebab", "kofte", "pljeskavica",
    "burek", "kajmak", "ajvar", "lepinja", "somun", "raznjici", "hot_dog",
    "scrambled_eggs", "fried_eggs", "eggs_benedict", "omelet", "breakfast_burrito",
    
    # FOOD-101 CATEGORIES and close variants (dataset labels plus common aliases)
    "pizza", "hamburger", "cheeseburger", "sushi", "ice_cream", "french_fries", "chicken_wings",
    "chocolate_cake", "caesar_salad", "steak", "tacos", "lasagna", "apple_pie", "chicken_curry",
    "pad_thai", "ramen", "donuts", "cheesecake", "fish_and_chips", "fried_rice", "greek_salad",
    "guacamole", "crepe", "crepes", "hot_dog", "sandwich", "club_sandwich", "grilled_cheese",
    
    # FAST FOOD & POPULAR DISHES
    "burger", "double_burger", "whopper", "big_mac", "chicken_sandwich", "fish_sandwich",
    "chicken_nuggets", "chicken_tenders", "fried_chicken", "bbq_ribs", "pulled_pork",
    "burritos", "quesadilla", "nachos", "enchilada", "fajitas", "chimichanga",
    "onion_rings", "mozzarella_sticks", "chicken_wings", "buffalo_wings",
    
    # BALKAN/SERBIAN CUISINE (with alternative spellings)
    "cevapi", "cevapcici", "ćevapi", "ćevapčići", "burek", "börek", "pljeskavica",
    "sarma", "klepe", "dolma", "kajmak", "ajvar", "prebranac", "pasulj", "grah",
    "punjena_paprika", "punjene_paprike", "stuffed_peppers", "musaka", "moussaka",
    "japrak", "bamija", "okra", "bosanski_lonac", "begova_corba", "tarhana",
    "zeljanica", "spinach_pie", "sirnica", "cheese_pie", "krompiruΕ‘a", "potato_pie",
    "spanac", "tikvenica", "pumpkin_pie", "gibanica", "banica", "mantija",
    "lepinja", "somun", "pogača", "proja", "kačamak", "cicvara", "roőtilj", "barbecue",
    
    # ITALIAN CUISINE
    "pasta", "spaghetti", "linguine", "fettuccine", "penne", "rigatoni", "macaroni",
    "ravioli", "tortellini", "gnocchi", "carbonara", "bolognese", "alfredo", "pesto",
    "risotto", "minestrone", "antipasto", "bruschetta", "calzone", "stromboli",
    "gelato", "tiramisu", "cannoli", "panna_cotta", "osso_buco", "saltimbocca",
    
    # ASIAN CUISINE
    "sushi", "sashimi", "nigiri", "maki", "california_roll", "tempura", "teriyaki",
    "yakitori", "miso_soup", "udon", "soba", "ramen", "pho", "pad_thai", "tom_yum",
    "fried_rice", "chow_mein", "lo_mein", "spring_rolls", "summer_rolls", "dim_sum",
    "dumplings", "wontons", "pot_stickers", "bao", "char_siu", "peking_duck",
    "kung_pao_chicken", "sweet_and_sour", "general_tso", "orange_chicken",
    "bibimbap", "kimchi", "bulgogi", "galbi", "japchae", "korean_bbq",
    
    # MEXICAN/LATIN AMERICAN
    "tacos", "burritos", "quesadilla", "enchilada", "tamales", "carnitas", "al_pastor",
    "carne_asada", "fish_tacos", "chicken_tacos", "beef_tacos", "guacamole", "salsa",
    "chips_and_salsa", "nachos", "elote", "churros", "flan", "tres_leches",
    "mole", "pozole", "menudo", "ceviche", "empanadas", "arepa", "paella",
    
    # INDIAN CUISINE
    "curry", "chicken_curry", "beef_curry", "lamb_curry", "vegetable_curry",
    "butter_chicken", "tikka_masala", "tandoori", "biryani", "pilaf", "naan",
    "chapati", "roti", "samosa", "pakora", "chutney", "dal", "palak_paneer",
    "saag", "vindaloo", "korma", "madras", "masala_dosa", "idli", "vada",
    
    # MIDDLE EASTERN
    "hummus", "falafel", "shawarma", "kebab", "gyros", "pita", "tabbouleh",
    "fattoush", "baba_ganoush", "dolma", "baklava", "halva", "lokum", "turkish_delight",
    "lamb_kebab", "chicken_kebab", "shish_kebab", "kofta", "lahmacun", "meze",
    
    # FRUITS & VEGETABLES
    "apple", "banana", "orange", "grape", "strawberry", "cherry", "peach", "pear",
    "plum", "watermelon", "cantaloupe", "honeydew", "lemon", "lime", "grapefruit",
    "kiwi", "mango", "pineapple", "papaya", "passion_fruit", "dragon_fruit",
    "apricot", "fig", "pomegranate", "persimmon", "blackberry", "raspberry",
    "blueberry", "cranberry", "coconut", "avocado", "tomato", "cucumber",
    "carrot", "potato", "sweet_potato", "onion", "garlic", "pepper", "bell_pepper",
    "jalapeno", "habanero", "cabbage", "spinach", "lettuce", "arugula", "kale",
    "broccoli", "cauliflower", "zucchini", "eggplant", "celery", "radish",
    "beet", "corn", "peas", "green_beans", "asparagus", "artichoke", "mushroom",
    
    # MEAT & SEAFOOD
    "beef", "steak", "ribeye", "filet_mignon", "sirloin", "brisket", "ground_beef",
    "pork", "pork_chops", "bacon", "ham", "sausage", "bratwurst", "chorizo",
    "chicken", "chicken_breast", "chicken_thigh", "roast_chicken", "fried_chicken",
    "turkey", "duck", "lamb", "lamb_chops", "rack_of_lamb", "venison",
    "salmon", "tuna", "cod", "halibut", "sea_bass", "trout", "mackerel", "sardine",
    "shrimp", "prawns", "crab", "lobster", "scallops", "mussels", "clams", "oysters",
    "squid", "octopus", "calamari", "fish_fillet", "grilled_fish",
    
    # DESSERTS & SWEETS
    "cake", "chocolate_cake", "vanilla_cake", "red_velvet", "carrot_cake", "pound_cake",
    "cupcake", "muffin", "cookie", "chocolate_chip_cookie", "sugar_cookie", "oatmeal_cookie",
    "brownie", "fudge", "pie", "apple_pie", "pumpkin_pie", "pecan_pie", "cherry_pie",
    "tart", "cheesecake", "tiramisu", "mousse", "pudding", "custard", "creme_brulee",
    "ice_cream", "gelato", "sorbet", "frozen_yogurt", "popsicle", "milkshake",
    "donut", "danish", "croissant", "eclair", "profiterole", "macaron", "meringue",
    "candy", "chocolate", "truffle", "lollipop", "gummy_bears", "marshmallow",
    
    # BEVERAGES
    "coffee", "espresso", "cappuccino", "latte", "americano", "mocha", "macchiato",
    "tea", "green_tea", "black_tea", "herbal_tea", "chai", "matcha",
    "juice", "orange_juice", "apple_juice", "grape_juice", "cranberry_juice",
    "smoothie", "protein_shake", "milkshake", "soda", "cola", "lemonade",
    "wine", "red_wine", "white_wine", "champagne", "beer", "cocktail", "martini",
    "whiskey", "vodka", "rum", "gin", "tequila", "sake", "water", "sparkling_water",
    
    # NUTS, SEEDS & GRAINS
    "almond", "walnut", "peanut", "cashew", "pistachio", "hazelnut", "pecan",
    "macadamia", "brazil_nut", "pine_nut", "sunflower_seeds", "pumpkin_seeds",
    "chia_seeds", "flax_seeds", "sesame_seeds", "quinoa", "rice", "brown_rice",
    "wild_rice", "bread", "white_bread", "whole_wheat_bread", "sourdough", "rye_bread",
    "pasta", "noodles", "oats", "granola", "cereal", "wheat", "barley", "bulgur",
    "couscous", "polenta", "grits", "lentils", "chickpeas", "black_beans",
    "kidney_beans", "pinto_beans", "navy_beans", "lima_beans", "soybeans",
    
    # DAIRY & EGGS
    "milk", "whole_milk", "skim_milk", "almond_milk", "soy_milk", "oat_milk",
    "cheese", "cheddar", "swiss", "brie", "camembert", "gouda", "mozzarella",
    "parmesan", "feta", "goat_cheese", "blue_cheese", "cream_cheese",
    "yogurt", "greek_yogurt", "butter", "margarine", "cream", "sour_cream",
    "whipped_cream", "cottage_cheese", "ricotta", "mascarpone", "eggs", "egg_whites"
}
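
# Illustrative membership check against the set above (the normalization
# convention is an assumption; entries use lowercase_with_underscores):
#   "fish and chips".lower().replace(" ", "_") in COMPREHENSIVE_FOOD_CATEGORIES  # -> True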

# ==================== EXTERNAL NUTRITION APIs ====================

# USDA FoodData Central API (Free, comprehensive US database)
USDA_API_BASE = "https://api.nal.usda.gov/fdc/v1"
USDA_API_KEY = os.environ.get("USDA_API_KEY", "DEMO_KEY")  # key via environment; DEMO_KEY is api.data.gov's rate-limited default

# Edamam Nutrition Analysis API (Free tier: 1000 requests/month)
EDAMAM_APP_ID = os.environ.get("EDAMAM_APP_ID", "")
EDAMAM_APP_KEY = os.environ.get("EDAMAM_APP_KEY", "")
EDAMAM_API_BASE = "https://api.edamam.com/api/nutrition-data"

# Spoonacular Food API (Free tier: 150 requests/day)
SPOONACULAR_API_KEY = os.environ.get("SPOONACULAR_API_KEY", "")
SPOONACULAR_API_BASE = "https://api.spoonacular.com/food/ingredients"

# OpenFoodFacts API (Completely FREE, 2M+ products worldwide)
OPENFOODFACTS_API_BASE = "https://world.openfoodfacts.org/api/v2"

# FoodRepo API (Free, comprehensive food database)
FOODREPO_API_BASE = "https://www.foodrepo.org/api/v3"

# ==================== LOGGING ====================
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Default fallback nutrition values (used only if all APIs fail)
DEFAULT_NUTRITION = {"calories": 200, "protein": 10.0, "carbs": 25.0, "fat": 8.0}

# ==================== DEVICE SELECTION ====================
def select_device() -> str:
    """Smart device selection with fallback."""
    if torch.cuda.is_available():
        device_name = torch.cuda.get_device_name(0)
        logger.info(f"🚀 Using CUDA GPU: {device_name}")
        return "cuda"
    elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        logger.info("🍎 Using Apple Silicon GPU (MPS)")
        return "mps"
    else:
        logger.info("💻 Using CPU (GPU not available)")
        return "cpu"

# ==================== IMAGE PREPROCESSING ====================
def preprocess_image(image: Image.Image) -> Image.Image:
    """
    Aggressive image preprocessing tuned for Food-101 inference (brightness,
    contrast, color, and sharpness boosts; center-crop and resize to 224x224).
    """
    # Convert to RGB if needed
    if image.mode != "RGB":
        image = image.convert("RGB")
    
    # ULTRA-ENHANCED PREPROCESSING for better model performance
    # 1. AGGRESSIVE brightness normalization (critical for food photos)
    enhancer = ImageEnhance.Brightness(image)
    image = enhancer.enhance(1.2)  # +20% brightness (increased for better visibility)
    
    # 2. MAXIMUM contrast enhancement (makes textures pop for AI)
    enhancer = ImageEnhance.Contrast(image)
    image = enhancer.enhance(1.4)  # +40% contrast (much higher for food details)
    
    # 3. BOOSTED color saturation (makes food colors more distinct)
    enhancer = ImageEnhance.Color(image)
    image = enhancer.enhance(1.3)  # +30% color saturation (higher for food appeal)
    
    # 4. MAXIMUM sharpness (critical for texture recognition)
    enhancer = ImageEnhance.Sharpness(image)
    image = enhancer.enhance(1.5)  # +50% sharpness (maximum for Food-101)
    
    # 5. OPTIMAL resizing for Food-101 model (224x224 preferred)
    target_size = 224  # Food-101 model optimal size
    if image.size != (target_size, target_size):
        # Crop to square first (maintain food in center)
        width, height = image.size
        min_side = min(width, height)
        left = (width - min_side) // 2
        top = (height - min_side) // 2
        right = left + min_side
        bottom = top + min_side
        image = image.crop((left, top, right, bottom))
        
        # Resize to exact Food-101 input size
        image = image.resize((target_size, target_size), Image.Resampling.LANCZOS)
    
    return image
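
# Illustrative use of the preprocessing pipeline above ("sample.jpg" is a
# placeholder path; in the API the bytes arrive via the upload endpoints):
#
#   img = Image.open("sample.jpg")
#   img = preprocess_image(img)  # RGB, enhanced, center-cropped to 224x224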

# ==================== MULTI-API NUTRITION LOOKUP ====================

async def search_usda_nutrition(food_name: str) -> Optional[Dict[str, Any]]:
    """Search USDA FoodData Central for nutrition information."""
    try:
        search_term = re.sub(r'[^a-zA-Z\s]', '', food_name.lower())
        search_url = f"{USDA_API_BASE}/foods/search"
        
        async with aiohttp.ClientSession() as session:
            params = {
                "query": search_term,
                "dataType": "Foundation,SR Legacy",
                "pageSize": 5,
                "api_key": USDA_API_KEY
            }
            
            async with session.get(search_url, params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    
                    if data.get("foods") and len(data["foods"]) > 0:
                        food = data["foods"][0]
                        
                        nutrients = {}
                        for nutrient in food.get("foodNutrients", []):
                            nutrient_name = nutrient.get("nutrientName", "").lower()
                            value = nutrient.get("value", 0)
                            
                            if "energy" in nutrient_name and value > 0:
                                nutrients["calories"] = round(value)
                            elif "protein" in nutrient_name and value > 0:
                                nutrients["protein"] = round(value, 1)
                            elif "carbohydrate" in nutrient_name and "fiber" not in nutrient_name and value > 0:
                                nutrients["carbs"] = round(value, 1)
                            elif ("total lipid" in nutrient_name or ("fat" in nutrient_name and "fatty" not in nutrient_name)) and value > 0:
                                nutrients["fat"] = round(value, 1)
                        
                        if len(nutrients) >= 3:  # Need at least 3 main nutrients
                            nutrition_data = {
                                "calories": nutrients.get("calories", 0),
                                "protein": nutrients.get("protein", 0.0),
                                "carbs": nutrients.get("carbs", 0.0),
                                "fat": nutrients.get("fat", 0.0)
                            }
                            
                            logger.info(f"🇺🇸 USDA nutrition found for '{food_name}': {nutrition_data}")
                            return nutrition_data
                        
    except Exception as e:
        logger.warning(f"⚠️ USDA lookup failed for '{food_name}': {e}")
    
    return None

async def search_edamam_nutrition(food_name: str) -> Optional[Dict[str, Any]]:
    """Search Edamam Nutrition API for food data."""
    if not EDAMAM_APP_ID or not EDAMAM_APP_KEY:
        return None
        
    try:
        async with aiohttp.ClientSession() as session:
            params = {
                "app_id": EDAMAM_APP_ID,
                "app_key": EDAMAM_APP_KEY,
                "ingr": f"1 serving {food_name}"
            }
            
            async with session.get(EDAMAM_API_BASE, params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    
                    if data.get("calories") and data.get("calories") > 0:
                        nutrition_data = {
                            "calories": round(data.get("calories", 0)),
                            "protein": round(data.get("totalNutrients", {}).get("PROCNT", {}).get("quantity", 0), 1),
                            "carbs": round(data.get("totalNutrients", {}).get("CHOCDF", {}).get("quantity", 0), 1),
                            "fat": round(data.get("totalNutrients", {}).get("FAT", {}).get("quantity", 0), 1)
                        }
                        
                        logger.info(f"🥗 Edamam nutrition found for '{food_name}': {nutrition_data}")
                        return nutrition_data
                        
    except Exception as e:
        logger.warning(f"⚠️ Edamam lookup failed for '{food_name}': {e}")
    
    return None

async def search_spoonacular_nutrition(food_name: str) -> Optional[Dict[str, Any]]:
    """Search Spoonacular API for ingredient nutrition."""
    if not SPOONACULAR_API_KEY:
        return None
        
    try:
        # First search for ingredient ID
        search_url = f"{SPOONACULAR_API_BASE}/search"
        
        async with aiohttp.ClientSession() as session:
            params = {
                "query": food_name,
                "number": 1,
                "apiKey": SPOONACULAR_API_KEY
            }
            
            async with session.get(search_url, params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    
                    if data.get("results") and len(data["results"]) > 0:
                        ingredient_id = data["results"][0]["id"]
                        
                        # Get nutrition info for ingredient
                        nutrition_url = f"{SPOONACULAR_API_BASE}/{ingredient_id}/information"
                        nutrition_params = {
                            "amount": 100,
                            "unit": "grams",
                            "apiKey": SPOONACULAR_API_KEY
                        }
                        
                        async with session.get(nutrition_url, params=nutrition_params) as nutrition_response:
                            if nutrition_response.status == 200:
                                nutrition_data_raw = await nutrition_response.json()
                                
                                if nutrition_data_raw.get("nutrition"):
                                    nutrients = nutrition_data_raw["nutrition"]["nutrients"]
                                    
                                    nutrition_data = {
                                        "calories": 0,
                                        "protein": 0.0,
                                        "carbs": 0.0,
                                        "fat": 0.0
                                    }
                                    
                                    for nutrient in nutrients:
                                        name = nutrient.get("name", "").lower()
                                        amount = nutrient.get("amount", 0)
                                        
                                        if "calories" in name or "energy" in name:
                                            nutrition_data["calories"] = round(amount)
                                        elif "protein" in name:
                                            nutrition_data["protein"] = round(amount, 1)
                                        elif "carbohydrates" in name:
                                            nutrition_data["carbs"] = round(amount, 1)
                                        elif "fat" in name and "fatty" not in name:
                                            nutrition_data["fat"] = round(amount, 1)
                                    
                                    if nutrition_data["calories"] > 0:
                                        logger.info(f"🥄 Spoonacular nutrition found for '{food_name}': {nutrition_data}")
                                        return nutrition_data
                        
    except Exception as e:
        logger.warning(f"⚠️ Spoonacular lookup failed for '{food_name}': {e}")
    
    return None

def clean_food_name_for_search(raw_name: str) -> str:
    """Smart cleaning of Food-101 names for better API searches."""
    # Remove underscores and replace with spaces
    cleaned = raw_name.replace("_", " ")

    # Handle comma-separated names - take the first part (usually English name)
    # Example: "Pineapple, Ananas" β†’ "Pineapple"
    if "," in cleaned:
        parts = cleaned.split(",")
        # Try to detect which part is English (usually the first one)
        # Keep the part that's more likely to be in nutrition databases
        cleaned = parts[0].strip()
        logger.info(f"🧹 Cleaned comma-separated name: '{raw_name}' → '{cleaned}'")

    # Handle specific Food-101 patterns first, so multi-word patterns that
    # contain stopwords (e.g. "macaroni and cheese") can still match
    replacements = {
        "cup cakes": "cupcakes",
        "ice cream": "ice cream",
        "hot dog": "hot dog",
        "french fries": "french fries",
        "shrimp and grits": "shrimp grits",
        "macaroni and cheese": "mac and cheese"
    }

    replaced = False
    for old, new in replacements.items():
        if old in cleaned.lower():
            cleaned = new
            replaced = True
            break

    # Remove common Food-101 artifacts (skip when a specific pattern matched)
    if not replaced:
        cleaned = re.sub(r'\b(and|with|the|a)\b', ' ', cleaned, flags=re.IGNORECASE)

    # Clean whitespace and extra punctuation
    cleaned = re.sub(r'\s+', ' ', cleaned).strip()
    cleaned = re.sub(r'[^\w\s-]', '', cleaned)  # Remove special chars except hyphens

    return cleaned
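
# Example behavior of the cleaner above (derived from the rules it applies):
#   clean_food_name_for_search("shrimp_and_grits")   -> "shrimp grits"
#   clean_food_name_for_search("Pineapple, Ananas")  -> "Pineapple"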

async def search_openfoodfacts_nutrition(food_name: str) -> Optional[Dict[str, Any]]:
    """Search OpenFoodFacts database for nutrition information."""
    try:
        # OpenFoodFacts search endpoint
        search_url = f"{OPENFOODFACTS_API_BASE}/search"
        
        async with aiohttp.ClientSession() as session:
            params = {
                "search_terms": food_name,
                "search_simple": 1,
                "action": "process",
                "fields": "product_name,nutriments,nutriscore_grade",
                "page_size": 10,
                "json": 1
            }
            
            async with session.get(search_url, params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    
                    products = data.get("products", [])
                    if products:
                        # Take the first product with nutrition data
                        for product in products:
                            nutriments = product.get("nutriments", {})
                            
                            if nutriments.get("energy-kcal_100g") and nutriments.get("energy-kcal_100g") > 0:
                                nutrition_data = {
                                    "calories": round(nutriments.get("energy-kcal_100g", 0)),
                                    "protein": round(nutriments.get("proteins_100g", 0), 1),
                                    "carbs": round(nutriments.get("carbohydrates_100g", 0), 1),
                                    "fat": round(nutriments.get("fat_100g", 0), 1)
                                }
                                
                                logger.info(f"🌍 OpenFoodFacts nutrition found for '{food_name}': {nutrition_data}")
                                return nutrition_data
                        
    except Exception as e:
        logger.warning(f"⚠️ OpenFoodFacts lookup failed for '{food_name}': {e}")
    
    return None

async def search_foodrepo_nutrition(food_name: str) -> Optional[Dict[str, Any]]:
    """Search FoodRepo database for nutrition information."""
    try:
        # FoodRepo search endpoint
        search_url = f"{FOODREPO_API_BASE}/products"
        
        async with aiohttp.ClientSession() as session:
            params = {
                "q": food_name,
                "limit": 5
            }
            
            async with session.get(search_url, params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    
                    if data.get("data") and len(data["data"]) > 0:
                        product = data["data"][0]
                        nutrients = product.get("nutrients", {})
                        
                        if nutrients.get("energy"):
                            nutrition_data = {
                                "calories": round(nutrients.get("energy", {}).get("per100g", 0)),
                                "protein": round(nutrients.get("protein", {}).get("per100g", 0), 1),
                                "carbs": round(nutrients.get("carbohydrate", {}).get("per100g", 0), 1),
                                "fat": round(nutrients.get("fat", {}).get("per100g", 0), 1)
                            }
                            
                            if nutrition_data["calories"] > 0:
                                logger.info(f"🥬 FoodRepo nutrition found for '{food_name}': {nutrition_data}")
                                return nutrition_data
                        
    except Exception as e:
        logger.warning(f"⚠️ FoodRepo lookup failed for '{food_name}': {e}")
    
    return None

async def get_nutrition_from_apis(food_name: str) -> Dict[str, Any]:
    """Get nutrition data from multiple FREE databases with comprehensive fallback."""
    # Clean the Food-101 name for better searches
    cleaned_name = clean_food_name_for_search(food_name)

    logger.info(f"🔍 Searching nutrition for: '{food_name}' → '{cleaned_name}'")

    # Try APIs in order: Free/Unlimited first, then limited APIs
    nutrition_sources = [
        ("OpenFoodFacts", search_openfoodfacts_nutrition),  # FREE, 2M+ products
        ("USDA", search_usda_nutrition),                    # FREE, comprehensive US
        ("FoodRepo", search_foodrepo_nutrition),            # FREE, European focus
        ("Edamam", search_edamam_nutrition),                # 1000/month limit
        ("Spoonacular", search_spoonacular_nutrition)       # 150/day limit
    ]

    # First attempt with cleaned name
    for source_name, search_func in nutrition_sources:
        try:
            nutrition_data = await search_func(cleaned_name)
            if nutrition_data and nutrition_data.get("calories", 0) > 0:
                nutrition_data["source"] = source_name
                logger.info(f"✅ Found nutrition data from {source_name} for '{cleaned_name}'")
                return nutrition_data
        except Exception as e:
            logger.warning(f"⚠️ {source_name} search failed for '{cleaned_name}': {e}")
            continue

    # If cleaned name failed and it's different from original, try original name too
    if cleaned_name.lower() != food_name.lower():
        logger.info(f"🔄 Retrying with original name: '{food_name}'")
        for source_name, search_func in nutrition_sources:
            try:
                nutrition_data = await search_func(food_name)
                if nutrition_data and nutrition_data.get("calories", 0) > 0:
                    nutrition_data["source"] = source_name
                    logger.info(f"✅ Found nutrition data from {source_name} for original '{food_name}'")
                    return nutrition_data
            except Exception as e:
                logger.warning(f"⚠️ {source_name} search failed for original '{food_name}': {e}")
                continue

    # Try with just the first word as last resort (e.g., "pineapple juice" → "pineapple")
    words = cleaned_name.split()
    if len(words) > 1:
        first_word = words[0]
        logger.info(f"🔄 Last resort: trying first word only: '{first_word}'")
        for source_name, search_func in nutrition_sources:
            try:
                nutrition_data = await search_func(first_word)
                if nutrition_data and nutrition_data.get("calories", 0) > 0:
                    nutrition_data["source"] = f"{source_name} (matched: {first_word})"
                    logger.info(f"✅ Found nutrition data from {source_name} for '{first_word}'")
                    return nutrition_data
            except Exception as e:
                logger.warning(f"⚠️ {source_name} search failed for '{first_word}': {e}")
                continue

    # All APIs failed, return default values
    logger.warning(f"🚨 No nutrition data found for '{food_name}' after all attempts, using defaults")
    default_nutrition = DEFAULT_NUTRITION.copy()
    default_nutrition["source"] = "Default (APIs unavailable)"
    return default_nutrition
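
# Illustrative call (must run inside an event loop; the label is a placeholder):
#   nutrition = await get_nutrition_from_apis("fish_and_chips")
#   # -> {"calories": ..., "protein": ..., "carbs": ..., "fat": ..., "source": "..."}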

# ==================== TRANSLATION SYSTEM ====================

# In-memory translation cache to reduce API calls
translation_cache: Dict[str, Dict[str, str]] = {}  # {locale: {english: translated}}

# Language code mapping (i18n locale β†’ full language name)
LANGUAGE_MAP = {
    "en": "English",
    "bs": "Bosnian",
    "de": "German",
    "es": "Spanish",
    "fr": "French",
    "it": "Italian",
    "pt": "Portuguese",
    "ar": "Arabic",
    "tr": "Turkish",
    "nl": "Dutch",
    "ru": "Russian",
    "zh": "Chinese",
    "ja": "Japanese",
    "ko": "Korean",
    "hi": "Hindi",
    "sr": "Serbian",
    "hr": "Croatian",
    "sq": "Albanian",
    "mk": "Macedonian",
}

# NO HARDCODED TRANSLATIONS - Let models predict naturally

async def translate_food_names_batch(food_names: List[str], target_locale: str) -> Dict[str, str]:
    """
    Translate multiple food names in a single API call (COST OPTIMIZATION).

    Args:
        food_names: List of food names in English
        target_locale: Target language code

    Returns:
        Dictionary mapping original names to translated names
    """
    # Skip translation if target is English or no OpenAI client
    if target_locale == "en" or not openai_client or not OPENAI_API_KEY:
        return {name: name for name in food_names}

    # Check cache first
    if target_locale not in translation_cache:
        translation_cache[target_locale] = {}

    translations = {}
    needs_translation = []

    # Check cache only - no hardcoded translations
    for name in food_names:
        if name in translation_cache[target_locale]:
            translations[name] = translation_cache[target_locale][name]
            logger.info(f"💾 Cache hit: '{name}' → '{translations[name]}' ({target_locale})")
        else:
            needs_translation.append(name)

    # If all cached, return immediately
    if not needs_translation:
        return translations

    # Get target language name
    target_language = LANGUAGE_MAP.get(target_locale, target_locale)

    try:
        logger.info(f"🌐 Batch translating {len(needs_translation)} items to {target_language}")

        # Create batch translation prompt (1 API call for multiple items)
        food_list = "\n".join(f"{i+1}. {name}" for i, name in enumerate(needs_translation))

        response = await openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "system",
                    "content": f"You are a professional food translator. Translate food names to {target_language}. Return ONLY the translations, one per line, in the same order. Keep it natural and commonly used."
                },
                {
                    "role": "user",
                    "content": f"Translate these food names to {target_language}:\n{food_list}"
                }
            ],
            max_tokens=150,
            temperature=0.3,
        )

        translated_lines = response.choices[0].message.content.strip().split('\n')

        # Parse translations and update cache
        for i, name in enumerate(needs_translation):
            if i < len(translated_lines):
                # Remove numbering if present (e.g., "1. Ananas" → "Ananas")
                translated = translated_lines[i].strip()
                translated = translated.split('. ', 1)[-1] if '. ' in translated else translated

                translations[name] = translated
                translation_cache[target_locale][name] = translated
                logger.info(f"✅ '{name}' → '{translated}'")

        return translations

    except Exception as e:
        logger.warning(f"⚠️ Batch translation failed: {e}")
        # Return originals on failure
        for name in needs_translation:
            translations[name] = name
        return translations
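
# Illustrative batch call (one OpenAI request covers all uncached names;
# the names and locale are placeholders):
#   translated = await translate_food_names_batch(["pancakes", "waffles"], "de")
#   # -> dict mapping each English name to its German rendering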

async def translate_food_name(food_name: str, target_locale: str) -> str:
    """
    Translate single food name (uses batch function internally for caching).

    Args:
        food_name: Food name in English
        target_locale: Target language code

    Returns:
        Translated food name or original if translation fails/not needed
    """
    result = await translate_food_names_batch([food_name], target_locale)
    return result.get(food_name, food_name)

async def translate_description(description: str, target_locale: str) -> str:
    """
    Translate food description to target language using OpenAI with caching.

    Args:
        description: Description in English
        target_locale: Target language code

    Returns:
        Translated description or original if translation fails/not needed
    """
    # Skip translation if target is English or no OpenAI client
    if target_locale == "en" or not openai_client or not OPENAI_API_KEY:
        return description

    # Simple cache key (hash of description + locale)
    cache_key = f"desc_{hash(description)}_{target_locale}"

    # Check if cached in locale cache
    if target_locale not in translation_cache:
        translation_cache[target_locale] = {}

    if cache_key in translation_cache[target_locale]:
        logger.info(f"πŸ’Ύ Description cache hit ({target_locale})")
        return translation_cache[target_locale][cache_key]

    # Get target language name
    target_language = LANGUAGE_MAP.get(target_locale, target_locale)

    try:
        logger.info(f"🌐 Translating description to {target_language}")

        response = await openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "system",
                    "content": f"You are a food description translator. Translate to {target_language}. Keep it natural and concise. Return ONLY the translation."
                },
                {
                    "role": "user",
                    "content": description
                }
            ],
            max_tokens=100,
            temperature=0.3,
        )

        translated = response.choices[0].message.content.strip()

        # Cache the result
        translation_cache[target_locale][cache_key] = translated
        logger.info(f"βœ… Description translated to {target_language}")

        return translated

    except Exception as e:
        logger.warning(f"⚠️ Description translation failed: {e}")
        return description
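
# Cache layout note: translation_cache is a plain in-process dict keyed by
# locale, holding both food-name entries and "desc_<hash>_<locale>" keys
# (values below are illustrative):
#
#   translation_cache = {
#       "de": {
#           "pancakes": "Pfannkuchen",               # translate_food_names_batch
#           "desc_1234_de": "Lockere Pfannkuchen",   # translate_description
#       }
#   }
#
# Python's hash() is salted per process, so desc_ keys are only stable for
# this process's lifetime - fine for an in-memory cache, but a stable digest
# (e.g. hashlib.sha1) would be needed if the cache were ever persisted.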

# ==================== MULTI-MODEL FOOD RECOGNIZER ====================
class MultiModelFoodRecognizer:
    """Production-ready multi-model ensemble for comprehensive food recognition."""
    
    def __init__(self, device: str):
        self.device = device
        self.models = {}
        self.processors = {}
        self.is_loaded = False
        self.available_models = []
        self._initialize_models()
        self._warm_up()
    
    def _initialize_models(self):
        """Initialize Food-101 specialist ensemble with memory optimization."""
        logger.info("🎯 Initializing FOOD-101 SPECIALIST food recognition system with memory optimization...")
        
        # MEMORY-AWARE LOADING: Priority-based loading with RAM monitoring
        sorted_models = sorted(FOOD_MODELS.items(), key=lambda x: x[1]["priority"])
        memory_used = 0
        memory_limit = 14.5 * 1024  # 14.5GB limit (1.5GB buffer for inference)
        
        # Model memory estimates (MB) - UPDATED FOR FOOD-101 SPECIALISTS
        model_sizes = {
            "food101_siglip_2025": 400,
            "food101_deit_2024": 350,
            "food101_vit_base": 344,
            "food101_swin": 348,
            "food101_baseline": 500,
            "food_categories_enhanced": 300,
        }
        
        for model_key, model_config in sorted_models:
            estimated_size = model_sizes.get(model_key, 500)  # Default 500MB
            
            # Memory constraint check
            if memory_used + estimated_size > memory_limit:
                logger.warning(f"⚠️ Skipping {model_key} ({estimated_size}MB) - RAM limit reached")
                continue
                
            try:
                logger.info(f"πŸ”„ Loading {model_key}: {model_config['description']} (~{estimated_size}MB)")
                model_name = model_config["model_name"]
                
                # MEMORY-OPTIMIZED LOADING
                processor = AutoImageProcessor.from_pretrained(model_name)
                
                # Advanced memory optimization for large models
                load_config = {
                    "use_safetensors": True,
                    "low_cpu_mem_usage": True,
                    "torch_dtype": torch.float16 if self.device == "cuda" else torch.float32
                }
                
                # GPU-specific optimizations
                if self.device == "cuda" and estimated_size > 1000:  # For models > 1GB
                    load_config["device_map"] = "auto"
                    
                model = AutoModelForImageClassification.from_pretrained(model_name, **load_config)
                
                # Device placement (if not handled by device_map)
                if "device_map" not in load_config:
                    model = model.to(self.device)
                model.eval()
                
                # FOOD-101 SPECIFIC COMPILATION
                if hasattr(torch, 'compile') and self.device == "cuda" and "food101" in model_key:
                    try:
                        model = torch.compile(model, mode="reduce-overhead", dynamic=True)
                        logger.info(f"⚑ FOOD-101 {model_key} compiled with memory optimization")
                    except Exception as e:
                        logger.info(f"⚠️ Compilation failed for {model_key}: {e}")
                
                self.models[model_key] = model
                self.processors[model_key] = processor
                self.available_models.append(model_key)
                memory_used += estimated_size
                
                logger.info(f"βœ… {model_key} loaded (Total: {memory_used/1024:.1f}GB / 16GB)")
                
                # Aggressive memory cleanup
                if self.device == "cuda":
                    torch.cuda.empty_cache()
                    torch.cuda.synchronize()
                    
            except Exception as e:
                logger.warning(f"⚠️ Failed to load {model_key}: {e}")
                continue
        
        if self.available_models:
            self.is_loaded = True
            logger.info(f"🎯 Multi-model system ready with {len(self.available_models)} models: {self.available_models}")
        else:
            raise RuntimeError("❌ No models could be loaded!")
    
    def _warm_up(self):
        """Warm up all loaded models."""
        if not self.available_models:
            return
            
        try:
            logger.info("πŸ”₯ Warming up all models...")
            
            # Create dummy image
            dummy_image = Image.new('RGB', (224, 224), color='red')
            
            for model_key in self.available_models:
                try:
                    processor = self.processors[model_key]
                    model = self.models[model_key]
                    
                    with torch.no_grad():
                        inputs = processor(images=dummy_image, return_tensors="pt")
                        inputs = {k: v.to(self.device) for k, v in inputs.items()}
                        _ = model(**inputs)
                    
                    logger.info(f"βœ… {model_key} warmed up")
                except Exception as e:
                    logger.warning(f"⚠️ Warm-up failed for {model_key}: {e}")
            
            # Clean up
            del dummy_image
            if self.device == "cuda":
                torch.cuda.empty_cache()
            gc.collect()
            
            logger.info("βœ… All models warm-up completed")
            
        except Exception as e:
            logger.warning(f"⚠️ Model warm-up failed: {e}")
    
    def _predict_with_model(self, image: Image.Image, model_key: str, top_k: int = 5) -> Optional[List[Dict[str, Any]]]:
        """Predict with a specific model."""
        try:
            if model_key not in self.available_models:
                return None
                
            processor = self.processors[model_key]
            model = self.models[model_key]
            
            # Preprocess image
            processed_image = preprocess_image(image)
            
            # Prepare inputs
            inputs = processor(images=processed_image, return_tensors="pt")
            inputs = {k: v.to(self.device) for k, v in inputs.items()}
            
            # Inference
            with torch.no_grad():
                outputs = model(**inputs)
                logits = outputs.logits
                probs = F.softmax(logits, dim=-1).cpu().numpy()[0]
            
            # Get top K predictions
            top_indices = np.argsort(probs)[::-1][:top_k]
            
            predictions = []
            for idx in top_indices:
                # Handle different model output formats
                if hasattr(model.config, 'id2label') and str(idx) in model.config.id2label:
                    label = model.config.id2label[str(idx)]
                elif hasattr(model.config, 'id2label') and idx in model.config.id2label:
                    label = model.config.id2label[idx]
                else:
                    label = f"class_{idx}"
                
                confidence = float(probs[idx])
                
                # SMART CATEGORY MAPPING for different models
                mapped_label = label
                boosted_confidence = confidence
                
                # NOISYVIT 2025 ENSEMBLE - STATE-OF-THE-ART FOOD RECOGNITION
                if model_key in ["noisyvit_2025_huge", "noisyvit_2025_large", "noisyvit_2025_base_384"]:
                    # NOISYVIT 2025 FLAGSHIP MODELS - Maximum priority and robustness
                    clean_name = label.replace("_", " ").title()
                    noisyvit_multiplier = {
                        "noisyvit_2025_huge": 2.5,      # 150% boost - Ultimate model
                        "noisyvit_2025_large": 2.3,     # 130% boost - Advanced robustness  
                        "noisyvit_2025_base_384": 2.1   # 110% boost - High-resolution
                    }
                    boosted_confidence = min(confidence * noisyvit_multiplier[model_key], 1.0)
                    logger.info(f"🎯 NOISYVIT 2025 {model_key}: {label} β†’ {clean_name} ({boosted_confidence:.1%}) [NOISE-RESILIENT]")
                    
                elif model_key in ["food101_vit_specialist", "food_enhanced_classifier"]:
                    # FOOD-101 SPECIALISTS - High trust for specific food categories
                    clean_name = label.replace("_", " ").title()
                    boosted_confidence = min(confidence * 2.2, 1.0)  # 120% boost for food specialists
                    logger.info(f"🍽️ FOOD SPECIALIST {model_key}: {label} β†’ {clean_name} ({boosted_confidence:.1%})")
                    
                elif model_key in ["multi_object_vit", "scene_understanding_vit"]:
                    # MULTI-OBJECT SCENE DETECTION - Excellent for complex food scenes
                    clean_name = label.replace("_", " ").title()
                    boosted_confidence = min(confidence * 2.0, 1.0)  # 100% boost for multi-object detection
                    logger.info(f"πŸ” MULTI-OBJECT {model_key}: {label} β†’ {clean_name} ({boosted_confidence:.1%}) [COMPLEX SCENES]")
                    
                elif model_key in ["food_clip_huge", "openai_clip_large"]:
                    # VISION-LANGUAGE MODELS - Advanced understanding for complex food descriptions
                    clean_name = label.replace("_", " ").title()
                    clip_food_multiplier = {"food_clip_huge": 2.4, "openai_clip_large": 2.1}
                    boosted_confidence = min(confidence * clip_food_multiplier[model_key], 1.0)
                    logger.info(f"🧠 FOOD CLIP {model_key}: {label} β†’ {clean_name} ({boosted_confidence:.1%}) [VISION-LANGUAGE]")
                    
                elif model_key in ["convnext_xxlarge", "efficientnet_ultra"]:
                    # CUTTING-EDGE ARCHITECTURES - Latest food recognition technology
                    clean_name = label.replace("_", " ").title()
                    arch_multiplier = {"convnext_xxlarge": 2.2, "efficientnet_ultra": 1.9}
                    boosted_confidence = min(confidence * arch_multiplier[model_key], 1.0)
                    logger.info(f"πŸš€ CUTTING-EDGE {model_key}: {label} β†’ {clean_name} ({boosted_confidence:.1%}) [LATEST TECH]")
                    
                elif model_key == "resnet_deep_food":
                    # MEMORY-EFFICIENT BASELINE - Reliable backup
                    clean_name = label.replace("_", " ").title()
                    boosted_confidence = min(confidence * 1.6, 1.0)  # 60% boost for efficient baseline
                    logger.info(f"πŸ—οΈ EFFICIENT BASELINE {model_key}: {label} β†’ {clean_name} ({boosted_confidence:.1%})")
                    
                else:
                    # Fallback: any model key not matched above (including the
                    # food101_* specialists loaded in _initialize_models) keeps
                    # its raw, un-boosted confidence
                    clean_name = label.replace("_", " ").title()
                    boosted_confidence = confidence
                
                predictions.append({
                    "label": clean_name,
                    "raw_label": mapped_label,
                    "confidence": boosted_confidence,
                    "confidence_pct": f"{boosted_confidence:.1%}",
                    "model": model_key,
                    "model_type": FOOD_MODELS[model_key]["type"]
                })
            
            # Clean up memory
            del inputs, outputs, logits, probs
            if self.device == "cuda":
                torch.cuda.empty_cache()
                
            return predictions
            
        except Exception as e:
            logger.warning(f"⚠️ Prediction failed for {model_key}: {e}")
            return None
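
    # Shape of each dict produced by _predict_with_model (values illustrative):
    #   {
    #       "label": "Fish And Chips",
    #       "raw_label": "fish_and_chips",
    #       "confidence": 0.82,
    #       "confidence_pct": "82.0%",
    #       "model": "food101_vit_base",
    #       "model_type": FOOD_MODELS["food101_vit_base"]["type"],
    #   }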
    
    def predict(self, image: Image.Image, top_k: int = 5) -> Dict[str, Any]:
        """Main predict method - uses ensemble if available, fallback to primary."""
        return self.predict_ensemble(image, top_k)
    
    def predict_ensemble(self, image: Image.Image, top_k: int = 10) -> Dict[str, Any]:
        """Ensemble prediction using all available models with smart filtering."""
        if not self.is_loaded:
            raise RuntimeError("Models not loaded")

        all_predictions = []
        model_results = {}

        # NOISYVIT 2025 ENSEMBLE - Optimized for complex multi-object food scenes
        predictions_per_model = 75  # Increased for complex scene analysis
        
        # PRIORITY-BASED PREDICTION GENERATION
        for model_key in self.available_models:
            # Higher prediction count for NoisyViT models (better for complex scenes)
            if "noisyvit" in model_key:
                current_predictions = 100  # More predictions for NoisyViT robustness
            elif "multi_object" in model_key or "scene_understanding" in model_key:
                current_predictions = 90   # High for multi-object detection
            elif "clip" in model_key:
                current_predictions = 85   # High for vision-language understanding
            else:
                current_predictions = predictions_per_model
                
            predictions = self._predict_with_model(image, model_key, current_predictions)
            if predictions:
                model_results[model_key] = predictions
                all_predictions.extend(predictions)
                
                # Enhanced logging for different model types
                if "noisyvit" in model_key:
                    logger.info(f"🎯 NOISYVIT {model_key}: {len(predictions)} robust predictions [NOISE-RESILIENT]")
                elif "multi_object" in model_key:
                    logger.info(f"πŸ” MULTI-OBJECT {model_key}: {len(predictions)} scene predictions [COMPLEX SCENES]")
                elif "clip" in model_key:
                    logger.info(f"🧠 CLIP {model_key}: {len(predictions)} vision-language predictions")
                else:
                    logger.info(f"🍽️ {model_key}: {len(predictions)} food predictions")
                
        total_predictions = len(all_predictions)
        logger.info(f"πŸš€ NOISYVIT ENSEMBLE: {total_predictions} total predictions from {len(self.available_models)} models")

        if not all_predictions:
            raise RuntimeError("No models produced valid predictions")

        # ULTRA-CONSERVATIVE FILTERING - Only remove obvious non-food for Food-101 specialists
        non_food_items = {
            # Minimal filtering since Food-101 models are trained on food only
            'person', 'people', 'human', 'man', 'woman', 'child',
            'car', 'truck', 'vehicle', 'building', 'house',
            'computer', 'phone', 'laptop', 'tablet', 'television', 'tv',
            'book', 'paper', 'pen', 'pencil', 'chair', 'table', 'sofa',
            'cat', 'dog', 'bird' # live animals only (removed 'fish' since it can be food)
        }

        # Generic FOOD terms that should be deprioritized (but not removed)
        generic_terms = {
            'fruit', 'vegetable', 'food', 'meal', 'snack', 'dessert',
            'salad', 'soup', 'drink', 'beverage', 'meat', 'seafood',
            'bread', 'pastry', 'cake', 'cookie', 'candy', 'chocolate'
        }

        # ULTIMATE FOOD RECOGNITION - PRIORITY BOOST for specific dishes (CORRECTED)
        specific_dishes = {
            # BREAKFAST FOODS (Critical - your pancake example!) - NEVER DESSERT!
            'pancakes', 'american pancakes', 'fluffy pancakes', 'buttermilk pancakes',
            'blueberry pancakes', 'chocolate chip pancakes', 'banana pancakes',
            'waffles', 'belgian waffles', 'french toast', 'crepes', 'omelet',
            'scrambled eggs', 'fried eggs', 'eggs benedict', 'breakfast burrito',
            
            # Fast food & popular dishes (CRITICAL FIXES)
            'fish and chips', 'fish & chips', 'fried fish', 'fish fillet',
            'hamburger', 'cheeseburger', 'burger', 'sandwich', 'club sandwich',
            'pizza', 'pepperoni pizza', 'margherita pizza', 'hawaiian pizza',
            'pasta', 'spaghetti', 'linguine', 'fettuccine', 'lasagna', 'risotto',
            'sushi', 'sashimi', 'california roll', 'ramen', 'pho', 'pad thai',
            'curry', 'chicken curry', 'biryani', 'tikka masala', 'butter chicken',
            'tacos', 'fish tacos', 'chicken tacos', 'beef tacos', 'carnitas',
            'burrito', 'quesadilla', 'nachos', 'enchilada', 'fajitas',
            'fried chicken', 'chicken wings', 'buffalo wings', 'chicken nuggets',
            'french fries', 'fries', 'sweet potato fries', 'onion rings',
            'hot dog', 'corn dog', 'bratwurst', 'sausage', 'kielbasa',
            
            # Balkan dishes (with alternative spellings) - ENHANCED for Δ‡evapi detection
            'cevapi', 'cevapcici', 'Δ‡evapi', 'Δ‡evapčiΔ‡i', 'chevapi', 'chevapchichi',
            'burek', 'bΓΆrek', 'pljeskavica', 'sarma', 'klepe', 'dolma', 'kajmak', 'ajvar',
            'kofte', 'raznjici', 'grilled meat', 'balkan sausage',
            'prebranac', 'pasulj', 'grah', 'punjena paprika', 'punjene paprike',
            'stuffed peppers', 'musaka', 'moussaka', 'japrak', 'bamija', 'okra',
            'bosanski lonac', 'begova corba', 'tarhana', 'zeljanica', 'spinach pie',
            'sirnica', 'cheese pie', 'krompiruΕ‘a', 'potato pie', 'gibanica', 'banica',
            
            # Steaks & BBQ
            'steak', 'ribeye', 'filet mignon', 'sirloin', 't-bone', 'porterhouse',
            'ribs', 'bbq ribs', 'pork ribs', 'beef ribs', 'pulled pork', 'brisket',
            
            # International specialties
            'schnitzel', 'wiener schnitzel', 'paella', 'seafood paella',
            'falafel', 'hummus', 'gyros', 'kebab', 'shish kebab', 'shawarma',
            'spring rolls', 'summer rolls', 'dim sum', 'dumplings', 'wontons',
            'tempura', 'teriyaki', 'yakitori', 'miso soup', 'tom yum',
            
            # Desserts
            'cheesecake', 'chocolate cake', 'vanilla cake', 'tiramisu',
            'apple pie', 'pumpkin pie', 'brownie', 'chocolate chip cookie',
            'ice cream', 'gelato', 'donut', 'croissant', 'danish', 'eclair'
        }

        # Ensemble voting: weight by model priority and confidence
        food_scores = {}
        filtered_count = 0

        for pred in all_predictions:
            food_label_lower = pred["raw_label"].lower().replace("_", " ")

            # ULTIMATE FILTERING - Remove garbage predictions and non-food items
            is_non_food = any(non_food in food_label_lower for non_food in non_food_items)
            
            # Additional checks for garbage predictions
            is_garbage_prediction = (
                # Check for "Oznaka X" pattern
                food_label_lower.startswith('oznaka') or 
                food_label_lower.startswith('label') or
                food_label_lower.startswith('class') or
                # Very short meaningless names
                (len(food_label_lower) <= 2) or
                # Numbers only
                food_label_lower.isdigit() or
                # Very low confidence on unknown terms
                (pred["confidence"] < 0.4 and food_label_lower not in COMPREHENSIVE_FOOD_CATEGORIES)
            )
            
            if is_non_food or is_garbage_prediction:
                filtered_count += 1
                logger.info(f"🚫 Filtered garbage/non-food: '{pred['raw_label']}'")
                continue  # Skip this prediction entirely

            model_key = pred["model"]
            priority_weight = 1.0 / FOOD_MODELS[model_key]["priority"]  # Higher priority = lower number = higher weight
            confidence_weight = pred["confidence"]

            # ULTIMATE SMART SCORING - Maximum accuracy for known dishes
            is_generic = any(generic in food_label_lower for generic in generic_terms)
            is_specific = any(dish in food_label_lower for dish in specific_dishes)
            is_single_generic = food_label_lower in generic_terms
            
            # Check if it's a known dish from our comprehensive database
            is_known_food = any(known in food_label_lower for known in COMPREHENSIVE_FOOD_CATEGORIES)

            # INTELLIGENT FOOD PRIORITY SYSTEM - Ultra-precise detection
            is_pancake_related = any(pancake_term in food_label_lower for pancake_term in 
                                   ['pancake', 'waffle', 'french_toast', 'crepe', 'beignet'])
            
            is_fish_and_chips = any(fish_term in food_label_lower for fish_term in 
                                  ['fish_and_chips', 'fish and chips', 'fried_fish', 'fish fillet'])
            
            is_balkan_meat = any(balkan_term in food_label_lower for balkan_term in 
                               ['cevapi', 'cevapcici', 'pljeskavica', 'kebab'])
            
            is_bread_related = any(bread_term in food_label_lower for bread_term in 
                                 ['burek', 'bread', 'sandwich', 'toast'])
            
            # CRITICAL: Detect if it's wrongly classified as dessert when it's breakfast
            is_wrong_dessert = (any(breakfast_term in food_label_lower for breakfast_term in 
                                  ['pancake', 'waffle', 'french_toast']) and 'dessert' in food_label_lower)
            
            # Calculate score multiplier with ULTRA-SMART FOOD PRIORITY
            if is_wrong_dessert:
                # MASSIVE PENALTY for wrongly classified breakfast as dessert
                score_multiplier = 0.01  # 99% PENALTY for wrong dessert classification!
                logger.info(f"❌ WRONG DESSERT PENALTY: {pred['raw_label']} (99% penalty - breakfast wrongly classified as dessert)")
            elif is_pancake_related:
                # MAXIMUM BOOST for pancake-related items
                score_multiplier = 6.0  # 500% BOOST for pancakes!!!
                logger.info(f"πŸ₯ž PANCAKE PRIORITY: {pred['raw_label']} (6x MEGA boost)")
            elif is_fish_and_chips:
                # MEGA BOOST for fish and chips (often misclassified)
                score_multiplier = 5.0  # 400% BOOST for fish and chips!!!
                logger.info(f"🐟 FISH & CHIPS PRIORITY: {pred['raw_label']} (5x MEGA boost)")
            elif is_balkan_meat:
                # MEGA BOOST for Balkan meat dishes
                score_multiplier = 4.0  # 300% BOOST for Δ‡evapi/pljeskavica!!!
                logger.info(f"πŸ₯© BALKAN MEAT PRIORITY: {pred['raw_label']} (4x boost)")
            elif is_bread_related:
                # BOOST for bread dishes (burek, etc.)
                score_multiplier = 3.0  # 200% BOOST for bread dishes
                logger.info(f"πŸ₯– BREAD PRIORITY: {pred['raw_label']} (3x boost)")
            elif is_specific:
                # MEGA BOOST for specific dishes we know well
                score_multiplier = 3.0  # 200% BOOST for specific dishes!
                logger.info(f"🎯 SPECIFIC DISH DETECTED: {pred['raw_label']} (3x boost)")
            elif is_known_food and confidence_weight > 0.3:
                # Good boost for known foods with decent confidence
                score_multiplier = 2.0  # 100% boost for known foods
                logger.info(f"βœ… KNOWN FOOD: {pred['raw_label']} (2x boost)")
            elif is_single_generic:
                # Heavy penalty for single generic terms
                score_multiplier = 0.05  # 95% penalty for generic terms like "food", "meat"
                logger.info(f"❌ GENERIC TERM: {pred['raw_label']} (95% penalty)")
            elif is_generic:
                # Medium penalty for generic descriptions
                score_multiplier = 0.3  # 70% penalty for generic terms
                logger.info(f"⚠️ GENERIC: {pred['raw_label']} (70% penalty)")
            elif confidence_weight > 0.7:
                # Bonus for high-confidence predictions
                score_multiplier = 1.5  # 50% boost for high confidence
                logger.info(f"πŸ’ͺ HIGH CONFIDENCE: {pred['raw_label']} (1.5x boost)")
            else:
                score_multiplier = 1.0  # Normal score

            combined_score = priority_weight * confidence_weight * score_multiplier

            food_name = pred["raw_label"]
            if food_name not in food_scores:
                food_scores[food_name] = {
                    "total_score": 0,
                    "count": 0,
                    "best_prediction": pred,
                    "models": [],
                    "is_generic": is_generic,
                    "is_specific": is_specific
                }

            food_scores[food_name]["total_score"] += combined_score
            food_scores[food_name]["count"] += 1
            food_scores[food_name]["models"].append(model_key)

            # Keep the prediction with highest confidence as representative
            if pred["confidence"] > food_scores[food_name]["best_prediction"]["confidence"]:
                food_scores[food_name]["best_prediction"] = pred

        if filtered_count > 0:
            logger.info(f"βœ… Filtered out {filtered_count} non-food items")

        # Sort by ensemble score
        sorted_foods = sorted(
            food_scores.items(),
            key=lambda x: x[1]["total_score"],
            reverse=True
        )

        # Format final results - return MORE alternatives (up to top_k)
        final_predictions = []
        for food_name, data in sorted_foods[:top_k * 2]:  # Get double to have enough after filtering
            pred = data["best_prediction"].copy()
            pred["ensemble_score"] = data["total_score"]
            pred["model_count"] = data["count"]
            pred["contributing_models"] = data["models"]
            pred["is_generic"] = data["is_generic"]
            pred["is_specific"] = data["is_specific"]
            final_predictions.append(pred)

        # STRICT CONFIDENCE FILTERING - Only high quality predictions
        filtered_predictions = []
        seen_labels = set()

        for pred in final_predictions:
            label_lower = pred["raw_label"].lower().replace("_", " ").strip()

            # STRICT CONFIDENCE CHECK - Minimum 15% confidence
            if pred["confidence"] < MIN_CONFIDENCE_THRESHOLD:
                logger.info(f"❌ LOW CONFIDENCE FILTERED: {pred['raw_label']} ({pred['confidence']:.1%})")
                continue

            # DOUBLE CHECK: Filter non-food items again
            is_non_food = any(non_food in label_lower for non_food in non_food_items)
            if is_non_food:
                continue  # Skip non-food items

            # Skip if we've already seen very similar label
            if label_lower not in seen_labels:
                filtered_predictions.append(pred)
                seen_labels.add(label_lower)
                logger.info(f"βœ… ACCEPTED: {pred['raw_label']} ({pred['confidence']:.1%})")

            if len(filtered_predictions) >= top_k:
                break

        # FINAL VALIDATION - Prevent obvious classification errors
        validated_predictions = []
        for pred in filtered_predictions:
            label_lower = pred["raw_label"].lower().replace("_", " ")
            
            # CRITICAL VALIDATION RULES
            validation_passed = True
            validation_reason = ""
            
            # Rule 1: Pancakes should NEVER be classified as dessert
            if any(breakfast_term in label_lower for breakfast_term in ['pancake', 'waffle', 'french_toast']) and \
               any(dessert_term in label_lower for dessert_term in ['dessert', 'cake', 'sweet']):
                validation_passed = False
                validation_reason = "Breakfast item wrongly classified as dessert"
            
            # Rule 2: Fish and chips should be recognized as specific dish, not generic "fried food"
            if 'fish' in label_lower and 'chip' in label_lower and pred["confidence"] > 0.3:
                # This is clearly fish and chips - boost it!
                pred["confidence"] = min(pred["confidence"] * 1.5, 1.0)
                pred["label"] = "Fish and Chips"
                logger.info(f"🐟 FISH & CHIPS VALIDATION BOOST: {pred['confidence']:.1%}")
            
            # Rule 3: Natural validation - no hardcoded replacements
            if label_lower in ['food', 'meal', 'dish', 'object', 'item']:
                # Generic terms get penalty but no forced replacement
                pred["confidence"] *= 0.5  # 50% penalty for being too generic
                logger.info(f"⚠️ GENERIC TERM PENALTY: {label_lower}")
            
            if validation_passed:
                validated_predictions.append(pred)
            else:
                logger.info(f"❌ VALIDATION FAILED: {pred['raw_label']} - {validation_reason}")
        
        # Use validated predictions
        filtered_predictions = validated_predictions
        
        # PRIMARY RESULT with REAL MODEL PREDICTIONS ONLY
        if not filtered_predictions:
            # NO HARDCODED RESPONSES - Return error for manual input
            logger.warning("❌ NO CONFIDENT PREDICTIONS FOUND - All predictions below threshold")
            return {
                "success": False,
                "error": "No confident food predictions found",
                "message": "Please try a clearer image or different angle",
                "confidence_threshold": MIN_CONFIDENCE_THRESHOLD,
                "alternatives": [],
                "system_info": {
                    "available_models": self.available_models,
                    "device": self.device.upper(),
                    "total_classes": sum(FOOD_MODELS[m]["classes"] for m in self.available_models)
                }
            }

        primary = filtered_predictions[0]

        # CRITICAL FIX: ALWAYS use the prediction with HIGHEST confidence as primary
        # (regardless of is_generic flag - confidence is king!)
        if len(filtered_predictions) > 1:
            # Find prediction with highest confidence
            max_conf_idx = 0
            max_conf = filtered_predictions[0].get("confidence", 0)

            for i, pred in enumerate(filtered_predictions[1:], 1):
                pred_conf = pred.get("confidence", 0)
                if pred_conf > max_conf:
                    max_conf = pred_conf
                    max_conf_idx = i

            # Swap if we found a better one
            if max_conf_idx > 0:
                filtered_predictions[0], filtered_predictions[max_conf_idx] = \
                    filtered_predictions[max_conf_idx], filtered_predictions[0]
                primary = filtered_predictions[0]
                logger.info(f"πŸ”„ Swapped to highest confidence: {primary['label']} ({primary['confidence']:.1%})")

        # Note: Generic vs specific check removed - confidence is the only metric that matters

        # FILTER ALTERNATIVES by confidence - Only show good alternatives
        quality_alternatives = []
        for alt in filtered_predictions[1:]:
            if alt["confidence"] >= MIN_ALTERNATIVE_CONFIDENCE:
                quality_alternatives.append(alt)
                if len(quality_alternatives) >= MAX_ALTERNATIVES:
                    break

        return {
            "success": True,
            "label": primary["label"],
            "confidence": primary["confidence"],
            "primary_label": primary["raw_label"],
            "ensemble_score": primary.get("ensemble_score", 0),
            "alternatives": quality_alternatives,  # Only high-confidence alternatives
            "model_results": model_results,
            "system_info": {
                "available_models": self.available_models,
                "device": self.device.upper(),
                "total_classes": sum(FOOD_MODELS[m]["classes"] for m in self.available_models),
                "confidence_thresholds": {
                    "minimum": MIN_CONFIDENCE_THRESHOLD,
                    "alternatives": MIN_ALTERNATIVE_CONFIDENCE
                }
            }
        }
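
    # Worked scoring example (illustrative numbers): a priority-2 model that
    # predicts "cevapi" at 60% confidence contributes
    #   combined_score = (1 / 2) * 0.60 * 4.0 = 1.2
    # (4.0 being the Balkan-meat multiplier above); scores from multiple
    # models voting for the same raw_label are summed before the final sort.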

# ==================== LIFESPAN EVENTS ====================

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager."""
    # Startup
    logger.info("πŸš€ Application startup complete")
    logger.info("=" * 60)
    logger.info("βœ… API READY FOR PRODUCTION")
    logger.info(f"πŸ“‘ Endpoints: /api/nutrition/analyze-food, /analyze")
    logger.info(f"πŸ–₯️  Device: {device.upper()}")
    logger.info(f"πŸ“Š Models: {len(recognizer.available_models)} active models")
    logger.info(f"🎯 Total Food Categories: {sum(FOOD_MODELS[m]['classes'] for m in recognizer.available_models)}")
    logger.info(f"🌐 Translations: {'βœ… Enabled' if openai_client else '❌ Disabled'}")
    logger.info("=" * 60)

    yield
    
    # Shutdown
    logger.info("πŸ”„ Shutting down...")
    
    # Cleanup GPU memory
    if device == "cuda":
        torch.cuda.empty_cache()
    
    # Garbage collection
    gc.collect()
    logger.info("βœ… Cleanup completed")

# ==================== FASTAPI SETUP ====================
logger.info("=" * 60)
logger.info("🍽️ PRODUCTION AI FOOD RECOGNITION API")
logger.info("=" * 60)

# Initialize multi-model system
device = select_device()
recognizer = MultiModelFoodRecognizer(device)

# Initialize OpenAI client BEFORE FastAPI app
if OPENAI_API_KEY:
    try:
        openai_client = AsyncOpenAI(api_key=OPENAI_API_KEY)
        logger.info(f"βœ… OpenAI client initialized (key: {OPENAI_API_KEY[:20]}...)")
    except Exception as e:
        logger.warning(f"⚠️ OpenAI client initialization failed: {e}")
        openai_client = None
else:
    openai_client = None  # ensure the global is always defined
    logger.warning("⚠️ OpenAI API key not found - translations disabled")

# Create FastAPI app
app = FastAPI(
    title="AI Food Recognition API",
    description="Production-ready food recognition with 101 categories (Food-101 dataset)",
    version="2.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
    lifespan=lifespan
)

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST", "OPTIONS"],
    allow_headers=["*"],
)
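
# NOTE: browsers reject credentialed cross-origin responses when
# Access-Control-Allow-Origin is the wildcard "*"; if cookies or auth headers
# are actually needed, replace allow_origins=["*"] with an explicit origin
# list in production.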

# ==================== MIDDLEWARE ====================
@app.middleware("http")
async def add_security_headers(request: Request, call_next):
    response = await call_next(request)
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-Frame-Options"] = "DENY"
    return response
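
# Quick header check (hypothetical local run on the default port):
#   curl -sI http://localhost:7860/health
#   # ...
#   # x-content-type-options: nosniff
#   # x-frame-options: DENY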

# ==================== UTILITY FUNCTIONS ====================
async def validate_and_read_image(file: UploadFile) -> Image.Image:
    """Validate and read uploaded image file."""
    # Check file size (file.size may be None depending on the ASGI server)
    if getattr(file, 'size', None) and file.size > MAX_FILE_SIZE:
        raise HTTPException(status_code=413, detail="File too large (max 10MB)")
    
    # Check content type
    if file.content_type not in ALLOWED_TYPES:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid file type. Allowed: {', '.join(ALLOWED_TYPES)}"
        )
    
    try:
        # Read and validate image
        contents = await file.read()
        if len(contents) > MAX_FILE_SIZE:
            raise HTTPException(status_code=413, detail="File too large (max 10MB)")

        image = Image.open(BytesIO(contents))
        return image

    except HTTPException:
        raise  # keep the 413 status instead of re-wrapping it as 400
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid image file: {str(e)}")

# ==================== API ENDPOINTS ====================

@app.get("/")
def root():
    """Root endpoint with API information."""
    return {
        "message": "🍽️ AI Food Recognition API",
        "status": "online",
        "version": "2.0.0",
        "models": recognizer.available_models if recognizer.is_loaded else [],
        "total_categories": sum(FOOD_MODELS[m]["classes"] for m in recognizer.available_models) if recognizer.is_loaded else 0,
        "device": device.upper(),
        "endpoints": {
            "POST /api/nutrition/analyze-food": "Analyze food image (Next.js frontend)",
            "POST /analyze": "Analyze food image (Hugging Face Spaces)",
            "GET /health": "Health check",
            "GET /docs": "API documentation"
        }
    }

@app.get("/health")
def health_check():
    """Comprehensive health check."""
    return {
        "status": "healthy" if recognizer.is_loaded else "error",
        "models_loaded": recognizer.is_loaded,
        "available_models": recognizer.available_models if recognizer.is_loaded else [],
        "model_count": len(recognizer.available_models) if recognizer.is_loaded else 0,
        "total_categories": sum(FOOD_MODELS[m]["classes"] for m in recognizer.available_models) if recognizer.is_loaded else 0,
        "device": device.upper(),
        "memory_usage": f"{torch.cuda.memory_allocated() / 1024**2:.1f}MB" if device == "cuda" else "N/A"
    }
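
# Example (hypothetical local run; values illustrative):
#   curl -s http://localhost:7860/health
#   # {"status": "healthy", "models_loaded": true, "model_count": 3, ...}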

@app.post("/api/nutrition/analyze-food")
async def analyze_food_nutrition(request: Request, file: UploadFile = File(None)):
    """
    Analyze food image or manual entry for Next.js frontend.

    Supports two modes:
    1. Image upload: AI recognition + nutrition lookup
    2. Manual entry: Direct nutrition lookup by food name

    Returns nutrition-focused response format with translations.
    """
    try:
        # Parse form data
        form_data = await request.form()
        manual_input = form_data.get("manualInput", "false").lower() == "true"
        locale = form_data.get("locale", "en")  # Get user's language preference

        logger.info(f"πŸ“₯ Request received - Mode: {'Manual' if manual_input else 'Image'}, Locale: {locale}")

        # MODE 1: Manual food entry (from alternatives or manual input)
        if manual_input:
            food_name = form_data.get("manualFoodName")
            serving_size = form_data.get("manualServingSize", "100")
            serving_unit = form_data.get("manualServingUnit", "g")
            description = form_data.get("manualDescription", "")

            if not food_name:
                raise HTTPException(status_code=400, detail="manualFoodName is required for manual entry")

            logger.info(f"🍽️ Manual nutrition lookup: {food_name} ({serving_size}{serving_unit})")

            # Direct nutrition API lookup
            nutrition_data = await get_nutrition_from_apis(food_name)

            if not nutrition_data or nutrition_data.get("calories", 0) == 0:
                raise HTTPException(
                    status_code=404,
                    detail=f"Failed to retrieve nutrition information for manual entry"
                )

            source = nutrition_data.get("source", "Unknown")
            logger.info(f"βœ… Manual lookup: {food_name} | Nutrition: {source}")

            # Translate food name and description
            translated_name = await translate_food_name(food_name, locale)
            base_description = description or f"Manual entry: {food_name}"
            translated_description = await translate_description(base_description, locale)

            # Return manual entry format
            return JSONResponse(content={
                "data": {
                    "label": translated_name,
                    "confidence": 1.0,  # Manual entry has 100% confidence
                    "nutrition": {
                        "calories": nutrition_data["calories"],
                        "protein": nutrition_data["protein"],
                        "carbs": nutrition_data["carbs"],
                        "fat": nutrition_data["fat"]
                    },
                    "servingSize": serving_size,
                    "servingUnit": serving_unit,
                    "description": translated_description,
                    "alternatives": [],  # No alternatives for manual entry
                    "source": f"{source} Database",
                    "isManualEntry": True
                }
            })

        # MODE 2: Image upload (AI recognition)
        else:
            if not file:
                raise HTTPException(status_code=400, detail="File is required for image analysis")

            logger.info(f"🍽️ Image analysis request: {file.filename}")

            # Validate and process image
            image = await validate_and_read_image(file)

            # Step 1: AI Model Prediction with strict confidence filtering
            results = recognizer.predict(image, top_k=10)
            
            # Check if prediction was successful
            if not results.get("success", True):
                raise HTTPException(
                    status_code=422, 
                    detail=f"Food recognition failed: {results.get('message', 'Unknown error')}"
                )

            # Step 2: API Nutrition Lookup
            nutrition_data = await get_nutrition_from_apis(results["primary_label"])

            # Log result
            confidence_pct = f"{results['confidence']:.1%}"
            source = nutrition_data.get("source", "Unknown")
            logger.info(f"βœ… Prediction: {results['label']} ({confidence_pct}) | Nutrition: {source}")

            # BATCH TRANSLATION OPTIMIZATION: Translate all food names at once
            if locale != "en" and openai_client:
                # Collect all names to translate (primary + alternatives)
                names_to_translate = [results["label"]]
                if results.get("alternatives"):
                    names_to_translate.extend([
                        alt.get("label", alt.get("raw_label", ""))
                        for alt in results["alternatives"]
                    ])

                # Single API call for all translations
                translations = await translate_food_names_batch(names_to_translate, locale)

                # Apply translations
                translated_name = translations.get(results["label"], results["label"])

                # Translate description
                base_description = f"{results['label']} identified with {int(results['confidence'] * 100)}% confidence"
                translated_description = await translate_description(base_description, locale)

                # Map alternatives with translations
                translated_alternatives = []
                if results.get("alternatives"):
                    for alt in results["alternatives"]:
                        alt_name = alt.get("label", alt.get("raw_label", ""))
                        translated_alternatives.append({
                            **alt,
                            "label": translations.get(alt_name, alt_name),
                            "original_label": alt_name
                        })
            else:
                # No translation needed
                translated_name = results["label"]
                translated_description = f"{results['label']} identified with {int(results['confidence'] * 100)}% confidence"
                translated_alternatives = results["alternatives"]

            # Return frontend-expected format
            return JSONResponse(content={
                "data": {
                    "label": translated_name,
                    "confidence": results["confidence"],
                    "description": translated_description,  # Translated description
                    "nutrition": {
                        "calories": nutrition_data["calories"],
                        "protein": nutrition_data["protein"],
                        "carbs": nutrition_data["carbs"],
                        "fat": nutrition_data["fat"]
                    },
                    "alternatives": translated_alternatives,
                    "source": f"AI Recognition + {source} Database",
                    "isManualEntry": False,
                    "locale": locale  # Return locale for debugging
                }
            })

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ Analysis failed: {e}")
        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
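
# Example requests (hypothetical local run; form fields match the keys parsed
# above):
#
#   # MODE 1 - manual entry:
#   curl -s -X POST http://localhost:7860/api/nutrition/analyze-food \
#        -F manualInput=true -F manualFoodName=pancakes \
#        -F manualServingSize=150 -F manualServingUnit=g -F locale=de
#
#   # MODE 2 - image upload:
#   curl -s -X POST http://localhost:7860/api/nutrition/analyze-food \
#        -F file=@breakfast.jpg -F locale=en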

@app.post("/analyze")
async def analyze_food_spaces(file: UploadFile = File(...)):
    """
    Analyze food image for Hugging Face Spaces interface.
    
    Returns detailed response with model info.
    """
    logger.info(f"πŸš€ HF Spaces analysis request: {file.filename}")
    
    try:
        # Validate and process image
        image = await validate_and_read_image(file)
        
        # Step 1: AI Model Prediction (request top 10 for more alternatives)
        results = recognizer.predict(image, top_k=10)

        # Guard against low-confidence failures before the nutrition lookup
        # (mirrors the check in /api/nutrition/analyze-food)
        if not results.get("success", True):
            raise HTTPException(
                status_code=422,
                detail=f"Food recognition failed: {results.get('message', 'Unknown error')}"
            )

        # Step 2: API Nutrition Lookup
        nutrition_data = await get_nutrition_from_apis(results["primary_label"])
        
        # Log result
        confidence_pct = f"{results['confidence']:.1%}"
        source = nutrition_data.get("source", "Unknown")
        logger.info(f"βœ… Prediction: {results['label']} ({confidence_pct}) | Nutrition: {source}")
        
        # Return full response with nutrition data
        enhanced_results = results.copy()
        enhanced_results["nutrition"] = nutrition_data
        enhanced_results["data_source"] = source
        
        return JSONResponse(content=enhanced_results)
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ Analysis failed: {e}")
        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
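
# Example request (hypothetical local run):
#   curl -s -X POST http://localhost:7860/analyze -F file=@meal.jpg
# The response is the full predict_ensemble() payload plus "nutrition" and
# "data_source" fields.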

# ==================== MAIN ====================
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    
    logger.info("🎯 Starting production server...")
    
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=port,
        log_level="info",
        access_log=True
    )
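
# Local run (hypothetical module name): PORT=8000 python app.py
# or, equivalently: uvicorn app:app --host 0.0.0.0 --port 7860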