File size: 133,473 Bytes
fe788c4
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
727526c
 
 
0991c48
 
 
 
 
 
3233647
 
 
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe788c4
 
 
 
 
 
0991c48
 
 
 
 
 
 
727526c
 
 
0991c48
cd31bc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0991c48
 
a002723
0991c48
 
 
 
 
 
a96c15a
a002723
 
 
 
0991c48
 
727526c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6243bc6
 
 
 
 
727526c
6243bc6
f87c30a
 
6243bc6
 
 
 
 
 
 
 
 
 
 
f87c30a
6243bc6
727526c
 
f87c30a
 
 
727526c
 
 
f87c30a
727526c
f87c30a
 
 
 
 
727526c
f87c30a
727526c
 
f87c30a
 
 
 
727526c
6243bc6
a002723
6243bc6
a002723
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6243bc6
a002723
6243bc6
 
a002723
 
 
 
 
 
6243bc6
 
 
a002723
 
 
 
 
 
6243bc6
 
 
a002723
6243bc6
 
a002723
 
 
6243bc6
a002723
6243bc6
a002723
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6243bc6
 
 
 
a002723
6243bc6
a002723
6243bc6
a002723
6243bc6
 
 
a002723
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe788c4
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f87c30a
0991c48
 
 
 
 
 
 
 
fe788c4
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe788c4
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3233647
 
 
b25907a
 
 
 
 
 
 
 
3233647
 
b25907a
3233647
 
 
 
 
b25907a
 
 
3233647
 
 
 
 
0991c48
 
 
 
 
 
 
727526c
cf64e6f
6243bc6
 
f87c30a
 
 
cd31bc6
0991c48
cd31bc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
04c57b1
cd31bc6
04c57b1
727526c
 
f87c30a
04c57b1
f87c30a
cf64e6f
6243bc6
cd31bc6
 
cf64e6f
cd31bc6
 
 
04c57b1
f87c30a
0991c48
04c57b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
727526c
f87c30a
 
cd31bc6
6243bc6
cf64e6f
6243bc6
cd31bc6
 
cf64e6f
cd31bc6
 
 
04c57b1
f87c30a
0991c48
cd31bc6
 
f87c30a
727526c
cf64e6f
0991c48
a96c15a
cf64e6f
04c57b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a96c15a
0991c48
a96c15a
cd31bc6
 
04c57b1
 
a96c15a
0991c48
 
 
727526c
0991c48
 
fe788c4
0991c48
cf64e6f
0991c48
6243bc6
727526c
6243bc6
727526c
 
 
 
 
 
 
 
 
0991c48
 
 
 
 
 
cd31bc6
0991c48
a96c15a
cf64e6f
04c57b1
cd31bc6
04c57b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a96c15a
0991c48
a96c15a
cd31bc6
 
04c57b1
 
a96c15a
0991c48
 
 
 
 
 
 
 
 
cd31bc6
0991c48
 
cd31bc6
0991c48
 
 
 
 
727526c
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
6243bc6
 
0991c48
 
 
 
 
727526c
0991c48
fe788c4
727526c
cd31bc6
0991c48
 
 
cd31bc6
0991c48
a96c15a
cf64e6f
04c57b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a96c15a
0991c48
a96c15a
cd31bc6
04c57b1
 
a96c15a
0991c48
 
 
727526c
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
0991c48
cf64e6f
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
 
 
 
 
04c57b1
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
6243bc6
04c57b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6243bc6
cd31bc6
6243bc6
 
cf64e6f
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
6243bc6
 
 
 
cd31bc6
6243bc6
 
cf64e6f
 
 
 
 
cd31bc6
cf64e6f
cd31bc6
cf64e6f
cd31bc6
cf64e6f
cd31bc6
cf64e6f
cd31bc6
cf64e6f
cd31bc6
6243bc6
 
0991c48
 
cd31bc6
0991c48
 
 
 
727526c
 
6243bc6
727526c
 
 
 
 
 
 
 
 
cd31bc6
0991c48
a96c15a
cf64e6f
04c57b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a96c15a
0991c48
a96c15a
cd31bc6
 
04c57b1
 
a96c15a
0991c48
 
 
 
727526c
0991c48
 
cf64e6f
0991c48
 
 
 
 
 
 
 
 
 
 
 
dee1d98
0991c48
dee1d98
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe788c4
0991c48
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
0991c48
 
 
cd31bc6
 
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
0991c48
 
 
 
 
 
cd31bc6
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe788c4
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3233647
6243bc6
0991c48
3233647
 
 
 
 
6243bc6
3233647
 
 
6243bc6
 
 
 
cd31bc6
 
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
0991c48
 
e345371
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
 
0991c48
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
0991c48
 
 
 
 
 
 
 
 
3233647
0991c48
3233647
0991c48
 
 
 
 
 
 
cd31bc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0991c48
 
 
 
cd31bc6
0991c48
 
 
 
 
 
 
 
 
 
 
 
3233647
6243bc6
3233647
6243bc6
 
 
 
 
3233647
 
fe788c4
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
dee1d98
 
 
 
 
 
 
 
 
 
 
cd31bc6
 
dee1d98
e6fa448
 
 
 
 
 
 
 
 
 
dee1d98
 
 
 
 
 
 
 
 
 
 
0991c48
cd31bc6
 
 
 
 
dee1d98
 
 
 
0991c48
dee1d98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
dee1d98
 
 
 
 
 
 
 
 
 
cd31bc6
 
 
 
 
 
 
dee1d98
cd31bc6
 
dee1d98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0991c48
dee1d98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
dee1d98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
dee1d98
cd31bc6
dee1d98
cd31bc6
dee1d98
 
 
cd31bc6
dee1d98
 
cd31bc6
dee1d98
 
cd31bc6
 
 
 
dee1d98
 
 
 
 
0991c48
 
dee1d98
3233647
 
 
6243bc6
3233647
 
dee1d98
0991c48
cd31bc6
0991c48
 
 
 
dee1d98
6243bc6
3233647
 
 
6243bc6
3233647
6243bc6
 
3233647
0991c48
6243bc6
 
3233647
6243bc6
0991c48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd31bc6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
import spaces  # 추가
import gradio as gr
import os
import asyncio
import torch
import io
import json
import re
import httpx
import tempfile
import wave
import base64
import numpy as np
import soundfile as sf
import subprocess
import shutil
import requests
import logging
from datetime import datetime, timedelta
from dataclasses import dataclass
from typing import List, Tuple, Dict, Optional
from pathlib import Path
from threading import Thread
from dotenv import load_dotenv

# PDF processing imports
from langchain_community.document_loaders import PyPDFLoader

# Edge TTS imports
import edge_tts
from pydub import AudioSegment

# OpenAI imports
from openai import OpenAI

# Transformers imports (for legacy local mode)
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
    BitsAndBytesConfig,
)

# Llama CPP imports (for new local mode)
# Optional dependency probe: the llama-cpp stack powers the GGUF-based local
# LLM mode; the boolean flag lets the rest of the app degrade gracefully
# (raise a clear error later) when these extras are not installed.
try:
    from llama_cpp import Llama
    from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
    from llama_cpp_agent.providers import LlamaCppPythonProvider
    from llama_cpp_agent.chat_history import BasicChatHistory
    from llama_cpp_agent.chat_history.messages import Roles
    from huggingface_hub import hf_hub_download
    LLAMA_CPP_AVAILABLE = True
except ImportError:
    LLAMA_CPP_AVAILABLE = False

# Spark TTS imports
# Optional dependency probe: only the snapshot-download helper is needed here.
# Narrowed from a bare `except:` — a bare except also swallows SystemExit and
# KeyboardInterrupt; an ImportError is the only failure this probe should hide.
try:
    from huggingface_hub import snapshot_download
    SPARK_AVAILABLE = True
except ImportError:
    SPARK_AVAILABLE = False

# MeloTTS imports (for local mode)
# Optional dependency probe for the MeloTTS local speech backend.
try:
    # Best-effort one-time download of the unidic dictionary MeloTTS needs.
    # NOTE(review): the path check assumes a Python 3.10 site-packages layout —
    # confirm on other interpreter versions; if the check misfires we only pay
    # for a redundant (or failed) download attempt.
    if not os.path.exists("/usr/local/lib/python3.10/site-packages/unidic"):
        try:
            os.system("python -m unidic download")
        except Exception:
            # Download is best-effort; the melo import below is the real gate.
            pass
    from melo.api import TTS as MeloTTS
    MELO_AVAILABLE = True
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
    # Exception (not just ImportError) is kept because melo's import can fail
    # for runtime reasons (e.g. missing dictionary data), not only ImportError.
    MELO_AVAILABLE = False

load_dotenv()

# Brave Search API configuration (key is read from the environment; an unset
# key makes the search helpers below return empty/canned results).
BRAVE_KEY = os.getenv("BSEARCH_API")
BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"

# Languages synthesised exclusively via Edge TTS. English is deliberately
# absent — presumably it is also served by the local/Spark TTS backends;
# confirm against the synthesis dispatch code.
EDGE_TTS_ONLY_LANGUAGES = [
    "Korean", "Japanese", "French", "German", "Spanish", "Italian", 
    "Portuguese", "Dutch", "Thai", "Vietnamese", "Arabic", "Hebrew", 
    "Indonesian", "Hindi", "Russian", "Chinese", "Norwegian", "Swedish",
    "Finnish", "Danish", "Polish", "Turkish", "Greek", "Czech"
]

# Per-language Edge TTS voice configuration (primary voice first, backup second)
EDGE_TTS_VOICES = {
    "English": [
        "en-US-AndrewMultilingualNeural",    # male voice 1
        "en-US-BrianMultilingualNeural"      # male voice 2
    ],
    "Korean": [
        "ko-KR-HyunsuNeural",  # male voice 1 (calm, trustworthy)
        "ko-KR-InJoonNeural"   # male voice 2 (lively, friendly)
    ],
    "Japanese": [
        "ja-JP-KeitaNeural",    # male voice 1
        "ja-JP-NanamiNeural"    # female voice (backup)
    ],
    "French": [
        "fr-FR-HenriNeural",    # male voice 1
        "fr-FR-DeniseNeural"    # female voice (backup)
    ],
    "German": [
        "de-DE-ConradNeural",   # male voice 1
        "de-DE-KillianNeural"   # male voice 2
    ],
    "Spanish": [
        "es-ES-AlvaroNeural",   # male voice 1
        "es-ES-ElviraNeural"    # female voice (backup)
    ],
    "Italian": [
        "it-IT-DiegoNeural",    # male voice 1
        "it-IT-IsabellaNeural"  # female voice (backup)
    ],
    "Portuguese": [
        "pt-BR-AntonioNeural",  # male voice 1
        "pt-BR-FranciscaNeural" # female voice (backup)
    ],
    "Dutch": [
        "nl-NL-MaartenNeural",  # male voice 1
        "nl-NL-ColetteNeural"   # female voice (backup)
    ],
    "Thai": [
        "th-TH-NiwatNeural",    # male voice 1
        "th-TH-PremwadeeNeural" # female voice (backup)
    ],
    "Vietnamese": [
        "vi-VN-NamMinhNeural",  # male voice 1
        "vi-VN-HoaiMyNeural"    # female voice (backup)
    ],
    "Arabic": [
        "ar-SA-HamedNeural",    # male voice 1
        "ar-SA-ZariyahNeural"   # female voice (backup)
    ],
    "Hebrew": [
        "he-IL-AvriNeural",     # male voice 1
        "he-IL-HilaNeural"      # female voice (backup)
    ],
    "Indonesian": [
        "id-ID-ArdiNeural",     # male voice 1
        "id-ID-GadisNeural"     # female voice (backup)
    ],
    "Hindi": [
        "hi-IN-MadhurNeural",   # male voice 1
        "hi-IN-SwaraNeural"     # female voice (backup)
    ],
    "Russian": [
        "ru-RU-DmitryNeural",   # male voice 1
        "ru-RU-SvetlanaNeural"  # female voice (backup)
    ],
    "Chinese": [
        "zh-CN-YunxiNeural",    # male voice 1
        "zh-CN-XiaoxiaoNeural"  # female voice (backup)
    ],
    "Norwegian": [
        "nb-NO-FinnNeural",     # male voice 1
        "nb-NO-PernilleNeural"  # female voice (backup)
    ],
    "Swedish": [
        "sv-SE-MattiasNeural",  # male voice 1
        "sv-SE-SofieNeural"     # female voice (backup)
    ],
    "Finnish": [
        "fi-FI-HarriNeural",    # male voice 1
        "fi-FI-NooraNeural"     # female voice (backup)
    ],
    "Danish": [
        "da-DK-JeppeNeural",    # male voice 1
        "da-DK-ChristelNeural"  # female voice (backup)
    ],
    "Polish": [
        "pl-PL-MarekNeural",    # male voice 1
        "pl-PL-ZofiaNeural"     # female voice (backup)
    ],
    "Turkish": [
        "tr-TR-AhmetNeural",    # male voice 1
        "tr-TR-EmelNeural"      # female voice (backup)
    ],
    "Greek": [
        "el-GR-NestorasNeural", # male voice 1
        "el-GR-AthinaNeural"    # female voice (backup)
    ],
    "Czech": [
        "cs-CZ-AntoninNeural",  # male voice 1
        "cs-CZ-VlastaNeural"    # female voice (backup)
    ]
}

@dataclass
class ConversationConfig:
    """Tunable settings for conversation/podcast generation.

    NOTE(review): several original inline comments quoted values that disagreed
    with the actual defaults (e.g. "4000 -> 6000" next to 8000); the values
    below are authoritative.
    """
    max_words: int = 8000  # word budget for source text fed to the LLM
    prefix_url: str = "https://r.jina.ai/"  # reader proxy used to fetch URLs as plain text
    api_model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"  # Together API model
    legacy_local_model_name: str = "NousResearch/Hermes-2-Pro-Llama-3-8B"  # transformers-based legacy local model
    # GGUF model served through llama-cpp for the new local mode
    local_model_name: str = "Private-BitSix-Mistral-Small-3.1-24B-Instruct-2503.gguf"
    local_model_repo: str = "ginigen/Private-BitSix-Mistral-Small-3.1-24B-Instruct-2503"
    # Generation length limits
    max_tokens: int = 6000
    max_new_tokens: int = 12000
    min_conversation_turns: int = 18  # minimum number of dialogue turns
    max_conversation_turns: int = 20  # maximum number of dialogue turns


def brave_search(query: str, count: int = 8, freshness_days: int | None = None):
    """Query the Brave Web Search API and return simplified result dicts.

    Args:
        query: Search terms.
        count: Maximum number of results to request and return.
        freshness_days: If given, restrict results to pages newer than this
            many days (sent as Brave's ``freshness`` parameter).

    Returns:
        List of dicts with ``title``, ``url``, ``snippet`` and ``host`` keys;
        empty when no API key is configured or the request fails.
    """
    if not BRAVE_KEY:
        return []
    params = {"q": query, "count": str(count)}
    if freshness_days:
        # tz-aware replacement for the deprecated datetime.utcnow();
        # local import because the module only imports datetime/timedelta.
        from datetime import timezone
        dt_from = (datetime.now(timezone.utc) - timedelta(days=freshness_days)).strftime("%Y-%m-%d")
        params["freshness"] = dt_from
    try:
        resp = requests.get(
            BRAVE_ENDPOINT,
            headers={"Accept": "application/json", "X-Subscription-Token": BRAVE_KEY},
            params=params,
            timeout=15
        )
        raw = resp.json().get("web", {}).get("results") or []
        # NOTE: the original reused the name `r` for both the HTTP response and
        # the per-result loop variable; renamed `resp`/`item` to end the shadowing.
        return [{
            "title": item.get("title", ""),
            "url": item.get("url", item.get("link", "")),
            "snippet": item.get("description", item.get("text", "")),
            "host": re.sub(r"https?://(www\.)?", "", item.get("url", "")).split("/")[0]
        } for item in raw[:count]]
    except Exception as e:
        logging.error(f"Brave search error: {e}")
        return []

def format_search_results(query: str, for_keyword: bool = False) -> str:
    """Run a Brave search for *query* and format the hits as markdown-ish text.

    Keyword-mode searches fetch more hits and use longer snippets; regular
    searches are limited to fresh results (last 7 days) and kept terse.
    Returns an empty string when the search yields nothing.
    """
    hit_count = 5 if for_keyword else 3
    hits = brave_search(query, hit_count, freshness_days=None if for_keyword else 7)
    if not hits:
        return ""

    limit = 4 if for_keyword else 2
    formatted = []
    for hit in hits[:limit]:
        text = hit['snippet']
        if for_keyword:
            # Richer entry: longer snippet plus the source host.
            if len(text) > 200:
                text = text[:200] + "..."
            formatted.append(f"**{hit['title']}**\n{text}\nSource: {hit['host']}")
        else:
            # Terse bullet with a short snippet.
            if len(text) > 100:
                text = text[:100] + "..."
            formatted.append(f"- {hit['title']}: {text}")

    return "\n\n".join(formatted) + "\n"

def extract_keywords_for_search(text: str, language: str = "English") -> List[str]:
    """Extract at most one search keyword from the start of *text*.

    Only the first 500 characters are scanned to keep the work bounded.

    Args:
        text: Source text to mine for a keyword.
        language: "Korean" uses a Hangul-run heuristic; anything else uses a
            capitalised-English-word heuristic.

    Returns:
        A list with at most one keyword (empty when nothing qualifies).
    """
    text_sample = text[:500]

    if language == "Korean":
        # Hangul runs of 2+ characters approximate nouns well enough here.
        # (Removed the redundant function-local `import re`; re is a top-level import.)
        keywords = re.findall(r'[가-힣]{2,}', text_sample)
        # De-duplicate preserving first occurrence, then prefer longer words.
        unique_keywords = list(dict.fromkeys(keywords))
        unique_keywords.sort(key=len, reverse=True)
        return unique_keywords[:1]
    else:
        # Capitalised words longer than 4 letters. Fix: the original measured
        # length on the raw token but returned the punctuation-stripped word,
        # so e.g. "Word." slipped through at 4 letters; length and case are
        # now checked on the cleaned word.
        candidates = []
        for word in text_sample.split():
            cleaned = word.strip('.,!?;:')
            if len(cleaned) > 4 and cleaned[:1].isupper():
                candidates.append(cleaned)
        if candidates:
            return [max(candidates, key=len)]
        return []

def search_and_compile_content(keyword: str, language: str = "English") -> str:
    """Search the web for *keyword* and compile enough source content for generation.

    Falls back to a canned template when no Brave API key is configured, and
    pads the compiled results with generic text when the collected snippets
    total fewer than ~1000 characters.
    """
    if not BRAVE_KEY:
        # No API key — still produce usable default content.
        if language == "Korean":
            return f"""
'{keyword}'에 대한 종합적인 정보:

{keyword}는 현대 사회에서 매우 중요한 주제입니다. 
이 주제는 다양한 측면에서 우리의 삶에 영향을 미치고 있으며, 
최근 들어 더욱 주목받고 있습니다.

주요 특징:
1. 기술적 발전과 혁신
2. 사회적 영향과 변화
3. 미래 전망과 가능성
4. 실용적 활용 방안
5. 글로벌 트렌드와 동향

전문가들은 {keyword}가 앞으로 더욱 중요해질 것으로 예상하고 있으며,
이에 대한 깊이 있는 이해가 필요한 시점입니다.
"""
        else:
            return f"""
Comprehensive information about '{keyword}':

{keyword} is a significant topic in modern society.
This subject impacts our lives in various ways and has been 
gaining increasing attention recently.

Key aspects:
1. Technological advancement and innovation
2. Social impact and changes
3. Future prospects and possibilities
4. Practical applications
5. Global trends and developments

Experts predict that {keyword} will become even more important,
and it's crucial to develop a deep understanding of this topic.
"""
    
    # A spread of search queries per language to diversify the results.
    if language == "Korean":
        queries = [
            f"{keyword} 최신 뉴스 2024",
            f"{keyword} 정보 설명",
            f"{keyword} 트렌드 전망",
            f"{keyword} 장점 단점",
            f"{keyword} 활용 방법",
            f"{keyword} 전문가 의견"
        ]
    else:
        queries = [
            f"{keyword} latest news 2024",
            f"{keyword} explained comprehensive",
            f"{keyword} trends forecast",
            f"{keyword} advantages disadvantages",
            f"{keyword} how to use",
            f"{keyword} expert opinions"
        ]
    
    all_content = []
    total_content_length = 0
    
    for query in queries:
        results = brave_search(query, count=5)  # fetch extra results per query
        for r in results[:3]:  # keep the top 3 per query
            content = f"**{r['title']}**\n{r['snippet']}\nSource: {r['host']}\n"
            all_content.append(content)
            total_content_length += len(r['snippet'])
    
    # Pad with generic text when the snippets are too thin.
    if total_content_length < 1000:  # ensure at least ~1000 characters of material
        if language == "Korean":
            additional_content = f"""
추가 정보:
{keyword}와 관련된 최근 동향을 살펴보면, 이 분야는 빠르게 발전하고 있습니다.
많은 전문가들이 이 주제에 대해 활발히 연구하고 있으며, 
실생활에서의 응용 가능성도 계속 확대되고 있습니다.

특히 주목할 점은:
- 기술 혁신의 가속화
- 사용자 경험의 개선
- 접근성의 향상
- 비용 효율성 증대
- 글로벌 시장의 성장

이러한 요소들이 {keyword}의 미래를 더욱 밝게 만들고 있습니다.
"""
        else:
            additional_content = f"""
Additional insights:
Recent developments in {keyword} show rapid advancement in this field.
Many experts are actively researching this topic, and its practical 
applications continue to expand.

Key points to note:
- Accelerating technological innovation
- Improving user experience
- Enhanced accessibility
- Increased cost efficiency
- Growing global market

These factors are making the future of {keyword} increasingly promising.
"""
        all_content.append(additional_content)
    
    # Join everything that was collected.
    compiled = "\n\n".join(all_content)
    
    # Language-appropriate heading for the compiled material.
    if language == "Korean":
        intro = f"### '{keyword}'에 대한 종합적인 정보와 최신 동향:\n\n"
    else:
        intro = f"### Comprehensive information and latest trends about '{keyword}':\n\n"
    
    return intro + compiled


class UnifiedAudioConverter:
    def __init__(self, config: ConversationConfig):
        self.config = config
        self.llm_client = None
        self.legacy_local_model = None
        self.legacy_tokenizer = None
        # 새로운 로컬 LLM 관련
        self.local_llm = None
        self.local_llm_model = None
        self.melo_models = None
        self.spark_model_dir = None
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        
    def initialize_api_mode(self, api_key: str):
        """Initialize API mode with Together API (now fallback)."""
        together_endpoint = "https://api.together.xyz/v1"
        self.llm_client = OpenAI(api_key=api_key, base_url=together_endpoint)
        
    @spaces.GPU(duration=120)
    def initialize_local_mode(self):
        """Initialize new local mode with Llama CPP.

        Downloads the configured GGUF model from the Hugging Face Hub on first
        use and loads it into a ``Llama`` instance.  Re-initializes only when
        no model is loaded or the configured model name has changed.

        Raises:
            RuntimeError: if Llama CPP dependencies are missing, or the model
                cannot be downloaded/loaded.
        """
        if not LLAMA_CPP_AVAILABLE:
            raise RuntimeError("Llama CPP dependencies not available. Please install llama-cpp-python and llama-cpp-agent.")

        if self.local_llm is None or self.local_llm_model != self.config.local_model_name:
            try:
                # hf_hub_download returns the actual on-disk path of the file;
                # use it directly instead of re-deriving the path by hand
                # (the original join could diverge from where the hub put it).
                model_path = hf_hub_download(
                    repo_id=self.config.local_model_repo,
                    filename=self.config.local_model_name,
                    local_dir="./models"
                )

                if not os.path.exists(model_path):
                    raise RuntimeError(f"Model file not found at {model_path}")

                # Load the GGUF model; offload layers to GPU when available.
                self.local_llm = Llama(
                    model_path=model_path,
                    flash_attn=True,
                    n_gpu_layers=81 if torch.cuda.is_available() else 0,
                    n_batch=1024,
                    n_ctx=16384,
                )
                self.local_llm_model = self.config.local_model_name
                print(f"Local LLM initialized: {model_path}")

            except Exception as e:
                print(f"Failed to initialize local LLM: {e}")
                # Chain the original cause so callers can inspect it.
                raise RuntimeError(f"Failed to initialize local LLM: {e}") from e

    @spaces.GPU(duration=60)
    def initialize_legacy_local_mode(self):
        """Initialize legacy local mode with Hugging Face model (fallback)."""
        if self.legacy_local_model is not None:
            return  # already loaded
        quant_cfg = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16
        )
        self.legacy_local_model = AutoModelForCausalLM.from_pretrained(
            self.config.legacy_local_model_name,
            quantization_config=quant_cfg
        )
        # Tokenizer is pinned to a specific revision for reproducibility.
        self.legacy_tokenizer = AutoTokenizer.from_pretrained(
            self.config.legacy_local_model_name,
            revision='8ab73a6800796d84448bc936db9bac5ad9f984ae'
        )

    def initialize_spark_tts(self):
        """Initialize Spark TTS model by downloading if needed."""
        if not SPARK_AVAILABLE:
            raise RuntimeError("Spark TTS dependencies not available")

        target_dir = "pretrained_models/Spark-TTS-0.5B"

        if not os.path.exists(target_dir):
            # First run: pull the checkpoint from the Hub.
            print("Downloading Spark-TTS model...")
            try:
                os.makedirs("pretrained_models", exist_ok=True)
                snapshot_download(
                    "SparkAudio/Spark-TTS-0.5B",
                    local_dir=target_dir
                )
                print("Spark-TTS model downloaded successfully")
            except Exception as e:
                raise RuntimeError(f"Failed to download Spark-TTS model: {e}")

        self.spark_model_dir = target_dir

        # Inference later shells out to the Spark-TTS CLI; warn early if it is absent.
        if not os.path.exists("cli/inference.py"):
            print("Warning: Spark-TTS CLI not found. Please clone the Spark-TTS repository.")

    @spaces.GPU(duration=60)
    def initialize_melo_tts(self):
        """Initialize MeloTTS models (English voice), loading at most once."""
        if not MELO_AVAILABLE or self.melo_models is not None:
            return
        self.melo_models = {"EN": MeloTTS(language="EN", device=self.device)}

    def fetch_text(self, url: str) -> str:
        """Fetch text content from URL"""
        if not url:
            raise ValueError("URL cannot be empty")
            
        if not url.startswith("http://") and not url.startswith("https://"):
            raise ValueError("URL must start with 'http://' or 'https://'")

        full_url = f"{self.config.prefix_url}{url}"
        try:
            response = httpx.get(full_url, timeout=60.0)
            response.raise_for_status()
            return response.text
        except httpx.HTTPError as e:
            raise RuntimeError(f"Failed to fetch URL: {e}")

    def extract_text_from_pdf(self, pdf_file) -> str:
        """Extract text content from PDF file.

        Args:
            pdf_file: Either a filesystem path (what Gradio passes) or a
                file-like object exposing ``read()``.

        Returns:
            The text of all pages joined with newlines.

        Raises:
            RuntimeError: if the PDF cannot be read or parsed.
        """
        temp_path = None
        try:
            if isinstance(pdf_file, str):
                # Gradio returns a file path, not a file object.
                pdf_path = pdf_file
            else:
                # File-like object (shouldn't happen with Gradio): spill to a
                # temporary file so PyPDFLoader can open it by path.
                with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
                    tmp_file.write(pdf_file.read())
                    temp_path = tmp_file.name
                pdf_path = temp_path

            # Load the PDF and concatenate the text of every page.
            loader = PyPDFLoader(pdf_path)
            pages = loader.load()
            return "\n".join(page.page_content for page in pages)
        except Exception as e:
            raise RuntimeError(f"Failed to extract text from PDF: {e}") from e
        finally:
            # Always remove the temp file, even when loading fails
            # (the original cleaned up only on the success path, leaking it on error).
            if temp_path and os.path.exists(temp_path):
                os.unlink(temp_path)

    def _get_messages_formatter_type(self, model_name):
        """Get appropriate message formatter for the model."""
        chatml_markers = ("Mistral", "BitSix")
        if any(marker in model_name for marker in chatml_markers):
            return MessagesFormatterType.CHATML
        return MessagesFormatterType.LLAMA_3

    def _build_prompt(self, text: str, language: str = "English", search_context: str = "") -> str:
        """Build prompt for conversation generation with enhanced professional podcast style"""
        # 텍스트 길이 제한
        max_text_length = 4500 if search_context else 6000
        if len(text) > max_text_length:
            text = text[:max_text_length] + "..."
        
        # 언어별 화자 이름 설정
        if language == "Korean":
            speaker1, speaker2 = "준수", "민호"
        elif language == "Japanese":
            speaker1, speaker2 = "Hiroshi", "Takeshi"
        elif language == "French":
            speaker1, speaker2 = "Pierre", "Marc"
        elif language == "German":
            speaker1, speaker2 = "Klaus", "Stefan"
        elif language == "Spanish":
            speaker1, speaker2 = "Carlos", "Miguel"
        elif language == "Italian":
            speaker1, speaker2 = "Marco", "Giuseppe"
        elif language == "Portuguese":
            speaker1, speaker2 = "João", "Pedro"
        elif language == "Dutch":
            speaker1, speaker2 = "Jan", "Pieter"
        elif language == "Thai":
            speaker1, speaker2 = "Somchai", "Prasert"
        elif language == "Vietnamese":
            speaker1, speaker2 = "Minh", "Duc"
        elif language == "Arabic":
            speaker1, speaker2 = "Ahmed", "Mohammed"
        elif language == "Hebrew":
            speaker1, speaker2 = "David", "Michael"
        elif language == "Indonesian":
            speaker1, speaker2 = "Budi", "Andi"
        elif language == "Hindi":
            speaker1, speaker2 = "Raj", "Amit"
        elif language == "Russian":
            speaker1, speaker2 = "Alexei", "Dmitri"
        elif language == "Chinese":
            speaker1, speaker2 = "Wei", "Jun"
        else:  # English and others
            speaker1, speaker2 = "Alex", "Jordan"
        
        # 대화 템플릿 생성
        template = "{\n    \"conversation\": [\n"
        for i in range(12):  # 12번의 교환
            template += f"        {{\"speaker\": \"{speaker1 if i % 2 == 0 else speaker2}\", \"text\": \"\"}}"
            if i < 11:
                template += ","
            template += "\n"
        template += "    ]\n}"
        
        # 언어별 프롬프트 작성
        if language == "Korean":
            context_part = f"# 최신 관련 정보:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# 원본 콘텐츠:\n{text}\n\n"
                f"{context_part}"
                f"위 내용으로 전문적이고 심층적인 팟캐스트 대담 프로그램 대본을 한국어로 작성해주세요.\n\n"
                f"## 핵심 지침:\n"
                f"1. **대화 스타일**: 전문적이면서도 이해하기 쉬운 팟캐스트 대담\n"
                f"2. **화자 역할**:\n"
                f"   - {speaker1}: 진행자/호스트 (핵심을 짚는 질문, 청취자 관점에서 궁금한 점 질문)\n"
                f"   - {speaker2}: 전문가 (깊이 있는 설명, 구체적 사례와 데이터 제시)\n"
                f"3. **중요한 답변 규칙**:\n"
                f"   - {speaker1}: 1-2문장의 명확한 질문\n"
                f"   - {speaker2}: **반드시 2-4문장으로 충실히 답변** (개념 설명 + 구체적 설명 + 예시나 함의)\n"
                f"4. **전문성 요소**: 통계나 연구 결과 인용, 실제 사례와 케이스 스터디, 전문 용어를 쉽게 풀어서 설명\n"
                f"5. **필수 규칙**: 서로 존댓말 사용, 12회 대화 교환, 모든 대화는 한국어로 작성\n\n"
                f"JSON 형식으로만 반환:\n{template}"
            )
        elif language == "Japanese":
            context_part = f"# 最新関連情報:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# 元のコンテンツ:\n{text}\n\n"
                f"{context_part}"
                f"上記の内容で専門的で深いポッドキャスト対談番組の台本を日本語で作成してください。\n\n"
                f"## 重要な指針:\n"
                f"1. **対話スタイル**: 専門的でありながら理解しやすいポッドキャスト対談\n"
                f"2. **話者の役割**:\n"
                f"   - {speaker1}: 司会者/ホスト(核心を突く質問、聴衆の視点からの質問)\n"
                f"   - {speaker2}: 専門家(深い説明、具体的な事例とデータの提示)\n"
                f"3. **重要な回答ルール**:\n"
                f"   - {speaker1}: 1-2文の明確な質問\n"
                f"   - {speaker2}: **必ず2-4文で充実した回答**(概念説明 + 具体的説明 + 例示や含意)\n"
                f"4. **専門性要素**: 統計や研究結果の引用、実際の事例とケーススタディ、専門用語を分かりやすく説明\n"
                f"5. **必須ルール**: お互いに丁寧語を使用、12回の対話交換、すべての対話は日本語で作成\n\n"
                f"JSON形式でのみ返答:\n{template}"
            )
        elif language == "French":
            context_part = f"# Dernières informations pertinentes:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Contenu original:\n{text}\n\n"
                f"{context_part}"
                f"Créez un script de débat podcast professionnel et approfondi en français avec le contenu ci-dessus.\n\n"
                f"## Directives clés:\n"
                f"1. **Style de dialogue**: Discussion de podcast professionnelle mais accessible\n"
                f"2. **Rôles des intervenants**:\n"
                f"   - {speaker1}: Animateur/Hôte (questions perspicaces, perspective de l'audience)\n"
                f"   - {speaker2}: Expert (explications approfondies, exemples concrets et données)\n"
                f"3. **Règles de réponse importantes**:\n"
                f"   - {speaker1}: Questions claires en 1-2 phrases\n"
                f"   - {speaker2}: **Répondre obligatoirement en 2-4 phrases** (explication du concept + explication détaillée + exemple/implication)\n"
                f"4. **Éléments de professionnalisme**: Citer des statistiques et recherches, études de cas réels, expliquer clairement les termes techniques\n"
                f"5. **Règles obligatoires**: Utiliser un langage poli, 12 échanges de dialogue, toute la conversation en français\n\n"
                f"Retourner uniquement en format JSON:\n{template}"
            )
        elif language == "German":
            context_part = f"# Neueste relevante Informationen:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Originalinhalt:\n{text}\n\n"
                f"{context_part}"
                f"Erstellen Sie ein professionelles und tiefgreifendes Podcast-Gesprächsskript auf Deutsch mit dem obigen Inhalt.\n\n"
                f"## Wichtige Richtlinien:\n"
                f"1. **Gesprächsstil**: Professionelle, aber zugängliche Podcast-Diskussion\n"
                f"2. **Sprecherrollen**:\n"
                f"   - {speaker1}: Moderator/Gastgeber (einsichtsvolle Fragen, Publikumsperspektive)\n"
                f"   - {speaker2}: Experte (tiefgreifende Erklärungen, konkrete Beispiele und Daten)\n"
                f"3. **Wichtige Antwortregeln**:\n"
                f"   - {speaker1}: Klare Fragen in 1-2 Sätzen\n"
                f"   - {speaker2}: **Muss in 2-4 Sätzen antworten** (Konzepterklärung + detaillierte Erklärung + Beispiel/Implikation)\n"
                f"4. **Professionalitätselemente**: Statistiken und Forschung zitieren, echte Fallstudien, technische Begriffe klar erklären\n"
                f"5. **Pflichtregeln**: Höfliche Sprache verwenden, 12 Gesprächsaustausche, gesamte Unterhaltung auf Deutsch\n\n"
                f"Nur im JSON-Format zurückgeben:\n{template}"
            )
        elif language == "Spanish":
            context_part = f"# Última información relevante:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Contenido original:\n{text}\n\n"
                f"{context_part}"
                f"Cree un guión de debate de podcast profesional y profundo en español con el contenido anterior.\n\n"
                f"## Directrices clave:\n"
                f"1. **Estilo de diálogo**: Discusión de podcast profesional pero accesible\n"
                f"2. **Roles de los hablantes**:\n"
                f"   - {speaker1}: Presentador/Anfitrión (preguntas perspicaces, perspectiva de la audiencia)\n"
                f"   - {speaker2}: Experto (explicaciones profundas, ejemplos concretos y datos)\n"
                f"3. **Reglas de respuesta importantes**:\n"
                f"   - {speaker1}: Preguntas claras en 1-2 oraciones\n"
                f"   - {speaker2}: **Debe responder en 2-4 oraciones** (explicación del concepto + explicación detallada + ejemplo/implicación)\n"
                f"4. **Elementos de profesionalismo**: Citar estadísticas e investigación, estudios de casos reales, explicar términos técnicos claramente\n"
                f"5. **Reglas obligatorias**: Usar lenguaje cortés, 12 intercambios de diálogo, toda la conversación en español\n\n"
                f"Devolver solo en formato JSON:\n{template}"
            )
        elif language == "Italian":
            context_part = f"# Ultime informazioni rilevanti:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Contenuto originale:\n{text}\n\n"
                f"{context_part}"
                f"Crea uno script di dibattito podcast professionale e approfondito in italiano con il contenuto sopra.\n\n"
                f"## Linee guida chiave:\n"
                f"1. **Stile di dialogo**: Discussione podcast professionale ma accessibile\n"
                f"2. **Ruoli degli speaker**:\n"
                f"   - {speaker1}: Conduttore/Ospite (domande perspicaci, prospettiva del pubblico)\n"
                f"   - {speaker2}: Esperto (spiegazioni approfondite, esempi concreti e dati)\n"
                f"3. **Regole di risposta importanti**:\n"
                f"   - {speaker1}: Domande chiare in 1-2 frasi\n"
                f"   - {speaker2}: **Deve rispondere in 2-4 frasi** (spiegazione del concetto + spiegazione dettagliata + esempio/implicazione)\n"
                f"4. **Elementi di professionalità**: Citare statistiche e ricerche, studi di casi reali, spiegare chiaramente termini tecnici\n"
                f"5. **Regole obbligatorie**: Usare linguaggio cortese, 12 scambi di dialogo, tutta la conversazione in italiano\n\n"
                f"Restituire solo in formato JSON:\n{template}"
            )
        elif language == "Portuguese":
            context_part = f"# Últimas informações relevantes:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Conteúdo original:\n{text}\n\n"
                f"{context_part}"
                f"Crie um roteiro de debate de podcast profissional e aprofundado em português com o conteúdo acima.\n\n"
                f"## Diretrizes principais:\n"
                f"1. **Estilo de diálogo**: Discussão de podcast profissional mas acessível\n"
                f"2. **Papéis dos falantes**:\n"
                f"   - {speaker1}: Apresentador/Anfitrião (perguntas perspicazes, perspectiva da audiência)\n"
                f"   - {speaker2}: Especialista (explicações aprofundadas, exemplos concretos e dados)\n"
                f"3. **Regras de resposta importantes**:\n"
                f"   - {speaker1}: Perguntas claras em 1-2 frases\n"
                f"   - {speaker2}: **Deve responder em 2-4 frases** (explicação do conceito + explicação detalhada + exemplo/implicação)\n"
                f"4. **Elementos de profissionalismo**: Citar estatísticas e pesquisas, estudos de casos reais, explicar termos técnicos claramente\n"
                f"5. **Regras obrigatórias**: Usar linguagem cortês, 12 trocas de diálogo, toda a conversa em português\n\n"
                f"Retornar apenas em formato JSON:\n{template}"
            )
        elif language == "Chinese":
            context_part = f"# 最新相关信息:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# 原始内容:\n{text}\n\n"
                f"{context_part}"
                f"请用上述内容创建一个专业深入的播客对话节目剧本,使用中文。\n\n"
                f"## 关键指导原则:\n"
                f"1. **对话风格**: 专业但易于理解的播客讨论\n"
                f"2. **说话者角色**:\n"
                f"   - {speaker1}: 主持人(有见地的问题,听众视角)\n"
                f"   - {speaker2}: 专家(深入解释,具体例子和数据)\n"
                f"3. **重要回答规则**:\n"
                f"   - {speaker1}: 1-2句清晰的问题\n"
                f"   - {speaker2}: **必须用2-4句话回答**(概念解释 + 详细解释 + 例子/含义)\n"
                f"4. **专业元素**: 引用统计数据和研究,真实案例研究,清楚解释技术术语\n"
                f"5. **必要规则**: 使用礼貌语言,12次对话交换,所有对话都用中文\n\n"
                f"仅以JSON格式返回:\n{template}"
            )
        elif language == "Russian":
            context_part = f"# Последняя релевантная информация:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Оригинальный контент:\n{text}\n\n"
                f"{context_part}"
                f"Создайте профессиональный и глубокий сценарий подкаст-дебатов на русском языке с приведенным выше содержанием.\n\n"
                f"## Ключевые принципы:\n"
                f"1. **Стиль диалога**: Профессиональное, но доступное обсуждение подкаста\n"
                f"2. **Роли говорящих**:\n"
                f"   - {speaker1}: Ведущий/Хост (проницательные вопросы, перспектива аудитории)\n"
                f"   - {speaker2}: Эксперт (глубокие объяснения, конкретные примеры и данные)\n"
                f"3. **Важные правила ответов**:\n"
                f"   - {speaker1}: Четкие вопросы в 1-2 предложениях\n"
                f"   - {speaker2}: **Должен отвечать в 2-4 предложениях** (объяснение концепции + подробное объяснение + пример/импликация)\n"
                f"4. **Элементы профессионализма**: Цитировать статистику и исследования, реальные кейс-стади, четко объяснять технические термины\n"
                f"5. **Обязательные правила**: Использовать вежливый язык, 12 обменов диалога, вся беседа на русском языке\n\n"
                f"Вернуть только в формате JSON:\n{template}"
            )
        else:  # English and other languages
            context_part = f"# Latest Information:\n{search_context}\n" if search_context else ""
            base_prompt = (
                f"# Content:\n{text}\n\n"
                f"{context_part}"
                f"Create a professional and insightful podcast conversation in {language}.\n\n"
                f"## Key Guidelines:\n"
                f"1. **Style**: Professional yet accessible podcast discussion\n"
                f"2. **Roles**:\n"
                f"   - {speaker1}: Host (insightful questions, audience perspective)\n"
                f"   - {speaker2}: Expert (in-depth explanations, concrete examples and data)\n"
                f"3. **Critical Response Rules**:\n"
                f"   - {speaker1}: 1-2 sentence clear questions\n"
                f"   - {speaker2}: **Must answer in 2-4 sentences** (concept + detailed explanation + example/implication)\n"
                f"4. **Professional Elements**: Cite statistics and research, real cases and case studies, explain technical terms clearly\n"
                f"5. **Language**: All dialogue must be in {language}, 12 exchanges total\n\n"
                f"Return JSON only:\n{template}"
            )
        
        return base_prompt

    def _build_messages_for_local(self, text: str, language: str = "English", search_context: str = "") -> List[Dict]:
        """Build messages for local LLM with enhanced professional podcast style"""
        if language == "Korean":
            system_message = (
                "당신은 한국 최고의 전문 팟캐스트 작가입니다. "
                "청취자들이 전문 지식을 쉽게 이해할 수 있는 고품질 대담을 한국어로 만들어냅니다. "
                "반드시 서로 존댓말을 사용하며, 전문적이면서도 친근한 톤을 유지합니다. "
                "모든 대화는 반드시 한국어로 작성해주세요."
            )
        elif language == "Japanese":
            system_message = (
                "あなたは日本の最高のプロフェッショナルポッドキャスト作家です。"
                "聴衆が専門知識を簡単に理解できる高品質な対談を日本語で作成します。"
                "必ずお互いに丁寧語を使用し、専門的でありながら親しみやすいトーンを維持してください。"
                "すべての対話は必ず日本語で書いてください。"
            )
        elif language == "French":
            system_message = (
                "Vous êtes le meilleur scénariste de podcast professionnel de France. "
                "Créez des discussions de haute qualité en français qui permettent au public "
                "de comprendre facilement les connaissances spécialisées. "
                "Maintenez un ton professionnel mais accessible. "
                "Toutes les conversations doivent être écrites en français."
            )
        elif language == "German":
            system_message = (
                "Sie sind der beste professionelle Podcast-Drehbuchautor Deutschlands. "
                "Erstellen Sie hochwertige Diskussionen auf Deutsch, die es dem Publikum ermöglichen, "
                "Fachwissen leicht zu verstehen. "
                "Bewahren Sie einen professionellen, aber zugänglichen Ton. "
                "Alle Gespräche müssen auf Deutsch geschrieben werden."
            )
        elif language == "Spanish":
            system_message = (
                "Eres el mejor guionista de podcast profesional de España. "
                "Crea discusiones de alta calidad en español que permitan a la audiencia "
                "entender fácilmente el conocimiento especializado. "
                "Mantén un tono profesional pero accesible. "
                "Todas las conversaciones deben estar escritas en español."
            )
        elif language == "Italian":
            system_message = (
                "Sei il migliore sceneggiatore di podcast professionali d'Italia. "
                "Crea discussioni di alta qualità in italiano che permettano al pubblico "
                "di comprendere facilmente le conoscenze specialistiche. "
                "Mantieni un tono professionale ma accessibile. "
                "Tutte le conversazioni devono essere scritte in italiano."
            )
        elif language == "Portuguese":
            system_message = (
                "Você é o melhor roteirista de podcast profissional do Brasil. "
                "Crie discussões de alta qualidade em português que permitam ao público "
                "entender facilmente o conhecimento especializado. "
                "Mantenha um tom profissional mas acessível. "
                "Todas as conversas devem ser escritas em português."
            )
        elif language == "Chinese":
            system_message = (
                "您是中国最好的专业播客编剧。"
                "创建高质量的中文讨论,让观众能够轻松理解专业知识。"
                "保持专业但平易近人的语调。"
                "所有对话都必须用中文书写。"
            )
        elif language == "Russian":
            system_message = (
                "Вы лучший профессиональный сценарист подкастов в России. "
                "Создавайте высококачественные дискуссии на русском языке, которые позволяют аудитории "
                "легко понимать специализированные знания. "
                "Поддерживайте профессиональный, но доступный тон. "
                "Все разговоры должны быть написаны на русском языке."
            )
        else:
            system_message = (
                f"You are an expert podcast scriptwriter creating high-quality "
                f"professional discussions in {language}. Make complex topics accessible "
                f"while maintaining expertise and a professional yet approachable tone. "
                f"All conversations must be written in {language}."
            )
        
        return [
            {"role": "system", "content": system_message},
            {"role": "user", "content": self._build_prompt(text, language, search_context)}
        ]

    @spaces.GPU(duration=120)
    def extract_conversation_local(self, text: str, language: str = "English", progress=None) -> Dict:
        """Extract conversation using new local LLM with enhanced professional style.

        Optionally enriches the prompt with Brave web-search context, then runs
        the llama-cpp agent and parses its JSON reply.  On any failure, falls
        back to `extract_conversation_legacy_local`, forwarding the search
        context already gathered.

        Args:
            text: Source content for the conversation.
            language: Target dialogue language.
            progress: Unused here; passed through to the legacy fallback.

        Returns:
            Dict parsed from the model's JSON output (expected to contain a
            "conversation" list — not validated here).
        """
        try:
            # Build search context, but skip it for keyword-generated input
            # (that content already embeds fresh search results).
            search_context = ""
            if BRAVE_KEY and not text.startswith("Keyword-based content:"):
                try:
                    keywords = extract_keywords_for_search(text, language)
                    if keywords:
                        search_query = keywords[0] if language == "Korean" else f"{keywords[0]} latest news"
                        search_context = format_search_results(search_query)
                        print(f"Search context added for: {search_query}")
                except Exception as e:
                    # Search is best-effort; proceed without context on failure.
                    print(f"Search failed, continuing without context: {e}")
            
            # Try the new llama-cpp local LLM first.
            self.initialize_local_mode()
            
            chat_template = self._get_messages_formatter_type(self.config.local_model_name)
            provider = LlamaCppPythonProvider(self.local_llm)

            # Language-specific system message (persona + 12-exchange/JSON rules).
            if language == "Korean":
                system_message = (
                    "당신은 한국의 유명 팟캐스트 전문 작가입니다. "
                    "청취자들이 깊이 있는 전문 지식을 얻을 수 있는 고품질 대담을 한국어로 만듭니다. "
                    "반드시 서로 존댓말을 사용하며, 12회의 대화 교환으로 구성하세요. "
                    "모든 대화는 반드시 한국어로 작성하고 JSON 형식으로만 응답하세요."
                )
            elif language == "Japanese":
                system_message = (
                    "あなたは日本の有名なポッドキャスト専門作家です。"
                    "聴衆が深い専門知識を得られる高品質な対談を日本語で作成します。"
                    "必ずお互いに丁寧語を使用し、12回の対話交換で構成してください。"
                    "すべての対話は必ず日本語で作成し、JSON形式でのみ回答してください。"
                )
            elif language == "French":
                system_message = (
                    "Vous êtes un célèbre scénariste de podcast professionnel français. "
                    "Créez des discussions de haute qualité en français qui donnent au public "
                    "des connaissances professionnelles approfondies. "
                    "Créez exactement 12 échanges de conversation et répondez uniquement en format JSON."
                )
            elif language == "German":
                system_message = (
                    "Sie sind ein berühmter professioneller Podcast-Drehbuchautor aus Deutschland. "
                    "Erstellen Sie hochwertige Diskussionen auf Deutsch, die dem Publikum "
                    "tiefgreifendes Fachwissen vermitteln. "
                    "Erstellen Sie genau 12 Gesprächsaustausche und antworten Sie nur im JSON-Format."
                )
            elif language == "Spanish":
                system_message = (
                    "Eres un famoso guionista de podcast profesional español. "
                    "Crea discusiones de alta calidad en español que brinden al público "
                    "conocimientos profesionales profundos. "
                    "Crea exactamente 12 intercambios de conversación y responde solo en formato JSON."
                )
            elif language == "Italian":
                system_message = (
                    "Sei un famoso sceneggiatore di podcast professionali italiano. "
                    "Crea discussioni di alta qualità in italiano che forniscano al pubblico "
                    "conoscenze professionali approfondite. "
                    "Crea esattamente 12 scambi di conversazione e rispondi solo in formato JSON."
                )
            elif language == "Portuguese":
                system_message = (
                    "Você é um famoso roteirista de podcast profissional brasileiro. "
                    "Crie discussões de alta qualidade em português que forneçam ao público "
                    "conhecimentos profissionais aprofundados. "
                    "Crie exatamente 12 trocas de conversa e responda apenas em formato JSON."
                )
            elif language == "Chinese":
                system_message = (
                    "您是中国著名的专业播客编剧。"
                    "创建高质量的中文讨论,为观众提供深入的专业知识。"
                    "创建恰好12次对话交换,仅以JSON格式回答。"
                )
            elif language == "Russian":
                system_message = (
                    "Вы известный профессиональный сценарист подкастов из России. "
                    "Создавайте высококачественные дискуссии на русском языке, которые дают аудитории "
                    "глубокие профессиональные знания. "
                    "Создайте ровно 12 обменов разговором и отвечайте только в формате JSON."
                )
            else:
                system_message = (
                    f"You are a professional podcast scriptwriter creating high-quality, "
                    f"insightful discussions in {language}. Create exactly 12 conversation exchanges "
                    f"with professional expertise. All dialogue must be in {language}. "
                    f"Respond only in JSON format."
                )

            agent = LlamaCppAgent(
                provider,
                system_prompt=system_message,
                predefined_messages_formatter_type=chat_template,
                debug_output=False
            )
            
            settings = provider.get_provider_default_settings()
            settings.temperature = 0.75
            settings.top_k = 40
            settings.top_p = 0.95
            settings.max_tokens = self.config.max_tokens
            settings.repeat_penalty = 1.1
            settings.stream = False

            messages = BasicChatHistory()
            
            prompt = self._build_prompt(text, language, search_context)
            response = agent.get_chat_response(
                prompt,
                llm_sampling_settings=settings,
                chat_history=messages,
                returns_streaming_generator=False,
                print_output=False
            )

            # Pull the first JSON object out of the raw reply; the regex
            # tolerates one level of nested braces (enough for the expected
            # {"conversation": [{...}, ...]} shape).
            pattern = r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}"
            json_match = re.search(pattern, response)
            
            if json_match:
                conversation_data = json.loads(json_match.group())
                return conversation_data
            else:
                raise ValueError("No valid JSON found in local LLM response")
                
        except Exception as e:
            # Any failure (init, inference, JSON parse) falls back to the legacy
            # model.  search_context is always bound: it is assigned first in
            # the try block, so it can safely be forwarded here.
            print(f"Local LLM failed: {e}, falling back to legacy local method")
            return self.extract_conversation_legacy_local(text, language, progress, search_context)

    @spaces.GPU(duration=120)
    def extract_conversation_legacy_local(self, text: str, language: str = "English", progress=None, search_context: str = "") -> Dict:
        """Extract a podcast-style conversation using the legacy local model.

        Builds a per-language system prompt, renders it through the legacy
        tokenizer's chat template, streams a generation from
        ``self.legacy_local_model`` on a worker thread, and parses the first
        JSON object found in the streamed text.

        Args:
            text: Source material the dialogue should be based on.
            language: Target language for the generated dialogue.
            progress: Unused here; kept for signature compatibility with the
                other extraction methods.
            search_context: Extra context forwarded to ``self._build_prompt``.

        Returns:
            Dict parsed from the model output (expected to contain a
            ``"conversation"`` list). On any failure, a built-in per-language
            default template from ``self._get_default_conversation``.
        """
        try:
            self.initialize_legacy_local_mode()
            
            # Per-language system message (falls through to a generic
            # English-written instruction for unlisted languages).
            if language == "Korean":
                system_message = (
                    "당신은 전문 팟캐스트 작가입니다. "
                    "12회의 대화 교환으로 구성된 전문적인 대담을 한국어로 만드세요. "
                    "모든 대화는 반드시 한국어로 작성해주세요."
                )
            elif language == "Japanese":
                system_message = (
                    "あなたは専門的なポッドキャスト作家です。"
                    "12回の対話交換で構成された専門的な対談を日本語で作成してください。"
                    "すべての対話は必ず日本語で作成してください。"
                )
            elif language == "French":
                system_message = (
                    "Vous êtes un scénariste de podcast professionnel. "
                    "Créez un dialogue professionnel composé de 12 échanges de conversation en français. "
                    "Toutes les conversations doivent être écrites en français."
                )
            elif language == "German":
                system_message = (
                    "Sie sind ein professioneller Podcast-Drehbuchautor. "
                    "Erstellen Sie einen professionellen Dialog aus 12 Gesprächsaustauschen auf Deutsch. "
                    "Alle Gespräche müssen auf Deutsch geschrieben werden."
                )
            elif language == "Spanish":
                system_message = (
                    "Eres un guionista de podcast profesional. "
                    "Crea un diálogo profesional compuesto por 12 intercambios de conversación en español. "
                    "Todas las conversaciones deben estar escritas en español."
                )
            elif language == "Chinese":
                system_message = (
                    "您是专业播客编剧。"
                    "创建由12次对话交换组成的专业对话,使用中文。"
                    "所有对话都必须用中文书写。"
                )
            elif language == "Russian":
                system_message = (
                    "Вы профессиональный сценарист подкастов. "
                    "Создайте профессиональный диалог из 12 обменов разговором на русском языке. "
                    "Все разговоры должны быть написаны на русском языке."
                )
            else:
                system_message = (
                    f"You are a professional podcast scriptwriter. "
                    f"Create a professional dialogue in {language} with 12 exchanges. "
                    f"All conversations must be written in {language}."
                )

            chat = [
                {"role": "system", "content": system_message},
                {"role": "user", "content": self._build_prompt(text, language, search_context)}
            ]

            # Stop on EOS or the "<|eot_id|>" end-of-turn token (Llama-3 style).
            # NOTE(review): convert_tokens_to_ids returns the unk id when the
            # token is absent from the vocab — confirm the legacy tokenizer
            # actually defines "<|eot_id|>".
            terminators = [
                self.legacy_tokenizer.eos_token_id,
                self.legacy_tokenizer.convert_tokens_to_ids("<|eot_id|>")
            ]

            # Render the chat into a single prompt string, then tokenize it.
            messages = self.legacy_tokenizer.apply_chat_template(
                chat, tokenize=False, add_generation_prompt=True
            )
            model_inputs = self.legacy_tokenizer([messages], return_tensors="pt").to(self.device)
            
            streamer = TextIteratorStreamer(
                self.legacy_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
            )
            
            # dict(model_inputs, ...) merges the tokenized inputs with the
            # generation arguments into one kwargs mapping for generate().
            generate_kwargs = dict(
                model_inputs,
                streamer=streamer,
                max_new_tokens=self.config.max_new_tokens,
                do_sample=True,
                temperature=0.75,
                eos_token_id=terminators,
            )

            # generate() runs on a worker thread; this thread drains the
            # streamer below until generation finishes.
            t = Thread(target=self.legacy_local_model.generate, kwargs=generate_kwargs)
            t.start()

            partial_text = ""
            for new_text in streamer:
                partial_text += new_text

            # Grab the first {...} object in the output. The pattern only
            # tolerates one level of nested braces, which is enough for the
            # expected {"conversation": [{...}, ...]}-shaped payload.
            pattern = r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}"
            json_match = re.search(pattern, partial_text)
            
            if json_match:
                return json.loads(json_match.group())
            else:
                raise ValueError("No valid JSON found in legacy local response")
                
        except Exception as e:
            # Final fallback: hand back a canned per-language conversation so
            # downstream audio generation still has something to work with.
            print(f"Legacy local model also failed: {e}")
            return self._get_default_conversation(language)

    def _get_default_conversation(self, language: str) -> Dict:
        """Return the built-in fallback dialogue template for *language*.

        Unrecognized languages fall back to the English template.
        """
        template_getters = {
            "Korean": self._get_default_korean_conversation,
            "Japanese": self._get_default_japanese_conversation,
            "French": self._get_default_french_conversation,
            "German": self._get_default_german_conversation,
            "Spanish": self._get_default_spanish_conversation,
            "Chinese": self._get_default_chinese_conversation,
            "Russian": self._get_default_russian_conversation,
        }
        getter = template_getters.get(language, self._get_default_english_conversation)
        return getter()

    def _get_default_japanese_conversation(self) -> Dict:
        """기본 일본어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Hiroshi", "text": "皆さん、こんにちは!今日は本当に重要で興味深いトピックについて話し合いたいと思います。高橋先生、まずこの話題がなぜ今これほど注目されているのか説明していただけますか?"},
                {"speaker": "Takeshi", "text": "こんにちは。最近この分野で画期的な進展がありました。特に昨年のMIT研究チームの発表によると、この技術の効率性が従来比で300%向上したとのことです。これは単なる技術的進歩を超えて、私たちの日常生活に直接的な影響を与える可能性がある変化です。実際にGoogleやMicrosoftなどの大手テック企業がすでに数十億ドルを投資しています。"},
                {"speaker": "Hiroshi", "text": "300%の向上とは本当に驚くべきことですね。それでは、このような技術発展が一般の人々にとって具体的にどのような利益をもたらすのでしょうか?"},
                {"speaker": "Takeshi", "text": "最も直接的な利益はコスト削減とアクセシビリティの向上です。例えば、以前は専門家のみが使用できた高度な機能が、今ではスマートフォンアプリでも実装可能になりました。McKinseyのレポートによると、2025年までにこの技術により世界的に約2兆ドルの経済価値が創出されると予想されています。特に医療、教育、金融分野で革新的な変化が起こると見られています。"},
                {"speaker": "Hiroshi", "text": "2兆ドルという enormous な規模ですね。医療分野ではどのような変化が予想されますか?"},
                {"speaker": "Takeshi", "text": "医療分野の変化は本当に革命的になると予想されます。すでにスタンフォード大学病院では、この技術を活用してがん診断の精度を95%まで高めました。従来では熟練した医師でも見落とす可能性のあった微細な病変をAIが検出するのです。さらに驚くべきことは、このような診断がわずか数分で行われることです。WHO の推計では、この技術が世界的に普及すれば、年間数百万人の命を救うことができると予測しています。"},
                {"speaker": "Hiroshi", "text": "本当に印象的ですね。しかし、このような急速な技術発展に対する懸念の声もあるでしょうね?"},
                {"speaker": "Takeshi", "text": "おっしゃる通りです。主な懸念事項は大きく3つあります。第一は雇用代替問題で、オックスフォード大学の研究によると、今後20年以内に現在の職業の47%が自動化される危険性があります。第二はプライバシーとセキュリティの問題です。第三は技術格差による不平等の深刻化です。しかし歴史的に見ると、新しい技術は常に新しい機会も同時に作り出してきたため、適切な政策と教育によってこれらの問題を解決できると考えています。"},
                {"speaker": "Hiroshi", "text": "バランスの取れた視点が重要ですね。それでは、私たちはこのような変化にどのように備えるべきでしょうか?"},
                {"speaker": "Takeshi", "text": "最も重要なのは継続的な学習と適応力です。世界経済フォーラムは、2025年までに世界の労働者の50%が再教育を必要とすると予測しました。特にデジタルリテラシー、批判的思考力、創造性などの能力が重要になるでしょう。個人的には、オンライン教育プラットフォームを活用した自己啓発をお勧めします。例えば、CourseraやedXなどのプラットフォームでは、世界最高の大学の講義を無料で受講できます。"},
                {"speaker": "Hiroshi", "text": "実用的なアドバイスをありがとうございます。最後に、この分野の将来の展望をどのように見ていらっしゃいますか?"},
                {"speaker": "Takeshi", "text": "今後10年間は人類史上最も急速な技術発展を経験する時期になるでしょう。Gartnerのハイプサイクル分析によると、現在私たちはこの技術の初期段階に過ぎません。2030年までには、現在では想像し難いレベルの革新が起こると予想されます。重要なのは、このような変化を恐れるのではなく、機会として活用してより良い未来を作り上げていくことだと思います。"},
                {"speaker": "Hiroshi", "text": "本当に洞察に富んだお話でした。今日は大変有益な時間でした。リスナーの皆さんも今日議論された内容を基に未来に備えていただければと思います。高橋先生、貴重なお時間をいただき、ありがとうございました!"},
                {"speaker": "Takeshi", "text": "ありがとうございました。リスナーの皆さんがこの変化の時代を賢明に乗り切っていかれることを願っています。技術は道具に過ぎず、それをどのように活用するかは私たちにかかっているということを覚えておいてください。今日お話しした内容についてさらに詳しく知りたい方は、私が運営するブログや最近出版した本で詳細な情報を見つけることができます。"}
            ]
        }

    def _get_default_french_conversation(self) -> Dict:
        """기본 프랑스어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Pierre", "text": "Bonjour tout le monde ! Aujourd'hui, nous allons aborder un sujet vraiment important et fascinant. Marc, pourriez-vous d'abord expliquer pourquoi ce sujet attire autant l'attention en ce moment ?"},
                {"speaker": "Marc", "text": "Bonjour Pierre. Nous assistons récemment à des développements révolutionnaires dans ce domaine. Selon une publication récente du MIT, l'efficacité de cette technologie s'est améliorée de 300% par rapport aux méthodes conventionnelles. Il ne s'agit pas seulement d'un progrès technique, mais d'un changement qui peut avoir un impact direct sur notre vie quotidienne. En fait, des géants technologiques comme Google et Microsoft ont déjà investi des milliards de dollars dans ce secteur."},
                {"speaker": "Pierre", "text": "Une amélioration de 300%, c'est vraiment remarquable ! Alors, quels bénéfices concrets cette évolution technologique peut-elle apporter au grand public ?"},
                {"speaker": "Marc", "text": "Les avantages les plus directs sont la réduction des coûts et l'amélioration de l'accessibilité. Par exemple, des fonctionnalités avancées qui n'étaient auparavant disponibles que pour les experts peuvent maintenant être implémentées dans des applications smartphone. Selon un rapport de McKinsey, cette technologie devrait créer environ 2 000 milliards de dollars de valeur économique mondiale d'ici 2025. Des changements innovants sont particulièrement attendus dans les domaines de la santé, de l'éducation et de la finance."},
                {"speaker": "Pierre", "text": "2 000 milliards de dollars, c'est une échelle énorme ! Quels changements sont attendus dans le domaine médical ?"},
                {"speaker": "Marc", "text": "Les changements dans le domaine médical seront vraiment révolutionnaires. L'hôpital universitaire de Stanford a déjà utilisé cette technologie pour améliorer la précision du diagnostic du cancer jusqu'à 95%. L'IA peut détecter des lésions microscopiques que même des médecins expérimentés pourraient manquer. Ce qui est encore plus remarquable, c'est que de tels diagnostics peuvent être effectués en quelques minutes seulement. Selon les estimations de l'OMS, si cette technologie se répand mondialement, elle pourrait sauver des millions de vies chaque année."},
                {"speaker": "Pierre", "text": "C'est vraiment impressionnant. Mais il doit y avoir aussi des préoccupations concernant ce développement technologique rapide ?"},
                {"speaker": "Marc", "text": "Absolument. Il y a trois préoccupations principales. Premièrement, le problème du remplacement de l'emploi - selon une recherche de l'Université d'Oxford, 47% des emplois actuels risquent d'être automatisés dans les 20 prochaines années. Deuxièmement, les questions de confidentialité et de sécurité. Troisièmement, l'aggravation des inégalités due au fossé technologique. Cependant, historiquement, les nouvelles technologies ont toujours créé de nouvelles opportunités, donc je pense que ces problèmes peuvent être résolus avec des politiques et une éducation appropriées."},
                {"speaker": "Pierre", "text": "Une perspective équilibrée est importante. Alors, comment devrions-nous nous préparer à ces changements ?"},
                {"speaker": "Marc", "text": "Le plus important est l'apprentissage continu et l'adaptabilité. Le Forum économique mondial prédit que 50% des travailleurs mondiaux auront besoin d'une formation supplémentaire d'ici 2025. Des compétences comme la littératie numérique, la pensée critique et la créativité deviendront particulièrement importantes. Personnellement, je recommande l'auto-amélioration grâce aux plateformes d'éducation en ligne. Par exemple, sur des plateformes comme Coursera ou edX, vous pouvez suivre gratuitement des cours des meilleures universités du monde."},
                {"speaker": "Pierre", "text": "Merci pour ces conseils pratiques. Enfin, comment voyez-vous les perspectives d'avenir de ce domaine ?"},
                {"speaker": "Marc", "text": "Les 10 prochaines années seront une période d'évolution technologique la plus rapide de l'histoire humaine. Selon l'analyse du cycle de Gartner, nous ne sommes actuellement qu'au stade initial de cette technologie. D'ici 2030, on s'attend à des innovations d'un niveau difficile à imaginer actuellement. L'important est de ne pas craindre ces changements, mais de les considérer comme des opportunités pour créer un avenir meilleur."},
                {"speaker": "Pierre", "text": "C'étaient des paroles vraiment perspicaces. Aujourd'hui a été un moment très enrichissant. J'espère que nos auditeurs se prépareront également à l'avenir en se basant sur ce qui a été discuté aujourd'hui. Marc, merci d'avoir pris de votre précieux temps !"},
                {"speaker": "Marc", "text": "Merci à vous. J'espère que nos auditeurs navigueront sagement à travers cette ère de changement. Rappelez-vous que la technologie n'est qu'un outil, et c'est à nous de décider comment l'utiliser. Pour ceux qui souhaitent en savoir plus sur ce dont nous avons parlé aujourd'hui, vous pouvez trouver des informations plus détaillées sur mon blog et dans mon livre récemment publié."}
            ]
        }

    def _get_default_german_conversation(self) -> Dict:
        """기본 독일어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Klaus", "text": "Hallo zusammen! Heute wollen wir ein wirklich wichtiges und faszinierendes Thema besprechen. Stefan, könnten Sie zunächst erklären, warum dieses Thema gerade jetzt so viel Aufmerksamkeit erhält?"},
                {"speaker": "Stefan", "text": "Hallo Klaus. Wir erleben kürzlich revolutionäre Entwicklungen in diesem Bereich. Laut einer aktuellen Veröffentlichung des MIT hat sich die Effizienz dieser Technologie um 300% gegenüber herkömmlichen Methoden verbessert. Das ist nicht nur technischer Fortschritt, sondern eine Veränderung, die direkten Einfluss auf unser tägliches Leben haben kann. Tatsächlich haben Tech-Riesen wie Google und Microsoft bereits Milliarden von Dollar in diesen Sektor investiert."},
                {"speaker": "Klaus", "text": "300% Verbesserung ist wirklich bemerkenswert! Welche konkreten Vorteile kann diese technologische Entwicklung für die Allgemeinheit bringen?"},
                {"speaker": "Stefan", "text": "Die direktesten Vorteile sind Kostenreduzierung und verbesserte Zugänglichkeit. Zum Beispiel können erweiterte Funktionen, die früher nur Experten zur Verfügung standen, jetzt in Smartphone-Apps implementiert werden. Laut einem McKinsey-Bericht soll diese Technologie bis 2025 weltweit etwa 2 Billionen Dollar wirtschaftlichen Wert schaffen. Besonders in den Bereichen Gesundheitswesen, Bildung und Finanzen werden innovative Veränderungen erwartet."},
                {"speaker": "Klaus", "text": "2 Billionen Dollar ist eine enorme Größenordnung! Welche Veränderungen werden im medizinischen Bereich erwartet?"},
                {"speaker": "Stefan", "text": "Die Veränderungen im medizinischen Bereich werden wirklich revolutionär sein. Das Stanford University Hospital hat diese Technologie bereits eingesetzt, um die Genauigkeit der Krebsdiagnose auf 95% zu verbessern. KI kann mikroskopische Läsionen erkennen, die selbst erfahrene Ärzte übersehen könnten. Noch bemerkenswerter ist, dass solche Diagnosen in nur wenigen Minuten durchgeführt werden können. Laut WHO-Schätzungen könnte diese Technologie, wenn sie weltweit verbreitet wird, jährlich Millionen von Leben retten."},
                {"speaker": "Klaus", "text": "Das ist wirklich beeindruckend. Aber es muss auch Bedenken bezüglich dieser schnellen technologischen Entwicklung geben?"},
                {"speaker": "Stefan", "text": "Absolut richtig. Es gibt drei Hauptbedenken. Erstens das Problem der Arbeitsplatzverdrängung - laut Oxford University-Forschung sind 47% der aktuellen Jobs in den nächsten 20 Jahren von Automatisierung bedroht. Zweitens Datenschutz- und Sicherheitsfragen. Drittens die Verschärfung von Ungleichheiten durch die Technologielücke. Historisch gesehen haben neue Technologien jedoch immer auch neue Möglichkeiten geschaffen, daher glaube ich, dass diese Probleme mit angemessenen Richtlinien und Bildung gelöst werden können."},
                {"speaker": "Klaus", "text": "Eine ausgewogene Perspektive ist wichtig. Wie sollten wir uns auf diese Veränderungen vorbereiten?"},
                {"speaker": "Stefan", "text": "Das Wichtigste ist kontinuierliches Lernen und Anpassungsfähigkeit. Das Weltwirtschaftsforum prognostiziert, dass bis 2025 50% der weltweiten Arbeitskräfte Umschulung benötigen werden. Besonders wichtig werden Fähigkeiten wie digitale Kompetenz, kritisches Denken und Kreativität. Persönlich empfehle ich Selbstverbesserung durch Online-Bildungsplattformen. Zum Beispiel können Sie auf Plattformen wie Coursera oder edX kostenlos Vorlesungen der besten Universitäten der Welt besuchen."},
                {"speaker": "Klaus", "text": "Danke für die praktischen Ratschläge. Wie sehen Sie abschließend die Zukunftsaussichten in diesem Bereich?"},
                {"speaker": "Stefan", "text": "Die nächsten 10 Jahre werden eine Zeit der schnellsten technologischen Entwicklung in der Menschheitsgeschichte sein. Laut Gartners Hype-Cycle-Analyse befinden wir uns derzeit nur im Anfangsstadium dieser Technologie. Bis 2030 werden Innovationen auf einem Niveau erwartet, das derzeit schwer vorstellbar ist. Wichtig ist, diese Veränderungen nicht zu fürchten, sondern sie als Chancen zu nutzen, um eine bessere Zukunft zu schaffen."},
                {"speaker": "Klaus", "text": "Das waren wirklich einsichtsvolle Worte. Heute war eine sehr bereichernde Zeit. Ich hoffe, unsere Zuhörer werden sich auch basierend auf dem, was heute diskutiert wurde, auf die Zukunft vorbereiten. Stefan, vielen Dank, dass Sie sich die wertvolle Zeit genommen haben!"},
                {"speaker": "Stefan", "text": "Vielen Dank. Ich hoffe, unsere Zuhörer werden diese Zeit des Wandels weise meistern. Denken Sie daran, dass Technologie nur ein Werkzeug ist und es an uns liegt, wie wir sie nutzen. Für diejenigen, die mehr über das erfahren möchten, was wir heute besprochen haben, finden Sie detailliertere Informationen in meinem Blog und meinem kürzlich veröffentlichten Buch."}
            ]
        }

    def _get_default_spanish_conversation(self) -> Dict:
        """기본 스페인어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Carlos", "text": "¡Hola a todos! Hoy vamos a abordar un tema realmente importante y fascinante. Miguel, ¿podrías explicar primero por qué este tema está recibiendo tanta atención en este momento?"},
                {"speaker": "Miguel", "text": "Hola Carlos. Estamos presenciando desarrollos revolucionarios recientes en este campo. Según una publicación reciente del MIT, la eficiencia de esta tecnología ha mejorado un 300% en comparación con los métodos convencionales. Esto no es solo progreso técnico, sino un cambio que puede tener un impacto directo en nuestra vida diaria. De hecho, gigantes tecnológicos como Google y Microsoft ya han invertido miles de millones de dólares en este sector."},
                {"speaker": "Carlos", "text": "¡Una mejora del 300% es realmente notable! Entonces, ¿qué beneficios concretos puede aportar esta evolución tecnológica al público en general?"},
                {"speaker": "Miguel", "text": "Los beneficios más directos son la reducción de costos y la mejora de la accesibilidad. Por ejemplo, funciones avanzadas que antes solo estaban disponibles para expertos ahora pueden implementarse en aplicaciones de smartphone. Según un informe de McKinsey, se espera que esta tecnología cree aproximadamente 2 billones de dólares de valor económico mundial para 2025. Se esperan cambios innovadores especialmente en los campos de la salud, educación y finanzas."},
                {"speaker": "Carlos", "text": "¡2 billones de dólares es una escala enorme! ¿Qué cambios se esperan en el campo médico?"},
                {"speaker": "Miguel", "text": "Los cambios en el campo médico serán realmente revolucionarios. El Hospital Universitario de Stanford ya ha utilizado esta tecnología para mejorar la precisión del diagnóstico de cáncer hasta el 95%. La IA puede detectar lesiones microscópicas que incluso médicos experimentados podrían pasar por alto. Aún más notable es que tales diagnósticos pueden realizarse en solo unos minutos. Según las estimaciones de la OMS, si esta tecnología se extiende globalmente, podría salvar millones de vidas anualmente."},
                {"speaker": "Carlos", "text": "Eso es realmente impresionante. ¿Pero también debe haber preocupaciones sobre este rápido desarrollo tecnológico?"},
                {"speaker": "Miguel", "text": "Absolutamente correcto. Hay tres preocupaciones principales. Primero, el problema del reemplazo laboral: según la investigación de la Universidad de Oxford, el 47% de los empleos actuales están en riesgo de automatización en los próximos 20 años. Segundo, cuestiones de privacidad y seguridad. Tercero, el agravamiento de la desigualdad debido a la brecha tecnológica. Sin embargo, históricamente, las nuevas tecnologías siempre han creado nuevas oportunidades también, por lo que creo que estos problemas pueden resolverse con políticas y educación adecuadas."},
                {"speaker": "Carlos", "text": "Una perspectiva equilibrada es importante. Entonces, ¿cómo deberíamos prepararnos para estos cambios?"},
                {"speaker": "Miguel", "text": "Lo más importante es el aprendizaje continuo y la adaptabilidad. El Foro Económico Mundial predice que el 50% de los trabajadores mundiales necesitarán reeducación para 2025. Habilidades como la alfabetización digital, el pensamiento crítico y la creatividad se volverán especialmente importantes. Personalmente, recomiendo la automejora a través de plataformas de educación en línea. Por ejemplo, en plataformas como Coursera o edX, puedes tomar clases gratuitas de las mejores universidades del mundo."},
                {"speaker": "Carlos", "text": "Gracias por los consejos prácticos. Finalmente, ¿cómo ve las perspectivas futuras de este campo?"},
                {"speaker": "Miguel", "text": "Los próximos 10 años serán un período del desarrollo tecnológico más rápido en la historia humana. Según el análisis del ciclo de Gartner, actualmente estamos solo en la etapa inicial de esta tecnología. Para 2030, se esperan innovaciones en un nivel que es difícil de imaginar actualmente. Lo importante es no temer estos cambios, sino considerarlos como oportunidades para crear un futuro mejor."},
                {"speaker": "Carlos", "text": "Esas fueron palabras realmente perspicaces. Hoy ha sido un momento muy enriquecedor. Espero que nuestros oyentes también se preparen para el futuro basándose en lo que se discutió hoy. ¡Miguel, gracias por tomar tu valioso tiempo!"},
                {"speaker": "Miguel", "text": "Gracias a ti. Espero que nuestros oyentes naveguen sabiamente a través de esta era de cambio. Recuerden que la tecnología es solo una herramienta, y depende de nosotros cómo la usemos. Para aquellos que quieran saber más sobre lo que discutimos hoy, pueden encontrar información más detallada en mi blog y mi libro recientemente publicado."}
            ]
        }

    def _get_default_chinese_conversation(self) -> Dict:
        """기본 중국어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Wei", "text": "大家好!今天我们要讨论一个非常重要和有趣的话题。小军,你能首先解释一下为什么这个话题现在受到如此多的关注吗?"},
                {"speaker": "Jun", "text": "你好,小伟。我们最近在这个领域见证了革命性的发展。根据MIT最新发布的研究,这项技术的效率比传统方法提高了300%。这不仅仅是技术进步,而是一个可能直接影响我们日常生活的变化。实际上,像谷歌和微软这样的科技巨头已经在这个领域投资了数十亿美元。"},
                {"speaker": "Wei", "text": "300%的提升确实令人惊叹!那么这种技术发展能为普通大众带来什么具体的好处呢?"},
                {"speaker": "Jun", "text": "最直接的好处是成本降低和可访问性提高。例如,以前只有专家才能使用的高级功能现在可以在智能手机应用中实现。根据麦肯锡的报告,预计到2025年,这项技术将在全球创造约2万亿美元的经济价值。特别是在医疗、教育和金融领域,预计会出现创新性变化。"},
                {"speaker": "Wei", "text": "2万亿美元是一个巨大的规模!在医疗领域预计会有什么变化?"},
                {"speaker": "Jun", "text": "医疗领域的变化将是真正革命性的。斯坦福大学医院已经利用这项技术将癌症诊断的准确率提高到95%。AI可以检测到即使是经验丰富的医生也可能错过的微小病变。更令人惊叹的是,这样的诊断只需要几分钟就能完成。根据世界卫生组织的估计,如果这项技术在全球普及,每年可以拯救数百万人的生命。"},
                {"speaker": "Wei", "text": "这真的令人印象深刻。但是对于这种快速的技术发展也一定存在担忧吧?"},
                {"speaker": "Jun", "text": "确实如此。主要有三个担忧。首先是就业替代问题——根据牛津大学的研究,未来20年内当前47%的工作可能面临自动化风险。其次是隐私和安全问题。第三是技术差距导致的不平等加剧。但是从历史上看,新技术总是在创造新机遇的同时,我相信通过适当的政策和教育可以解决这些问题。"},
                {"speaker": "Wei", "text": "平衡的观点很重要。那么我们应该如何为这些变化做准备呢?"},
                {"speaker": "Jun", "text": "最重要的是持续学习和适应能力。世界经济论坛预测,到2025年,全球50%的工人将需要重新培训。数字素养、批判性思维和创造力等技能将变得特别重要。我个人推荐通过在线教育平台进行自我提升。例如,在Coursera或edX等平台上,你可以免费学习世界顶尖大学的课程。"},
                {"speaker": "Wei", "text": "感谢这些实用的建议。最后,你如何看待这个领域的未来前景?"},
                {"speaker": "Jun", "text": "未来10年将是人类历史上技术发展最快的时期。根据Gartner的炒作周期分析,我们目前只是处于这项技术的初期阶段。到2030年,预计会出现目前难以想象水平的创新。重要的是不要害怕这些变化,而是将它们视为创造更美好未来的机会。"},
                {"speaker": "Wei", "text": "这些话真的很有见地。今天是一个非常有益的时光。希望我们的听众也能基于今天讨论的内容为未来做准备。小军,感谢你抽出宝贵的时间!"},
                {"speaker": "Jun", "text": "谢谢你。希望我们的听众能够明智地度过这个变化的时代。请记住,技术只是工具,如何使用它取决于我们自己。对于想要了解更多我们今天讨论内容的人,可以在我的博客和最近出版的书中找到更详细的信息。"}
            ]
        }

    def _get_default_russian_conversation(self) -> Dict:
        """기본 러시아어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Alexei", "text": "Привет всем! Сегодня мы собираемся обсудить действительно важную и увлекательную тему. Дмитрий, не могли бы вы сначала объяснить, почему эта тема сейчас привлекает так много внимания?"},
                {"speaker": "Dmitri", "text": "Привет, Алексей. Мы недавно стали свидетелями революционных разработок в этой области. Согласно недавней публикации MIT, эффективность этой технологии улучшилась на 300% по сравнению с традиционными методами. Это не просто технический прогресс, а изменение, которое может оказать прямое влияние на нашу повседневную жизнь. Фактически, технологические гиганты, такие как Google и Microsoft, уже инвестировали миллиарды долларов в этот сектор."},
                {"speaker": "Alexei", "text": "Улучшение на 300% действительно замечательно! Итак, какие конкретные преимущества это технологическое развитие может принести широкой публике?"},
                {"speaker": "Dmitri", "text": "Наиболее прямые преимущества - это снижение затрат и улучшение доступности. Например, расширенные функции, которые ранее были доступны только экспертам, теперь могут быть реализованы в приложениях для смартфонов. Согласно отчету McKinsey, ожидается, что эта технология создаст примерно 2 триллиона долларов экономической стоимости в мире к 2025 году. Инновационные изменения особенно ожидаются в области здравоохранения, образования и финансов."},
                {"speaker": "Alexei", "text": "2 триллиона долларов - это огромный масштаб! Какие изменения ожидаются в медицинской области?"},
                {"speaker": "Dmitri", "text": "Изменения в медицинской области будут действительно революционными. Университетская больница Стэнфорда уже использовала эту технологию для улучшения точности диагностики рака до 95%. ИИ может обнаруживать микроскопические поражения, которые даже опытные врачи могли бы пропустить. Еще более примечательно то, что такая диагностика может быть выполнена всего за несколько минут. По оценкам ВОЗ, если эта технология распространится по всему миру, она сможет спасать миллионы жизней ежегодно."},
                {"speaker": "Alexei", "text": "Это действительно впечатляет. Но также должны быть опасения относительно такого быстрого технологического развития?"},
                {"speaker": "Dmitri", "text": "Абсолютно правильно. Есть три основные проблемы. Во-первых, проблема замещения рабочих мест - согласно исследованию Оксфордского университета, 47% современных рабочих мест рискуют быть автоматизированными в течение следующих 20 лет. Во-вторых, вопросы конфиденциальности и безопасности. В-третьих, усугубление неравенства из-за технологического разрыва. Однако исторически новые технологии всегда создавали и новые возможности, поэтому я считаю, что эти проблемы можно решить с помощью соответствующих политик и образования."},
                {"speaker": "Alexei", "text": "Сбалансированная перспектива важна. Итак, как нам следует готовиться к этим изменениям?"},
                {"speaker": "Dmitri", "text": "Наиболее важными являются непрерывное обучение и адаптивность. Всемирный экономический форум прогнозирует, что к 2025 году 50% мировых работников будут нуждаться в переобучении. Особенно важными станут навыки цифровой грамотности, критического мышления и креативности. Лично я рекомендую самосовершенствование через онлайн-образовательные платформы. Например, на платформах вроде Coursera или edX вы можете бесплатно посещать лекции лучших университетов мира."},
                {"speaker": "Alexei", "text": "Спасибо за практические советы. Наконец, как вы видите будущие перспективы этой области?"},
                {"speaker": "Dmitri", "text": "Следующие 10 лет будут периодом самого быстрого технологического развития в истории человечества. Согласно анализу цикла Gartner, в настоящее время мы находимся только на начальной стадии этой технологии. К 2030 году ожидаются инновации на уровне, который в настоящее время трудно представить. Важно не бояться этих изменений, а рассматривать их как возможности для создания лучшего будущего."},
                {"speaker": "Alexei", "text": "Это были действительно проницательные слова. Сегодня было очень обогащающее время. Надеюсь, наши слушатели тоже подготовятся к будущему, основываясь на том, что обсуждалось сегодня. Дмитрий, спасибо, что уделили свое драгоценное время!"},
                {"speaker": "Dmitri", "text": "Спасибо вам. Надеюсь, наши слушатели мудро пройдут через эту эпоху перемен. Помните, что технология - это всего лишь инструмент, и от нас зависит, как мы его используем. Для тех, кто хочет узнать больше о том, что мы обсуждали сегодня, вы можете найти более подробную информацию в моем блоге и недавно опубликованной книге."}
            ]
        }

    def _get_default_korean_conversation(self) -> Dict:
        """기본 한국어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "준수", "text": "안녕하세요, 여러분! 오늘은 정말 중요하고 흥미로운 주제를 다뤄보려고 합니다. 민호 박사님, 먼저 이 주제가 왜 지금 이렇게 주목받고 있는지 설명해주시겠어요?"},
                {"speaker": "민호", "text": "네, 안녕하세요. 최근 이 분야에서 획기적인 발전이 있었습니다. 특히 작년 MIT 연구팀의 발표에 따르면, 이 기술의 효율성이 기존 대비 300% 향상되었다고 합니다. 이는 단순한 기술적 진보를 넘어서 우리 일상생활에 직접적인 영향을 미칠 수 있는 변화인데요. 실제로 구글과 마이크로소프트 같은 빅테크 기업들이 이미 수십억 달러를 투자하고 있습니다."},
                {"speaker": "준수", "text": "와, 300% 향상이라니 정말 놀라운데요. 그렇다면 이런 기술 발전이 일반인들에게는 구체적으로 어떤 혜택을 가져다줄 수 있을까요?"},
                {"speaker": "민호", "text": "가장 직접적인 혜택은 비용 절감과 접근성 향상입니다. 예를 들어, 이전에는 전문가만 사용할 수 있던 고급 기능들이 이제는 스마트폰 앱으로도 구현 가능해졌습니다. 맥킨지 보고서에 따르면, 2025년까지 이 기술로 인해 전 세계적으로 약 2조 달러의 경제적 가치가 창출될 것으로 예상됩니다. 특히 의료, 교육, 금융 분야에서 혁신적인 변화가 일어날 것으로 보입니다."},
                {"speaker": "준수", "text": "2조 달러라는 엄청난 규모네요. 의료 분야에서는 어떤 변화가 예상되나요?"},
                {"speaker": "민호", "text": "의료 분야의 변화는 정말 혁명적일 것으로 예상됩니다. 이미 스탠포드 대학병원에서는 이 기술을 활용해 암 진단 정확도를 95%까지 높였습니다. 기존에는 숙련된 의사도 놓칠 수 있던 미세한 병변들을 AI가 감지해내는 것이죠. 더 놀라운 것은 이런 진단이 단 몇 분 만에 이뤄진다는 점입니다. WHO 추산으로는 이 기술이 전 세계적으로 보급되면 연간 수백만 명의 생명을 구할 수 있을 것으로 예측하고 있습니다."},
                {"speaker": "준수", "text": "정말 인상적이네요. 하지만 이런 급격한 기술 발전에 대한 우려의 목소리도 있을 것 같은데요?"},
                {"speaker": "민호", "text": "맞습니다. 주요 우려사항은 크게 세 가지입니다. 첫째는 일자리 대체 문제로, 옥스포드 대학 연구에 따르면 향후 20년 내에 현재 직업의 47%가 자동화될 위험이 있습니다. 둘째는 프라이버시와 보안 문제입니다. 셋째는 기술 격차로 인한 불평등 심화입니다. 하지만 역사적으로 보면 새로운 기술은 항상 새로운 기회도 함께 만들어왔기 때문에, 적절한 정책과 교육으로 이런 문제들을 해결할 수 있을 것으로 봅니다."},
                {"speaker": "준수", "text": "균형잡힌 시각이 중요하겠네요. 그렇다면 우리가 이런 변화에 어떻게 대비해야 할까요?"},
                {"speaker": "민호", "text": "가장 중요한 것은 지속적인 학습과 적응력입니다. 세계경제포럼은 2025년까지 전 세계 근로자의 50%가 재교육이 필요할 것으로 예측했습니다. 특히 디지털 리터러시, 비판적 사고력, 창의성 같은 능력이 중요해질 것입니다. 개인적으로는 온라인 교육 플랫폼을 활용한 자기계발을 추천합니다. 예를 들어 Coursera나 edX 같은 플랫폼에서는 세계 최고 대학의 강의를 무료로 들을 수 있습니다."},
                {"speaker": "준수", "text": "실용적인 조언 감사합니다. 마지막으로 이 분야의 미래 전망은 어떻게 보시나요?"},
                {"speaker": "민호", "text": "향후 10년은 인류 역사상 가장 급격한 기술 발전을 경험하는 시기가 될 것입니다. 가트너의 하이프 사이클 분석에 따르면, 현재 우리는 이 기술의 초기 단계에 불과합니다. 2030년까지는 지금으로서는 상상하기 어려운 수준의 혁신이 일어날 것으로 예상됩니다. 중요한 것은 이런 변화를 두려워하기보다는 기회로 삼아 더 나은 미래를 만들어가는 것이라고 생각합니다."},
                {"speaker": "준수", "text": "정말 통찰력 있는 말씀이네요. 오늘 너무나 유익한 시간이었습니다. 청취자 여러분도 오늘 논의된 내용을 바탕으로 미래를 준비하시길 바랍니다. 민호 박사님, 귀중한 시간 내주셔서 감사합니다!"},
                {"speaker": "민호", "text": "감사합니다. 청취자 여러분들이 이 변화의 시대를 현명하게 헤쳐나가시길 바랍니다. 기술은 도구일 뿐이고, 그것을 어떻게 활용하는지는 우리에게 달려있다는 점을 기억해주세요."}
            ]
        }

    def _get_default_english_conversation(self) -> Dict:
        """기본 영어 대화 템플릿"""
        return {
            "conversation": [
                {"speaker": "Alex", "text": "Welcome everyone to our podcast! Today we're diving into a topic that's reshaping our world. Dr. Jordan, could you start by explaining why this subject has become so critical right now?"},
                {"speaker": "Jordan", "text": "Thanks, Alex. We're witnessing an unprecedented convergence of technological breakthroughs. According to a recent Nature publication, advances in this field have accelerated by 400% in just the past two years. This isn't just incremental progress - it's a fundamental shift in how we approach problem-solving. Major institutions like Harvard and Stanford are completely restructuring their research programs to focus on this area, with combined investments exceeding $5 billion annually."},
                {"speaker": "Alex", "text": "400% acceleration is staggering! What does this mean for everyday people who might not be tech-savvy?"},
                {"speaker": "Jordan", "text": "The impact will be profound yet accessible. Think about how smartphones revolutionized communication - this will be similar but across every aspect of life. McKinsey's latest report projects that by 2026, these technologies will create $4.4 trillion in annual value globally. For individuals, this translates to personalized healthcare that can predict illnesses years in advance, educational systems that adapt to each student's learning style, and financial tools that democratize wealth-building strategies previously available only to the ultra-wealthy."},
                {"speaker": "Alex", "text": "Those applications sound transformative. Can you give us a concrete example of how this is already being implemented?"},
                {"speaker": "Jordan", "text": "Absolutely. Let me share a compelling case from Johns Hopkins Hospital. They've deployed an AI system that analyzes patient data in real-time, reducing diagnostic errors by 85% and cutting average diagnosis time from days to hours. In one documented case, the system identified a rare genetic disorder in a child that had been misdiagnosed for three years. The accuracy comes from analyzing patterns across millions of cases - something impossible for even the most experienced doctors to do manually."},
                {"speaker": "Alex", "text": "That's truly life-changing technology. But I imagine there are significant challenges and risks we need to consider?"},
                {"speaker": "Jordan", "text": "You're absolutely right to raise this. The challenges are as significant as the opportunities. The World Economic Forum identifies three critical risks: algorithmic bias could perpetuate existing inequalities, cybersecurity threats become exponentially more dangerous, and there's the socioeconomic disruption with PwC estimating that 30% of jobs could be automated by 2030. However, history shows us that technological revolutions create new opportunities even as they displace old ones. The key is proactive adaptation and responsible development."},
                {"speaker": "Alex", "text": "How should individuals and organizations prepare for these changes?"},
                {"speaker": "Jordan", "text": "Preparation requires a multi-faceted approach. For individuals, I recommend focusing on skills that complement rather than compete with AI: critical thinking, emotional intelligence, and creative problem-solving. MIT's recent study shows that professionals who combine domain expertise with AI literacy see salary increases of 40% on average. Organizations need to invest in continuous learning programs - Amazon's $700 million worker retraining initiative is a good model. Most importantly, we need to cultivate an adaptive mindset."},
                {"speaker": "Alex", "text": "That's practical advice. What about the ethical considerations? How do we ensure this technology benefits humanity as a whole?"},
                {"speaker": "Jordan", "text": "Ethics must be at the forefront of development. The EU's AI Act and similar regulations worldwide are establishing important guardrails. We need transparent AI systems where decisions can be explained and audited. Companies like IBM and Google have established AI ethics boards, but we need industry-wide standards. Additionally, we must address the digital divide - UNESCO reports that 37% of the global population still lacks internet access. Without inclusive development, these technologies could exacerbate global inequality."},
                {"speaker": "Alex", "text": "Looking ahead, what's your vision for how this technology will shape the next decade?"},
                {"speaker": "Jordan", "text": "The next decade will be transformative beyond our current imagination. By 2035, I expect we'll see autonomous systems managing entire cities, personalized medicine extending human lifespan by 20-30 years, and educational AI that makes world-class education universally accessible. The convergence of AI with quantum computing, biotechnology, and nanotechnology will unlock possibilities we can barely conceive of today. However, the future isn't predetermined - it's shaped by the choices we make now about development priorities and ethical frameworks."},
                {"speaker": "Alex", "text": "Dr. Jordan, this has been an incredibly enlightening discussion. Thank you for sharing your expertise and insights with us today."},
                {"speaker": "Jordan", "text": "Thank you, Alex. For listeners wanting to dive deeper, I've compiled additional resources on my website. Remember, the future isn't something that happens to us - it's something we create together. I look forward to seeing how each of you contributes to shaping this exciting new era."}
            ]
        }

    def extract_conversation_api(self, text: str, language: str = "English") -> Dict:
        """Generate a 12-exchange podcast conversation from *text* via the LLM API.

        Args:
            text: Source material (article text or compiled keyword content).
            language: Target language for the generated conversation.

        Returns:
            Dict with a "conversation" list of {"speaker", "text"} turns.

        Raises:
            RuntimeError: if API mode is not initialized, or if generation or
                JSON parsing fails (original cause is chained).
        """
        if not self.llm_client:
            raise RuntimeError("API mode not initialized")

        try:
            # Optionally enrich the prompt with fresh web-search context.
            # Keyword-based input already went through search, so skip it there.
            search_context = ""
            if BRAVE_KEY and not text.startswith("Keyword-based content:"):
                try:
                    keywords = extract_keywords_for_search(text, language)
                    if keywords:
                        search_query = keywords[0] if language == "Korean" else f"{keywords[0]} latest news"
                        search_context = format_search_results(search_query)
                        print(f"Search context added for: {search_query}")
                except Exception as e:
                    # Best-effort: generation proceeds without search context.
                    print(f"Search failed, continuing without context: {e}")

            # Language-specific system prompts. A table lookup replaces the
            # previous if/elif chain; the strings and selection are unchanged.
            system_messages = {
                "Korean": (
                    "당신은 한국의 최고 전문 팟캐스트 작가입니다. "
                    "12회의 깊이 있는 대화 교환으로 구성된 고품질 대담을 한국어로 만드세요. "
                    "반드시 서로 존댓말을 사용하고 모든 대화는 한국어로 작성하세요."
                ),
                "Japanese": (
                    "あなたは日本の最高の専門ポッドキャスト作家です。"
                    "12回の深い対話交換で構成された高品質な対談を日本語で作成してください。"
                    "必ずお互いに丁寧語を使用し、すべての対話は日本語で作成してください。"
                ),
                "French": (
                    "Vous êtes le meilleur scénariste de podcast professionnel de France. "
                    "Créez des discussions de haute qualité composées de 12 échanges approfondis en français. "
                    "Toutes les conversations doivent être écrites en français."
                ),
                "German": (
                    "Sie sind der beste professionelle Podcast-Drehbuchautor Deutschlands. "
                    "Erstellen Sie hochwertige Diskussionen aus 12 tiefgreifenden Austauschen auf Deutsch. "
                    "Alle Gespräche müssen auf Deutsch geschrieben werden."
                ),
                "Spanish": (
                    "Eres el mejor guionista de podcast profesional de España. "
                    "Crea discusiones de alta calidad compuestas por 12 intercambios profundos en español. "
                    "Todas las conversaciones deben estar escritas en español."
                ),
                "Chinese": (
                    "您是中国最好的专业播客编剧。"
                    "创建由12次深入交流组成的高质量讨论,使用中文。"
                    "所有对话都必须用中文书写。"
                ),
                "Russian": (
                    "Вы лучший профессиональный сценарист подкастов в России. "
                    "Создайте высококачественные дискуссии из 12 глубоких обменов на русском языке. "
                    "Все разговоры должны быть написаны на русском языке."
                ),
            }
            system_message = system_messages.get(language) or (
                f"You are a top professional podcast scriptwriter. "
                f"Create high-quality discussions in {language} with exactly 12 exchanges. "
                f"Include specific data, research findings, and real cases. "
                f"All conversations must be written in {language}."
            )

            chat_completion = self.llm_client.chat.completions.create(
                messages=[
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": self._build_prompt(text, language, search_context)}
                ],
                model=self.config.api_model_name,
                temperature=0.75,
            )

            # Extract the first JSON object from the reply.
            # NOTE: the pattern only tolerates one level of nested braces, which
            # is sufficient for the expected {"conversation": [{...}, ...]} shape.
            pattern = r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}"
            json_match = re.search(pattern, chat_completion.choices[0].message.content)

            if not json_match:
                raise ValueError("No valid JSON found in response")

            return json.loads(json_match.group())
        except Exception as e:
            # Chain the cause so the original traceback is preserved for debugging.
            raise RuntimeError(f"Failed to extract conversation: {e}") from e

    def parse_conversation_text(self, conversation_text: str) -> Dict:
        """Parse "Speaker: text" lines back into the conversation JSON shape.

        Lines without a ':' delimiter are silently skipped; only the first
        colon separates speaker from text.
        """
        turns = []
        for raw_line in conversation_text.strip().split('\n'):
            if ':' not in raw_line:
                continue
            who, _, said = raw_line.partition(':')
            turns.append({"speaker": who.strip(), "text": said.strip()})
        return {"conversation": turns}

    async def text_to_speech_edge(self, conversation_json: Dict, language: str = "English") -> Tuple[str, str]:
        """Convert a conversation to speech using Edge TTS.

        Each turn is synthesized to its own file (voices alternate between the
        configured speakers), then all files are concatenated.

        Returns:
            (path_to_combined_wav, conversation_text)

        Raises:
            RuntimeError: if any synthesis or combination step fails.
        """
        import shutil  # local: needed for cross-filesystem moves of temp files

        output_dir = Path(self._create_output_directory())
        filenames = []

        try:
            # Fall back to English voices for languages without a dedicated set.
            voices = EDGE_TTS_VOICES.get(language, EDGE_TTS_VOICES["English"])

            for i, turn in enumerate(conversation_json["conversation"]):
                filename = output_dir / f"output_{i}.wav"
                # Alternate voices so the two speakers sound distinct.
                voice = voices[i % len(voices)]

                tmp_path = await self._generate_audio_edge(turn["text"], voice)
                # shutil.move (not os.rename): the temp file may live on a
                # different filesystem than the output directory, where a
                # bare rename raises OSError (EXDEV).
                shutil.move(tmp_path, filename)
                filenames.append(str(filename))

            # Combine per-turn audio into a single track.
            final_output = os.path.join(output_dir, "combined_output.wav")
            self._combine_audio_files(filenames, final_output)

            # Render the script alongside the audio.
            conversation_text = "\n".join(
                f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}" 
                for i, turn in enumerate(conversation_json["conversation"])
            )

            return final_output, conversation_text
        except Exception as e:
            raise RuntimeError(f"Failed to convert text to speech: {e}") from e

    async def _generate_audio_edge(self, text: str, voice: str) -> str:
        """Synthesize *text* with Edge TTS and return the temp-file path.

        The caller owns the returned file and is responsible for moving or
        deleting it.

        Raises:
            ValueError: if *text* is empty or whitespace-only.
        """
        if not text.strip():
            raise ValueError("Text cannot be empty")

        # Voice entries may be "short-name - description"; keep the short name.
        voice_short_name = voice.split(" - ")[0] if " - " in voice else voice
        communicate = edge_tts.Communicate(text, voice_short_name)

        # Reserve a unique path, then CLOSE the handle before Edge TTS writes
        # to it. The previous code saved while the NamedTemporaryFile handle
        # was still open, which fails on Windows (the path cannot be reopened
        # while held open by another handle).
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
            tmp_path = tmp_file.name
        await communicate.save(tmp_path)

        return tmp_path

    @spaces.GPU(duration=60)
    def text_to_speech_spark(self, conversation_json: Dict, language: str = "English", progress=None) -> Tuple[str, str]:
        """Convert a conversation to speech with the Spark TTS CLI.

        Each turn is synthesized by a subprocess call to ``cli.inference``.
        Turns that fail or time out are replaced with one second of silence so
        the combined track still lines up with the script.

        Returns:
            (path_to_combined_wav, conversation_text)

        Raises:
            RuntimeError: if Spark TTS is unavailable or synthesis fails entirely.
        """
        if not SPARK_AVAILABLE or not self.spark_model_dir:
            raise RuntimeError("Spark TTS not available")

        import sys  # local: used to invoke the CLI with the current interpreter

        def _write_silence(path: str) -> None:
            # Per-turn failure fallback: one second of 22.05 kHz silence.
            sf.write(path, np.zeros(int(22050 * 1.0)), 22050)

        try:
            output_dir = self._create_output_directory()
            audio_files = []

            # Reference prompts give the two speakers distinct voice timbres.
            if language == "Korean":
                voice_configs = [
                    {"prompt_text": "안녕하세요, 오늘 팟캐스트 진행을 맡은 준수입니다.", "gender": "male"},
                    {"prompt_text": "안녕하세요, 저는 오늘 이 주제에 대해 설명드릴 민호입니다.", "gender": "male"}
                ]
            else:
                voice_configs = [
                    {"prompt_text": "Hello everyone, I'm Alex, your host for today's podcast.", "gender": "male"},
                    {"prompt_text": "Hi, I'm Jordan. I'm excited to share my insights with you.", "gender": "male"}
                ]

            for i, turn in enumerate(conversation_json["conversation"]):
                text = turn["text"]
                if not text.strip():
                    continue  # nothing to synthesize for an empty turn

                voice_config = voice_configs[i % len(voice_configs)]
                output_file = os.path.join(output_dir, f"spark_output_{i}.wav")

                # sys.executable (not a bare "python") guarantees the CLI runs
                # under the same interpreter/venv as this process.
                cmd = [
                    sys.executable, "-m", "cli.inference",
                    "--text", text,
                    "--device", "0" if torch.cuda.is_available() else "cpu",
                    "--save_dir", output_dir,
                    "--model_dir", self.spark_model_dir,
                    "--prompt_text", voice_config["prompt_text"],
                    "--output_name", f"spark_output_{i}.wav"
                ]

                try:
                    result = subprocess.run(
                        cmd,
                        capture_output=True,
                        text=True,
                        timeout=60,
                        cwd="."
                    )
                    if result.returncode != 0:
                        print(f"Spark TTS error for turn {i}: {result.stderr}")
                        _write_silence(output_file)
                except subprocess.TimeoutExpired:
                    print(f"Spark TTS timeout for turn {i}")
                    _write_silence(output_file)
                except Exception as e:
                    print(f"Error running Spark TTS for turn {i}: {e}")
                    _write_silence(output_file)

                # Every path (real audio or silence) leaves a file to combine.
                audio_files.append(output_file)

            # Combine all audio files
            if audio_files:
                final_output = os.path.join(output_dir, "spark_combined.wav")
                self._combine_audio_files(audio_files, final_output)
            else:
                raise RuntimeError("No audio files generated")

            conversation_text = "\n".join(
                f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}" 
                for i, turn in enumerate(conversation_json["conversation"])
            )

            return final_output, conversation_text

        except Exception as e:
            raise RuntimeError(f"Failed to convert text to speech with Spark TTS: {e}") from e

    @spaces.GPU(duration=60)
    def text_to_speech_melo(self, conversation_json: Dict, progress=None) -> Tuple[str, str]:
        """Convert a conversation to speech with MeloTTS (English voices only).

        Returns:
            (path_to_mp3, conversation_text)

        Raises:
            RuntimeError: if MeloTTS or its models are unavailable.
        """
        if not MELO_AVAILABLE or not self.melo_models:
            raise RuntimeError("MeloTTS not available")

        voice_names = ["EN-Default", "EN-US"]
        model = self.melo_models["EN"]
        merged = AudioSegment.empty()

        for idx, turn in enumerate(conversation_json["conversation"]):
            # Alternate between the two English voices per turn.
            buffer = io.BytesIO()
            spk_name = voice_names[idx % 2]
            spk_id = model.hps.data.spk2id[spk_name]

            model.tts_to_file(
                turn["text"], spk_id, buffer, speed=1.0,
                pbar=progress.tqdm if progress else None,
                format="wav"
            )

            buffer.seek(0)
            merged += AudioSegment.from_file(buffer, format="wav")

        final_audio_path = "melo_podcast.mp3"
        merged.export(final_audio_path, format="mp3")

        conversation_text = "\n".join(
            f"{entry.get('speaker', f'Speaker {j+1}')}: {entry['text']}"
            for j, entry in enumerate(conversation_json["conversation"])
        )

        return final_audio_path, conversation_text

    def _create_output_directory(self) -> str:
        """Create a unique output directory"""
        random_bytes = os.urandom(8)
        folder_name = base64.urlsafe_b64encode(random_bytes).decode("utf-8")
        os.makedirs(folder_name, exist_ok=True)
        return folder_name

    def _combine_audio_files(self, filenames: List[str], output_file: str) -> None:
        """Combine multiple audio files into one"""
        if not filenames:
            raise ValueError("No input files provided")

        try:
            audio_segments = []
            for filename in filenames:
                if os.path.exists(filename):
                    audio_segment = AudioSegment.from_file(filename)
                    audio_segments.append(audio_segment)

            if audio_segments:
                combined = sum(audio_segments)
                combined.export(output_file, format="wav")

            # Clean up temporary files
            for filename in filenames:
                if os.path.exists(filename):
                    os.remove(filename)

        except Exception as e:
            raise RuntimeError(f"Failed to combine audio files: {e}")


# Global converter instance
# Shared by every Gradio callback below; holds the config plus lazily
# initialized LLM/TTS state (API client, Spark, MeloTTS).
converter = UnifiedAudioConverter(ConversationConfig())


async def synthesize(article_input, input_type: str = "URL", mode: str = "Local", tts_engine: str = "Edge-TTS", language: str = "English"):
    """Produce a podcast conversation script from a URL, PDF, or keyword.

    Returns a (conversation_text, audio) pair; audio is always None here —
    it is generated separately by regenerate_audio().
    """
    try:
        # --- 1. Obtain the source text for the requested input type -------
        if input_type == "URL":
            if not article_input or not isinstance(article_input, str):
                return "Please provide a valid URL.", None
            text = converter.fetch_text(article_input)
        elif input_type == "PDF":
            if not article_input:
                return "Please upload a PDF file.", None
            text = converter.extract_text_from_pdf(article_input)
        else:  # Keyword
            if not article_input or not isinstance(article_input, str):
                return "Please provide a keyword or topic.", None
            text = search_and_compile_content(article_input, language)
            text = f"Keyword-based content:\n{text}"

        # --- 2. Cap the word count at the configured maximum --------------
        words = text.split()
        if len(words) > converter.config.max_words:
            text = " ".join(words[:converter.config.max_words])

        # --- 3. Generate the conversation, falling back across modes ------
        api_key = os.environ.get("TOGETHER_API_KEY")
        if mode == "Local":
            try:
                conversation_json = converter.extract_conversation_local(text, language)
            except Exception as e:
                print(f"Local mode failed: {e}, trying API fallback")
                if not api_key:
                    raise RuntimeError("Local mode failed and no API key available for fallback")
                converter.initialize_api_mode(api_key)
                conversation_json = converter.extract_conversation_api(text, language)
        else:  # API mode
            if not api_key:
                print("API key not found, falling back to local mode")
                conversation_json = converter.extract_conversation_local(text, language)
            else:
                try:
                    converter.initialize_api_mode(api_key)
                    conversation_json = converter.extract_conversation_api(text, language)
                except Exception as e:
                    print(f"API mode failed: {e}, falling back to local mode")
                    conversation_json = converter.extract_conversation_local(text, language)

        # --- 4. Render the conversation as editable text -------------------
        conversation_text = "\n".join(
            f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}"
            for i, turn in enumerate(conversation_json["conversation"])
        )

        return conversation_text, None

    except Exception as e:
        return f"Error: {str(e)}", None


async def regenerate_audio(conversation_text: str, tts_engine: str = "Edge-TTS", language: str = "English"):
    """Regenerate audio from (possibly user-edited) conversation text.

    Returns:
        (status_message, audio_file_path_or_None)
    """
    # Guard against None as well as empty/whitespace text: Gradio passes None
    # for a cleared textbox, which previously crashed on .strip().
    if not conversation_text or not conversation_text.strip():
        return "Please provide conversation text.", None

    try:
        conversation_json = converter.parse_conversation_text(conversation_text)

        if not conversation_json["conversation"]:
            return "No valid conversation found in the text.", None

        # Languages supported only by Edge-TTS are forced onto it.
        if language in EDGE_TTS_ONLY_LANGUAGES and tts_engine != "Edge-TTS":
            tts_engine = "Edge-TTS"

        # Generate audio based on TTS engine
        if tts_engine == "Edge-TTS":
            output_file, _ = await converter.text_to_speech_edge(conversation_json, language)
        elif tts_engine == "Spark-TTS":
            if not SPARK_AVAILABLE:
                return "Spark TTS not available. Please install required dependencies and clone the Spark-TTS repository.", None
            converter.initialize_spark_tts()
            output_file, _ = converter.text_to_speech_spark(conversation_json, language)
        else:  # MeloTTS
            if not MELO_AVAILABLE:
                return "MeloTTS not available. Please install required dependencies.", None
            if language in EDGE_TTS_ONLY_LANGUAGES:
                return f"MeloTTS does not support {language}. Please use Edge-TTS for this language.", None
            converter.initialize_melo_tts()
            output_file, _ = converter.text_to_speech_melo(conversation_json)

        return "Audio generated successfully!", output_file

    except Exception as e:
        return f"Error generating audio: {str(e)}", None


def synthesize_sync(article_input, input_type: str = "URL", mode: str = "Local", tts_engine: str = "Edge-TTS", language: str = "English"):
    """Blocking wrapper around the async synthesize() for Gradio callbacks."""
    coro = synthesize(article_input, input_type, mode, tts_engine, language)
    return asyncio.run(coro)


def regenerate_audio_sync(conversation_text: str, tts_engine: str = "Edge-TTS", language: str = "English"):
    """Blocking wrapper around the async regenerate_audio() for Gradio callbacks."""
    coro = regenerate_audio(conversation_text, tts_engine, language)
    return asyncio.run(coro)


def update_tts_engine_for_language(language):
    """Restrict the TTS engine radio to Edge-TTS for languages only it supports."""
    if language not in EDGE_TTS_ONLY_LANGUAGES:
        # Full engine lineup for languages every backend can handle.
        return gr.Radio(
            choices=["Edge-TTS", "Spark-TTS", "MeloTTS"],
            value="Edge-TTS",
            label="TTS Engine",
            info="Edge-TTS: Cloud-based, natural voices | Spark-TTS: Local AI model | MeloTTS: Local, requires GPU",
            interactive=True
        )

    # Localized notice (in the selected language) explaining the restriction.
    notices = {
        "Korean": "한국어는 Edge-TTS만 지원됩니다",
        "Japanese": "日本語はEdge-TTSのみサポートされています",
        "French": "Le français n'est pris en charge que par Edge-TTS",
        "German": "Deutsch wird nur von Edge-TTS unterstützt",
        "Spanish": "El español solo es compatible con Edge-TTS",
        "Italian": "L'italiano è supportato solo da Edge-TTS",
        "Portuguese": "O português é suportado apenas pelo Edge-TTS",
        "Dutch": "Nederlands wordt alleen ondersteund door Edge-TTS",
        "Thai": "ภาษาไทยรองรับเฉพาะ Edge-TTS เท่านั้น",
        "Vietnamese": "Tiếng Việt chỉ được hỗ trợ bởi Edge-TTS",
        "Arabic": "العربية مدعومة فقط من Edge-TTS",
        "Hebrew": "עברית נתמכת רק על ידי Edge-TTS",
        "Indonesian": "Bahasa Indonesia hanya didukung oleh Edge-TTS",
        "Hindi": "हिंदी केवल Edge-TTS द्वारा समर्थित है",
        "Russian": "Русский поддерживается только Edge-TTS",
        "Chinese": "中文仅支持Edge-TTS"
    }
    return gr.Radio(
        choices=["Edge-TTS"],
        value="Edge-TTS",
        label="TTS Engine",
        info=notices.get(language, f"{language} is only supported by Edge-TTS"),
        interactive=False
    )


def toggle_input_visibility(input_type):
    """Show exactly one of the three input widgets.

    Returns three gr.update() objects — for the URL textbox, the PDF
    file upload, and the keyword textbox, in that order — with only the
    widget matching *input_type* visible. Any value other than "URL" or
    "PDF" is treated as the keyword mode.
    """
    shown = {"URL": 0, "PDF": 1}.get(input_type, 2)
    return tuple(gr.update(visible=(idx == shown)) for idx in range(3))


# Model initialization (at app startup): pre-fetch the local GGUF model
# so the first generation request doesn't block on the download.
if LLAMA_CPP_AVAILABLE:
    try:
        downloaded_path = hf_hub_download(
            repo_id=converter.config.local_model_repo,
            filename=converter.config.local_model_name,
            local_dir="./models"
        )
        print(f"Model downloaded to: {downloaded_path}")
    except Exception as download_error:
        # Best-effort: the app still starts; generation can fall back to API mode.
        print(f"Failed to download model at startup: {download_error}")


# Gradio Interface — improved multi-language layout.
# Builds the full UI tree and wires all event handlers; `demo` is the
# top-level Blocks object launched in the __main__ guard below.
with gr.Blocks(theme='soft', title="AI Podcast Generator", css="""
    .container {max-width: 1200px; margin: auto; padding: 20px;}
    .header-text {text-align: center; margin-bottom: 30px;}
    .input-group {background: #f7f7f7; padding: 20px; border-radius: 10px; margin-bottom: 20px;}
    .output-group {background: #f0f0f0; padding: 20px; border-radius: 10px;}
    .status-box {background: #e8f4f8; padding: 15px; border-radius: 8px; margin-top: 10px;}
""") as demo:
    with gr.Column(elem_classes="container"):
        # Header
        with gr.Row(elem_classes="header-text"):
            gr.Markdown("""
            # 🎙️ AI Podcast Generator - Professional Multi-Language Edition
            ### Convert any article, blog, PDF document, or topic into an engaging professional podcast conversation in 24+ languages!
            """)

        with gr.Row(elem_classes="discord-badge"):
            gr.HTML("""
            <p style="text-align: center;">
                <a href="https://discord.gg/openfreeai" target="_blank">
                    <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="badge">
                </a>
            </p>
            """)

        # Status display section
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown(f"""
                #### 🤖 System Status
                - **LLM**: {converter.config.local_model_name.split('.')[0]}
                - **Fallback**: {converter.config.api_model_name.split('/')[-1]}
                - **Llama CPP**: {"✅ Ready" if LLAMA_CPP_AVAILABLE else "❌ Not Available"}
                - **Search**: {"✅ Brave API" if BRAVE_KEY else "❌ No API"}
                """)
            with gr.Column(scale=1):
                gr.Markdown("""
                #### 🌍 Multi-Language Support
                - **24+ Languages**: Korean, Japanese, French, German, Spanish, Italian, etc.
                - **Native Voices**: Optimized for each language
                - **Professional Style**: Expert discussions with data & insights
                - **Auto-TTS Selection**: Best engine per language
                """)
        
        # Main input section
        with gr.Group(elem_classes="input-group"):
            with gr.Row():
                # Left: input options
                with gr.Column(scale=2):
                    # Input type selection — controls which of the three
                    # widgets below is visible (see toggle_input_visibility).
                    input_type_selector = gr.Radio(
                        choices=["URL", "PDF", "Keyword"],
                        value="URL",
                        label="📥 Input Type",
                        info="Choose your content source"
                    )
                    
                    # URL input (visible by default)
                    url_input = gr.Textbox(
                        label="🔗 Article URL", 
                        placeholder="Enter the article URL here...",
                        value="",
                        visible=True,
                        lines=2
                    )
                    
                    # PDF upload
                    pdf_input = gr.File(
                        label="📄 Upload PDF",
                        file_types=[".pdf"],
                        visible=False
                    )
                    
                    # Keyword input
                    keyword_input = gr.Textbox(
                        label="🔍 Topic/Keyword",
                        placeholder="Enter a topic (e.g., 'AI trends 2024', '인공지능', 'IA tendances', 'KI Trends')",
                        value="",
                        visible=False,
                        info="System will search and compile latest information",
                        lines=2
                    )
                
                # Right: settings options
                with gr.Column(scale=1):
                    # Language selection
                    language_selector = gr.Radio(
                        choices=[
                            "English", "Korean", "Japanese", "French", "German", 
                            "Spanish", "Italian", "Portuguese", "Dutch", "Thai", 
                            "Vietnamese", "Arabic", "Hebrew", "Indonesian", "Hindi", 
                            "Russian", "Chinese", "Norwegian", "Swedish", "Finnish", 
                            "Danish", "Polish", "Turkish", "Greek", "Czech"
                        ],
                        value="English",
                        label="🌐 Language / 언어 / 语言",
                        info="Select podcast language"
                    )
                    
                    # Processing mode
                    mode_selector = gr.Radio(
                        choices=["Local", "API"],
                        value="Local",
                        label="⚙️ Processing Mode",
                        info="Local: On-device | API: Cloud"
                    )
                    
                    # TTS engine — choices may be narrowed to Edge-TTS only
                    # by update_tts_engine_for_language on language change.
                    tts_selector = gr.Radio(
                        choices=["Edge-TTS", "Spark-TTS", "MeloTTS"],
                        value="Edge-TTS",
                        label="🔊 TTS Engine",
                        info="Voice synthesis engine"
                    )
            
            # Generate button
            with gr.Row():
                convert_btn = gr.Button(
                    "🎯 Generate Professional Conversation", 
                    variant="primary", 
                    size="lg",
                    scale=1
                )
        
        # Output section
        with gr.Group(elem_classes="output-group"):
            with gr.Row():
                # Left: conversation text (editable before audio synthesis)
                with gr.Column(scale=3):
                    conversation_output = gr.Textbox(
                        label="💬 Generated Professional Conversation (Editable)",
                        lines=25,
                        max_lines=50,
                        interactive=True,
                        placeholder="Professional podcast conversation will appear here...\n전문 팟캐스트 대화가 여기에 표시됩니다...\nLa conversation professionnelle du podcast apparaîtra ici...",
                        info="Edit the conversation as needed. Format: 'Speaker Name: Text'"
                    )
                    
                    # Audio generation button
                    with gr.Row():
                        generate_audio_btn = gr.Button(
                            "🎙️ Generate Audio from Text", 
                            variant="secondary", 
                            size="lg"
                        )
                
                # Right: audio output and status
                with gr.Column(scale=2):
                    audio_output = gr.Audio(
                        label="🎧 Professional Podcast Audio",
                        type="filepath",
                        interactive=False
                    )
                    
                    status_output = gr.Textbox(
                        label="📊 Status",
                        interactive=False,
                        lines=3,
                        elem_classes="status-box"
                    )
                    
                    # Help tips
                    gr.Markdown("""
                    #### 💡 Quick Tips:
                    - **URL**: Paste any article link
                    - **PDF**: Upload documents directly  
                    - **Keyword**: Enter topics for AI research
                    - **24+ Languages** fully supported
                    - Edit conversation before audio generation
                    - Auto TTS engine selection per language
                    """)
        
        # Examples section
        with gr.Accordion("📚 Multi-Language Examples", open=False):
            gr.Examples(
                examples=[
                    ["https://huggingface.co/blog/openfreeai/cycle-navigator", "URL", "Local", "Edge-TTS", "English"],
                    ["quantum computing breakthroughs", "Keyword", "Local", "Edge-TTS", "English"],
                    ["인공지능 윤리와 규제", "Keyword", "Local", "Edge-TTS", "Korean"],
                    ["https://huggingface.co/papers/2505.14810", "URL", "Local", "Edge-TTS", "Japanese"],
                    ["intelligence artificielle tendances", "Keyword", "Local", "Edge-TTS", "French"],
                    ["künstliche intelligenz entwicklung", "Keyword", "Local", "Edge-TTS", "German"],
                    ["inteligencia artificial avances", "Keyword", "Local", "Edge-TTS", "Spanish"],
                ],
                # NOTE(review): example rows feed url_input first, so keyword
                # examples land in the URL textbox — presumably synthesize_sync
                # handles the value by input_type; confirm against its signature.
                inputs=[url_input, input_type_selector, mode_selector, tts_selector, language_selector],
                outputs=[conversation_output, status_output],
                fn=synthesize_sync,
                cache_examples=False,
            )
    
    # Input type change handler: show only the matching input widget.
    input_type_selector.change(
        fn=toggle_input_visibility,
        inputs=[input_type_selector],
        outputs=[url_input, pdf_input, keyword_input]
    )
    
    # Update TTS engine options when the language changes.
    language_selector.change(
        fn=update_tts_engine_for_language,
        inputs=[language_selector],
        outputs=[tts_selector]
    )
    
    # Wire up events
    def get_article_input(input_type, url_input, pdf_input, keyword_input):
        """Get the appropriate input based on input type.

        Returns the URL string, the uploaded PDF file object, or the
        keyword string, depending on *input_type*.
        """
        if input_type == "URL":
            return url_input
        elif input_type == "PDF":
            return pdf_input
        else:  # Keyword
            return keyword_input
    
    # Generate conversation text from the selected source.
    convert_btn.click(
        fn=lambda input_type, url_input, pdf_input, keyword_input, mode, tts, lang: synthesize_sync(
            get_article_input(input_type, url_input, pdf_input, keyword_input), input_type, mode, tts, lang
        ),
        inputs=[input_type_selector, url_input, pdf_input, keyword_input, mode_selector, tts_selector, language_selector],
        outputs=[conversation_output, status_output]
    )
    
    # Synthesize audio from the (possibly user-edited) conversation text.
    generate_audio_btn.click(
        fn=regenerate_audio_sync,
        inputs=[conversation_output, tts_selector, language_selector],
        outputs=[status_output, audio_output]
    )


# Launch the app (script entry point only; importing this module must not start the server).
if __name__ == "__main__":
    queued_app = demo.queue(api_open=True, default_concurrency_limit=10)
    queued_app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_api=True,
        share=False
    )