openfree committed on
Commit 44ed247 · verified · 1 Parent(s): 09d782b

Update app-backup.py

Files changed (1)
  1. app-backup.py +1 -1594
app-backup.py CHANGED
@@ -1591,1597 +1591,4 @@ if __name__ == "__main__":
        share=False,
        server_name="0.0.0.0",
        server_port=7860
-    ) import spaces  # added
- import gradio as gr
- import os
- import asyncio
- import torch
- import io
- import json
- import re
- import httpx
- import tempfile
- import wave
- import base64
- import numpy as np
- import soundfile as sf
- import subprocess
- import shutil
- import requests
- import logging
- from datetime import datetime, timedelta
- from dataclasses import dataclass
- from typing import List, Tuple, Dict, Optional
- from pathlib import Path
- from threading import Thread
- from dotenv import load_dotenv
-
- # PDF processing imports
- from langchain_community.document_loaders import PyPDFLoader
-
- # Edge TTS imports
- import edge_tts
- from pydub import AudioSegment
-
- # OpenAI imports
- from openai import OpenAI
-
- # Transformers imports (for legacy local mode)
- from transformers import (
-     AutoModelForCausalLM,
-     AutoTokenizer,
-     TextIteratorStreamer,
-     BitsAndBytesConfig,
- )
-
- # Llama CPP imports (for new local mode)
- try:
-     from llama_cpp import Llama
-     from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
-     from llama_cpp_agent.providers import LlamaCppPythonProvider
-     from llama_cpp_agent.chat_history import BasicChatHistory
-     from llama_cpp_agent.chat_history.messages import Roles
-     from huggingface_hub import hf_hub_download
-     LLAMA_CPP_AVAILABLE = True
- except ImportError:
-     LLAMA_CPP_AVAILABLE = False
-
- # Spark TTS imports
- try:
-     from huggingface_hub import snapshot_download
-     SPARK_AVAILABLE = True
- except ImportError:
-     SPARK_AVAILABLE = False
-
- # MeloTTS imports (for local mode)
- try:
-     # download the unidic dictionary only if it is not already installed
-     if not os.path.exists("/usr/local/lib/python3.10/site-packages/unidic"):
-         try:
-             os.system("python -m unidic download")
-         except Exception:
-             pass
-     from melo.api import TTS as MeloTTS
-     MELO_AVAILABLE = True
- except Exception:
-     MELO_AVAILABLE = False
-
- load_dotenv()
-
- # Brave Search API configuration
- BRAVE_KEY = os.getenv("BSEARCH_API")
- BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
-
- @dataclass
- class ConversationConfig:
-     max_words: int = 8000  # increased from 4000
-     prefix_url: str = "https://r.jina.ai/"
-     api_model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
-     legacy_local_model_name: str = "NousResearch/Hermes-2-Pro-Llama-3-8B"
-     # new local model settings
-     local_model_name: str = "Private-BitSix-Mistral-Small-3.1-24B-Instruct-2503.gguf"
-     local_model_repo: str = "ginigen/Private-BitSix-Mistral-Small-3.1-24B-Instruct-2503"
-     # increased token limits
-     max_tokens: int = 6000  # increased from 3000
-     max_new_tokens: int = 12000  # increased from 6000
-     min_conversation_turns: int = 18  # minimum number of conversation turns
-     max_conversation_turns: int = 20  # maximum number of conversation turns
-
-
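- # Example (assumed) environment setup for search support; without BSEARCH_API,
- # brave_search() below returns no results and keyword mode falls back to
- # canned text:
- #   export BSEARCH_API="<your Brave Search subscription token>"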
- def brave_search(query: str, count: int = 8, freshness_days: int | None = None):
-     """Search for up-to-date information with the Brave Search API."""
-     if not BRAVE_KEY:
-         return []
-     params = {"q": query, "count": str(count)}
-     if freshness_days:
-         dt_from = (datetime.utcnow() - timedelta(days=freshness_days)).strftime("%Y-%m-%d")
-         params["freshness"] = dt_from
-     try:
-         r = requests.get(
-             BRAVE_ENDPOINT,
-             headers={"Accept": "application/json", "X-Subscription-Token": BRAVE_KEY},
-             params=params,
-             timeout=15
-         )
-         raw = r.json().get("web", {}).get("results") or []
-         return [{
-             "title": r.get("title", ""),
-             "url": r.get("url", r.get("link", "")),
-             "snippet": r.get("description", r.get("text", "")),
-             "host": re.sub(r"https?://(www\.)?", "", r.get("url", "")).split("/")[0]
-         } for r in raw[:count]]
-     except Exception as e:
-         logging.error(f"Brave search error: {e}")
-         return []
-
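- # NOTE: brave_search() returns a list of dicts with the keys "title", "url",
- # "snippet", and "host"; inside the comprehension the loop variable r shadows
- # the HTTP response object of the same name.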
- def format_search_results(query: str, for_keyword: bool = False) -> str:
-     """Format search results for inclusion in a prompt."""
-     # keyword searches use more results
-     count = 5 if for_keyword else 3
-     rows = brave_search(query, count, freshness_days=7 if not for_keyword else None)
-     if not rows:
-         return ""
-
-     results = []
-     # keyword searches include more detail
-     max_results = 4 if for_keyword else 2
-     for r in rows[:max_results]:
-         if for_keyword:
-             # keyword searches get longer snippets
-             snippet = r['snippet'][:200] + "..." if len(r['snippet']) > 200 else r['snippet']
-             results.append(f"**{r['title']}**\n{snippet}\nSource: {r['host']}")
-         else:
-             # regular searches get short snippets
-             snippet = r['snippet'][:100] + "..." if len(r['snippet']) > 100 else r['snippet']
-             results.append(f"- {r['title']}: {snippet}")
-
-     return "\n\n".join(results) + "\n"
-
- def extract_keywords_for_search(text: str, language: str = "English") -> List[str]:
-     """Extract search keywords from text (improved)."""
-     # use only the beginning of the text to avoid processing too much
-     text_sample = text[:500]
-
-     if language == "Korean":
-         import re
-         # extract Korean nouns (two or more Hangul characters)
-         keywords = re.findall(r'[가-힣]{2,}', text_sample)
-         # deduplicate, keeping first-seen order
-         unique_keywords = list(dict.fromkeys(keywords))
-         # sort by length and pick the word most likely to be meaningful
-         unique_keywords.sort(key=len, reverse=True)
-         return unique_keywords[:1]  # return a single keyword
-     else:
-         # for English, take the longest capitalized word
-         words = text_sample.split()
-         keywords = [word.strip('.,!?;:') for word in words
-                     if len(word) > 4 and word[0].isupper()]
-         if keywords:
-             return [max(keywords, key=len)]  # single longest word
-         return []
-
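- # Illustrative behaviour (example calls, not part of the original file):
- #   extract_keywords_for_search("OpenAI releases new model", "English") -> ["OpenAI"]
- #   extract_keywords_for_search("인공지능 기술의 발전과 전망", "Korean") -> ["인공지능"]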
- def search_and_compile_content(keyword: str, language: str = "English") -> str:
-     """Search by keyword and compile enough content."""
-     if not BRAVE_KEY:
-         # generate fallback content even without an API key
-         if language == "Korean":
-             return f"""
- '{keyword}'에 대한 종합적인 정보:
-
- {keyword}는 현대 사회에서 매우 중요한 주제입니다.
- 이 주제는 다양한 측면에서 우리의 삶에 영향을 미치고 있으며,
- 최근 들어 더욱 주목받고 있습니다.
-
- 주요 특징:
- 1. 기술적 발전과 혁신
- 2. 사회적 영향과 변화
- 3. 미래 전망과 가능성
- 4. 실용적 활용 방안
- 5. 글로벌 트렌드와 동향
-
- 전문가들은 {keyword}가 앞으로 더욱 중요해질 것으로 예상하고 있으며,
- 이에 대한 깊이 있는 이해가 필요한 시점입니다.
- """
-         else:
-             return f"""
- Comprehensive information about '{keyword}':
-
- {keyword} is a significant topic in modern society.
- This subject impacts our lives in various ways and has been
- gaining increasing attention recently.
-
- Key aspects:
- 1. Technological advancement and innovation
- 2. Social impact and changes
- 3. Future prospects and possibilities
- 4. Practical applications
- 5. Global trends and developments
-
- Experts predict that {keyword} will become even more important,
- and it's crucial to develop a deep understanding of this topic.
- """
-
-     # language-specific search queries
-     if language == "Korean":
-         queries = [
-             f"{keyword} 최신 뉴스 2024",
-             f"{keyword} 정보 설명",
-             f"{keyword} 트렌드 전망",
-             f"{keyword} 장점 단점",
-             f"{keyword} 활용 방법",
-             f"{keyword} 전문가 의견"
-         ]
-     else:
-         queries = [
-             f"{keyword} latest news 2024",
-             f"{keyword} explained comprehensive",
-             f"{keyword} trends forecast",
-             f"{keyword} advantages disadvantages",
-             f"{keyword} how to use",
-             f"{keyword} expert opinions"
-         ]
-
-     all_content = []
-     total_content_length = 0
-
-     for query in queries:
-         results = brave_search(query, count=5)  # fetch extra results
-         for r in results[:3]:  # top three per query
-             content = f"**{r['title']}**\n{r['snippet']}\nSource: {r['host']}\n"
-             all_content.append(content)
-             total_content_length += len(r['snippet'])
-
-     # pad with additional content if the total is too short
-     if total_content_length < 1000:  # ensure at least 1000 characters
-         if language == "Korean":
-             additional_content = f"""
- 추가 정보:
- {keyword}와 관련된 최근 동향을 살펴보면, 이 분야는 빠르게 발전하고 있습니다.
- 많은 전문가들이 이 주제에 대해 활발히 연구하고 있으며,
- 실생활에서의 응용 가능성도 계속 확대되고 있습니다.
-
- 특히 주목할 점은:
- - 기술 혁신의 가속화
- - 사용자 경험의 개선
- - 접근성의 향상
- - 비용 효율성 증대
- - 글로벌 시장의 성장
-
- 이러한 요소들이 {keyword}의 미래를 더욱 밝게 만들고 있습니다.
- """
-         else:
-             additional_content = f"""
- Additional insights:
- Recent developments in {keyword} show rapid advancement in this field.
- Many experts are actively researching this topic, and its practical
- applications continue to expand.
-
- Key points to note:
- - Accelerating technological innovation
- - Improving user experience
- - Enhanced accessibility
- - Increased cost efficiency
- - Growing global market
-
- These factors are making the future of {keyword} increasingly promising.
- """
-         all_content.append(additional_content)
-
-     # compile the collected content
-     compiled = "\n\n".join(all_content)
-
-     # keyword-based introduction
-     if language == "Korean":
-         intro = f"### '{keyword}'에 대한 종합적인 정보와 최신 동향:\n\n"
-     else:
-         intro = f"### Comprehensive information and latest trends about '{keyword}':\n\n"
-
-     return intro + compiled
-
-
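- # NOTE: the following _build_prompt is an orphaned module-level copy of
- # UnifiedAudioConverter._build_prompt (it takes self but sits outside any
- # class); only the method defined inside the class below is actually used.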
- def _build_prompt(self, text: str, language: str = "English", search_context: str = "") -> str:
-     """Build prompt for conversation generation with enhanced radio talk show style"""
-     # limit input text length
-     max_text_length = 4500 if search_context else 6000
-     if len(text) > max_text_length:
-         text = text[:max_text_length] + "..."
-
-     if language == "Korean":
-         # conversation template extended to more turns (15-20)
-         template = """
-         {
-             "conversation": [
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""},
-                 {"speaker": "준수", "text": ""},
-                 {"speaker": "민호", "text": ""}
-             ]
-         }
-         """
-
-         context_part = ""
-         if search_context:
-             context_part = f"# 최신 관련 정보:\n{search_context}\n"
-
-         base_prompt = (
-             f"# 원본 콘텐츠:\n{text}\n\n"
-             f"{context_part}"
-             f"위 내용으로 전문적이고 심층적인 라디오 팟캐스트 대담 프로그램 대본을 작성해주세요.\n\n"
-             f"## 필수 요구사항:\n"
-             f"1. **최소 18회 이상의 대화 교환** (준수 9회, 민호 9회 이상)\n"
-             f"2. **대화 스타일**: 전문적이고 깊이 있는 팟캐스트 대담\n"
-             f"3. **화자 역할**:\n"
-             f"   - 준수: 진행자 (통찰력 있는 질문, 핵심 포인트 정리, 청취자 관점 대변)\n"
-             f"   - 민호: 전문가 (상세하고 전문적인 설명, 구체적 예시, 데이터 기반 분석)\n"
-             f"4. **답변 규칙**:\n"
-             f"   - 준수: 1-2문장의 명확한 질문이나 요약\n"
-             f"   - 민호: **반드시 2-4문장으로 충실하게 답변** (핵심 개념 설명 + 부연 설명 + 예시/근거)\n"
-             f"   - 전문 용어는 쉽게 풀어서 설명\n"
-             f"   - 구체적인 수치, 사례, 연구 결과 인용\n"
-             f"5. **내용 구성**:\n"
-             f"   - 도입부 (2-3회): 주제의 중요성과 배경 설명\n"
-             f"   - 전개부 (12-14회): 핵심 내용을 다각도로 심층 분석\n"
-             f"   - 마무리 (2-3회): 핵심 요약과 미래 전망\n"
-             f"6. **전문성**: 학술적 근거와 실무적 통찰을 균형있게 포함\n"
-             f"7. **필수**: 서로 존댓말 사용, 청취자가 전문 지식을 얻을 수 있도록 상세히 설명\n\n"
-             f"반드시 위 JSON 형식으로 18회 이상의 전문적인 대화를 작성하세요:\n{template}"
-         )
-
-         return base_prompt
-
-     else:
-         # extended English template as well
-         template = """
-         {
-             "conversation": [
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""},
-                 {"speaker": "Alex", "text": ""},
-                 {"speaker": "Jordan", "text": ""}
-             ]
-         }
-         """
-
-         context_part = ""
-         if search_context:
-             context_part = f"# Latest Information:\n{search_context}\n"
-
-         base_prompt = (
-             f"# Content:\n{text}\n\n"
-             f"{context_part}"
-             f"Create a professional and in-depth podcast conversation.\n\n"
-             f"## Requirements:\n"
-             f"1. **Minimum 18 conversation exchanges** (Alex 9+, Jordan 9+)\n"
-             f"2. **Style**: Professional, insightful podcast discussion\n"
-             f"3. **Roles**:\n"
-             f"   - Alex: Host (insightful questions, key point summaries, audience perspective)\n"
-             f"   - Jordan: Expert (detailed explanations, concrete examples, data-driven analysis)\n"
-             f"4. **Response Rules**:\n"
-             f"   - Alex: 1-2 sentence clear questions or summaries\n"
-             f"   - Jordan: **Must answer in 2-4 sentences** (core concept + elaboration + example/evidence)\n"
-             f"   - Explain technical terms clearly\n"
-             f"   - Include specific data, cases, research findings\n"
-             f"5. **Structure**:\n"
-             f"   - Introduction (2-3 exchanges): Topic importance and context\n"
-             f"   - Main content (12-14 exchanges): Multi-angle deep analysis\n"
-             f"   - Conclusion (2-3 exchanges): Key takeaways and future outlook\n"
-             f"6. **Expertise**: Balance academic rigor with practical insights\n\n"
-             f"Create exactly 18+ professional exchanges in this JSON format:\n{template}"
-         )
-
-         return base_prompt
-
- class UnifiedAudioConverter:
-     def __init__(self, config: ConversationConfig):
-         self.config = config
-         self.llm_client = None
-         self.legacy_local_model = None
-         self.legacy_tokenizer = None
-         # new local LLM handles
-         self.local_llm = None
-         self.local_llm_model = None
-         self.melo_models = None
-         self.spark_model_dir = None
-         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-
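-     # NOTE: all model handles above start as None and are loaded lazily by the
-     # initialize_* methods below, so constructing the converter stays cheap.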
-     def initialize_api_mode(self, api_key: str):
-         """Initialize API mode with Together API (now fallback)"""
-         self.llm_client = OpenAI(api_key=api_key, base_url="https://api.together.xyz/v1")
-
-     @spaces.GPU(duration=120)
-     def initialize_local_mode(self):
-         """Initialize new local mode with Llama CPP"""
-         if not LLAMA_CPP_AVAILABLE:
-             raise RuntimeError("Llama CPP dependencies not available. Please install llama-cpp-python and llama-cpp-agent.")
-
-         if self.local_llm is None or self.local_llm_model != self.config.local_model_name:
-             try:
-                 # download the model
-                 model_path = hf_hub_download(
-                     repo_id=self.config.local_model_repo,
-                     filename=self.config.local_model_name,
-                     local_dir="./models"
-                 )
-
-                 model_path_local = os.path.join("./models", self.config.local_model_name)
-
-                 if not os.path.exists(model_path_local):
-                     raise RuntimeError(f"Model file not found at {model_path_local}")
-
-                 # initialize the Llama model
-                 self.local_llm = Llama(
-                     model_path=model_path_local,
-                     flash_attn=True,
-                     n_gpu_layers=81 if torch.cuda.is_available() else 0,
-                     n_batch=1024,
-                     n_ctx=16384,
-                 )
-                 self.local_llm_model = self.config.local_model_name
-                 print(f"Local LLM initialized: {model_path_local}")
-
-             except Exception as e:
-                 print(f"Failed to initialize local LLM: {e}")
-                 raise RuntimeError(f"Failed to initialize local LLM: {e}")
-
-     @spaces.GPU(duration=60)
-     def initialize_legacy_local_mode(self):
-         """Initialize legacy local mode with Hugging Face model (fallback)"""
-         if self.legacy_local_model is None:
-             quantization_config = BitsAndBytesConfig(
-                 load_in_4bit=True,
-                 bnb_4bit_compute_dtype=torch.float16
-             )
-             self.legacy_local_model = AutoModelForCausalLM.from_pretrained(
-                 self.config.legacy_local_model_name,
-                 quantization_config=quantization_config
-             )
-             self.legacy_tokenizer = AutoTokenizer.from_pretrained(
-                 self.config.legacy_local_model_name,
-                 revision='8ab73a6800796d84448bc936db9bac5ad9f984ae'
-             )
-
-     def initialize_spark_tts(self):
-         """Initialize Spark TTS model by downloading if needed"""
-         if not SPARK_AVAILABLE:
-             raise RuntimeError("Spark TTS dependencies not available")
-
-         model_dir = "pretrained_models/Spark-TTS-0.5B"
-
-         # Check if model exists, if not download it
-         if not os.path.exists(model_dir):
-             print("Downloading Spark-TTS model...")
-             try:
-                 os.makedirs("pretrained_models", exist_ok=True)
-                 snapshot_download(
-                     "SparkAudio/Spark-TTS-0.5B",
-                     local_dir=model_dir
-                 )
-                 print("Spark-TTS model downloaded successfully")
-             except Exception as e:
-                 raise RuntimeError(f"Failed to download Spark-TTS model: {e}")
-
-         self.spark_model_dir = model_dir
-
-         # Check if we have the CLI inference script
-         if not os.path.exists("cli/inference.py"):
-             print("Warning: Spark-TTS CLI not found. Please clone the Spark-TTS repository.")
-
-     @spaces.GPU(duration=60)
-     def initialize_melo_tts(self):
-         """Initialize MeloTTS models"""
-         if MELO_AVAILABLE and self.melo_models is None:
-             self.melo_models = {"EN": MeloTTS(language="EN", device=self.device)}
-
-     def fetch_text(self, url: str) -> str:
-         """Fetch text content from URL"""
-         if not url:
-             raise ValueError("URL cannot be empty")
-
-         if not url.startswith("http://") and not url.startswith("https://"):
-             raise ValueError("URL must start with 'http://' or 'https://'")
-
-         full_url = f"{self.config.prefix_url}{url}"
-         try:
-             response = httpx.get(full_url, timeout=60.0)
-             response.raise_for_status()
-             return response.text
-         except httpx.HTTPError as e:
-             raise RuntimeError(f"Failed to fetch URL: {e}")
-
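-     # NOTE: fetch_text() does not scrape the raw page itself; it routes the
-     # URL through the r.jina.ai reader prefix configured above, which returns
-     # the page content as plain text that is easier for the LLM to consume.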
-     def extract_text_from_pdf(self, pdf_file) -> str:
-         """Extract text content from PDF file"""
-         try:
-             # Gradio returns a file path, not a file object
-             if isinstance(pdf_file, str):
-                 pdf_path = pdf_file
-             else:
-                 # If it's a file object (shouldn't happen with Gradio)
-                 with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
-                     tmp_file.write(pdf_file.read())
-                     pdf_path = tmp_file.name
-
-             # load the PDF and extract its text
-             loader = PyPDFLoader(pdf_path)
-             pages = loader.load()
-
-             # join the text of all pages
-             text = "\n".join([page.page_content for page in pages])
-
-             # delete the file if it was a temporary one
-             if not isinstance(pdf_file, str) and os.path.exists(pdf_path):
-                 os.unlink(pdf_path)
-
-             return text
-         except Exception as e:
-             raise RuntimeError(f"Failed to extract text from PDF: {e}")
-
-     def _get_messages_formatter_type(self, model_name):
-         """Get appropriate message formatter for the model"""
-         if "Mistral" in model_name or "BitSix" in model_name:
-             return MessagesFormatterType.CHATML
-         else:
-             return MessagesFormatterType.LLAMA_3
-
-
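-     # NOTE: _get_messages_formatter_type() above maps Mistral-family model
-     # names (including the BitSix build) to the ChatML prompt format;
-     # everything else falls back to the Llama-3 format.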
-     def _build_prompt(self, text: str, language: str = "English", search_context: str = "") -> str:
-         """Build prompt for conversation generation with enhanced professional podcast style"""
-         # limit input text length
-         max_text_length = 4500 if search_context else 6000
-         if len(text) > max_text_length:
-             text = text[:max_text_length] + "..."
-
-         if language == "Korean":
-             # conversation template extended to more turns
-             template = """
-             {
-                 "conversation": [
-                     {"speaker": "준수", "text": ""},
-                     {"speaker": "민호", "text": ""},
-                     {"speaker": "준수", "text": ""},
-                     {"speaker": "민호", "text": ""},
-                     {"speaker": "준수", "text": ""},
-                     {"speaker": "민호", "text": ""},
-                     {"speaker": "준수", "text": ""},
-                     {"speaker": "민호", "text": ""},
-                     {"speaker": "준수", "text": ""},
-                     {"speaker": "민호", "text": ""},
-                     {"speaker": "준수", "text": ""},
-                     {"speaker": "민호", "text": ""}
-                 ]
-             }
-             """
-
-             context_part = ""
-             if search_context:
-                 context_part = f"# 최신 관련 정보:\n{search_context}\n"
-
-             base_prompt = (
-                 f"# 원본 콘텐츠:\n{text}\n\n"
-                 f"{context_part}"
-                 f"위 내용으로 전문적이고 심층적인 팟캐스트 대담 프로그램 대본을 작성해주세요.\n\n"
-                 f"## 핵심 지침:\n"
-                 f"1. **대화 스타일**: 전문적이면서도 이해하기 쉬운 팟캐스트 대담\n"
-                 f"2. **화자 역할**:\n"
-                 f"   - 준수: 진행자/호스트 (핵심을 짚는 질문, 청취자 관점에서 궁금한 점 질문)\n"
-                 f"   - 민호: 전문가 (깊이 있는 설명, 구체적 사례와 데이터 제시)\n"
-                 f"3. **중요한 답변 규칙**:\n"
-                 f"   - 준수: 1-2문장의 명확한 질문 (\"그렇다면 구체적으로 어떤 의미인가요?\", \"실제 사례를 들어주시겠어요?\")\n"
-                 f"   - 민호: **반드시 2-4문장으로 충실히 답변** (개념 설명 + 구체적 설명 + 예시나 함의)\n"
-                 f"   - 예: \"이것은 ~를 의미합니다. 구체적으로 ~한 측면에서 중요한데요. 실제로 최근 ~한 사례가 있었고, 이는 ~를 보여줍니다.\"\n"
-                 f"4. **전문성 요소**:\n"
-                 f"   - 통계나 연구 결과 인용\n"
-                 f"   - 실제 사례와 케이스 스터디\n"
-                 f"   - 전문 용어를 쉽게 풀어서 설명\n"
-                 f"   - 다양한 관점과 시각 제시\n"
-                 f"5. **필수 규칙**: 서로 존댓말 사용, 12-15회 대화 교환\n\n"
-                 f"JSON 형식으로만 반환:\n{template}"
-             )
-
-             return base_prompt
-
-         else:
-             # extended English template as well
-             template = """
-             {
-                 "conversation": [
-                     {"speaker": "Alex", "text": ""},
-                     {"speaker": "Jordan", "text": ""},
-                     {"speaker": "Alex", "text": ""},
-                     {"speaker": "Jordan", "text": ""},
-                     {"speaker": "Alex", "text": ""},
-                     {"speaker": "Jordan", "text": ""},
-                     {"speaker": "Alex", "text": ""},
-                     {"speaker": "Jordan", "text": ""},
-                     {"speaker": "Alex", "text": ""},
-                     {"speaker": "Jordan", "text": ""},
-                     {"speaker": "Alex", "text": ""},
-                     {"speaker": "Jordan", "text": ""}
-                 ]
-             }
-             """
-
-             context_part = ""
-             if search_context:
-                 context_part = f"# Latest Information:\n{search_context}\n"
-
-             base_prompt = (
-                 f"# Content:\n{text}\n\n"
-                 f"{context_part}"
-                 f"Create a professional and insightful podcast conversation.\n\n"
-                 f"## Key Guidelines:\n"
-                 f"1. **Style**: Professional yet accessible podcast discussion\n"
-                 f"2. **Roles**:\n"
-                 f"   - Alex: Host (insightful questions, audience perspective)\n"
-                 f"   - Jordan: Expert (in-depth explanations, concrete examples and data)\n"
-                 f"3. **Critical Response Rules**:\n"
-                 f"   - Alex: 1-2 sentence clear questions (\"Could you elaborate on that?\", \"What's a real-world example?\")\n"
-                 f"   - Jordan: **Must answer in 2-4 sentences** (concept + detailed explanation + example/implication)\n"
-                 f"   - Example: \"This refers to... Specifically, it's important because... For instance, recent studies show... This demonstrates...\"\n"
-                 f"4. **Professional Elements**:\n"
-                 f"   - Cite statistics and research\n"
-                 f"   - Real cases and case studies\n"
-                 f"   - Explain technical terms clearly\n"
-                 f"   - Present multiple perspectives\n"
-                 f"5. **Length**: 12-15 exchanges total\n\n"
-                 f"Return JSON only:\n{template}"
-             )
-
-             return base_prompt
-
-
-
-     def _build_messages_for_local(self, text: str, language: str = "English", search_context: str = "") -> List[Dict]:
-         """Build messages for local LLM with enhanced professional podcast style"""
-         if language == "Korean":
-             system_message = (
-                 "당신은 한국 최고의 전문 팟캐스트 작가입니다. "
-                 "청취자들이 전문 지식을 쉽게 이해할 수 있는 고품질 대담을 만들어냅니다.\n\n"
-                 "핵심 원칙:\n"
-                 "1. 진행자(준수)는 핵심을 짚는 통찰력 있는 질문으로 대화를 이끌어갑니다\n"
-                 "2. 전문가(민호)는 반드시 2-4문장으로 깊이 있게 답변합니다 (개념+설명+예시)\n"
-                 "3. 구체적인 데이터, 연구 결과, 실제 사례를 포함합니다\n"
-                 "4. 전문 용어는 쉽게 풀어서 설명하되, 정확성을 유지합니다\n"
-                 "5. 다양한 관점을 제시하여 균형잡힌 시각을 제공합니다\n"
-                 "6. 반드시 서로 존댓말을 사용하며, 전문적이면서도 친근한 톤을 유지합니다"
-             )
-         else:
-             system_message = (
-                 "You are an expert podcast scriptwriter who creates high-quality, "
-                 "professional discussions that make complex topics accessible.\n\n"
-                 "Key principles:\n"
-                 "1. The host (Alex) asks insightful questions that drive the conversation\n"
-                 "2. The expert (Jordan) MUST answer in 2-4 sentences (concept+explanation+example)\n"
-                 "3. Include specific data, research findings, and real cases\n"
-                 "4. Explain technical terms clearly while maintaining accuracy\n"
-                 "5. Present multiple perspectives for balanced views\n"
-                 "6. Maintain a professional yet approachable tone"
-             )
-
-         return [
-             {"role": "system", "content": system_message},
-             {"role": "user", "content": self._build_prompt(text, language, search_context)}
-         ]
-
-     @spaces.GPU(duration=120)
-     def extract_conversation_local(self, text: str, language: str = "English", progress=None) -> Dict:
-         """Extract conversation using new local LLM with enhanced professional style"""
-         try:
-             # build search context (unless the input is keyword-based)
-             search_context = ""
-             if BRAVE_KEY and not text.startswith("Keyword-based content:"):
-                 try:
-                     keywords = extract_keywords_for_search(text, language)
-                     if keywords:
-                         search_query = keywords[0] if language == "Korean" else f"{keywords[0]} latest news"
-                         search_context = format_search_results(search_query)
-                         print(f"Search context added for: {search_query}")
-                 except Exception as e:
-                     print(f"Search failed, continuing without context: {e}")
-
-             # try the new local LLM first
-             self.initialize_local_mode()
-
-             chat_template = self._get_messages_formatter_type(self.config.local_model_name)
-             provider = LlamaCppPythonProvider(self.local_llm)
-
-             # reinforced professional-podcast system message
-             if language == "Korean":
-                 system_message = (
-                     "당신은 한국의 유명 팟캐스트 전문 작가입니다. "
-                     "청취자들이 깊이 있는 전문 지식을 얻을 수 있는 고품질 대담을 만듭니다.\n\n"
-                     "작성 규칙:\n"
-                     "1. 진행자(준수)는 핵심을 짚는 1-2문장 질문을 합니다\n"
-                     "2. 전문가(민호)는 반드시 2-4문장으로 충실히 답변합니다:\n"
-                     "   - 첫 문장: 핵심 개념 설명\n"
-                     "   - 둘째 문장: 구체적인 설명이나 맥락\n"
-                     "   - 셋째-넷째 문장: 실제 예시, 데이터, 함의\n"
-                     "3. 통계, 연구 결과, 실제 사례를 적극 활용하세요\n"
-                     "4. 전문성을 유지하면서도 이해하기 쉽게 설명하세요\n"
-                     "5. 12-15회의 대화 교환으로 구성하세요\n"
-                     "6. JSON 형식으로만 응답하세요"
-                 )
-             else:
-                 system_message = (
-                     "You are a professional podcast scriptwriter creating high-quality, "
-                     "insightful discussions that provide deep expertise to listeners.\n\n"
-                     "Writing rules:\n"
-                     "1. Host (Alex) asks focused 1-2 sentence questions\n"
-                     "2. Expert (Jordan) MUST answer in 2-4 substantial sentences:\n"
-                     "   - First sentence: Core concept explanation\n"
-                     "   - Second sentence: Specific details or context\n"
-                     "   - Third-fourth sentences: Real examples, data, implications\n"
-                     "3. Actively use statistics, research findings, real cases\n"
-                     "4. Maintain expertise while keeping explanations accessible\n"
-                     "5. Create 12-15 conversation exchanges\n"
-                     "6. Respond only in JSON format"
-                 )
-
-             agent = LlamaCppAgent(
-                 provider,
-                 system_prompt=system_message,
-                 predefined_messages_formatter_type=chat_template,
-                 debug_output=False
-             )
-
-             settings = provider.get_provider_default_settings()
-             settings.temperature = 0.75  # slightly lower for more consistent, professional answers
-             settings.top_k = 40
-             settings.top_p = 0.95
-             settings.max_tokens = self.config.max_tokens  # use the increased token budget
-             settings.repeat_penalty = 1.1
-             settings.stream = False
-
-             messages = BasicChatHistory()
-
-             prompt = self._build_prompt(text, language, search_context)
-             response = agent.get_chat_response(
-                 prompt,
-                 llm_sampling_settings=settings,
-                 chat_history=messages,
-                 returns_streaming_generator=False,
-                 print_output=False
-             )
-
-             # parse the JSON payload
-             pattern = r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}"
-             json_match = re.search(pattern, response)
-
-             if json_match:
-                 conversation_data = json.loads(json_match.group())
-                 # check the conversation length
-                 if len(conversation_data["conversation"]) < self.config.min_conversation_turns:
-                     print(f"Conversation too short ({len(conversation_data['conversation'])} turns), regenerating...")
-                     # a retry loop could be added here
-                 return conversation_data
-             else:
-                 raise ValueError("No valid JSON found in local LLM response")
-
-         except Exception as e:
-             print(f"Local LLM failed: {e}, falling back to legacy local method")
-             return self.extract_conversation_legacy_local(text, language, progress, search_context)
-
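-     # NOTE: the regex r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}" used above tolerates
-     # exactly one level of nested braces, which is enough for the flat
-     # {"conversation": [{...}, ...]} schema; deeper nesting would not match
-     # the full object.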
-     @spaces.GPU(duration=120)
-     def extract_conversation_legacy_local(self, text: str, language: str = "English", progress=None, search_context: str = "") -> Dict:
-         """Extract conversation using legacy local model with enhanced professional style"""
-         try:
-             self.initialize_legacy_local_mode()
-
-             # reinforced professional-podcast system message
-             if language == "Korean":
-                 system_message = (
-                     "당신은 전문 팟캐스트 작가입니다. "
-                     "진행자(준수)는 통찰력 있는 질문을, 전문가(민호)는 2-4문장의 상세한 답변을 합니다. "
-                     "구체적인 데이터와 사례를 포함하여 전문적이면서도 이해하기 쉽게 설명하세요. "
-                     "12-15회 대화 교환으로 구성하세요."
-                 )
-             else:
-                 system_message = (
-                     "You are a professional podcast scriptwriter. "
-                     "Create insightful dialogue where the host (Alex) asks focused questions "
-                     "and the expert (Jordan) gives detailed 2-4 sentence answers. "
-                     "Include specific data and examples. Create 12-15 exchanges."
-                 )
-
-             chat = [
-                 {"role": "system", "content": system_message},
-                 {"role": "user", "content": self._build_prompt(text, language, search_context)}
-             ]
-
-             terminators = [
-                 self.legacy_tokenizer.eos_token_id,
-                 self.legacy_tokenizer.convert_tokens_to_ids("<|eot_id|>")
-             ]
-
-             messages = self.legacy_tokenizer.apply_chat_template(
-                 chat, tokenize=False, add_generation_prompt=True
-             )
-             model_inputs = self.legacy_tokenizer([messages], return_tensors="pt").to(self.device)
-
-             streamer = TextIteratorStreamer(
-                 self.legacy_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
-             )
-
-             generate_kwargs = dict(
-                 model_inputs,
-                 streamer=streamer,
-                 max_new_tokens=self.config.max_new_tokens,  # use the increased token budget
-                 do_sample=True,
-                 temperature=0.75,
-                 eos_token_id=terminators,
-             )
-
-             t = Thread(target=self.legacy_local_model.generate, kwargs=generate_kwargs)
-             t.start()
-
-             partial_text = ""
-             for new_text in streamer:
-                 partial_text += new_text
-
-             pattern = r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}"
-             json_match = re.search(pattern, partial_text)
-
-             if json_match:
-                 return json.loads(json_match.group())
-             else:
-                 raise ValueError("No valid JSON found in legacy local response")
-
-         except Exception as e:
-             print(f"Legacy local model also failed: {e}")
-             # Return enhanced default template
-             if language == "Korean":
-                 return self._get_default_korean_conversation()
-             else:
-                 return self._get_default_english_conversation()
-
-     def _get_default_korean_conversation(self) -> Dict:
-         """More professional default Korean conversation template"""
-         return {
-             "conversation": [
-                 {"speaker": "준수", "text": "안녕하세요, 여러분! 오늘은 정말 중요하고 흥미로운 주제를 다뤄보려고 합니다. 민호 박사님, 먼저 이 주제가 왜 지금 이렇게 주목받고 있는지 설명해주시겠어요?"},
-                 {"speaker": "민호", "text": "네, 안녕하세요. 최근 이 분야에서 획기적인 발전이 있었습니다. 특히 작년 MIT 연구팀의 발표에 따르면, 이 기술의 효율성이 기존 대비 300% 향상되었다고 합니다. 이는 단순한 기술적 진보를 넘어서 우리 일상생활에 직접적인 영향을 미칠 수 있는 변화인데요. 실제로 구글과 마이크로소프트 같은 빅테크 기업들이 이미 수십억 달러를 투자하고 있습니다."},
-                 {"speaker": "준수", "text": "와, 300% 향상이라니 정말 놀라운데요. 그렇다면 이런 기술 발전이 일반인들에게는 구체적으로 어떤 혜택을 가져다줄 수 있을까요?"},
-                 {"speaker": "민호", "text": "가장 직접적인 혜택은 비용 절감과 접근성 향상입니다. 예를 들어, 이전에는 전문가만 사용할 수 있던 고급 기능들이 이제는 스마트폰 앱으로도 구현 가능해졌습니다. 맥킨지 보고서에 따르면, 2025년까지 이 기술로 인해 전 세계적으로 약 2조 달러의 경제적 가치가 창출될 것으로 예상됩니다. 특히 의료, 교육, 금융 분야에서 혁신적인 변화가 일어날 것으로 보입니다."},
-                 {"speaker": "준수", "text": "2조 달러라는 엄청난 규모네요. 의료 분야에서는 어떤 변화가 예상되나요?"},
-                 {"speaker": "민호", "text": "의료 분야의 변화는 정말 혁명적일 것으로 예상됩니다. 이미 스탠포드 대학병원에서는 이 기술을 활용해 암 진단 정확도를 95%까지 높였습니다. 기존에는 숙련된 의사도 놓칠 수 있던 미세한 병변들을 AI가 감지해내는 것이죠. 더 놀라운 것은 이런 진단이 단 몇 분 만에 이뤄진다는 점입니다. WHO 추산으로는 이 기술이 전 세계적으로 보급되면 연간 수백만 명의 생명을 구할 수 있을 것으로 예측하고 있습니다."},
-                 {"speaker": "준수", "text": "정말 인상적이네요. 하지만 이런 급격한 기술 발전에 대한 우려의 목소리도 있을 것 같은데요?"},
-                 {"speaker": "민호", "text": "맞습니다. 주요 우려사항은 크게 세 가지입니다. 첫째는 일자리 대체 문제로, 옥스포드 대학 연구에 따르면 향후 20년 내에 현재 직업의 47%가 자동화될 위험이 있습니다. 둘째는 프라이버시와 보안 문제입니다. 셋째는 기술 격차로 인한 불평등 심화입니다. 하지만 역사적으로 보면 새로운 기술은 항상 새로운 기회도 함께 만들어왔기 때문에, 적절한 정책과 교육으로 이런 문제들을 해결할 수 있을 것으로 봅니다."},
-                 {"speaker": "준수", "text": "균형잡힌 시각이 중요하겠네요. 그렇다면 우리가 이런 변화에 어떻게 대비해야 할까요?"},
-                 {"speaker": "민호", "text": "가장 중요한 것은 지속적인 학습과 적응력입니다. 세계경제포럼은 2025년까지 전 세계 근로자의 50%가 재교육이 필요할 것으로 예측했습니다. 특히 디지털 리터러시, 비판적 사고력, 창의성 같은 능력이 중요해질 것입니다. 개인적으로는 온라인 교육 플랫폼을 활용한 자기계발을 추천합니다. 예를 들어 Coursera나 edX 같은 플랫폼에서는 세계 최고 대학의 강의를 무료로 들을 수 있습니다."},
-                 {"speaker": "준수", "text": "실용적인 조언 감사합니다. 마지막으로 이 분야의 미래 전망은 어떻게 보시나요?"},
-                 {"speaker": "민호", "text": "향후 10년은 인류 역사상 가장 급격한 기술 발전을 경험하는 시기가 될 것입니다. 가트너의 하이프 사이클 분석에 따르면, 현재 우리는 이 기술의 초기 단계에 불과합니다. 2030년까지는 지금으로서는 상상하기 어려운 수준의 혁신이 일어날 것으로 예상됩니다. 중요한 것은 이런 변화를 두려워하기보다는 기회로 삼아 더 나은 미래를 만들어가는 것이라고 생각합니다."},
-                 {"speaker": "준수", "text": "정말 통찰력 있는 말씀이네요. 오늘 너무나 유익한 시간이었습니다. 청취자 여러분도 오늘 논의된 내용을 바탕으로 미래를 준비하시길 바랍니다. 민호 박사님, 귀중한 시간 내주셔서 감사합니다!"},
-                 {"speaker": "민호", "text": "감사합니다. 청취자 여러분들이 이 변화의 시대를 현명하게 헤쳐나가시길 바랍니다. 기술은 도구일 뿐이고, 그것을 어떻게 활용하는지는 우리에게 달려있다는 점을 기억해주세요. 오늘 말씀드린 내용에 대해 더 궁금하신 점이 있으시면 제가 운영하는 블로그나 최근 출간한 책에서 더 자세한 정보를 찾으실 수 있습니다."}
-             ]
-         }
-
-     def _get_default_english_conversation(self) -> Dict:
-         """Enhanced professional English conversation template"""
-         return {
-             "conversation": [
-                 {"speaker": "Alex", "text": "Welcome everyone to our podcast! Today we're diving into a topic that's reshaping our world. Dr. Jordan, could you start by explaining why this subject has become so critical right now?"},
-                 {"speaker": "Jordan", "text": "Thanks, Alex. We're witnessing an unprecedented convergence of technological breakthroughs. According to a recent Nature publication, advances in this field have accelerated by 400% in just the past two years. This isn't just incremental progress - it's a fundamental shift in how we approach problem-solving. Major institutions like Harvard and Stanford are completely restructuring their research programs to focus on this area, with combined investments exceeding $5 billion annually."},
-                 {"speaker": "Alex", "text": "400% acceleration is staggering! What does this mean for everyday people who might not be tech-savvy?"},
-                 {"speaker": "Jordan", "text": "The impact will be profound yet accessible. Think about how smartphones revolutionized communication - this will be similar but across every aspect of life. McKinsey's latest report projects that by 2026, these technologies will create $4.4 trillion in annual value globally. For individuals, this translates to personalized healthcare that can predict illnesses years in advance, educational systems that adapt to each student's learning style, and financial tools that democratize wealth-building strategies previously available only to the ultra-wealthy."},
-                 {"speaker": "Alex", "text": "Those applications sound transformative. Can you give us a concrete example of how this is already being implemented?"},
-                 {"speaker": "Jordan", "text": "Absolutely. Let me share a compelling case from Johns Hopkins Hospital. They've deployed an AI system that analyzes patient data in real-time, reducing diagnostic errors by 85% and cutting average diagnosis time from days to hours. In one documented case, the system identified a rare genetic disorder in a child that had been misdiagnosed for three years. The accuracy comes from analyzing patterns across millions of cases - something impossible for even the most experienced doctors to do manually. This technology is now being rolled out to rural hospitals, bringing world-class diagnostic capabilities to underserved communities."},
-                 {"speaker": "Alex", "text": "That's truly life-changing technology. But I imagine there are significant challenges and risks we need to consider?"},
-                 {"speaker": "Jordan", "text": "You're absolutely right to raise this. The challenges are as significant as the opportunities. The World Economic Forum identifies three critical risks: First, algorithmic bias could perpetuate or amplify existing inequalities if not carefully managed. Second, cybersecurity threats become exponentially more dangerous when AI systems control critical infrastructure. Third, there's the socioeconomic disruption - PwC estimates that 30% of jobs could be automated by 2030. However, history shows us that technological revolutions create new opportunities even as they displace old ones. The key is proactive adaptation and responsible development."},
-                 {"speaker": "Alex", "text": "How should individuals and organizations prepare for these changes?"},
-                 {"speaker": "Jordan", "text": "Preparation requires a multi-faceted approach. For individuals, I recommend focusing on skills that complement rather than compete with AI: critical thinking, emotional intelligence, and creative problem-solving. MIT's recent study shows that professionals who combine domain expertise with AI literacy see salary increases of 40% on average. Organizations need to invest in continuous learning programs - Amazon's $700 million worker retraining initiative is a good model. Most importantly, we need to cultivate an adaptive mindset. The half-life of specific technical skills is shrinking, but the ability to learn and unlearn quickly is becoming invaluable."},
-                 {"speaker": "Alex", "text": "That's practical advice. What about the ethical considerations? How do we ensure this technology benefits humanity as a whole?"},
-                 {"speaker": "Jordan", "text": "Ethics must be at the forefront of development. The EU's AI Act and similar regulations worldwide are establishing important guardrails. We need transparent AI systems where decisions can be explained and audited. Companies like IBM and Google have established AI ethics boards, but we need industry-wide standards. Additionally, we must address the digital divide - UNESCO reports that 37% of the global population still lacks internet access. Without inclusive development, these technologies could exacerbate global inequality rather than reduce it. The solution requires collaboration between technologists, ethicists, policymakers, and communities."},
-                 {"speaker": "Alex", "text": "Looking ahead, what's your vision for how this technology will shape the next decade?"},
-                 {"speaker": "Jordan", "text": "The next decade will be transformative beyond our current imagination. Ray Kurzweil's prediction of technological singularity by 2045 seems increasingly plausible. By 2035, I expect we'll see autonomous systems managing entire cities, personalized medicine extending human lifespan by 20-30 years, and educational AI that makes world-class education universally accessible. The convergence of AI with quantum computing, biotechnology, and nanotechnology will unlock possibilities we can barely conceive of today. However, the future isn't predetermined - it's shaped by the choices we make now about development priorities, ethical frameworks, and inclusive access."},
-                 {"speaker": "Alex", "text": "That's both exciting and sobering. Any final thoughts for our listeners?"},
-                 {"speaker": "Jordan", "text": "I'd encourage everyone to view this as humanity's next great adventure. Yes, there are risks and challenges, but we're also on the cusp of solving problems that have plagued us for millennia - disease, poverty, environmental degradation. The key is engaged participation rather than passive observation. Stay informed through reliable sources, experiment with new technologies, and most importantly, contribute to the conversation about what kind of future we want to build. The decisions we make in the next five years will reverberate for generations."},
-                 {"speaker": "Alex", "text": "Dr. Jordan, this has been an incredibly enlightening discussion. Thank you for sharing your expertise and insights with us today."},
-                 {"speaker": "Jordan", "text": "Thank you, Alex. It's been a pleasure discussing these crucial topics. For listeners wanting to dive deeper, I've compiled additional resources on my website, including links to the studies we discussed today. Remember, the future isn't something that happens to us - it's something we create together. I look forward to seeing how each of you contributes to shaping this exciting new era."}
-             ]
-         }
-
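-     # NOTE: the two _get_default_* templates above are the last-resort output
-     # when both LLM paths fail; the figures they quote are illustrative filler
-     # for a demo conversation, not sourced facts.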
-     def extract_conversation_api(self, text: str, language: str = "English") -> Dict:
-         """Extract conversation using API with enhanced professional style"""
-         if not self.llm_client:
-             raise RuntimeError("API mode not initialized")
-
-         try:
-             # build search context
-             search_context = ""
-             if BRAVE_KEY and not text.startswith("Keyword-based content:"):
-                 try:
-                     keywords = extract_keywords_for_search(text, language)
-                     if keywords:
-                         search_query = keywords[0] if language == "Korean" else f"{keywords[0]} latest news"
-                         search_context = format_search_results(search_query)
-                         print(f"Search context added for: {search_query}")
-                 except Exception as e:
-                     print(f"Search failed, continuing without context: {e}")
-
-             # reinforced professional-podcast system message
-             if language == "Korean":
-                 system_message = (
-                     "당신은 한국의 최고 전문 팟캐스트 작가입니다. "
-                     "청취자들이 깊이 있는 인사이트를 얻을 수 있는 고품질 대담을 만드세요.\n"
-                     "준수(진행자)는 핵심을 짚는 1-2문장 질문을 하고, "
-                     "민호(전문가)는 반드시 2-4문장으로 상세히 답변합니다. "
-                     "구체적인 데이터, 연구 결과, 실제 사례를 포함하세요. "
-                     "전문 용어는 쉽게 설명하고, 반드시 서로 존댓말을 사용하세요. "
-                     "12-15회의 깊이 있는 대화 교환으로 구성하세요."
-                 )
-             else:
-                 system_message = (
-                     "You are a top professional podcast scriptwriter. "
-                     "Create high-quality discussions that provide deep insights to listeners. "
-                     "Alex (host) asks focused 1-2 sentence questions, "
-                     "while Jordan (expert) MUST answer in 2-4 detailed sentences. "
-                     "Include specific data, research findings, and real cases. "
-                     "Explain technical terms clearly. "
-                     "Create 12-15 insightful conversation exchanges."
-                 )
-
-             chat_completion = self.llm_client.chat.completions.create(
-                 messages=[
-                     {"role": "system", "content": system_message},
-                     {"role": "user", "content": self._build_prompt(text, language, search_context)}
-                 ],
-                 model=self.config.api_model_name,
-                 temperature=0.75,
-             )
-
-             pattern = r"\{(?:[^{}]|(?:\{[^{}]*\}))*\}"
-             json_match = re.search(pattern, chat_completion.choices[0].message.content)
-
-             if not json_match:
-                 raise ValueError("No valid JSON found in response")
-
-             return json.loads(json_match.group())
-         except Exception as e:
-             raise RuntimeError(f"Failed to extract conversation: {e}")
-
-     def parse_conversation_text(self, conversation_text: str) -> Dict:
-         """Parse conversation text back to JSON format"""
-         lines = conversation_text.strip().split('\n')
-         conversation_data = {"conversation": []}
-
-         for line in lines:
-             if ':' in line:
-                 speaker, text = line.split(':', 1)
-                 conversation_data["conversation"].append({
-                     "speaker": speaker.strip(),
-                     "text": text.strip()
-                 })
-
-         return conversation_data
-
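-     # Illustrative round-trip (example, not part of the original file):
-     #   parse_conversation_text("Alex: Hi\nJordan: Hello") returns
-     #   {"conversation": [{"speaker": "Alex", "text": "Hi"},
-     #                     {"speaker": "Jordan", "text": "Hello"}]}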
-     async def text_to_speech_edge(self, conversation_json: Dict, language: str = "English") -> Tuple[str, str]:
-         """Convert text to speech using Edge TTS"""
-         output_dir = Path(self._create_output_directory())
-         filenames = []
-
-         try:
-             # per-language voice settings; Korean uses two male voices
-             if language == "Korean":
-                 voices = [
-                     "ko-KR-HyunsuNeural",  # male voice 1 (calm, trustworthy)
-                     "ko-KR-InJoonNeural"   # male voice 2 (lively, friendly)
-                 ]
-             else:
-                 voices = [
-                     "en-US-AndrewMultilingualNeural",  # male voice 1
-                     "en-US-BrianMultilingualNeural"    # male voice 2
-                 ]
-
-             for i, turn in enumerate(conversation_json["conversation"]):
-                 filename = output_dir / f"output_{i}.wav"
-                 voice = voices[i % len(voices)]
-
-                 tmp_path = await self._generate_audio_edge(turn["text"], voice)
-                 os.rename(tmp_path, filename)
-                 filenames.append(str(filename))
-
-             # Combine audio files
-             final_output = os.path.join(output_dir, "combined_output.wav")
-             self._combine_audio_files(filenames, final_output)
-
-             # Generate conversation text
-             conversation_text = "\n".join(
-                 f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}"
-                 for i, turn in enumerate(conversation_json["conversation"])
-             )
-
-             return final_output, conversation_text
-         except Exception as e:
-             raise RuntimeError(f"Failed to convert text to speech: {e}")
-
-     async def _generate_audio_edge(self, text: str, voice: str) -> str:
-         """Generate audio using Edge TTS"""
-         if not text.strip():
-             raise ValueError("Text cannot be empty")
-
-         voice_short_name = voice.split(" - ")[0] if " - " in voice else voice
-         communicate = edge_tts.Communicate(text, voice_short_name)
-
-         with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-             tmp_path = tmp_file.name
-             await communicate.save(tmp_path)
-
-         return tmp_path
-
-     @spaces.GPU(duration=60)
-     def text_to_speech_spark(self, conversation_json: Dict, language: str = "English", progress=None) -> Tuple[str, str]:
-         """Convert text to speech using Spark TTS CLI"""
-         if not SPARK_AVAILABLE or not self.spark_model_dir:
-             raise RuntimeError("Spark TTS not available")
-
-         try:
-             output_dir = self._create_output_directory()
-             audio_files = []
-
-             # Create different voice characteristics for different speakers
-             if language == "Korean":
-                 voice_configs = [
-                     {"prompt_text": "안녕하세요, 오늘 팟캐스트 진행을 맡은 준수입니다. 여러분과 함께 흥미로운 이야기를 나눠보겠습니다.", "gender": "male"},
-                     {"prompt_text": "안녕하세요, 저는 오늘 이 주제에 대해 설명드릴 민호입니다. 쉽고 재미있게 설명드릴게요.", "gender": "male"}
-                 ]
-             else:
-                 voice_configs = [
-                     {"prompt_text": "Hello everyone, I'm Alex, your host for today's podcast. Let's explore this fascinating topic together.", "gender": "male"},
-                     {"prompt_text": "Hi, I'm Jordan. I'm excited to share my insights on this subject with you all today.", "gender": "male"}
-                 ]
-
-             for i, turn in enumerate(conversation_json["conversation"]):
-                 text = turn["text"]
-                 if not text.strip():
-                     continue
-
-                 # Use different voice config for each speaker
-                 voice_config = voice_configs[i % len(voice_configs)]
-
-                 output_file = os.path.join(output_dir, f"spark_output_{i}.wav")
-
-                 # Run Spark TTS CLI inference
-                 cmd = [
-                     "python", "-m", "cli.inference",
-                     "--text", text,
-                     "--device", "0" if torch.cuda.is_available() else "cpu",
-                     "--save_dir", output_dir,
-                     "--model_dir", self.spark_model_dir,
-                     "--prompt_text", voice_config["prompt_text"],
-                     "--output_name", f"spark_output_{i}.wav"
-                 ]
-
-                 try:
-                     # Run the command
-                     result = subprocess.run(
-                         cmd,
-                         capture_output=True,
-                         text=True,
-                         timeout=60,
-                         cwd="."  # Make sure we're in the right directory
-                     )
-
-                     if result.returncode == 0:
-                         audio_files.append(output_file)
-                     else:
-                         print(f"Spark TTS error for turn {i}: {result.stderr}")
-                         # Create a short silence as fallback
-                         silence = np.zeros(int(22050 * 1.0))  # 1 second of silence
-                         sf.write(output_file, silence, 22050)
-                         audio_files.append(output_file)
-
-                 except subprocess.TimeoutExpired:
-                     print(f"Spark TTS timeout for turn {i}")
-                     # Create silence as fallback
-                     silence = np.zeros(int(22050 * 1.0))
-                     sf.write(output_file, silence, 22050)
-                     audio_files.append(output_file)
-                 except Exception as e:
-                     print(f"Error running Spark TTS for turn {i}: {e}")
-                     # Create silence as fallback
-                     silence = np.zeros(int(22050 * 1.0))
-                     sf.write(output_file, silence, 22050)
-                     audio_files.append(output_file)
-
-             # Combine all audio files
-             if audio_files:
-                 final_output = os.path.join(output_dir, "spark_combined.wav")
-                 self._combine_audio_files(audio_files, final_output)
-             else:
-                 raise RuntimeError("No audio files generated")
-
-             # Generate conversation text
-             conversation_text = "\n".join(
-                 f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}"
-                 for i, turn in enumerate(conversation_json["conversation"])
-             )
-
-             return final_output, conversation_text
-
-         except Exception as e:
-             raise RuntimeError(f"Failed to convert text to speech with Spark TTS: {e}")
-
-     @spaces.GPU(duration=60)
-     def text_to_speech_melo(self, conversation_json: Dict, progress=None) -> Tuple[str, str]:
-         """Convert text to speech using MeloTTS"""
-         if not MELO_AVAILABLE or not self.melo_models:
-             raise RuntimeError("MeloTTS not available")
-
-         speakers = ["EN-Default", "EN-US"]
-         combined_audio = AudioSegment.empty()
-
-         for i, turn in enumerate(conversation_json["conversation"]):
-             bio = io.BytesIO()
-             text = turn["text"]
-             speaker = speakers[i % 2]
-             speaker_id = self.melo_models["EN"].hps.data.spk2id[speaker]
-
-             # Generate audio
-             self.melo_models["EN"].tts_to_file(
-                 text, speaker_id, bio, speed=1.0,
-                 pbar=progress.tqdm if progress else None,
-                 format="wav"
-             )
-
-             bio.seek(0)
-             audio_segment = AudioSegment.from_file(bio, format="wav")
-             combined_audio += audio_segment
-
-         # Save final audio
-         final_audio_path = "melo_podcast.mp3"
-         combined_audio.export(final_audio_path, format="mp3")
-
-         # Generate conversation text
-         conversation_text = "\n".join(
-             f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}"
-             for i, turn in enumerate(conversation_json["conversation"])
-         )
-
-         return final_audio_path, conversation_text
-
-     def _create_output_directory(self) -> str:
-         """Create a unique output directory"""
-         random_bytes = os.urandom(8)
-         folder_name = base64.urlsafe_b64encode(random_bytes).decode("utf-8")
-         os.makedirs(folder_name, exist_ok=True)
-         return folder_name
-
-     def _combine_audio_files(self, filenames: List[str], output_file: str) -> None:
-         """Combine multiple audio files into one"""
-         if not filenames:
-             raise ValueError("No input files provided")
-
-         try:
-             audio_segments = []
-             for filename in filenames:
-                 if os.path.exists(filename):
-                     audio_segment = AudioSegment.from_file(filename)
-                     audio_segments.append(audio_segment)
-
-             if audio_segments:
-                 combined = sum(audio_segments)
-                 combined.export(output_file, format="wav")
-
-             # Clean up temporary files
-             for filename in filenames:
-                 if os.path.exists(filename):
-                     os.remove(filename)
-
-         except Exception as e:
-             raise RuntimeError(f"Failed to combine audio files: {e}")
-
-
- # Global converter instance
- converter = UnifiedAudioConverter(ConversationConfig())
-
-
- async def synthesize(article_input, input_type: str = "URL", mode: str = "Local", tts_engine: str = "Edge-TTS", language: str = "English"):
-     """Main synthesis function - handles URL, PDF, and Keyword inputs"""
-     try:
-         # Extract text based on input type
-         if input_type == "URL":
-             if not article_input or not isinstance(article_input, str):
-                 return "Please provide a valid URL.", None
-             text = converter.fetch_text(article_input)
-         elif input_type == "PDF":
-             if not article_input:
-                 return "Please upload a PDF file.", None
-             text = converter.extract_text_from_pdf(article_input)
-         else:  # Keyword
-             if not article_input or not isinstance(article_input, str):
-                 return "Please provide a keyword or topic.", None
-             # search the keyword and build content from the results
-             text = search_and_compile_content(article_input, language)
-             text = f"Keyword-based content:\n{text}"  # add marker
-
-         # Limit text to max words
-         words = text.split()
-         if len(words) > converter.config.max_words:
-             text = " ".join(words[:converter.config.max_words])
-
-         # Extract conversation based on mode
-         if mode == "Local":
-             # local mode is the default (uses the new local LLM)
-             try:
-                 conversation_json = converter.extract_conversation_local(text, language)
-             except Exception as e:
-                 print(f"Local mode failed: {e}, trying API fallback")
-                 # API fallback
-                 api_key = os.environ.get("TOGETHER_API_KEY")
-                 if api_key:
-                     converter.initialize_api_mode(api_key)
-                     conversation_json = converter.extract_conversation_api(text, language)
-                 else:
-                     raise RuntimeError("Local mode failed and no API key available for fallback")
-         else:  # API mode (now secondary)
-             api_key = os.environ.get("TOGETHER_API_KEY")
-             if not api_key:
-                 print("API key not found, falling back to local mode")
-                 conversation_json = converter.extract_conversation_local(text, language)
-             else:
-                 try:
-                     converter.initialize_api_mode(api_key)
-                     conversation_json = converter.extract_conversation_api(text, language)
-                 except Exception as e:
-                     print(f"API mode failed: {e}, falling back to local mode")
-                     conversation_json = converter.extract_conversation_local(text, language)
-
-         # Generate conversation text
-         conversation_text = "\n".join(
-             f"{turn.get('speaker', f'Speaker {i+1}')}: {turn['text']}"
-             for i, turn in enumerate(conversation_json["conversation"])
-         )
-
-         return conversation_text, None
-
-     except Exception as e:
-         return f"Error: {str(e)}", None
-
-
- async def regenerate_audio(conversation_text: str, tts_engine: str = "Edge-TTS", language: str = "English"):
-     """Regenerate audio from edited conversation text"""
-     if not conversation_text.strip():
-         return "Please provide conversation text.", None
-
-     try:
-         # Parse the conversation text back to JSON format
-         conversation_json = converter.parse_conversation_text(conversation_text)
-
-         if not conversation_json["conversation"]:
-             return "No valid conversation found in the text.", None
-
-         # For Korean, use Edge-TTS only (the other engines have limited Korean support)
-         if language == "Korean" and tts_engine != "Edge-TTS":
-             tts_engine = "Edge-TTS"  # Switch to Edge-TTS automatically
-
-         # Generate audio based on TTS engine
-         if tts_engine == "Edge-TTS":
-             output_file, _ = await converter.text_to_speech_edge(conversation_json, language)
-         elif tts_engine == "Spark-TTS":
-             if not SPARK_AVAILABLE:
-                 return "Spark TTS not available. Please install required dependencies and clone the Spark-TTS repository.", None
-             converter.initialize_spark_tts()
-             output_file, _ = converter.text_to_speech_spark(conversation_json, language)
-         else:  # MeloTTS
-             if not MELO_AVAILABLE:
-                 return "MeloTTS not available. Please install required dependencies.", None
-             if language == "Korean":
-                 return "MeloTTS does not support Korean. Please use Edge-TTS for Korean.", None
-             converter.initialize_melo_tts()
-             output_file, _ = converter.text_to_speech_melo(conversation_json)
-
-         return "Audio generated successfully!", output_file
-
-     except Exception as e:
-         return f"Error generating audio: {str(e)}", None
-
-
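# Editor's sketch: converter.parse_conversation_text is defined earlier in this
# file; as a rough mental model only (a hypothetical stand-in, not the app's
# actual implementation), it must invert the "Speaker Name: Text" line format:
def _parse_conversation_sketch(conversation_text: str) -> dict:
    conversation = []
    for line in conversation_text.splitlines():
        speaker, sep, text = line.partition(":")
        if sep and speaker.strip() and text.strip():
            conversation.append({"speaker": speaker.strip(), "text": text.strip()})
    return {"conversation": conversation}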
- def synthesize_sync(article_input, input_type: str = "URL", mode: str = "Local", tts_engine: str = "Edge-TTS", language: str = "English"):
-     """Synchronous wrapper for async synthesis"""
-     return asyncio.run(synthesize(article_input, input_type, mode, tts_engine, language))
-
-
- def regenerate_audio_sync(conversation_text: str, tts_engine: str = "Edge-TTS", language: str = "English"):
-     """Synchronous wrapper for async audio regeneration"""
-     return asyncio.run(regenerate_audio(conversation_text, tts_engine, language))
-
-
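# Editor's caveat (not original code): asyncio.run() raises RuntimeError if an
# event loop is already running in the current thread (as in some notebook or
# server contexts). synthesize_sync_safe below is a hypothetical defensive variant:
import concurrent.futures

def synthesize_sync_safe(article_input, input_type="URL", mode="Local",
                         tts_engine="Edge-TTS", language="English"):
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop in this thread: the plain wrapper is safe.
        return asyncio.run(synthesize(article_input, input_type, mode, tts_engine, language))
    # A loop is already running: a fresh worker thread has no loop, so run there.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(
            asyncio.run,
            synthesize(article_input, input_type, mode, tts_engine, language),
        ).result()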
- def update_tts_engine_for_korean(language):
-     """Update the TTS engine options when Korean is selected"""
-     if language == "Korean":
-         return gr.Radio(
-             choices=["Edge-TTS"],
-             value="Edge-TTS",
-             label="TTS Engine",
-             info="한국어는 Edge-TTS만 지원됩니다",
-             interactive=False
-         )
-     else:
-         return gr.Radio(
-             choices=["Edge-TTS", "Spark-TTS", "MeloTTS"],
-             value="Edge-TTS",
-             label="TTS Engine",
-             info="Edge-TTS: Cloud-based, natural voices | Spark-TTS: Local AI model | MeloTTS: Local, requires GPU",
-             interactive=True
-         )
-
-
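# Editor's sketch of an equivalent handler using gr.update (assumes a Gradio
# version where event handlers may return gr.update(...); the function name is
# illustrative, not part of the app). gr.update patches only the listed props
# instead of rebuilding the whole component:
def update_tts_engine_for_korean_alt(language):
    if language == "Korean":
        return gr.update(choices=["Edge-TTS"], value="Edge-TTS", interactive=False)
    return gr.update(choices=["Edge-TTS", "Spark-TTS", "MeloTTS"],
                     value="Edge-TTS", interactive=True)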
- def toggle_input_visibility(input_type):
-     """Toggle visibility of URL input, file upload, and keyword input based on input type"""
-     if input_type == "URL":
-         return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
-     elif input_type == "PDF":
-         return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
-     else:  # Keyword
-         return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
-
-
- # Model initialization (at app startup)
- if LLAMA_CPP_AVAILABLE:
-     try:
-         model_path = hf_hub_download(
-             repo_id=converter.config.local_model_repo,
-             filename=converter.config.local_model_name,
-             local_dir="./models"
-         )
-         print(f"Model downloaded to: {model_path}")
-     except Exception as e:
-         print(f"Failed to download model at startup: {e}")
-
-
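# Editor's sketch (not in the original app): skip the network round-trip when the
# GGUF file already sits under ./models so restarts can work offline. The helper
# name and path layout are assumptions; hf_hub_download also maintains its own
# cache, so this is belt-and-braces rather than required.
from pathlib import Path

def ensure_local_model():
    candidate = Path("./models") / converter.config.local_model_name
    if candidate.exists():
        return str(candidate)  # Already on disk, no download needed
    return hf_hub_download(
        repo_id=converter.config.local_model_repo,
        filename=converter.config.local_model_name,
        local_dir="./models",
    )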
- # Gradio Interface - improved layout
- with gr.Blocks(theme='soft', title="AI Podcast Generator", css="""
-     .container {max-width: 1200px; margin: auto; padding: 20px;}
-     .header-text {text-align: center; margin-bottom: 30px;}
-     .input-group {background: #f7f7f7; padding: 20px; border-radius: 10px; margin-bottom: 20px;}
-     .output-group {background: #f0f0f0; padding: 20px; border-radius: 10px;}
-     .status-box {background: #e8f4f8; padding: 15px; border-radius: 8px; margin-top: 10px;}
- """) as demo:
-     with gr.Column(elem_classes="container"):
-         # Header
-         with gr.Row(elem_classes="header-text"):
-             gr.Markdown("""
-             # 🎙️ AI Podcast Generator - Professional Edition
-             ### Convert any article, blog, PDF document, or topic into an engaging professional podcast conversation with in-depth analysis!
-             """)
-
-         with gr.Row(elem_classes="discord-badge"):
-             gr.HTML("""
-             <p style="text-align: center;">
-                 <a href="https://discord.gg/openfreeai" target="_blank">
-                     <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="badge">
-                 </a>
-             </p>
-             """)
-
-         # Status display section
-         with gr.Row():
-             with gr.Column(scale=1):
-                 gr.Markdown(f"""
-                 #### 🤖 System Status
-                 - **LLM**: {converter.config.local_model_name.split('.')[0]}
-                 - **Fallback**: {converter.config.api_model_name.split('/')[-1]}
-                 - **Llama CPP**: {"✅ Ready" if LLAMA_CPP_AVAILABLE else "❌ Not Available"}
-                 - **Search**: {"✅ Brave API" if BRAVE_KEY else "❌ No API"}
-                 """)
-             with gr.Column(scale=1):
-                 gr.Markdown("""
-                 #### 📻 Podcast Features
-                 - **Length**: 12-15 professional exchanges
-                 - **Style**: Expert discussions with data & insights
-                 - **Languages**: English & Korean (한국어)
-                 - **Input**: URL, PDF, or Keywords
-                 """)
-
-         # Main input section
-         with gr.Group(elem_classes="input-group"):
-             with gr.Row():
-                 # Left: input sources
-                 with gr.Column(scale=2):
-                     # Input type selection
-                     input_type_selector = gr.Radio(
-                         choices=["URL", "PDF", "Keyword"],
-                         value="URL",
-                         label="📥 Input Type",
-                         info="Choose your content source"
-                     )
-
-                     # URL input
-                     url_input = gr.Textbox(
-                         label="🔗 Article URL",
-                         placeholder="Enter the article URL here...",
-                         value="",
-                         visible=True,
-                         lines=2
-                     )
-
-                     # PDF upload
-                     pdf_input = gr.File(
-                         label="📄 Upload PDF",
-                         file_types=[".pdf"],
-                         visible=False
-                     )
-
-                     # Keyword input
-                     keyword_input = gr.Textbox(
-                         label="🔍 Topic/Keyword",
-                         placeholder="Enter a topic (e.g., 'AI trends 2024', '인공지능 최신 동향')",
-                         value="",
-                         visible=False,
-                         info="System will search and compile latest information",
-                         lines=2
-                     )
-
-                 # Right: settings
-                 with gr.Column(scale=1):
-                     # Language selection
-                     language_selector = gr.Radio(
-                         choices=["English", "Korean"],
-                         value="English",
-                         label="🌐 Language / 언어",
-                         info="Output language"
-                     )
-
-                     # Processing mode
-                     mode_selector = gr.Radio(
-                         choices=["Local", "API"],
-                         value="Local",
-                         label="⚙️ Processing Mode",
-                         info="Local: On-device | API: Cloud"
-                     )
-
-                     # TTS engine
-                     tts_selector = gr.Radio(
-                         choices=["Edge-TTS", "Spark-TTS", "MeloTTS"],
-                         value="Edge-TTS",
-                         label="🔊 TTS Engine",
-                         info="Voice synthesis engine"
-                     )
-
-             # Generate button
-             with gr.Row():
-                 convert_btn = gr.Button(
-                     "🎯 Generate Professional Conversation",
-                     variant="primary",
-                     size="lg",
-                     scale=1
-                 )
-
-         # Output section
-         with gr.Group(elem_classes="output-group"):
-             with gr.Row():
-                 # Left: conversation text
-                 with gr.Column(scale=3):
-                     conversation_output = gr.Textbox(
-                         label="💬 Generated Professional Conversation (Editable)",
-                         lines=25,
-                         max_lines=50,
-                         interactive=True,
-                         placeholder="Professional podcast conversation will appear here...\n전문 팟캐스트 대화가 여기에 표시됩니다...",
-                         info="Edit the conversation as needed. Format: 'Speaker Name: Text'"
-                     )
-
-                     # Audio generation button
-                     with gr.Row():
-                         generate_audio_btn = gr.Button(
-                             "🎙️ Generate Audio from Text",
-                             variant="secondary",
-                             size="lg"
-                         )
-
-                 # Right: audio output and status
-                 with gr.Column(scale=2):
-                     audio_output = gr.Audio(
-                         label="🎧 Professional Podcast Audio",
-                         type="filepath",
-                         interactive=False
-                     )
-
-                     status_output = gr.Textbox(
-                         label="📊 Status",
-                         interactive=False,
-                         lines=3,
-                         elem_classes="status-box"
-                     )
-
-         # Help
-         gr.Markdown("""
-         #### 💡 Quick Tips:
-         - **URL**: Paste any article link
-         - **PDF**: Upload documents directly
-         - **Keyword**: Enter topics for AI research
-         - Edit conversation before audio generation
-         - Korean (한국어) fully supported
-         """)
-
-         # Examples section
-         with gr.Accordion("📚 Examples", open=False):
-             gr.Examples(
-                 examples=[
-                     ["https://huggingface.co/blog/openfree/cycle-navigator", "URL", "Local", "Edge-TTS", "English"],
-                     ["quantum computing breakthroughs", "Keyword", "Local", "Edge-TTS", "English"],
-                     ["https://huggingface.co/papers/2505.14810", "URL", "Local", "Edge-TTS", "Korean"],
-                     ["인공지능 윤리와 규제", "Keyword", "Local", "Edge-TTS", "Korean"],
-                 ],
-                 inputs=[url_input, input_type_selector, mode_selector, tts_selector, language_selector],
-                 outputs=[conversation_output, status_output],
-                 fn=synthesize_sync,
-                 cache_examples=False,
-             )
-
-     # Input type change handler
-     input_type_selector.change(
-         fn=toggle_input_visibility,
-         inputs=[input_type_selector],
-         outputs=[url_input, pdf_input, keyword_input]
-     )
-
-     # Update TTS engine options when the language changes
-     language_selector.change(
-         fn=update_tts_engine_for_korean,
-         inputs=[language_selector],
-         outputs=[tts_selector]
-     )
-
-     # Wire up events
-     def get_article_input(input_type, url_input, pdf_input, keyword_input):
-         """Get the appropriate input based on input type"""
-         if input_type == "URL":
-             return url_input
-         elif input_type == "PDF":
-             return pdf_input
-         else:  # Keyword
-             return keyword_input
-
-     convert_btn.click(
-         fn=lambda input_type, url_input, pdf_input, keyword_input, mode, tts, lang: synthesize_sync(
-             get_article_input(input_type, url_input, pdf_input, keyword_input), input_type, mode, tts, lang
-         ),
-         inputs=[input_type_selector, url_input, pdf_input, keyword_input, mode_selector, tts_selector, language_selector],
-         outputs=[conversation_output, status_output]
-     )
-
-     generate_audio_btn.click(
-         fn=regenerate_audio_sync,
-         inputs=[conversation_output, tts_selector, language_selector],
-         outputs=[status_output, audio_output]
-     )
-
-
- # Launch the app
- if __name__ == "__main__":
-     demo.queue(api_open=True, default_concurrency_limit=10).launch(
-         show_api=True,
-         share=False,
-         server_name="0.0.0.0",
-         server_port=7860
-     )
 
+ )
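# Editor's sketch: with show_api=True and queueing enabled, the running app can
# also be driven programmatically. The URL below assumes a local launch; inspect
# view_api() for the actual endpoint names and signatures rather than assuming them.
from gradio_client import Client

client = Client("http://localhost:7860/")
print(client.view_api())  # Lists callable endpoints with their parameters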