IDEA-DESIGN / app.py
ginipick's picture
Update app.py
799b037 verified
raw
history blame
18.5 kB
"""
try:
client = get_openai_client()
with st.spinner("GPTμ—κ²Œ λ””μžμΈ 아이디어 생성 쀑..."):
response = client.chat.completions.create(
model="gpt-4.1-mini",
messages=[{"role": "user", "content": prompt}],
temperature=0.9,
max_tokens=2500,
)
result_text = response.choices[0].message.content
st.markdown(result_text)
except Exception as e:
st.error(f"였λ₯˜ λ°œμƒ: {e}")
# ──────────────────────────────── Streamlit main app ──────────────────────
def idea_generator_app():
    """Render the 'IlΓΊvatar' decision-support Streamlit UI.

    Responsibilities:
      * seed session-state defaults (model, history, toggles, frameworks),
      * build the sidebar (temperature slider, web-search / Kaggle toggles,
        framework multiselect, example-topic buttons, invention section,
        latest-result downloads, conversation-history load/save),
      * show the optional file uploader with previews,
      * replay the stored chat history (skipping the just-rendered message
        recorded in ``_skip_dup_idx`` to avoid double rendering),
      * dispatch new chat input to ``process_input``.
    """
    st.title("IlΓΊvatar(일루바타λ₯΄) : Decision Support AI")
    st.caption("'일루바타λ₯΄'λŠ” 빅데이터λ₯Ό 자율적으둜 μˆ˜μ§‘Β·λΆ„μ„ν•˜μ—¬ 12μ–΅ 개 μ΄μƒμ˜ 볡합 μ˜μ‚¬κ²°μ • λ³€μˆ˜λ₯Ό μ‹€μ‹œκ°„ 병렬 처리, μ „λž΅μ  톡찰을 λ„μΆœν•˜λŠ” μ΄ˆμ§€λŠ₯ν˜• μ˜μ‚¬κ²°μ • μ‹œμŠ€ν…œμž…λ‹ˆλ‹€.")

    # Session-state defaults, applied only on first run of a session.
    default_vals = {
        "ai_model": "gpt-4.1-mini",
        "messages": [],
        "auto_save": True,
        "generate_image": True,
        "web_search_enabled": True,
        "kaggle_enabled": True,
        "selected_frameworks": ["sunzi"],
        "GLOBAL_PICK_COUNT": {},
        "_skip_dup_idx": None,
    }
    for k, v in default_vals.items():
        if k not in st.session_state:
            st.session_state[k] = v

    sb = st.sidebar
    st.session_state.temp = sb.slider(
        "Diversity temperature", 0.1, 3.0, 1.3, 0.1,
        help="0.1 = μ—°κ΄€μ„± μœ„μ£Ό, 3.0 = 맀우 높은 λ‹€μ–‘μ„±"
    )
    sb.title("Decision Support Settings")
    sb.toggle("Auto Save", key="auto_save")
    sb.toggle("Auto Image Generation", key="generate_image")
    st.session_state.web_search_enabled = sb.toggle(
        "Use Web Search", value=st.session_state.web_search_enabled
    )
    st.session_state.kaggle_enabled = sb.toggle(
        "Use Kaggle Datasets", value=st.session_state.kaggle_enabled
    )
    if st.session_state.web_search_enabled:
        sb.info("βœ… Web search results will be integrated.")
    if st.session_state.kaggle_enabled:
        if KAGGLE_KEY:
            sb.info("βœ… Kaggle datasets will be analyzed.")
        else:
            # No API key: warn and force the toggle off for this run.
            sb.error("⚠️ KAGGLE_KEY not set. Kaggle integration disabled.")
            st.session_state.kaggle_enabled = False

    # Analysis-framework selection.
    sb.subheader("뢄석 ν”„λ ˆμž„μ›Œν¬ μ„€μ •")
    selected_frameworks = sb.multiselect(
        "μ‚¬μš©ν•  경영 ν”„λ ˆμž„μ›Œν¬ 선택",
        options=list(BUSINESS_FRAMEWORKS.keys()),
        default=st.session_state.selected_frameworks,
        format_func=lambda x: BUSINESS_FRAMEWORKS[x]
    )
    # Fall back to "sunzi" when the user clears every selection.
    st.session_state.selected_frameworks = selected_frameworks or ["sunzi"]

    # Example decision topics.
    example_topics = {
        "example1": "μŠ€λ§ˆνŠΈν™ˆ ν™˜κ²½μ—μ„œ μ‚¬μš©μž κ²½ν—˜μ„ κ°œμ„ ν•  수 μžˆλŠ” μƒˆλ‘œμš΄ κ°€μ „μ œν’ˆ λ””μžμΈ μ˜μ‚¬κ²°μ •",
        "example2": "μΉœν™˜κ²½ μ—λ„ˆμ§€ λΆ„μ•Ό μ§„μΆœμ„ μœ„ν•œ 졜적 λΉ„μ¦ˆλ‹ˆμŠ€ λͺ¨λΈ 선택 μ˜μ‚¬κ²°μ •",
        "example3": "2030λ…„ 의료 ν—¬μŠ€μΌ€μ–΄ μ‚°μ—…μ˜ 기술 λ°œμ „ λ°©ν–₯κ³Ό 투자 μ „λž΅ μ˜μ‚¬κ²°μ •",
    }
    sb.subheader("Example Decision Topics")
    c1, c2, c3 = sb.columns(3)
    if c1.button("μ œν’ˆ λ””μžμΈ μ˜μ‚¬κ²°μ •", key="ex1"):
        process_example(example_topics["example1"])
    if c2.button("신사업 μ§„μΆœ μ „λž΅", key="ex2"):
        process_example(example_topics["example2"])
    if c3.button("μ‚°μ—… 미래 전망", key="ex3"):
        process_example(example_topics["example3"])

    # Design / invention section.
    sb.subheader("λ””μžμΈ/발λͺ…")
    with sb.expander("λ””μžμΈ/발λͺ… 아이디어 생성", expanded=True):
        invention_keyword = st.text_input("ν‚€μ›Œλ“œ ν”„λ‘¬ν”„νŠΈ", key="invention_keyword")
        if st.button("λ””μžμΈ/발λͺ… 아이디어 μ‹€ν–‰"):
            process_invention_ideas(invention_keyword)

    # Offer downloads for the most recent non-empty assistant answer.
    latest_ideas = next(
        (m["content"] for m in reversed(st.session_state.messages)
         if m["role"] == "assistant" and m["content"].strip()),
        None
    )
    if latest_ideas:
        title_match = re.search(r"# (.*?)(\n|$)", latest_ideas)
        # NOTE(review): the extracted title is used verbatim as a file
        # name; a title containing '/' or other reserved characters may
        # produce an invalid download name — consider sanitizing.
        title = (title_match.group(1) if title_match else "ideas").strip()
        sb.subheader("Download Latest Ideas")
        d1, d2 = sb.columns(2)
        d1.download_button("Download as Markdown", latest_ideas,
                           file_name=f"{title}.md", mime="text/markdown")
        d2.download_button("Download as HTML", md_to_html(latest_ideas, title),
                           file_name=f"{title}.html", mime="text/html")

    # Conversation-history upload / download.
    up = sb.file_uploader("Load Conversation History (.json)",
                          type=["json"], key="json_uploader")
    if up:
        try:
            # NOTE(review): no schema validation — assumes the file holds a
            # list of {"role", "content"} dicts; verify before trusting.
            st.session_state.messages = json.load(up)
            sb.success("Conversation history loaded successfully")
        except Exception as e:
            sb.error(f"Failed to load: {e}")
    # FIX: the original nested sb.download_button inside an `if sb.button(...)`
    # handler, so the download widget only existed for the single rerun after
    # the click and the file could not reliably be saved. Render the download
    # button directly instead.
    sb.download_button(
        "Download Conversation as JSON",
        data=json.dumps(st.session_state.messages, ensure_ascii=False, indent=2),
        file_name="chat_history.json",
        mime="application/json"
    )

    # Optional reference-file upload with lightweight previews.
    st.subheader("File Upload (Optional)")
    uploaded_files = st.file_uploader(
        "Upload files to reference in the idea generation (txt, csv, pdf)",
        type=["txt", "csv", "pdf"],
        accept_multiple_files=True,
        key="file_uploader"
    )
    if uploaded_files:
        st.success(f"{len(uploaded_files)} files uploaded.")
        with st.expander("Preview Uploaded Files", expanded=False):
            for idx, file in enumerate(uploaded_files):
                st.write(f"**File Name:** {file.name}")
                ext = file.name.split('.')[-1].lower()
                try:
                    if ext == 'txt':
                        preview = file.read(1000).decode('utf-8', errors='ignore')
                        file.seek(0)  # rewind so process_input can re-read
                        st.text_area("Preview", preview + ("..." if len(preview) >= 1000 else ""), height=150)
                    elif ext == 'csv':
                        df = pd.read_csv(file)
                        file.seek(0)
                        st.dataframe(df.head(5))
                    elif ext == 'pdf':
                        reader = PyPDF2.PdfReader(io.BytesIO(file.read()), strict=False)
                        file.seek(0)
                        pg_txt = reader.pages[0].extract_text() if reader.pages else "(No text)"
                        st.text_area("Preview", (pg_txt[:500] + "...") if pg_txt else "(No text)", height=150)
                except Exception as e:
                    st.error(f"Preview failed: {e}")
                if idx < len(uploaded_files) - 1:
                    st.divider()

    # Replay stored history, skipping the message that was just streamed
    # live (its index is remembered in _skip_dup_idx to avoid a duplicate).
    skip_idx = st.session_state.get("_skip_dup_idx")
    for i, m in enumerate(st.session_state.messages):
        if skip_idx is not None and i == skip_idx:
            continue
        with st.chat_message(m["role"]):
            st.markdown(m["content"])
            if "image" in m:
                st.image(m["image"], caption=m.get("image_caption", ""))
    st.session_state["_skip_dup_idx"] = None

    # Chat input dispatches the main pipeline.
    prompt = st.chat_input("μ˜μ‚¬ 결정에 도움이 ν•„μš”ν•œ μƒν™©μ΄λ‚˜ 문제λ₯Ό μ„€λͺ…ν•΄ μ£Όμ„Έμš”.")
    if prompt:
        process_input(prompt, uploaded_files)

    sb.markdown("---")
    sb.markdown("Created by [VIDraft](https://discord.gg/openfreeai)")
def process_example(topic):
    """Run the idea pipeline on a canned example topic (no uploads)."""
    no_files = []
    process_input(topic, no_files)
def process_input(prompt: str, uploaded_files):
    """Handle one user request end-to-end.

    Pipeline: record the user turn, gather optional context (web search,
    Kaggle datasets, uploaded files, military-tactics dataset), run the
    internal decision analyses (purpose detection, relevance scoring,
    comparison-matrix generation, business frameworks), stream the GPT
    answer, optionally generate an illustrative image, offer per-answer
    downloads, and auto-save the conversation.

    Parameters:
        prompt: the user's free-text decision question.
        uploaded_files: Streamlit UploadedFile list (may be empty/None).
    """
    # Append the user turn only once (guards against duplicates on rerun).
    if not any(m["role"] == "user" and m["content"] == prompt for m in st.session_state.messages):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
    # If this exact prompt already has an assistant answer immediately
    # after it in history, skip regeneration entirely.
    for i in range(len(st.session_state.messages) - 1):
        if (st.session_state.messages[i]["role"] == "user"
                and st.session_state.messages[i]["content"] == prompt
                and st.session_state.messages[i + 1]["role"] == "assistant"):
            return
    with st.chat_message("assistant"):
        status = st.status("Preparing to generate ideas…")
        stream_placeholder = st.empty()
        full_response = ""
        try:
            client = get_openai_client()
            status.update(label="Initializing model…")
            selected_cat = st.session_state.get("category_focus", None)
            selected_frameworks = st.session_state.get("selected_frameworks", ["sunzi"])
            sys_prompt = get_idea_system_prompt(
                selected_category=selected_cat,
                selected_frameworks=selected_frameworks
            )

            def category_context(sel):
                # One focused category as JSON, or the full category list.
                if sel:
                    return json.dumps({sel: physical_transformation_categories[sel]}, ensure_ascii=False)
                return "ALL_CATEGORIES: " + ", ".join(physical_transformation_categories.keys())

            use_web_search = st.session_state.web_search_enabled
            use_kaggle = st.session_state.kaggle_enabled
            has_uploaded = bool(uploaded_files)
            search_content = None
            kaggle_content = None
            file_content = None
            # β‘  Web search
            if use_web_search:
                status.update(label="Searching the web…")
                with st.spinner("Searching…"):
                    search_content = do_web_search(keywords(prompt, top=5))
            # β‘‘ Kaggle datasets
            if use_kaggle and check_kaggle_availability():
                status.update(label="Kaggle 데이터셋 뢄석 쀑…")
                with st.spinner("Searching Kaggle…"):
                    kaggle_kw = extract_kaggle_search_keywords(prompt)
                    try:
                        datasets = search_kaggle_datasets(kaggle_kw)
                    except Exception as e:
                        # Best-effort: a failed search degrades to "no datasets".
                        logging.warning(f"search_kaggle_datasets 였λ₯˜ λ¬΄μ‹œ: {e}")
                        datasets = []
                    analyses = []
                    if datasets:
                        status.update(label="Downloading & analysing datasets…")
                        for ds in datasets:
                            try:
                                ana = download_and_analyze_dataset(ds["ref"])
                            except Exception as e:
                                # Keep going; record the failure as the analysis text.
                                logging.error(f"Kaggle 뢄석 였λ₯˜({ds['ref']}) : {e}")
                                ana = f"데이터셋 뢄석 였λ₯˜: {e}"
                            analyses.append({"meta": ds, "analysis": ana})
                    if analyses:
                        kaggle_content = format_kaggle_analysis_markdown_multi(analyses)
            # β‘’ Uploaded files
            if has_uploaded:
                status.update(label="Reading uploaded files…")
                with st.spinner("Processing files…"):
                    file_content = process_uploaded_files(uploaded_files)
            # β‘£ Military tactics dataset (newly added source)
            mil_content = None
            if is_military_query(prompt):
                status.update(label="Searching military tactics dataset…")
                with st.spinner("Loading military insights…"):
                    mil_rows = military_search(prompt)
                    if mil_rows:
                        mil_content = "# Military Tactics Dataset Reference\n\n"
                        for i, row in enumerate(mil_rows, 1):
                            mil_content += (
                                f"### Case {i}\n"
                                f"**Scenario:** {row['scenario_description']}\n\n"
                                f"**Attack Reasoning:** {row['attack_reasoning']}\n\n"
                                f"**Defense Reasoning:** {row['defense_reasoning']}\n\n---\n"
                            )
            # Concatenate all gathered context after the raw prompt.
            user_content = prompt
            if search_content:
                user_content += "\n\n" + search_content
            if kaggle_content:
                user_content += "\n\n" + kaggle_content
            if file_content:
                user_content += "\n\n" + file_content
            if mil_content:
                user_content += "\n\n" + mil_content
            # Internal analysis
            status.update(label="μ˜μ‚¬ κ²°μ • 문제 뢄석 쀑…")
            decision_purpose = identify_decision_purpose(prompt)
            relevance_scores = compute_relevance_scores(prompt, PHYS_CATEGORIES)
            status.update(label="μ˜μ‚¬ κ²°μ • 맀트릭슀 생성 쀑…")
            # Higher diversity temperature widens category/item/depth ranges.
            T = st.session_state.temp
            k_cat_range = (4, 8) if T < 1.0 else (6, 10) if T < 2.0 else (8, 12)
            n_item_range = (2, 4) if T < 1.0 else (3, 6) if T < 2.0 else (4, 8)
            depth_range = (2, 3) if T < 1.0 else (2, 5) if T < 2.0 else (2, 6)
            # NOTE(review): hash(prompt) is randomized per process
            # (PYTHONHASHSEED), so this seed is only stable within one
            # run — use a stable digest if cross-run reproducibility matters.
            combos = generate_random_comparison_matrix(
                PHYS_CATEGORIES,
                relevance_scores,
                k_cat=k_cat_range,
                n_item=n_item_range,
                depth_range=depth_range,
                seed=hash(prompt) & 0xFFFFFFFF,
                T=T,
            )
            # Render the matrix as a Markdown table for the model.
            combos_table = "| μ‘°ν•© | κ°€μ€‘μΉ˜ | 영ν–₯도 | 신뒰도 | 총점 |\n|------|--------|--------|--------|-----|\n"
            for w, imp, conf, tot, cmb in combos:
                combo_str = " + ".join(f"{c[0]}-{c[1]}" for c in cmb)
                combos_table += f"| {combo_str} | {w} | {imp} | {conf:.1f} | {tot} |\n"
            purpose_info = "\n\n## μ˜μ‚¬ κ²°μ • λͺ©μ  뢄석\n"
            if decision_purpose['purposes']:
                purpose_info += "### μ£Όμš” λͺ©μ \n"
                for p, s in decision_purpose['purposes']:
                    purpose_info += f"- **{p}** (κ΄€λ ¨μ„±: {s})\n"
            if decision_purpose['constraints']:
                purpose_info += "\n### μ£Όμš” μ œμ•½ 쑰건\n"
                for c, s in decision_purpose['constraints']:
                    purpose_info += f"- **{c}** (κ΄€λ ¨μ„±: {s})\n"
            # Optional business-framework analyses selected in the sidebar.
            framework_contents = []
            if "swot" in selected_frameworks:
                swot_res = analyze_with_swot(prompt)
                framework_contents.append(format_business_framework_analysis("swot", swot_res))
            if "porter" in selected_frameworks:
                porter_res = analyze_with_porter(prompt)
                framework_contents.append(format_business_framework_analysis("porter", porter_res))
            if "bcg" in selected_frameworks:
                bcg_res = analyze_with_bcg(prompt)
                framework_contents.append(format_business_framework_analysis("bcg", bcg_res))
            if framework_contents:
                user_content += "\n\n## 경영 ν”„λ ˆμž„μ›Œν¬ 뢄석 κ²°κ³Ό\n\n" + "\n\n".join(framework_contents)
            user_content += f"\n\n## μ˜μ‚¬ κ²°μ • 맀트릭슀 뢄석{purpose_info}\n{combos_table}"
            status.update(label="Generating ideas…")
            api_messages = [
                {"role": "system", "content": sys_prompt},
                {"role": "system", "name": "category_db", "content": category_context(selected_cat)},
                {"role": "user", "content": user_content},
            ]
            # Stream the completion, updating the placeholder incrementally.
            stream = client.chat.completions.create(
                model="gpt-4.1-mini",
                messages=api_messages,
                temperature=1,
                max_tokens=MAX_TOKENS,
                top_p=1,
                stream=True
            )
            for chunk in stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    stream_placeholder.markdown(full_response + "β–Œ")  # cursor effect
            stream_placeholder.markdown(full_response)
            status.update(label="Ideas created!", state="complete")
            # Image generation from an "image prompt" section in the answer.
            img_data = img_caption = None
            if st.session_state.generate_image and full_response:
                match = re.search(r"###\s*이미지\s*ν”„λ‘¬ν”„νŠΈ\s*\n+([^\n]+)", full_response, re.I)
                if not match:
                    match = re.search(r"Image\s+Prompt\s*[:\-]\s*([^\n]+)", full_response, re.I)
                if match:
                    # Strip quotes/newlines/backslashes that would break the prompt.
                    raw_prompt = re.sub(r'[\r\n"\'\\]', " ", match.group(1)).strip()
                    with st.spinner("아이디어 이미지 생성 쀑…"):
                        img_data, img_caption = generate_image(raw_prompt)
                    if img_data:
                        st.image(img_data, caption=f"아이디어 μ‹œκ°ν™” – {img_caption}")
            answer_msg = {"role": "assistant", "content": full_response}
            if img_data:
                answer_msg["image"] = img_data
                answer_msg["image_caption"] = img_caption
            # Remember this index so the history replay skips the message
            # that was already rendered live above.
            st.session_state["_skip_dup_idx"] = len(st.session_state.messages)
            st.session_state.messages.append(answer_msg)
            # Download buttons
            st.subheader("Download This Output")
            col_md, col_html = st.columns(2)
            # NOTE(review): prompt[:30] is used verbatim as a file name and
            # may contain characters invalid on some filesystems.
            col_md.download_button(
                "Markdown",
                data=full_response,
                file_name=f"{prompt[:30]}.md",
                mime="text/markdown"
            )
            col_html.download_button(
                "HTML",
                data=md_to_html(full_response, prompt[:30]),
                file_name=f"{prompt[:30]}.html",
                mime="text/html"
            )
            if st.session_state.auto_save:
                # Timestamped JSON snapshot of the full conversation.
                fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
                with open(fn, "w", encoding="utf-8") as fp:
                    json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
        except Exception as e:
            # Surface the error in the chat and keep it in history.
            logging.error("process_input error", exc_info=True)
            st.error(f"⚠️ μž‘μ—… 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {e}")
            st.session_state.messages.append(
                {"role": "assistant", "content": f"⚠️ 였λ₯˜: {e}"}
            )
def main():
    """Entry point: launch the Streamlit idea-generator application."""
    idea_generator_app()
# Run the app only when executed as a script (not when imported).
if __name__ == "__main__":
    main()