# (Hugging Face Spaces page-header residue removed: "Spaces: Running Running")
""" | |
try: | |
client = get_openai_client() | |
with st.spinner("GPTμκ² λμμΈ μμ΄λμ΄ μμ± μ€..."): | |
response = client.chat.completions.create( | |
model="gpt-4.1-mini", | |
messages=[{"role": "user", "content": prompt}], | |
temperature=0.9, | |
max_tokens=2500, | |
) | |
result_text = response.choices[0].message.content | |
st.markdown(result_text) | |
except Exception as e: | |
st.error(f"μ€λ₯ λ°μ: {e}") | |
# ──────────────────────────────── Streamlit main app ────────────────────────────────
def idea_generator_app():
    """Build the Streamlit UI for the 'IlΓΊvatar' decision-support assistant.

    Responsibilities:
      * seed ``st.session_state`` with defaults,
      * render the sidebar (diversity slider, save/image/search/Kaggle
        toggles, framework multiselect, example topics, design/invention
        expander, latest-ideas downloads, history import/export),
      * render the optional file-upload panel with previews,
      * replay prior chat messages (skipping one duplicate index), and
      * forward new chat input to ``process_input()``.
    """
    st.title("IlΓΊvatar(μΌλ£¨λ°νλ₯΄) : Decision Support AI")
    st.caption("'μΌλ£¨λ°νλ₯΄'λ λΉ λ°μ΄ν°λ₯Ό μμ¨μ μΌλ‘ μμ§Β·λΆμνμ¬ 12μ΅ κ° μ΄μμ λ³΅ν© μμ¬κ²°μ λ³μλ₯Ό μ€μκ° λ³λ ¬ μ²λ¦¬, μ λ΅μ ν΅μ°°μ λμΆνλ μ΄μ§λ₯ν μμ¬κ²°μ μμ€ν μ λλ€.")
    # Seed session-state defaults once per browser session.
    default_vals = {
        "ai_model": "gpt-4.1-mini",
        "messages": [],
        "auto_save": True,
        "generate_image": True,
        "web_search_enabled": True,
        "kaggle_enabled": True,
        "selected_frameworks": ["sunzi"],
        "GLOBAL_PICK_COUNT": {},
        "_skip_dup_idx": None,
    }
    for k, v in default_vals.items():
        if k not in st.session_state:
            st.session_state[k] = v
    sb = st.sidebar
    # Diversity temperature drives the decision-matrix ranges in process_input().
    st.session_state.temp = sb.slider(
        "Diversity temperature", 0.1, 3.0, 1.3, 0.1,
        help="0.1 = μ°κ΄μ± μμ£Ό, 3.0 = λ§€μ° λμ λ€μμ±"
    )
    sb.title("Decision Support Settings")
    sb.toggle("Auto Save", key="auto_save")
    sb.toggle("Auto Image Generation", key="generate_image")
    st.session_state.web_search_enabled = sb.toggle(
        "Use Web Search", value=st.session_state.web_search_enabled
    )
    st.session_state.kaggle_enabled = sb.toggle(
        "Use Kaggle Datasets", value=st.session_state.kaggle_enabled
    )
    if st.session_state.web_search_enabled:
        sb.info("β Web search results will be integrated.")
    if st.session_state.kaggle_enabled:
        if KAGGLE_KEY:
            sb.info("β Kaggle datasets will be analyzed.")
        else:
            # No credentials: disable the feature for the rest of this run.
            sb.error("β οΈ KAGGLE_KEY not set. Kaggle integration disabled.")
            st.session_state.kaggle_enabled = False
    # Analysis framework selection
    sb.subheader("λΆμ νλ μμν¬ μ€μ ")
    selected_frameworks = sb.multiselect(
        "μ¬μ©ν κ²½μ νλ μμν¬ μ ν",
        options=list(BUSINESS_FRAMEWORKS.keys()),
        default=st.session_state.selected_frameworks,
        format_func=lambda x: BUSINESS_FRAMEWORKS[x]
    )
    # Never allow an empty selection: fall back to the "sunzi" framework.
    st.session_state.selected_frameworks = selected_frameworks or ["sunzi"]
    # Example topics
    example_topics = {
        "example1": "μ€λ§νΈν νκ²½μμ μ¬μ©μ κ²½νμ κ°μ ν μ μλ μλ‘μ΄ κ°μ μ ν λμμΈ μμ¬κ²°μ ",
        "example2": "μΉνκ²½ μλμ§ λΆμΌ μ§μΆμ μν μ΅μ λΉμ¦λμ€ λͺ¨λΈ μ ν μμ¬κ²°μ ",
        "example3": "2030λ μλ£ ν¬μ€μΌμ΄ μ°μ μ κΈ°μ λ°μ λ°©ν₯κ³Ό ν¬μ μ λ΅ μμ¬κ²°μ "
    }
    sb.subheader("Example Decision Topics")
    c1, c2, c3 = sb.columns(3)
    if c1.button("μ ν λμμΈ μμ¬κ²°μ ", key="ex1"):
        process_example(example_topics["example1"])
    if c2.button("μ μ¬μ μ§μΆ μ λ΅", key="ex2"):
        process_example(example_topics["example2"])
    if c3.button("μ°μ λ―Έλ μ λ§", key="ex3"):
        process_example(example_topics["example3"])
    # (New) design / invention section
    sb.subheader("λμμΈ/λ°λͺ ")
    with sb.expander("λμμΈ/λ°λͺ μμ΄λμ΄ μμ±", expanded=True):
        invention_keyword = st.text_input("ν€μλ ν둬ννΈ", key="invention_keyword")
        if st.button("λμμΈ/λ°λͺ μμ΄λμ΄ μ€ν"):
            process_invention_ideas(invention_keyword)
    # Download the most recent non-empty assistant output, if any.
    latest_ideas = next(
        (m["content"] for m in reversed(st.session_state.messages)
         if m["role"] == "assistant" and m["content"].strip()),
        None
    )
    if latest_ideas:
        # Use the first markdown H1 as the download file name, else "ideas".
        title_match = re.search(r"# (.*?)(\n|$)", latest_ideas)
        title = (title_match.group(1) if title_match else "ideas").strip()
        sb.subheader("Download Latest Ideas")
        d1, d2 = sb.columns(2)
        d1.download_button("Download as Markdown", latest_ideas,
                           file_name=f"{title}.md", mime="text/markdown")
        d2.download_button("Download as HTML", md_to_html(latest_ideas, title),
                           file_name=f"{title}.html", mime="text/html")
    # Conversation history import / export
    up = sb.file_uploader("Load Conversation History (.json)",
                          type=["json"], key="json_uploader")
    if up:
        try:
            st.session_state.messages = json.load(up)
            sb.success("Conversation history loaded successfully")
        except Exception as e:
            sb.error(f"Failed to load: {e}")
    # FIX: the download_button used to be nested inside `if sb.button(...)`,
    # a Streamlit anti-pattern — the inner button only appeared for a single
    # rerun and required two clicks. Render the download button directly.
    sb.download_button(
        "Download Conversation as JSON",
        data=json.dumps(st.session_state.messages, ensure_ascii=False, indent=2),
        file_name="chat_history.json",
        mime="application/json"
    )
    # File upload
    st.subheader("File Upload (Optional)")
    uploaded_files = st.file_uploader(
        "Upload files to reference in the idea generation (txt, csv, pdf)",
        type=["txt", "csv", "pdf"],
        accept_multiple_files=True,
        key="file_uploader"
    )
    if uploaded_files:
        st.success(f"{len(uploaded_files)} files uploaded.")
        with st.expander("Preview Uploaded Files", expanded=False):
            for idx, file in enumerate(uploaded_files):
                st.write(f"**File Name:** {file.name}")
                ext = file.name.split('.')[-1].lower()
                try:
                    if ext == 'txt':
                        # Peek at up to 1000 bytes, then rewind for later use.
                        preview = file.read(1000).decode('utf-8', errors='ignore')
                        file.seek(0)
                        st.text_area("Preview", preview + ("..." if len(preview) >= 1000 else ""), height=150)
                    elif ext == 'csv':
                        df = pd.read_csv(file)
                        file.seek(0)
                        st.dataframe(df.head(5))
                    elif ext == 'pdf':
                        reader = PyPDF2.PdfReader(io.BytesIO(file.read()), strict=False)
                        file.seek(0)
                        pg_txt = reader.pages[0].extract_text() if reader.pages else "(No text)"
                        st.text_area("Preview", (pg_txt[:500] + "...") if pg_txt else "(No text)", height=150)
                except Exception as e:
                    st.error(f"Preview failed: {e}")
                if idx < len(uploaded_files) - 1:
                    st.divider()
    # Replay prior messages; skip the one index that was already rendered
    # live by process_input() on this rerun (duplicate guard).
    skip_idx = st.session_state.get("_skip_dup_idx")
    for i, m in enumerate(st.session_state.messages):
        if skip_idx is not None and i == skip_idx:
            continue
        with st.chat_message(m["role"]):
            st.markdown(m["content"])
            if "image" in m:
                st.image(m["image"], caption=m.get("image_caption", ""))
    st.session_state["_skip_dup_idx"] = None
    # Chat input
    prompt = st.chat_input("μμ¬ κ²°μ μ λμμ΄ νμν μν©μ΄λ λ¬Έμ λ₯Ό μ€λͺ ν΄ μ£ΌμΈμ.")
    if prompt:
        process_input(prompt, uploaded_files)
    sb.markdown("---")
    sb.markdown("Created by [VIDraft](https://discord.gg/openfreeai)")
def process_example(topic):
    """Feed a canned example topic through the normal chat pipeline.

    No uploaded files accompany example topics, so an empty list is passed.
    """
    process_input(topic, uploaded_files=[])
def process_input(prompt: str, uploaded_files):
    """Handle one chat turn: gather context, stream the LLM answer, persist it.

    Pipeline: de-duplicate the user message, optionally enrich the prompt
    with web-search results, Kaggle dataset analyses, uploaded-file content
    and military-tactics cases, append a decision matrix and business-
    framework analyses, stream the chat completion, optionally generate an
    illustrative image, offer download buttons, and auto-save the history.

    Args:
        prompt: The user's chat message.
        uploaded_files: Streamlit UploadedFile objects (or an empty list).
    """
    # Append the user message only if the identical prompt is not already
    # in the history (guards against Streamlit reruns).
    if not any(m["role"] == "user" and m["content"] == prompt for m in st.session_state.messages):
        st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # If this exact prompt was already answered by an assistant message,
    # bail out instead of generating a second answer.
    for i in range(len(st.session_state.messages) - 1):
        if (st.session_state.messages[i]["role"] == "user"
                and st.session_state.messages[i]["content"] == prompt
                and st.session_state.messages[i + 1]["role"] == "assistant"):
            return
    with st.chat_message("assistant"):
        status = st.status("Preparing to generate ideasβ¦")
        stream_placeholder = st.empty()  # live-updated while streaming
        full_response = ""
        try:
            client = get_openai_client()
            status.update(label="Initializing modelβ¦")
            selected_cat = st.session_state.get("category_focus", None)
            selected_frameworks = st.session_state.get("selected_frameworks", ["sunzi"])
            sys_prompt = get_idea_system_prompt(
                selected_category=selected_cat,
                selected_frameworks=selected_frameworks
            )

            def category_context(sel):
                # JSON for one focused category, or a flat list of all names.
                if sel:
                    return json.dumps({sel: physical_transformation_categories[sel]}, ensure_ascii=False)
                return "ALL_CATEGORIES: " + ", ".join(physical_transformation_categories.keys())

            use_web_search = st.session_state.web_search_enabled
            use_kaggle = st.session_state.kaggle_enabled
            has_uploaded = bool(uploaded_files)
            search_content = None
            kaggle_content = None
            file_content = None
            # ① Web search
            if use_web_search:
                status.update(label="Searching the webβ¦")
                with st.spinner("Searchingβ¦"):
                    search_content = do_web_search(keywords(prompt, top=5))
            # ② Kaggle datasets (best-effort: search errors are logged and ignored)
            if use_kaggle and check_kaggle_availability():
                status.update(label="Kaggle λ°μ΄ν°μ λΆμ μ€β¦")
                with st.spinner("Searching Kaggleβ¦"):
                    kaggle_kw = extract_kaggle_search_keywords(prompt)
                    try:
                        datasets = search_kaggle_datasets(kaggle_kw)
                    except Exception as e:
                        logging.warning(f"search_kaggle_datasets μ€λ₯ 무μ: {e}")
                        datasets = []
                    analyses = []
                    if datasets:
                        status.update(label="Downloading & analysing datasetsβ¦")
                        for ds in datasets:
                            try:
                                ana = download_and_analyze_dataset(ds["ref"])
                            except Exception as e:
                                # Per-dataset failure becomes part of the report
                                # rather than aborting the whole turn.
                                logging.error(f"Kaggle λΆμ μ€λ₯({ds['ref']}) : {e}")
                                ana = f"λ°μ΄ν°μ λΆμ μ€λ₯: {e}"
                            analyses.append({"meta": ds, "analysis": ana})
                    if analyses:
                        kaggle_content = format_kaggle_analysis_markdown_multi(analyses)
            # ③ Uploaded files
            if has_uploaded:
                status.update(label="Reading uploaded filesβ¦")
                with st.spinner("Processing filesβ¦"):
                    file_content = process_uploaded_files(uploaded_files)
            # ④ Military tactics dataset (newly added)
            mil_content = None
            if is_military_query(prompt):
                status.update(label="Searching military tactics datasetβ¦")
                with st.spinner("Loading military insightsβ¦"):
                    mil_rows = military_search(prompt)
                    if mil_rows:
                        mil_content = "# Military Tactics Dataset Reference\n\n"
                        for i, row in enumerate(mil_rows, 1):
                            mil_content += (
                                f"### Case {i}\n"
                                f"**Scenario:** {row['scenario_description']}\n\n"
                                f"**Attack Reasoning:** {row['attack_reasoning']}\n\n"
                                f"**Defense Reasoning:** {row['defense_reasoning']}\n\n---\n"
                            )
            # Assemble the enriched user message from whichever sources produced output.
            user_content = prompt
            if search_content:
                user_content += "\n\n" + search_content
            if kaggle_content:
                user_content += "\n\n" + kaggle_content
            if file_content:
                user_content += "\n\n" + file_content
            if mil_content:
                user_content += "\n\n" + mil_content
            # Internal analysis: purpose/constraint extraction + relevance scoring.
            status.update(label="μμ¬ κ²°μ λ¬Έμ λΆμ μ€β¦")
            decision_purpose = identify_decision_purpose(prompt)
            relevance_scores = compute_relevance_scores(prompt, PHYS_CATEGORIES)
            status.update(label="μμ¬ κ²°μ λ§€νΈλ¦μ€ μμ± μ€β¦")
            # Higher diversity temperature widens category/item/depth ranges.
            T = st.session_state.temp
            k_cat_range = (4, 8) if T < 1.0 else (6, 10) if T < 2.0 else (8, 12)
            n_item_range = (2, 4) if T < 1.0 else (3, 6) if T < 2.0 else (4, 8)
            depth_range = (2, 3) if T < 1.0 else (2, 5) if T < 2.0 else (2, 6)
            # NOTE(review): hash(prompt) is salted per process (PYTHONHASHSEED),
            # so this seed is not reproducible across runs — confirm intent.
            combos = generate_random_comparison_matrix(
                PHYS_CATEGORIES,
                relevance_scores,
                k_cat=k_cat_range,
                n_item=n_item_range,
                depth_range=depth_range,
                seed=hash(prompt) & 0xFFFFFFFF,
                T=T,
            )
            # Render combos as a markdown table (combo | weight | impact | confidence | total).
            combos_table = "| μ‘°ν© | κ°μ€μΉ | μν₯λ | μ λ’°λ | μ΄μ |\n|------|--------|--------|--------|-----|\n"
            for w, imp, conf, tot, cmb in combos:
                combo_str = " + ".join(f"{c[0]}-{c[1]}" for c in cmb)
                combos_table += f"| {combo_str} | {w} | {imp} | {conf:.1f} | {tot} |\n"
            purpose_info = "\n\n## μμ¬ κ²°μ λͺ©μ λΆμ\n"
            if decision_purpose['purposes']:
                purpose_info += "### μ£Όμ λͺ©μ \n"
                for p, s in decision_purpose['purposes']:
                    purpose_info += f"- **{p}** (κ΄λ ¨μ±: {s})\n"
            if decision_purpose['constraints']:
                purpose_info += "\n### μ£Όμ μ μ½ μ‘°κ±΄\n"
                for c, s in decision_purpose['constraints']:
                    purpose_info += f"- **{c}** (κ΄λ ¨μ±: {s})\n"
            # Optional business-framework analyses selected in the sidebar.
            framework_contents = []
            if "swot" in selected_frameworks:
                swot_res = analyze_with_swot(prompt)
                framework_contents.append(format_business_framework_analysis("swot", swot_res))
            if "porter" in selected_frameworks:
                porter_res = analyze_with_porter(prompt)
                framework_contents.append(format_business_framework_analysis("porter", porter_res))
            if "bcg" in selected_frameworks:
                bcg_res = analyze_with_bcg(prompt)
                framework_contents.append(format_business_framework_analysis("bcg", bcg_res))
            if framework_contents:
                user_content += "\n\n## κ²½μ νλ μμν¬ λΆμ κ²°κ³Ό\n\n" + "\n\n".join(framework_contents)
            user_content += f"\n\n## μμ¬ κ²°μ λ§€νΈλ¦μ€ λΆμ{purpose_info}\n{combos_table}"
            status.update(label="Generating ideasβ¦")
            api_messages = [
                {"role": "system", "content": sys_prompt},
                {"role": "system", "name": "category_db", "content": category_context(selected_cat)},
                {"role": "user", "content": user_content},
            ]
            # NOTE(review): the streaming call uses a fixed temperature=1;
            # the sidebar 'Diversity temperature' only shapes the matrix
            # ranges above — confirm that is intentional.
            stream = client.chat.completions.create(
                model="gpt-4.1-mini",
                messages=api_messages,
                temperature=1,
                max_tokens=MAX_TOKENS,
                top_p=1,
                stream=True
            )
            # Stream tokens into the placeholder with a trailing cursor glyph.
            for chunk in stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    stream_placeholder.markdown(full_response + "β")
            stream_placeholder.markdown(full_response)
            status.update(label="Ideas created!", state="complete")
            # Image generation: look for an image-prompt section in the answer.
            img_data = img_caption = None
            if st.session_state.generate_image and full_response:
                match = re.search(r"###\s*μ΄λ―Έμ§\s*ν둬ννΈ\s*\n+([^\n]+)", full_response, re.I)
                if not match:
                    match = re.search(r"Image\s+Prompt\s*[:\-]\s*([^\n]+)", full_response, re.I)
                if match:
                    # Strip quotes/newlines/backslashes before sending to the image model.
                    raw_prompt = re.sub(r'[\r\n"\'\\]', " ", match.group(1)).strip()
                    with st.spinner("μμ΄λμ΄ μ΄λ―Έμ§ μμ± μ€β¦"):
                        img_data, img_caption = generate_image(raw_prompt)
                        if img_data:
                            st.image(img_data, caption=f"μμ΄λμ΄ μκ°ν β {img_caption}")
            answer_msg = {"role": "assistant", "content": full_response}
            if img_data:
                answer_msg["image"] = img_data
                answer_msg["image_caption"] = img_caption
            # Mark this index so the replay loop in idea_generator_app()
            # does not render the same answer twice on the next rerun.
            st.session_state["_skip_dup_idx"] = len(st.session_state.messages)
            st.session_state.messages.append(answer_msg)
            # Download buttons for this single output.
            st.subheader("Download This Output")
            col_md, col_html = st.columns(2)
            col_md.download_button(
                "Markdown",
                data=full_response,
                file_name=f"{prompt[:30]}.md",
                mime="text/markdown"
            )
            col_html.download_button(
                "HTML",
                data=md_to_html(full_response, prompt[:30]),
                file_name=f"{prompt[:30]}.html",
                mime="text/html"
            )
            # Auto-save the full history to a timestamped JSON file.
            if st.session_state.auto_save:
                fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
                with open(fn, "w", encoding="utf-8") as fp:
                    json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
        except Exception as e:
            # Surface the error in the UI and record it in the history.
            logging.error("process_input error", exc_info=True)
            st.error(f"β οΈ μμ μ€ μ€λ₯κ° λ°μνμ΅λλ€: {e}")
            st.session_state.messages.append(
                {"role": "assistant", "content": f"β οΈ μ€λ₯: {e}"}
            )
def main():
    """Entry point: delegate to the Streamlit app builder."""
    idea_generator_app()

# Run only when executed as a script (Streamlit executes the module top-level).
if __name__ == "__main__":
    main()