import os

import requests
import streamlit as st
from dotenv import load_dotenv

# Load environment variables from the .env file
load_dotenv()

# Hugging Face Inference API configuration
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
API_KEY = os.getenv("HUGGINGFACE_API_KEY")  # never print or log this secret


# Model call function: send the prompt to the Inference API and return the generated text
def query_model(prompt):
    headers = {"Authorization": f"Bearer {API_KEY}"}
    data = {"inputs": prompt}
    response = requests.post(API_URL, headers=headers, json=data)
    if response.status_code == 200:
        result = response.json()
        # The text-generation endpoint returns a list of dicts containing "generated_text"
        if isinstance(result, list) and len(result) > 0:
            return result[0].get("generated_text", "No output generated")
        return "No valid output from API"
    return f"Error: {response.status_code}, {response.text}"


# Streamlit UI
st.title("Meta-Llama Text Generator")
st.write("Enter a prompt to generate text using the Meta-Llama-3-8B-Instruct model.")

# User input
prompt = st.text_area("Enter your prompt:", height=200)

if st.button("Generate"):
    if prompt.strip():
        st.write("Generating...")
        output = query_model(prompt)
        st.write("### Output:")
        st.write(output)
    else:
        st.warning("Please enter a valid prompt!")
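# How to run (a minimal sketch; the filename app.py is an assumption, and
# hf_xxx is a placeholder for your own token):
#   pip install streamlit requests python-dotenv
#   echo 'HUGGINGFACE_API_KEY=hf_xxx' > .env
#   streamlit run app.py
#
# Optional: the Inference API also accepts generation "parameters" and an
# "options" block. A hedged sketch of a richer payload for query_model
# (the parameter values here are illustrative, not from the original):
#
#   data = {
#       "inputs": prompt,
#       "parameters": {"max_new_tokens": 256, "temperature": 0.7},
#       "options": {"wait_for_model": True},  # block until the model loads instead of returning 503
#   }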