import requests
import streamlit as st
from dotenv import load_dotenv
import os
# Load the .env file
load_dotenv()
# Hugging Face Inference API configuration
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
API_KEY = os.getenv("HUGGINGFACE_API_KEY")
if not API_KEY:
    print("Warning: HUGGINGFACE_API_KEY is not set; check your .env file.")
# Query the hosted model through the Inference API.
def query_model(prompt):
    headers = {"Authorization": f"Bearer {API_KEY}"}
    data = {"inputs": prompt}
    response = requests.post(API_URL, headers=headers, json=data)
    if response.status_code == 200:
        result = response.json()
        # A successful call returns a list of generation dicts.
        if isinstance(result, list) and len(result) > 0:
            return result[0].get("generated_text", "No output generated")
        return "No valid output from API"
    return f"Error: {response.status_code}, {response.text}"
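# Hypothetical variant (not wired into the UI below): a minimal sketch of
# passing generation settings through the Inference API's "parameters"
# field. The specific keys and their effect depend on the model's task;
# the defaults here are illustrative assumptions, not tuned values.
def query_model_with_params(prompt, max_new_tokens=256, temperature=0.7):
    headers = {"Authorization": f"Bearer {API_KEY}"}
    data = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_new_tokens,  # cap on generated tokens
            "temperature": temperature,        # sampling temperature
            "return_full_text": False,         # return only the completion, not the prompt
        },
    }
    response = requests.post(API_URL, headers=headers, json=data)
    if response.status_code == 200:
        result = response.json()
        if isinstance(result, list) and len(result) > 0:
            return result[0].get("generated_text", "No output generated")
        return "No valid output from API"
    return f"Error: {response.status_code}, {response.text}"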
# Streamlit UI
st.title("Meta-Llama Text Generator")
st.write("Enter a prompt to generate text using the Meta-Llama-3B model.")
# User input
prompt = st.text_area("Enter your prompt:", height=200)
if st.button("Generate"):
    if prompt.strip():
        st.write("Generating...")
        output = query_model(prompt)
        st.write("### Output:")
        st.write(output)
    else:
        st.warning("Please enter a valid prompt!")
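# To run locally (assuming this file is named app.py and streamlit,
# requests, and python-dotenv are installed):
#   streamlit run app.py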