# app.py
import streamlit as st
from PIL import Image
import base64
import os
from groq import Groq
import io
# --- PAGE CONFIG ---
st.set_page_config(
    page_title="AI Trade Predictor",
    layout="wide",
    initial_sidebar_state="expanded",
)

st.title("📈 AI Trade Predictor")
st.markdown("Upload a candlestick chart and let AI analyze trade signals with risk and timeframe guidance.")

# --- UPLOAD SECTION ---
uploaded_file = st.file_uploader("Upload Candlestick Chart (PNG or JPG)", type=["png", "jpg", "jpeg"])

# --- GROQ SETUP ---
# Prefer the key from Streamlit secrets; fall back to the environment variable
# so the app also runs outside Streamlit Cloud.
if "GROQ_API_KEY" in st.secrets:
    groq_api_key = st.secrets["GROQ_API_KEY"]
else:
    groq_api_key = os.environ.get("GROQ_API_KEY")

client = Groq(api_key=groq_api_key)
# --- FUNCTION: Convert image to base64 string ---
def image_to_base64_str(image_file):
    """Return the file's contents encoded as a UTF-8 base64 string.

    Args:
        image_file: A binary file-like object (e.g. a Streamlit UploadedFile
            or ``io.BytesIO``).

    Returns:
        str: The base64-encoded bytes, decoded as ASCII-safe UTF-8 text.
    """
    # Rewind first: st.image() may already have consumed this stream, in
    # which case read() would return b"" and the model would receive no
    # image data at all.
    if hasattr(image_file, "seek"):
        image_file.seek(0)
    img_bytes = image_file.read()
    return base64.b64encode(img_bytes).decode("utf-8")
# --- SHORTER PROMPT TEMPLATE ---
# Instruction sent verbatim to the chat model; the uploaded chart's base64
# prefix is appended to this at request time. Kept short to limit token use.
prompt_template = """
You're an AI financial assistant. Analyze the candlestick chart image and give:
1. Signal (Buy/Sell/Hold)
2. Confidence percentage
3. Reasoning (simple language)
4. Suggest best timeframes like 30min, 1hr, 4hr
5. Explain key risks in a beginner-friendly way
Only reply in concise format.
"""
# --- MAIN PREDICTION LOGIC ---
if uploaded_file is not None:
    # use_container_width replaces the deprecated use_column_width parameter.
    st.image(uploaded_file, caption="Uploaded Chart", use_container_width=True)
    with st.spinner("Analyzing chart and generating prediction..."):
        image_base64 = image_to_base64_str(uploaded_file)
        # NOTE(review): only the first 100 base64 characters are appended as
        # plain text, so the model never actually sees the chart pixels — the
        # "analysis" is simulated. A vision-capable model/endpoint would be
        # required for genuine image input.
        full_prompt = prompt_template + "\n[This is a simulated chart image base64: {}]".format(image_base64[:100])
        try:
            chat_completion = client.chat.completions.create(
                messages=[
                    {"role": "user", "content": full_prompt}
                ],
                model="llama-3.3-70b-versatile",  # text-only model; prompt kept short to reduce token use
            )
            result = chat_completion.choices[0].message.content
            st.success("Prediction Ready")
            st.markdown(result)
        except Exception as e:
            # Surface API/network failures to the user instead of crashing the app.
            st.error(f"Something went wrong: {e}")
# --- FOOTER ---
# Render the divider and credit line.
for footer_line in ("---", "Made ❤️ by Abdullah's AI Labs"):
    st.markdown(footer_line)