import streamlit as st
from PIL import Image
import os
from groq import Groq
from dotenv import load_dotenv

# Load environment variables (expects GROQ_API_KEY in a local .env file).
load_dotenv()

# Page setup: must be the first Streamlit call in the script.
st.set_page_config(page_title="AI Trade Predictor", layout="wide")

# Global CSS overrides: light page background, dark headings, green buttons.
st.markdown("""
<style>
.main {
background-color: #f5f7fa;
}
h1 {
color: #3b3b3b;
}
.stButton > button {
color: white;
background-color: #4CAF50;
font-size: 16px;
border-radius: 10px;
padding: 10px 24px;
}
</style>
""", unsafe_allow_html=True)

st.title("📈 AI Trade Predictor")
# --- CHART UPLOAD & AI ANALYSIS ---
# Accept a candlestick-chart screenshot and ask a Groq vision model for a
# plain-language trading read (BUY/SELL/HOLD, confidence, timeframes, risks).
uploaded_file = st.file_uploader("Upload a candlestick chart image", type=["png", "jpg", "jpeg"])
if uploaded_file:
    import base64
    import io

    image = Image.open(uploaded_file)
    # NOTE: use_column_width is deprecated in Streamlit; use_container_width
    # is the supported equivalent.
    st.image(image, caption='Uploaded Chart', use_container_width=True)
    st.write("Analyzing chart using AI model...")

    # Re-encode the upload as PNG and base64 it, so it can be sent as a
    # data-URI image content part (the format Groq vision models accept).
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

    # Fail fast with a readable message instead of an opaque client error
    # when the API key is missing.
    api_key = os.environ.get("GROQ_API_KEY")
    if not api_key:
        st.error("GROQ_API_KEY is not set. Add it to your .env file.")
        st.stop()
    client = Groq(api_key=api_key)

    # Instructions only — the image travels as a separate image_url part
    # below, NOT inlined into the text (a text prompt cannot "see" base64,
    # and a full PNG would overflow the context window anyway).
    user_prompt = """
Analyze the attached candlestick chart image and provide a trading decision:
- Tell whether the action should be BUY, SELL, or HOLD.
- Give confidence level in percentage.
- Suggest timeframes (e.g., 30 min, 1 hour, 4 hour, 1 day) and what signal applies to each.
- List any risks or reasons the prediction may fail.
- Use clear language that a beginner can understand.
- Give a short summary at the end.
"""
    try:
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    # Multimodal message: one text part + one image part.
                    "content": [
                        {"type": "text", "text": user_prompt},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/png;base64,{img_str}"},
                        },
                    ],
                }
            ],
            # BUG FIX: llama-guard-4 is a content-safety classifier — it
            # cannot chat or analyze images. Use a vision-capable chat model.
            model="meta-llama/llama-4-scout-17b-16e-instruct",
        )
        response = chat_completion.choices[0].message.content
        st.success("Prediction Complete")
        st.markdown(response)
    except Exception as e:
        # Surface API/network failures in the UI rather than crashing the app.
        st.error(f"Something went wrong: {e}")
# --- FOOTER ---
# Horizontal rule followed by the attribution line.
for footer_md in ("---", "Made ❤️ by Abdullah's AI Labs"):
    st.markdown(footer_md)