File size: 7,873 Bytes
7f6c08d
 
 
7352851
e8b9998
7352851
 
 
 
492133f
7f6c08d
05fbc95
7f6c08d
 
 
7352851
 
 
 
 
 
 
 
72ca55a
 
ef13d6a
2521826
 
 
 
 
 
7352851
 
 
 
 
 
 
 
 
e8b9998
7352851
 
 
 
 
 
 
 
 
e1a461b
7352851
 
 
 
 
 
e8b9998
7352851
 
 
 
 
 
 
 
 
 
 
 
66a966e
 
2521826
66a966e
 
 
 
2521826
 
492133f
 
 
2521826
 
 
 
492133f
 
 
 
 
 
caadcc0
2521826
 
 
 
 
 
492133f
 
2521826
 
 
492133f
 
 
 
2521826
 
492133f
2521826
 
 
 
 
492133f
2521826
 
 
 
 
 
 
492133f
 
 
2521826
66a966e
7352851
 
66a966e
 
 
 
 
 
7352851
72ca55a
 
 
 
66a966e
 
dd1055c
66a966e
 
2521826
 
66a966e
2521826
 
 
66a966e
 
 
 
 
 
 
 
 
 
7352851
ef13d6a
dd1055c
7352851
 
66a966e
7352851
dd1055c
 
7352851
 
 
 
66a966e
7352851
dd1055c
 
7352851
 
 
 
66a966e
 
dd1055c
66a966e
2521826
 
66a966e
 
 
 
 
 
 
 
 
 
7bdb95c
2521826
 
 
 
 
 
 
 
 
7352851
 
 
66a966e
 
 
2521826
 
 
 
 
66a966e
2521826
7352851
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
import nest_asyncio
nest_asyncio.apply()

import streamlit as st
from transformers import pipeline
import torch
from gtts import gTTS
import io
import time
from streamlit.components.v1 import html
import asyncio

# Ensure a usable event loop exists for this (script-runner) thread.
# asyncio.get_event_loop() is deprecated when no loop is running (3.10+)
# and raises in newer versions; the supported pattern is to probe for a
# running loop and install a fresh one only when none exists.
try:
    asyncio.get_running_loop()
except RuntimeError:  # no loop running in this thread
    asyncio.set_event_loop(asyncio.new_event_loop())
    
# Seed session state with defaults so every rerun sees the same keys.
# Each key is created only if missing, so values persist across reruns.
_SESSION_DEFAULTS = {
    'processed_data': {
        'scenario': None,   # image caption
        'story': None,      # generated story text
        'audio': None,      # dict holding the gTTS BytesIO payload
    },
    'image_data': None,         # raw bytes of the uploaded image
    'timer_active': False,      # JS timer currently counting
    'timer_to_freeze': False,   # freeze timer on next audio playback
}
for _state_key, _state_default in _SESSION_DEFAULTS.items():
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _state_default

# Page setup — st.set_page_config must be the first Streamlit command
# executed in the script, so keep these lines ahead of any other st.* call.
st.set_page_config(page_title="Your Image to Audio Story", page_icon="🦜")
st.header("Turn Your Image to a Short Audio Story for Children")

# Model loading
@st.cache_resource
def load_models():
    """Build and cache the two Hugging Face pipelines used by the app.

    Cached by st.cache_resource, so the pipelines are constructed once per
    server process rather than on every script rerun.

    Returns:
        dict: ``"img_model"`` — image-to-text captioning pipeline;
              ``"story_model"`` — instruction-tuned text-generation pipeline.
    """
    captioner = pipeline("image-to-text", "cnmoro/tiny-image-captioning")
    storyteller = pipeline("text-generation", "Qwen/Qwen2.5-0.5B-Instruct")
    return {"img_model": captioner, "story_model": storyteller}

models = load_models()

# Processing functions
def img2text(url):
    """Return a caption string for the image file at *url*.

    The captioning pipeline returns a list of result dicts; the first
    candidate's ``generated_text`` is used.
    """
    captions = models["img_model"](url)
    return captions[0]["generated_text"]

def text2story(text):
    """Generate a short children's story from an image caption.

    Args:
        text: Scene description (the image caption).

    Returns:
        str: The assistant's generated story text.
    """
    prompt = f"Generate a 100-word story about: {text}"
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    response = models["story_model"](
        messages,
        # NOTE(review): 100 tokens is roughly 75 English words, so the
        # requested "100-word" story may be truncated — consider raising.
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7
    )[0]["generated_text"]
    # A chat pipeline returns the whole conversation (system, user, and the
    # new assistant turn). Take the LAST message rather than hard-coding
    # index 2, which silently breaks if the message list changes length.
    return response[-1]["content"]

def text2audio(story_text):
    """Synthesize *story_text* to spoken English MP3 audio via gTTS.

    Returns:
        dict: ``"audio"`` — a BytesIO positioned at 0 containing MP3 data;
              ``"sampling_rate"`` — nominal rate kept for interface
              compatibility (st.audio only consumes the MP3 bytes).
    """
    speech = gTTS(text=story_text, lang='en', slow=False)
    buffer = io.BytesIO()
    speech.write_to_fp(buffer)
    buffer.seek(0)  # rewind so callers can read from the start
    return {'audio': buffer, 'sampling_rate': 16000}

# Create fixed containers for UI elements. st.empty()/st.container() reserve
# slots top-to-bottom in this order, so later writes land in a stable layout
# regardless of when each section renders during a rerun.
image_container = st.empty()        # uploaded image preview
timer_container = st.empty()        # JS elapsed-time widget
status_container = st.empty()       # per-stage status text
progress_container = st.empty()     # progress bar
results_container = st.container()  # caption + story output

# JavaScript timer component
def active_timer():
    """Render a self-updating elapsed-time widget via an injected <script>.

    The script ticks once per second, rewriting the #timer element's text as
    MM:SS. The interval handle is stored on ``window`` under a unique id
    (also saved as ``window.currentTimerId``) so ``freeze_timer`` can later
    find and clear it from a separately injected script.

    Returns whatever streamlit.components.v1.html returns for the embed.
    """
    timer_html = """
    <div id="timer" style="font-size:16px;color:#666;margin-bottom:10px;">⏱️ Elapsed: 00:00</div>
    <script>
    // Generate a unique ID for this timer instance
    var timerId = 'timer_' + Date.now();
    
    // Initialize the timer
    var startTime = new Date().getTime();
    var timerInterval = setInterval(function() {
        var now = new Date().getTime();
        var elapsed = now - startTime;
        var minutes = Math.floor(elapsed / (1000 * 60));
        var seconds = Math.floor((elapsed % (1000 * 60)) / 1000);
        
        var timerElement = document.getElementById("timer");
        if (timerElement) {
            timerElement.innerHTML = "⏱️ Elapsed: " + 
                (minutes < 10 ? "0" : "") + minutes + ":" + 
                (seconds < 10 ? "0" : "") + seconds;
        }
    }, 1000);
    
    // Store the interval ID in window object with unique ID
    window[timerId] = timerInterval;
    window.currentTimerId = timerId;
    </script>
    """
    return html(timer_html, height=50)

def freeze_timer():
    """Stop the running JS timer and mark it visually as finished.

    Injects a script that clears the interval registered by ``active_timer``
    (looked up via ``window.currentTimerId``), turns the timer text green,
    and appends a check mark.
    """
    freeze_html = """
    <script>
    // Find the current timer ID
    if (window.currentTimerId && window[window.currentTimerId]) {
        // Stop the interval
        clearInterval(window[window.currentTimerId]);
        window[window.currentTimerId] = null;

        // Get and style the timer element
        var timerElement = document.getElementById("timer");
        if (timerElement) {
            timerElement.style.color = "#00cc00";
            timerElement.style.fontWeight = "bold";
            // Fixed mojibake: the original source stored the check mark as
            // cp1253-decoded bytes ("βœ“") instead of the intended "✓".
            timerElement.innerHTML = timerElement.innerHTML + " ✓";
        }
    }
    </script>
    """
    return html(freeze_html, height=0)

# UI components
uploaded_file = st.file_uploader("Select an Image After the Models are Loaded...")

# Re-display the previously uploaded image on every rerun so it persists
# across Streamlit script re-executions (e.g. after a button click).
if st.session_state.image_data is not None:
    image_container.image(st.session_state.image_data, caption="Uploaded Image", use_container_width=True)

# Process a newly uploaded file
if uploaded_file is not None:
    # Persist the raw bytes so the image survives Streamlit reruns
    bytes_data = uploaded_file.getvalue()
    st.session_state.image_data = bytes_data

    # Display the image
    image_container.image(bytes_data, caption="Uploaded Image", use_container_width=True)

    # Run the (expensive) pipeline only once per distinct file name
    if st.session_state.get('current_file') != uploaded_file.name:
        st.session_state.current_file = uploaded_file.name
        st.session_state.timer_active = True
        st.session_state.timer_to_freeze = False

        # Display timer
        timer_container.empty()
        timer_container.write(active_timer())

        # Progress indicators
        status_text = status_container.empty()
        progress_bar = progress_container.progress(0)

        try:
            # Save uploaded file to disk; img2text expects a file path.
            # NOTE(review): writes under the client-supplied file name,
            # unsanitized — consider a tempfile to avoid path collisions.
            with open(uploaded_file.name, "wb") as file:
                file.write(bytes_data)

            # Stage 1: Image to Text
            # (emoji below were mojibake in the original source; restored)
            status_text.markdown("**🖼️ Generating caption...**")
            progress_bar.progress(0)
            st.session_state.processed_data['scenario'] = img2text(uploaded_file.name)
            progress_bar.progress(33)

            # Stage 2: Text to Story
            status_text.markdown("**📖 Generating story...**")
            progress_bar.progress(33)
            st.session_state.processed_data['story'] = text2story(
                st.session_state.processed_data['scenario']
            )
            progress_bar.progress(66)

            # Stage 3: Story to Audio
            status_text.markdown("**🔊 Synthesizing audio...**")
            progress_bar.progress(66)
            st.session_state.processed_data['audio'] = text2audio(
                st.session_state.processed_data['story']
            )
            progress_bar.progress(100)

            # Final status
            status_text.success("**✅ Generation complete!**")

            # Mark timer to be frozen when audio is played
            st.session_state.timer_to_freeze = True

            # Show results
            with results_container:
                st.write("**Caption:**", st.session_state.processed_data['scenario'])
                st.write("**Story:**", st.session_state.processed_data['story'])

        except Exception as e:
            status_text.error(f"**❌ Error:** {str(e)}")
            progress_bar.empty()
            raise  # bare raise preserves the original traceback (raise e rewrote it)

# Re-render cached results so the caption/story persist across reruns
for _result_key, _result_label in (('scenario', '**Caption:**'), ('story', '**Story:**')):
    _result_value = st.session_state.processed_data.get(_result_key)
    if _result_value:
        with results_container:
            st.write(_result_label, _result_value)

# Audio playback
if st.button("Play Audio of the Story Generated"):
    audio_payload = st.session_state.processed_data.get('audio')
    if not audio_payload:
        # Nothing generated yet — prompt the user instead of playing
        st.warning("Please generate a story first!")
    else:
        # Re-render the image so it does not vanish on this rerun
        if st.session_state.image_data is not None:
            image_container.image(st.session_state.image_data, caption="Uploaded Image", use_container_width=True)

        # Stop the JS timer once generation has finished and playback starts
        if st.session_state.timer_active and st.session_state.timer_to_freeze:
            freeze_timer()
            st.session_state.timer_active = False

        # Play the MP3 bytes produced by text2audio
        st.audio(
            audio_payload['audio'].getvalue(),
            format="audio/mp3"
        )