Spaces:
Running
Running
Tecnhotron
committed on
Commit
·
e3729ed
0
Parent(s):
Initial commit
Browse files- Dockerfile +45 -0
- README.md +206 -0
- main.py +1315 -0
- requirements.txt +3 -0
- templates/index.html +385 -0
- templates/script.js +1618 -0
Dockerfile
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use an official Python runtime as a parent image
FROM python:3.10-slim

# Install system dependencies first (as root)
RUN apt update && apt install -y ffmpeg \
    # Clean up apt cache to reduce image size
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user 'appuser' with UID 1000 and a group 'appuser' with GID 1000
# Create a home directory for the user
RUN groupadd -r appuser -g 1000 && useradd -u 1000 -r -g appuser -m -s /bin/bash -c "App User" appuser

# Set environment variables for the user's home and update PATH
ENV HOME=/home/appuser \
    PATH=/home/appuser/.local/bin:$PATH

# Set the working directory *inside the user's home*
WORKDIR $HOME/app

# Change ownership of the working directory to the new user
# Although WORKDIR creates it if it doesn't exist, explicitly ensuring ownership is good practice
RUN chown appuser:appuser $HOME/app

# Switch to the non-root user *before* copying files and installing packages
USER appuser

# Copy the requirements file (will be owned by appuser due to USER command)
COPY --chown=appuser:appuser requirements.txt .

# Install Python packages (as appuser)
# Pip installs packages into user's site-packages or uses --user implicitly
RUN pip install --no-cache-dir --user -r requirements.txt

# Copy the rest of the application code (owned by appuser)
COPY --chown=appuser:appuser . .

# Create the directories for uploads and final output (as appuser)
# These will automatically be owned by 'appuser' because we are running as that user
RUN mkdir -p uploads output

# Make port 7860 available
EXPOSE 7860

# Set the default command to run the application (runs as appuser)
# NOTE: the entry point committed in this repository is main.py (there is no app.py).
CMD ["python", "main.py"]
README.md
ADDED
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# AVE - AI Video Editor
|
2 |
+
|
3 |
+
AVE is an advanced AI-powered video editing platform designed to streamline the editing process for portrait-mode, aesthetic videos (such as Instagram Reels). The application leverages Google’s Gemini generative AI, MoviePy for video/audio manipulation, and Flask for the web interface to provide a semi-automated video production process with customizable editing plans.
|
4 |
+
---
|
5 |
+
|
6 |
+
## Table of Contents
|
7 |
+
|
8 |
+
- [Overview](#overview)
|
9 |
+
- [Features](#features)
|
10 |
+
- [Architecture & Workflow](#architecture--workflow)
|
11 |
+
- [Installation](#installation)
|
12 |
+
- [Usage](#usage)
|
13 |
+
- [Configuration](#configuration)
|
14 |
+
- [File Structure](#file-structure)
|
15 |
+
- [Contributing](#contributing)
|
16 |
+
|
17 |
+
---
|
18 |
+
|
19 |
+
## Overview
|
20 |
+
|
21 |
+
AVE is built to help users generate short, high-quality, portrait-mode videos that are stylistically aligned with modern, aesthetic standards. It automatically analyzes the provided media files and leverages Google Gemini's 2.5 Pro to create a comprehensive JSON editing plan. This plan details which segments to include, when to apply effects, and how to arrange clips, thereby greatly reducing manual work.
|
22 |
+
|
23 |
+
### How It Works
|
24 |
+
|
25 |
+
1. **File Upload and Caching:**
|
26 |
+
Users upload various media files (videos, audios, and images). Each file is hashed and either processed or retrieved from a cache if already available, ensuring efficient re-use of uploads.
|
27 |
+
|
28 |
+
2. **Editing Plan Generation:**
|
29 |
+
With the help of Google Gemini API, the system generates a detailed JSON editing plan. The plan follows a specified structure that includes clip order, start and end times, speed adjustments, optional muting, and overall color adjustments.
|
30 |
+
|
31 |
+
3. **Video Assembly:**
|
32 |
+
The application then processes the clips based on the generated plan. Using MoviePy and FFMPEG under the hood, clips are trimmed, adjusted (including speed and volume), concatenated, and optionally overlaid with transitions and filters.
|
33 |
+
|
34 |
+
4. **Progress and Notifications:**
|
35 |
+
During video processing, users can receive real-time progress updates. Additionally, the system logs all steps for debugging and auditing purposes.
|
36 |
+
|
37 |
+
---
|
38 |
+
|
39 |
+
## Features
|
40 |
+
|
41 |
+
- **AI-Assisted Editing Plan:**
|
42 |
+
Uses Gemini AI to generate a JSON-based editing plan that incorporates advanced parameters:
|
43 |
+
- Clip selection based on aesthetics
|
44 |
+
- Logical sequencing of clips
|
45 |
+
- Optional speed adjustments and mute settings
|
46 |
+
- Background audio specification and color adjustment hints
|
47 |
+
|
48 |
+
- **Media Upload and Caching:**
|
49 |
+
- Secure file uploads with configurable size limits
|
50 |
+
- SHA256-based file hashing for deduplication and caching
|
51 |
+
- Resilient file processing with automatic timeout handling
|
52 |
+
|
53 |
+
- **Video Processing and Assembly:**
|
54 |
+
- Uses MoviePy and FFMPEG for video processing
|
55 |
+
- Supports effects like speed adjustment, volume control, and color grading
|
56 |
+
- Designed for both low-resolution previews and full HQ processing
|
57 |
+
|
58 |
+
- **Progress Monitoring and Logging:**
|
59 |
+
- Detailed progress updates throughout the processing chain
|
60 |
+
- Logging of each step (file upload, processing progress, plan generation, etc.)
|
61 |
+
- Error handling with descriptive logging for better troubleshooting
|
62 |
+
|
63 |
+
- **User Interface & Experience:**
|
64 |
+
- Modern, responsive, and mobile-friendly web interface built with Flask and a custom HTML/CSS design
|
65 |
+
- Real-time progress indicators and potential for WebSocket integration in future releases
|
66 |
+
|
67 |
+
- **Extensibility:**
|
68 |
+
- Well-defined modular structure for adding new editing effects and transitions
|
69 |
+
- Prepared for integration with asynchronous task queues (Celery/Redis) for scalability
|
70 |
+
- Possible user authentication modules and file storage enhancements in later versions
|
71 |
+
|
72 |
+
---
|
73 |
+
|
74 |
+
## Architecture & Workflow
|
75 |
+
|
76 |
+
1. **Front-End:**
|
77 |
+
- A clean and modern web interface served using Flask and rendered by Jinja2 templates.
|
78 |
+
- Supports file selection, form inputs for style description, target duration, and other processing parameters.
|
79 |
+
|
80 |
+
2. **Back-End:**
|
81 |
+
- **Flask Application:** Acts as the central point for handling API requests, file uploads, and processing triggers.
|
82 |
+
- **File Caching & Upload Worker:** Uses threading to manage file uploads and caching using a SHA256 hash.
|
83 |
+
- **Editing Plan Generation:** Interacts with the Gemini AI API to generate and validate a JSON editing plan based on the inputs.
|
84 |
+
- **Video Assembly Engine:** Processes individual clips as per the JSON plan using MoviePy and FFMPEG for final video assembly.
|
85 |
+
|
86 |
+
3. **Logging & Cleanup:**
|
87 |
+
- Comprehensive logging using Python’s logging framework.
|
88 |
+
- Cleanup functions are in place (or scheduled for future automation) for temporary files and cache entries.
|
89 |
+
|
90 |
+
---
|
91 |
+
|
92 |
+
## Installation
|
93 |
+
|
94 |
+
### Prerequisites
|
95 |
+
|
96 |
+
- Python 3.8 or above
|
97 |
+
- [FFmpeg](https://ffmpeg.org/download.html) installed and available in your system’s PATH
|
98 |
+
- A valid API key for the Gemini AI service
|
99 |
+
|
100 |
+
### Clone the Repository
|
101 |
+
|
102 |
+
```bash
|
103 |
+
git clone https://github.com/yourusername/AVE.git
|
104 |
+
cd AVE
|
105 |
+
```
|
106 |
+
|
107 |
+
### Create and Activate a Virtual Environment
|
108 |
+
|
109 |
+
```bash
|
110 |
+
python -m venv venv
|
111 |
+
source venv/bin/activate # On Windows use: venv\Scripts\activate
|
112 |
+
```
|
113 |
+
|
114 |
+
### Install Dependencies
|
115 |
+
|
116 |
+
```bash
|
117 |
+
pip install -r requirements.txt
|
118 |
+
```
|
119 |
+
|
120 |
+
### Setting Up the API Key
|
121 |
+
|
122 |
+
Edit the configuration in the source code (e.g., in `main.py` or via environment variables) and replace `"YOUR_API_KEY"` with your actual Gemini API key. You may also include other configuration details such as `MODEL_NAME`, `UPLOAD_FOLDER`, and target timeouts.
|
123 |
+
|
124 |
+
---
|
125 |
+
|
126 |
+
## Usage
|
127 |
+
|
128 |
+
### Running the Application
|
129 |
+
|
130 |
+
Once you have installed the dependencies and configured your API key, you can start the server with:
|
131 |
+
|
132 |
+
```bash
|
133 |
+
python main.py
|
134 |
+
```
|
135 |
+
|
136 |
+
This will start the Flask server on the configured host/port (defaults to `localhost:7860`).
|
137 |
+
|
138 |
+
### Uploading Files and Generating Videos
|
139 |
+
|
140 |
+
1. **Navigate to the Web Interface:**
|
141 |
+
Open your web browser and go to `http://localhost:7860`.
|
142 |
+
|
143 |
+
2. **Upload Your Media Files:**
|
144 |
+
Use the intuitive interface to upload videos, audio tracks, or images. Ensure the files meet the allowed formats (e.g., `mp4`, `mov`, `mp3`, `jpg`, etc.).
|
145 |
+
|
146 |
+
3. **Enter Editing Details:**
|
147 |
+
Fill out the form with your desired style description, target duration, and (optionally) provide a sample video to guide the AI.
|
148 |
+
|
149 |
+
4. **Submit and Monitor:**
|
150 |
+
Submit the form. The system will start processing, and you will receive updates about each stage:
|
151 |
+
- File upload
|
152 |
+
- Editing plan generation
|
153 |
+
- Video processing and assembly
|
154 |
+
|
155 |
+
5. **Preview and Download:**
|
156 |
+
After processing is complete, preview your generated video directly in the browser. Download the final edited video if satisfied with the result.
|
157 |
+
|
158 |
+
---
|
159 |
+
|
160 |
+
## Configuration
|
161 |
+
|
162 |
+
- **File Uploads:**
|
163 |
+
Configurable settings include allowed file types and maximum file sizes.
|
164 |
+
|
165 |
+
- **Timeouts & Caching:**
|
166 |
+
- `MAX_WAIT_TIME` controls how long the application waits for file processing.
|
167 |
+
- `CACHE_EXPIRY_SECONDS` determines the cache duration for uploaded files.
|
168 |
+
|
169 |
+
- **Server Settings:**
|
170 |
+
The Flask application configuration (like `SERVER_NAME`) is set for both local development and production deployment. Adjust as required when deploying behind proxy servers or on cloud platforms.
|
171 |
+
|
172 |
+
- **Logging:**
|
173 |
+
Adjust logging levels in the configuration. The current setup logs INFO and ERROR levels to give detailed runtime feedback while processing files.
|
174 |
+
|
175 |
+
---
|
176 |
+
|
177 |
+
## File Structure
|
178 |
+
|
179 |
+
```
|
180 |
+
AVE/
|
181 |
+
├── repos/
|
182 |
+
│ └── AVE/
|
183 |
+
│ ├── main.py # Main Flask application and processing code
|
184 |
+
│ ├── requirements.txt # Project dependencies
|
185 |
+
│ └── ... # Other Python modules and helper functions
|
186 |
+
├── uploads/ # Directory for user-uploaded files
|
187 |
+
├── output/ # Directory for generated and final video output
|
188 |
+
├── templates/
|
189 |
+
│ └── index.html # Main web interface template
|
190 |
+
└── README.md # This file
|
191 |
+
```
|
192 |
+
|
193 |
+
Each section of the code is modularized with clear responsibilities:
|
194 |
+
- `main.py` handles the overall video editing process, including file upload, plan generation, and video assembly.
|
195 |
+
- Templates and static files deliver a modern, responsive UI.
|
196 |
+
- Helper functions manage caching, progress updates, and error handling.
|
197 |
+
|
198 |
+
---
|
199 |
+
|
200 |
+
## Contributing
|
201 |
+
|
202 |
+
Contributions are welcome! If you want to contribute new features, improvements, or fixes:
|
203 |
+
|
204 |
+
1. **Fork the Repository:** Create your own fork and clone it locally.
|
205 |
+
2. **Create a Branch:** Use feature-specific branches (e.g., `feature-websocket-notifications`).
|
206 |
+
3. **Submit a Pull Request:** Provide detailed explanations of your changes and new features.
|
main.py
ADDED
@@ -0,0 +1,1315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
import shutil
|
3 |
+
import subprocess
|
4 |
+
import tempfile
|
5 |
+
import google.generativeai as genai
|
6 |
+
from google.generativeai.types import GenerationConfig, File # Import File type
|
7 |
+
from moviepy import *
|
8 |
+
from moviepy.video.fx import *
|
9 |
+
from moviepy.audio.fx.MultiplyVolume import MultiplyVolume
|
10 |
+
# --- ADDED IMPORT ---
|
11 |
+
from moviepy.audio.AudioClip import CompositeAudioClip
|
12 |
+
|
13 |
+
import os, uuid
|
14 |
+
import time
|
15 |
+
import mimetypes
|
16 |
+
import json
|
17 |
+
import threading
|
18 |
+
from pathlib import Path
|
19 |
+
from flask import Flask, render_template, request, url_for, send_file, jsonify
|
20 |
+
from werkzeug.utils import secure_filename
|
21 |
+
import traceback
|
22 |
+
import logging
|
23 |
+
from typing import Dict, Any, List, Optional # Optional added
|
24 |
+
import hashlib # For file hashing
|
25 |
+
|
26 |
+
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Configuration
# Read the API key from the environment rather than hardcoding a secret in
# source control. The literal placeholder is kept as a fallback so setups
# that edit this line directly keep working unchanged.
API_KEY = os.environ.get("GEMINI_API_KEY", "YOUR_API_KEY")
# Updated Model Name as requested - DO NOT CHANGE
MODEL_NAME = "gemini-2.5-pro-exp-03-25"
UPLOAD_FOLDER = 'uploads'
FINAL_OUTPUT_FOLDER = 'output'
ALLOWED_EXTENSIONS = {'mp4', 'mov', 'avi', 'mkv', 'mp3', 'wav', 'jpg', 'jpeg', 'png'}
MAX_UPLOAD_SIZE = 1 * 1024 * 1024 * 1024  # 1 GB
MAX_WAIT_TIME = 300  # seconds for Gemini file processing

# --- Global State ---
# request_id -> latest progress snapshot (stage/message/error/result/timestamp)
progress_updates: Dict[str, Dict[str, Any]] = {}
# request_id -> background worker thread
background_tasks: Dict[str, threading.Thread] = {}
# Track intermediate files per request (still needed for potential manual cleanup)
intermediate_files_registry: Dict[str, List[str]] = {}

# --- Feature 2: File Caching ---
# Cache structure: { file_hash: {'file': GeminiFileObject, 'timestamp': float} }
gemini_file_cache: Dict[str, Dict[str, Any]] = {}
cache_lock = threading.Lock()
CACHE_EXPIRY_SECONDS = 24 * 60 * 60  # Cache entries expire after 24 hours (adjust as needed)

# --- Feature 3: HQ Generation ---
# Stores details needed to re-run a request for HQ
# Structure: { request_id: {'form_data': Dict, 'file_paths': Dict} }
request_details_cache: Dict[str, Dict] = {}
# -------------------------

# Initialize API
genai.configure(api_key=API_KEY)

# Initialize Flask app
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['FINAL_OUTPUT_FOLDER'] = FINAL_OUTPUT_FOLDER
app.config['MAX_CONTENT_LENGTH'] = MAX_UPLOAD_SIZE
# Ensure SERVER_NAME is set for url_for generation in background threads
app.config['SERVER_NAME'] = 'localhost:7860' # Or your actual server name/IP if deployed

# Create necessary directories
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(FINAL_OUTPUT_FOLDER, exist_ok=True)
71 |
+
|
72 |
+
def allowed_file(filename):
    """Return True when `filename` has one of the extensions in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
75 |
+
|
76 |
+
# --- Feature 2: File Hashing Helper ---
def get_file_hash(file_path: str) -> Optional[str]:
    """Compute the SHA256 hex digest of the file at `file_path`.

    Returns None when the file does not exist or cannot be read; read
    failures are logged instead of raised so callers can treat a missing
    hash like a cache miss.
    """
    if not os.path.exists(file_path):
        return None
    digest = hashlib.sha256()
    try:
        with open(file_path, 'rb') as handle:
            # Stream in 4 KiB chunks to keep memory flat for large media files.
            for chunk in iter(lambda: handle.read(4096), b''):
                digest.update(chunk)
        return digest.hexdigest()
    except Exception as e:
        logger.error(f"Error calculating hash for {file_path}: {e}")
        return None
# ------------------------------------
94 |
+
|
95 |
+
def update_progress(request_id: str, stage: str, message: str, error: str | None = None, result: Dict | None = None):
    """Record the latest progress snapshot for `request_id` and log it.

    Overwrites any previous snapshot for the request; `error` and `result`
    are reset to None unless explicitly supplied.
    """
    entry = progress_updates.setdefault(request_id, {})
    entry['stage'] = stage
    entry['message'] = message
    entry['error'] = error
    entry['result'] = result
    entry['timestamp'] = time.time()
    logger.info(f"Progress Update [{request_id}] - Stage: {stage}, Message: {message}")
    if error:
        logger.error(f"Progress Error [{request_id}]: {error}")
107 |
+
|
108 |
+
# --- Feature 1: Modified Cleanup ---
# This function is kept for potential manual cleanup or future use,
# but it's no longer called automatically in the main flow to delete source files.
def cleanup_intermediate_files(request_id: str):
    """Remove temporary files created during processing for a specific request."""
    files_to_remove = intermediate_files_registry.pop(request_id, [])
    if not files_to_remove:
        logger.info(f"No intermediate files registered for cleanup for request ID: {request_id}.")
        return

    logger.info(f"Cleaning up {len(files_to_remove)} intermediate files for request ID: {request_id}...")
    removed_count = 0
    failed_count = 0
    for file_path in files_to_remove:
        try:
            if not os.path.exists(file_path):
                # Already gone (or never created) — note it but don't count as a failure.
                logger.warning(f"Intermediate file not found for removal: {file_path} [{request_id}]")
                continue
            os.remove(file_path)
            logger.info(f"Removed intermediate file: {file_path} [{request_id}]")
            removed_count += 1
        except Exception as e:
            logger.error(f"Failed to remove intermediate file {file_path} [{request_id}]: {e}")
            failed_count += 1
    logger.info(f"Intermediate file cleanup for {request_id}: {removed_count} removed, {failed_count} failed.")
# ---------------------------------
|
134 |
+
|
135 |
+
def generate_output_path(base_folder, original_filename, suffix):
    """Generate a unique output path for a file."""
    base, ext = os.path.splitext(original_filename)
    stem = os.path.basename(base)
    # Replace anything that is not alphanumeric, '_' or '-' so the name is filesystem-safe.
    safe_chars = [ch if ch.isalnum() or ch in ('_', '-') else '_' for ch in stem]
    safe_base = ''.join(safe_chars)
    # Millisecond timestamp keeps names from colliding across quick successive calls.
    timestamp_ms = int(time.time() * 1000)
    os.makedirs(base_folder, exist_ok=True)
    # Intermediate files are now tracked per request ID
    return os.path.join(base_folder, f"{safe_base}_{suffix}_{timestamp_ms}{ext}")
|
144 |
+
|
145 |
+
# --- Feature 2: Modified Upload Worker (Now handles caching result) ---
def upload_thread_worker(request_id: str, file_path: str, file_hash: str, upload_results: Dict[str, Any], upload_errors: Dict[str, str]):
    """Uploads a file to Gemini API, storing results/errors. Updates cache on success.

    Runs on a background thread, so outcomes are communicated through the two
    shared dicts rather than return values or exceptions:
      - upload_results[file_path] -> ACTIVE Gemini File object on success
      - upload_errors[file_path]  -> error message string on any failure
    On success the file is also stored in `gemini_file_cache` keyed by
    `file_hash` (write guarded by `cache_lock`).

    Args:
        request_id: Identifier used for log correlation only.
        file_path: Local path of the file to upload.
        file_hash: SHA256 hex digest of the file, used as the cache key.
        upload_results: Shared dict collecting successful uploads.
        upload_errors: Shared dict collecting error messages.

    NOTE(review): upload_results/upload_errors are written from multiple
    threads without a lock — presumably relying on CPython dict-assignment
    atomicity; confirm this is intentional.
    """
    global gemini_file_cache, cache_lock # Access global cache and lock

    path = Path(file_path)
    if not path.exists():
        error_msg = f"File not found: {file_path}"
        logger.error(f"Upload Error [{request_id}]: {error_msg}")
        upload_errors[file_path] = error_msg
        return

    logger.info(f"Starting upload thread for [{request_id}]: {file_path} (Hash: {file_hash[:8]}...)")
    uploaded_file = None # Initialize
    try:
        mime_type, _ = mimetypes.guess_type(file_path)
        if mime_type is None:
            mime_type = "application/octet-stream" # Default fallback
            logger.warning(f"Could not guess mime type for {file_path}. Using {mime_type}.")

        uploaded_file = genai.upload_file(path=path, mime_type=mime_type)
        logger.info(f"Upload initiated for [{request_id}]: {file_path}. URI: {uploaded_file.uri}, Name: {uploaded_file.name}")

        # Poll the server-side processing state every 5s until the file leaves
        # PROCESSING or MAX_WAIT_TIME elapses.
        logger.info(f"Waiting for processing of {uploaded_file.name} [{request_id}]...")
        start_time = time.time()
        while uploaded_file.state.name == "PROCESSING":
            if time.time() - start_time > MAX_WAIT_TIME:
                error_msg = f"File processing timed out after {MAX_WAIT_TIME}s for {uploaded_file.name}"
                logger.error(f"Upload Error [{request_id}]: {error_msg}")
                upload_errors[file_path] = error_msg
                # --- Feature 1: No deletion on timeout ---
                # try:
                #     genai.delete_file(uploaded_file.name)
                #     logger.info(f"Deleted timed-out file {uploaded_file.name} [{request_id}]")
                # except Exception as e:
                #     logger.error(f"Failed to delete timed-out file {uploaded_file.name} [{request_id}]: {e}")
                # -----------------------------------------
                return # Exit thread on timeout
            time.sleep(5)
            uploaded_file = genai.get_file(name=uploaded_file.name)
            logger.info(f"File {uploaded_file.name} state [{request_id}]: {uploaded_file.state.name}")

        if uploaded_file.state.name == "ACTIVE":
            upload_results[file_path] = uploaded_file
            logger.info(f"File {uploaded_file.name} is ACTIVE [{request_id}].")
            # --- Feature 2: Update Cache ---
            # Cache the ACTIVE file object so later requests with the same
            # content hash can skip re-uploading.
            with cache_lock:
                gemini_file_cache[file_hash] = {'file': uploaded_file, 'timestamp': time.time()}
                logger.info(f"Added/Updated Gemini file cache for hash {file_hash[:8]}... [{request_id}]")
            # -----------------------------
        else:
            error_msg = f"File processing failed for {uploaded_file.name}. State: {uploaded_file.state.name}"
            logger.error(f"Upload Error [{request_id}]: {error_msg}")
            upload_errors[file_path] = error_msg
            # --- Feature 1: No deletion on failure ---
            # try:
            #     genai.delete_file(uploaded_file.name)
            #     logger.info(f"Deleted failed file {uploaded_file.name} [{request_id}]")
            # except Exception as e:
            #     logger.error(f"Failed to delete failed file {uploaded_file.name} [{request_id}]: {e}")
            # ---------------------------------------

    except Exception as e:
        error_msg = f"Upload/processing failed for {file_path}: {e}"
        logger.error(f"Upload Error [{request_id}]: {error_msg}")
        traceback.print_exc()
        upload_errors[file_path] = error_msg
        # --- Feature 1: No deletion on exception ---
        # if uploaded_file and uploaded_file.name:
        #     try:
        #         genai.delete_file(uploaded_file.name)
        #         logger.info(f"Attempted deletion of file {uploaded_file.name} after exception [{request_id}]")
        #     except Exception as del_e:
        #         logger.error(f"Failed to delete file {uploaded_file.name} after exception [{request_id}]: {del_e}")
        # -----------------------------------------
# ------------------------------------
|
221 |
+
|
222 |
+
def generate_editing_plan(
    request_id: str,
    uploaded_file_references: Dict[str, File],  # local path -> Gemini File object (uploaded or cache hit)
    source_media_paths: dict,  # maps 'videos'/'audios'/'images' -> list of local paths
    style_description: str,
    sample_video_path: str | None,
    target_duration: float
):
    """Generates a JSON editing plan using the Gemini API.

    Builds a multimodal prompt (text + Gemini File objects), asks the model
    for a JSON plan (clips, background_audio, optional color_adjustments),
    parses/validates it, and maps the model's source keys (video_1, audio_1,
    ...) back to local file paths as 'source_path' entries.

    Args:
        request_id: Identifier used for progress updates and log correlation.
        uploaded_file_references: Only paths present here are offered to the
            model; paths that failed upload are mentioned as unusable.
        source_media_paths: Local media grouped by type.
        style_description: Free-text style request from the user.
        sample_video_path: Optional local path of a style-reference video.
        target_duration: Desired output length in seconds.

    Returns:
        {'status': 'success', 'plan': dict} on success, or
        {'status': 'error', 'message': str} on any failure (also reported
        via update_progress with state FAILED).
    """
    update_progress(request_id, "PLANNING", "Analyzing media and generating editing plan...")
    logger.info(f"Generating Editing Plan with Gemini [{request_id}]")
    logger.info(f"Model: {MODEL_NAME}")
    logger.info(f"Source Media Paths: {source_media_paths}")
    logger.info(f"Style Description: '{style_description}'")
    logger.info(f"Sample Video Path: {sample_video_path}")
    logger.info(f"Target Duration: {target_duration}s")

    prompt_parts = [
        "You are an AI video editor assistant specializing in creating short, aesthetic, portrait-mode videos (like Instagram Reels). Your task is to analyze the provided media files and generate a detailed JSON plan for creating a video.",
        f"The user wants a video approximately {target_duration:.1f} seconds long, suitable for portrait display (e.g., 9:16 aspect ratio).",
        f"The desired style is described as: '{style_description}'. Pay close attention to the request for *aesthetic and beautiful* shots only.",
    ]

    # Add sample video if available and successfully uploaded/cached
    if sample_video_path and sample_video_path in uploaded_file_references:
        sample_file = uploaded_file_references[sample_video_path]
        prompt_parts.extend([
            "\nHere is a sample video demonstrating the desired style:",
            sample_file,  # Pass the Gemini File object directly
        ])
    elif sample_video_path:
        # A sample was given but its upload failed: tell the model to rely on text only
        prompt_parts.append(f"\n(Note: A style sample video was provided '{os.path.basename(sample_video_path)}' but failed to upload/process or was not found in cache, rely on the text description.)")

    prompt_parts.append("\nAvailable source media files (use these exact paths/keys in your plan):")
    media_index = 1
    source_keys = {}  # Map generated key (e.g., video_1) back to local path

    # Add videos to prompt (key text line followed by the File object itself)
    for path in source_media_paths.get('videos', []):
        if path in uploaded_file_references:
            key = f"video_{media_index}"
            source_keys[key] = path
            file_obj = uploaded_file_references[path]  # Get the Gemini File object
            prompt_parts.append(f"- {key}: (Video file '{os.path.basename(path)}')")
            prompt_parts.append(file_obj)  # Pass the Gemini File object
            media_index += 1
        else:
            prompt_parts.append(f"- (Video file '{os.path.basename(path)}' failed upload/processing/cache, cannot use)")

    # Add audio files to prompt
    audio_index = 1
    for path in source_media_paths.get('audios', []):
        if path in uploaded_file_references:
            key = f"audio_{audio_index}"
            source_keys[key] = path
            file_obj = uploaded_file_references[path]
            prompt_parts.append(f"- {key}: (Audio file '{os.path.basename(path)}')")
            prompt_parts.append(file_obj)
            audio_index += 1
        else:
            prompt_parts.append(f"- (Audio file '{os.path.basename(path)}' failed upload/processing/cache, cannot use)")

    # Add image files to prompt
    image_index = 1
    for path in source_media_paths.get('images', []):
        if path in uploaded_file_references:
            key = f"image_{image_index}"
            source_keys[key] = path
            file_obj = uploaded_file_references[path]
            prompt_parts.append(f"- {key}: (Image file '{os.path.basename(path)}')")
            prompt_parts.append(file_obj)
            image_index += 1
        else:
            prompt_parts.append(f"- (Image file '{os.path.basename(path)}' failed upload/processing/cache, cannot use)")

    # Final instruction block: JSON schema the model must follow (doubled
    # braces are f-string escapes for literal braces in the schema)
    prompt_parts.append(f"""
Instruction: Create a JSON object representing the editing plan. The JSON object should strictly follow this structure:
{{
"description": "A brief text description of the overall video edit.",
"clips": [
{{
"source": "string (key of the source video file, e.g., 'video_1')",
"start_time": "float (start time in seconds within the source video)",
"end_time": "float (end time in seconds within the source video)",
"order": "integer (sequence number, starting from 1)",
"mute": "boolean (optional, default false, set to true to mute this clip's audio)",
"speed_factor": "float (optional, default 1.0. e.g., 0.5 for slow-mo, 2.0 for fast-forward)"
}}
// ... more clip objects
],
"background_audio": {{
"source": "string (key of the source audio file, e.g., 'audio_1', or null if no background audio)",
"volume_factor": "float (e.g., 0.7, or null if no audio)"
}},
"color_adjustments": {{ // Optional overall color adjustment
"brightness": "float (optional, e.g., 0.1 to add brightness, -0.1 to reduce. Default 0)",
"contrast": "float (optional, e.g., 1.1 for 10% more contrast, 0.9 for 10% less. Default 1.0)"
}}
}}

Guidelines:
- Select ONLY short, relevant, and HIGHLY AESTHETIC/BEAUTIFUL segments from the source videos that match the style description and sample (if provided). Prioritize quality over quantity, especially for portrait display.
- The total duration of the combined clips (considering speed adjustments) should be close to the target duration ({target_duration:.1f}s).
- Order the clips logically using the 'order' field.
- Use the optional 'speed_factor' field on clips to suggest slow-motion or fast-forward where it enhances the energetic/aesthetic style. Keep factors reasonable (e.g., 0.25 to 4.0).
- Optionally suggest overall 'color_adjustments' (brightness, contrast) if it fits the mood (e.g., slightly brighter and more contrast for an energetic feel). Keep adjustments subtle.
- Respond ONLY with the JSON object, nothing else. Ensure the JSON is valid.
""")

    # Request native JSON output so the model should skip ``` fences
    model = genai.GenerativeModel(
        MODEL_NAME,
        generation_config=GenerationConfig(
            response_mime_type="application/json",
            temperature=0.5  # Adjust temperature as needed
        )
    )

    raw_llm_output = None
    json_plan_text = None

    try:
        logger.info(f"Sending Prompt to Gemini for JSON Plan [{request_id}]")
        # Ensure all parts are strings or File objects
        valid_prompt_parts = []
        for part in prompt_parts:
            if isinstance(part, (str, File)):
                valid_prompt_parts.append(part)
            else:
                logger.warning(f"Skipping invalid type in prompt_parts: {type(part)} [{request_id}]")

        response = model.generate_content(valid_prompt_parts)
        raw_llm_output = response.text
        logger.info(f"Received Raw LLM Output (length: {len(raw_llm_output)}) [{request_id}]")

        # Attempt to directly parse as JSON first, as per response_mime_type
        try:
            plan = json.loads(raw_llm_output)
            json_plan_text = raw_llm_output  # Store for potential error logging
            logger.info(f"Successfully parsed raw response as JSON [{request_id}].")
        except json.JSONDecodeError:
            logger.warning(f"Direct JSON parsing failed [{request_id}]. Trying regex extraction...")
            # Fallback to regex if direct parsing fails (e.g., if model includes ``` markers despite mime type)
            match = re.search(r"```(?:json)?\s*(.*?)\s*```", raw_llm_output, re.DOTALL | re.IGNORECASE)
            if match:
                json_plan_text = match.group(1).strip()
                logger.info(f"Extracted JSON block using regex [{request_id}].")
                plan = json.loads(json_plan_text)
            else:
                logger.error(f"Response is not valid JSON and does not contain ```json ... ``` markers [{request_id}].")
                raise ValueError("LLM response is not valid JSON and could not be extracted.")

        # --- Validation: required top-level structure ---
        if not isinstance(plan, dict):
            raise ValueError("LLM response parsed, but it is not a JSON object (dictionary).")
        if 'clips' not in plan or not isinstance(plan['clips'], list):
            raise ValueError("Parsed JSON plan missing 'clips' list or it's not a list.")
        if 'background_audio' not in plan or not isinstance(plan['background_audio'], dict):
            raise ValueError("Parsed JSON plan missing 'background_audio' object or it's not an object.")
        # Optional: Validate color_adjustments structure if present
        if 'color_adjustments' in plan and not isinstance(plan['color_adjustments'], dict):
            raise ValueError("Parsed JSON plan has 'color_adjustments' but it's not an object.")

        logger.info(f"Gemini Plan Extracted and Parsed Successfully [{request_id}]")

        # --- Map source keys back to local paths (unknown keys are fatal) ---
        for clip in plan.get('clips', []):
            key = clip.get('source')
            if key in source_keys:
                clip['source_path'] = source_keys[key]  # Add local path to the clip info
            else:
                available_keys_str = ", ".join(source_keys.keys())
                raise ValueError(f"Invalid source key '{key}' found in plan['clips']. Available keys: [{available_keys_str}]")

        bg_audio = plan.get('background_audio', {})
        bg_audio_key = bg_audio.get('source')
        if bg_audio_key:
            if bg_audio_key in source_keys:
                plan['background_audio']['source_path'] = source_keys[bg_audio_key]  # Add local path
            else:
                available_keys_str = ", ".join(source_keys.keys())
                raise ValueError(f"Invalid source key '{bg_audio_key}' found in plan['background_audio']. Available keys: [{available_keys_str}]")

        logger.info(f"Source keys mapped successfully [{request_id}].")
        update_progress(request_id, "PLANNING", "Editing plan generated successfully.")
        return {'status': 'success', 'plan': plan}

    except json.JSONDecodeError as e:
        # Parsing of the (possibly regex-extracted) text failed
        error_msg = f"Failed to parse AI's plan (invalid JSON): {e}"
        logger.error(f"{error_msg} [{request_id}]")
        logger.error(f"Text attempted for JSON parsing:\n{json_plan_text if json_plan_text is not None else 'N/A'}")
        logger.error(f"Original Raw LLM Response was:\n{raw_llm_output if raw_llm_output is not None else 'Response not received'}")
        update_progress(request_id, "FAILED", "Error generating plan.", error=error_msg)
        return {'status': 'error', 'message': error_msg}

    except ValueError as e:
        # Structural validation or key-mapping failures raised above
        error_msg = f"AI plan has invalid structure or processing failed: {e}"
        logger.error(f"{error_msg} [{request_id}]")
        logger.error(f"Original Raw LLM Response was:\n{raw_llm_output if raw_llm_output is not None else 'Response not received'}")
        update_progress(request_id, "FAILED", "Error generating plan.", error=error_msg)
        return {'status': 'error', 'message': error_msg}

    except Exception as e:
        # API/network/other unexpected failures
        error_msg = f"An unexpected error occurred during Gemini interaction or plan processing: {e}"
        logger.error(f"{error_msg} [{request_id}]")
        traceback.print_exc()
        logger.error(f"Original Raw LLM Response was:\n{raw_llm_output if raw_llm_output is not None else 'Response not received'}")
        update_progress(request_id, "FAILED", "Error generating plan.", error=error_msg)
        return {'status': 'error', 'message': error_msg}
+
|
431 |
+
# --- MODIFIED FUNCTION SIGNATURE ---
def execute_editing_plan(request_id: str, plan: dict, output_filename: str, is_preview: bool = False, mute_all_clips: bool = False) -> dict:
    """
    Executes the editing plan by writing individual processed clips to temp files
    and then concatenating them using FFMPEG for potentially faster processing.

    Pipeline:
      1. Cut / resize / speed-adjust each planned clip and write it to a temp MP4.
      2. Concatenate the temp MP4s with the FFMPEG concat demuxer ('-c copy').
      3. If background audio or colour adjustments are requested, re-encode once
         more via MoviePy; otherwise move the concatenated file into place.

    Args:
        request_id: Identifier used for progress updates and log correlation.
        plan: Editing plan ('clips', 'background_audio', 'color_adjustments');
            clips must already carry a local 'source_path'.
        output_filename: Final output name (sanitized via secure_filename).
        is_preview: Use fast presets / low bitrates when True.
        mute_all_clips: Strip audio from every clip regardless of each clip's
            own 'mute' flag.

    Returns:
        {'status': 'success', 'output_path': str} on success, or
        {'status': 'error', 'message': str, ...} on failure.
    """
    update_progress(request_id, "STARTING", f"Starting video assembly {'(Preview Mode)' if is_preview else ''} (Global Mute: {mute_all_clips}, Method: Temp Files)...")
    logger.info(f"Executing Editing Plan [{request_id}] {'(Preview Mode)' if is_preview else ''} (Global Mute: {mute_all_clips}, Method: Temp Files)")
    logger.info(f"Output filename: {output_filename}")

    clips_data = sorted(plan.get('clips', []), key=lambda x: x.get('order', 0))
    if not clips_data:
        error_msg = 'No clips found in the editing plan.'
        update_progress(request_id, "FAILED", "Video assembly failed.", error=error_msg)
        return {'status': 'error', 'message': error_msg}

    temp_dir = None
    temp_clip_paths = []
    concat_list_path = None
    target_resolution = None  # Set from the first valid clip; expected portrait
    target_fps = None  # Set from the first valid clip; kept consistent for '-c copy'

    try:
        # Create a dedicated temporary directory for this request
        temp_dir = tempfile.mkdtemp(prefix=f"autovideo_{request_id}_")
        logger.info(f"Created temporary directory: {temp_dir} [{request_id}]")

        num_clips = len(clips_data)
        for i, clip_info in enumerate(clips_data):
            source_path = clip_info.get('source_path')
            start_time = clip_info.get('start_time')
            end_time = clip_info.get('end_time')
            order = clip_info.get('order')
            mute_from_plan = clip_info.get('mute', False)
            speed_factor = clip_info.get('speed_factor', 1.0)

            update_progress(request_id, "PROCESSING_CLIP", f"Processing clip {i+1}/{num_clips} (Order: {order})...")

            # --- Basic Validation ---
            # BUGFIX: validate BEFORE the detailed log line below. The original
            # formatted basename(source_path) and start_time:.2f first, which
            # raised TypeError on a clip with missing fields and aborted the
            # whole assembly instead of skipping just that clip.
            if not all([source_path, isinstance(start_time, (int, float)), isinstance(end_time, (int, float))]):
                logger.error(f"Missing or invalid data for clip {order}. Skipping. [{request_id}]")
                continue
            if start_time >= end_time:
                logger.warning(f"Start time ({start_time:.2f}s) >= end time ({end_time:.2f}s) for clip {order}. Skipping. [{request_id}]")
                continue
            if not os.path.exists(source_path):
                logger.error(f"Source video file not found: {source_path} for clip {order}. Skipping. [{request_id}]")
                continue
            if not isinstance(speed_factor, (int, float)) or speed_factor <= 0:
                logger.warning(f"Invalid speed_factor ({speed_factor}) for clip {order}. Using 1.0. [{request_id}]")
                speed_factor = 1.0
            # --- End Validation ---

            logger.info(f"Processing clip {order} [{request_id}]: Source='{os.path.basename(source_path)}', Start={start_time:.2f}s, End={end_time:.2f}s, PlanMute={mute_from_plan}, Speed={speed_factor:.2f}x")

            # Reset per-iteration so the finally block cannot close stale
            # objects left over from a previous loop iteration.
            video = None
            resized_clip = None
            subclip = None
            try:
                video = VideoFileClip(source_path)

                # --- Determine Target Resolution and FPS from first valid clip ---
                if target_resolution is None:
                    temp_res = video.size
                    temp_fps = video.fps
                    if temp_res[0] > temp_res[1]:  # Landscape detected
                        logger.warning(f"First clip ({order}) appears landscape ({temp_res[0]}x{temp_res[1]}). Applying portrait rotation fix by resizing.")
                        # Swap width/height so the target is portrait
                        target_resolution = (temp_res[1], temp_res[0])
                    else:  # Already portrait
                        target_resolution = temp_res

                    # Basic FPS sanity check; fall back to 30 fps
                    if not isinstance(temp_fps, (int, float)) or temp_fps <= 0:
                        logger.warning(f"Could not determine valid FPS ({temp_fps}) from first clip {order}. Defaulting to 30. [{request_id}]")
                        target_fps = 30.0
                    else:
                        target_fps = temp_fps

                    logger.info(f"Target resolution set to {target_resolution[0]}x{target_resolution[1]} [{request_id}].")
                    logger.info(f"Target FPS set to {target_fps:.2f} [{request_id}].")

                # --- Apply Portrait Fix / Resize to Target ---
                resized_clip = video  # Start with original
                if video.size[0] > video.size[1]:  # Landscape source
                    logger.warning(f"Clip {order} source is landscape ({video.size[0]}x{video.size[1]}). Resizing to target portrait {target_resolution}. [{request_id}]")
                    resized_clip = video.resized(target_resolution)
                elif video.size != target_resolution:  # Portrait source but wrong size
                    logger.warning(f"Clip {order} resolution {video.size} differs from target {target_resolution}. Resizing. [{request_id}]")
                    resized_clip = video.resized(target_resolution)

                vid_duration = resized_clip.duration
                if vid_duration is None:
                    logger.warning(f"Could not read duration for resized clip {order}. Skipping. [{request_id}]")
                    continue

                # Clamp the requested window into the clip's real duration
                start_time = max(0, min(start_time, vid_duration))
                end_time = max(start_time, min(end_time, vid_duration))

                if start_time >= end_time:
                    logger.warning(f"Clamped start time ({start_time:.2f}s) >= end time ({end_time:.2f}s) for clip {order}. Skipping. [{request_id}]")
                    continue

                original_subclip_duration = end_time - start_time
                if original_subclip_duration <= 0.01:  # Need a small duration
                    logger.warning(f"Calculated clip duration ({original_subclip_duration:.2f}s) is too short for clip {order}. Skipping. [{request_id}]")
                    continue

                logger.info(f"Cutting clip {order} from {start_time:.2f}s to {end_time:.2f}s [{request_id}]")
                subclip = resized_clip.subclipped(start_time, end_time)

                # --- MoviePy 2.x effects (speed first) ---
                effects_to_apply = []
                if speed_factor != 1.0:
                    logger.info(f"Applying speed factor {speed_factor:.2f}x to clip {order} [{request_id}]")
                    effects_to_apply.append(MultiplySpeed(factor=speed_factor))
                if effects_to_apply:
                    subclip = subclip.with_effects(effects_to_apply)
                    if subclip.duration is not None:
                        logger.info(f"Clip {order} duration after effects: {subclip.duration:.2f}s [{request_id}]")
                    else:
                        logger.warning(f"Clip {order} duration unknown after effects. [{request_id}]")

                # BUGFIX: actually honor the mute flags -- the original computed
                # mute_from_plan / mute_all_clips but never removed the audio.
                if mute_all_clips or mute_from_plan:
                    logger.info(f"Muting clip {order} (plan mute={mute_from_plan}, global mute={mute_all_clips}) [{request_id}]")
                    subclip = subclip.without_audio()
                elif subclip.audio is None:
                    logger.warning(f"Clip {order} was supposed to have audio but doesn't after processing. It will be silent in the temp file. [{request_id}]")
                    # No need to explicitly add silence, ffmpeg handles missing audio streams

                # --- Write Processed Subclip to Temporary File ---
                temp_filename = f"clip_{order:03d}_{uuid.uuid4().hex[:8]}.mp4"
                temp_output_path = os.path.join(temp_dir, temp_filename)

                # Consistent encode settings across temp files are crucial for
                # the later 'ffmpeg -c copy' concat to work without re-encoding.
                temp_write_kwargs = {
                    "codec": "libx264",  # Standard codec
                    "audio_codec": "aac",  # Standard codec
                    "temp_audiofile": os.path.join(temp_dir, f"temp_audio_{order}.m4a"),  # Avoid conflicts
                    "remove_temp": True,
                    "fps": target_fps,  # Ensure consistent FPS
                    "logger": "bar",
                    # Preview mode trades quality for speed on individual writes
                    "preset": 'ultrafast' if is_preview else 'medium',
                    "bitrate": '1000k' if is_preview else '5000k'
                }

                update_progress(request_id, "WRITING_TEMP", f"Writing temp clip {i+1}/{num_clips} (Order: {order})...")
                logger.info(f"Writing temporary clip {order} to {temp_output_path} with FPS={target_fps:.2f} [{request_id}]")

                subclip.write_videofile(temp_output_path, **temp_write_kwargs)
                temp_clip_paths.append(temp_output_path)
                logger.info(f"Successfully wrote temporary clip {order}. [{request_id}]")

            except Exception as e:
                logger.error(f"Error processing or writing temp clip {order} from {source_path} [{request_id}]: {e}")
                traceback.print_exc()
                # Continue to the next clip
            finally:
                # Close the MoviePy objects for this clip *immediately* to free memory
                if subclip is not None:
                    try: subclip.close()
                    except Exception as ce: logger.error(f"Error closing subclip object for clip {order} [{request_id}]: {ce}")
                if resized_clip is not None and resized_clip is not video:  # Avoid double close if no resize happened
                    try: resized_clip.close()
                    except Exception as ce: logger.error(f"Error closing resized_clip object for clip {order} [{request_id}]: {ce}")
                if video is not None:
                    try: video.close()
                    except Exception as ce: logger.error(f"Error closing source video object for clip {order} [{request_id}]: {ce}")

        # --- Concatenate Temporary Clips using FFMPEG ---
        if not temp_clip_paths:
            error_msg = 'No valid temporary clips could be created.'
            update_progress(request_id, "FAILED", "Video assembly failed.", error=error_msg)
            # Cleanup is handled in the outer finally block
            return {'status': 'error', 'message': error_msg}

        update_progress(request_id, "CONCATENATING", f"Concatenating {len(temp_clip_paths)} temporary clips using FFMPEG...")
        logger.info(f"Preparing to concatenate {len(temp_clip_paths)} temporary clips via FFMPEG. [{request_id}]")

        # Create the FFMPEG concat list file: absolute paths, forward slashes,
        # single quotes escaped per the concat demuxer's file syntax.
        concat_list_path = os.path.join(temp_dir, "concat_list.txt")
        with open(concat_list_path, 'w') as f:
            for clip_path in temp_clip_paths:
                safe_path = clip_path.replace("\\", "/").replace("'", "'\\''")
                f.write(f"file '{safe_path}'\n")
        logger.info(f"Generated FFMPEG concat list: {concat_list_path} [{request_id}]")

        # Intermediate concatenated video (before audio/colour post-processing)
        concatenated_video_path = os.path.join(temp_dir, f"concatenated_{request_id}.mp4")

        ffmpeg_cmd = [
            'ffmpeg',
            '-y',                       # Overwrite output without asking
            '-f', 'concat',
            '-safe', '0',               # Allow unsafe file paths (needed for concat demuxer)
            '-i', concat_list_path,
            '-c', 'copy',               # CRITICAL: Copy streams without re-encoding (FAST!)
            '-fflags', '+igndts',       # Ignore DTS issues that can arise from concat
            '-map_metadata', '-1',      # Avoid metadata issues from source clips
            '-movflags', '+faststart',  # Good practice for web video
            concatenated_video_path
        ]

        logger.info(f"Executing FFMPEG command: {' '.join(ffmpeg_cmd)} [{request_id}]")
        try:
            # Capture FFMPEG output for logging/debugging
            process = subprocess.run(ffmpeg_cmd, check=True, capture_output=True, text=True)
            logger.info(f"FFMPEG concatenation successful. Output:\n{process.stdout}\n{process.stderr} [{request_id}]")
        except subprocess.CalledProcessError as e:
            error_msg = f"FFMPEG concatenation failed with exit code {e.returncode}."
            logger.error(error_msg + f" [{request_id}]")
            logger.error(f"FFMPEG stderr:\n{e.stderr}")
            logger.error(f"FFMPEG stdout:\n{e.stdout}")
            update_progress(request_id, "FAILED", "FFMPEG concatenation failed.", error=error_msg + f" Details: {e.stderr[:200]}...")  # Limit error length
            return {'status': 'error', 'message': error_msg, 'details': e.stderr}
        except FileNotFoundError:
            error_msg = "FFMPEG command not found. Ensure FFMPEG is installed and in the system's PATH."
            logger.error(error_msg + f" [{request_id}]")
            update_progress(request_id, "FAILED", "FFMPEG not found.", error=error_msg)
            return {'status': 'error', 'message': error_msg}

        # --- Post-Processing: Background Audio & Color Adjustments ---
        final_processed_path = concatenated_video_path  # Start with the ffmpeg output

        bg_audio_info = plan.get('background_audio')
        color_adjustments = plan.get('color_adjustments')

        if bg_audio_info or (color_adjustments and isinstance(color_adjustments, dict)):
            update_progress(request_id, "POST_PROCESSING", "Applying background audio and/or color adjustments...")
            logger.info(f"Loading concatenated video for post-processing (BG Audio/Color). [{request_id}]")

            post_process_clip = None
            bg_audio_clip_to_close = None
            try:
                post_process_clip = VideoFileClip(concatenated_video_path)

                # --- Background Audio ---
                bg_audio_path = bg_audio_info.get('source_path') if bg_audio_info else None
                final_audio = post_process_clip.audio  # Audio from the concatenated clip

                if bg_audio_path and os.path.exists(bg_audio_path):
                    volume = bg_audio_info.get('volume_factor', 0.7)
                    if not isinstance(volume, (int, float)) or not (0 <= volume <= 1.5):
                        logger.warning(f"Invalid background audio volume factor ({volume}). Using default 0.7. [{request_id}]")
                        volume = 0.7

                    logger.info(f"Adding background audio: '{os.path.basename(bg_audio_path)}' with volume {volume:.2f} [{request_id}]")
                    try:
                        bg_audio_clip = AudioFileClip(bg_audio_path)
                        bg_audio_clip_to_close = bg_audio_clip  # Ensure cleanup

                        target_vid_duration = post_process_clip.duration
                        if target_vid_duration is not None:
                            if bg_audio_clip.duration > target_vid_duration:
                                bg_audio_clip = bg_audio_clip.subclipped(0, target_vid_duration)
                            # BUGFIX: MoviePy 2.x renamed set_duration -> with_duration
                            # (the rest of this file uses the 2.x API: subclipped,
                            # resized, with_effects)
                            bg_audio_clip = bg_audio_clip.with_duration(target_vid_duration)

                        # BUGFIX: MoviePy 2.x replaced clip.fx(MultiplyVolume, v)
                        # with clip.with_effects([MultiplyVolume(v)])
                        processed_bg_audio = bg_audio_clip.with_effects([MultiplyVolume(volume)])

                        if final_audio:  # If the concatenated video has audio
                            logger.info(f"Compositing background audio with existing clip audio. [{request_id}]")
                            # Ensure original audio matches duration
                            if final_audio.duration != target_vid_duration:
                                logger.warning(f"Original concatenated audio duration ({final_audio.duration:.2f}s) doesn't match video duration ({target_vid_duration:.2f}s). Adjusting. [{request_id}]")
                                final_audio = final_audio.with_duration(target_vid_duration)
                            final_audio = CompositeAudioClip([final_audio, processed_bg_audio])
                        else:  # If concatenated video was silent
                            logger.info(f"Setting background audio (concatenated video was silent). [{request_id}]")
                            final_audio = processed_bg_audio

                        if final_audio and target_vid_duration:
                            final_audio = final_audio.with_duration(target_vid_duration)  # Final duration check

                        # BUGFIX: MoviePy 2.x renamed set_audio -> with_audio
                        post_process_clip = post_process_clip.with_audio(final_audio)

                    except Exception as audio_e:
                        # Best-effort: a bad audio file should not kill the render
                        logger.warning(f"Failed to add background audio during post-processing [{request_id}]: {audio_e}. Proceeding without it.")
                        traceback.print_exc()
                        if post_process_clip.audio is None:
                            logger.warning(f"Final video might be silent after failed BG audio add. [{request_id}]")

                elif bg_audio_path:
                    logger.warning(f"Background audio file specified ('{os.path.basename(bg_audio_path)}') not found. Skipping BG audio. [{request_id}]")
                elif post_process_clip.audio is None:
                    logger.warning(f"No background audio specified and concatenated video is silent. Final video will be silent. [{request_id}]")

                # --- Apply Overall Color Adjustments ---
                if color_adjustments and isinstance(color_adjustments, dict):
                    raw_brightness = color_adjustments.get('brightness', 0)
                    raw_contrast = color_adjustments.get('contrast', 1.0)
                    lum_param = 0
                    contrast_param = 0.0
                    apply_color_fx = False
                    try:
                        if isinstance(raw_brightness, (int, float)) and -1.0 <= raw_brightness <= 1.0 and raw_brightness != 0:
                            lum_param = int(raw_brightness * 255)  # LumContrast takes a 0-255 luminance offset
                            apply_color_fx = True
                        if isinstance(raw_contrast, (int, float)) and 0.1 <= raw_contrast <= 3.0 and raw_contrast != 1.0:
                            contrast_param = raw_contrast - 1.0  # LumContrast takes a delta around 0
                            apply_color_fx = True

                        if apply_color_fx:
                            logger.info(f"Applying overall color adjustments: Brightness={raw_brightness:.2f}, Contrast={raw_contrast:.2f} [{request_id}]")
                            # BUGFIX: MoviePy 2.x effect application (.fx is the 1.x API)
                            post_process_clip = post_process_clip.with_effects([LumContrast(lum=lum_param, contrast=contrast_param)])
                    except Exception as e:
                        logger.error(f"Error applying color adjustments during post-processing: {e} [{request_id}]")
                        traceback.print_exc()

                # --- Define Final Output Path and Write ---
                final_output_path = os.path.join(FINAL_OUTPUT_FOLDER, secure_filename(output_filename))
                os.makedirs(os.path.dirname(final_output_path), exist_ok=True)
                final_processed_path = final_output_path  # Update the path to the *actual* final file

                update_progress(request_id, "WRITING_FINAL", f"Writing final video with post-processing to {os.path.basename(final_output_path)} {'(Preview)' if is_preview else ''}...")
                logger.info(f"Writing final video (post-processed) to: {final_output_path} [{request_id}] {'(Preview)' if is_preview else ''}")

                final_write_kwargs = {
                    "codec": "libx264",
                    "audio_codec": "aac",
                    "threads": 16,
                    "logger": 'bar',  # Show progress bar for final write
                    "preset": 'ultrafast' if is_preview else 'medium',
                    "bitrate": '500k' if is_preview else '50000k'  # Preview vs. final quality
                }
                post_process_clip.write_videofile(final_output_path, **final_write_kwargs)
                logger.info(f"Successfully wrote final post-processed video. [{request_id}]")

            except Exception as post_e:
                error_msg = f"Failed during post-processing (audio/color) or final write: {post_e}"
                logger.error(f"Error during post-processing or final write [{request_id}]: {post_e}")
                traceback.print_exc()
                update_progress(request_id, "FAILED", "Post-processing/Final Write failed.", error=error_msg)
                # Cleanup handled in the outer finally
                return {'status': 'error', 'message': error_msg}
            finally:
                # Clean up post-processing clips
                if post_process_clip:
                    try: post_process_clip.close()
                    except Exception as ce: logger.error(f"Error closing post_process_clip [{request_id}]: {ce}")
                if bg_audio_clip_to_close:
                    try: bg_audio_clip_to_close.close()
                    except Exception as ce: logger.error(f"Error closing bg_audio_clip_to_close [{request_id}]: {ce}")

        else:
            # No post-processing needed: the FFMPEG output IS the final video.
            # Move/rename it to the final destination.
            final_output_path = os.path.join(FINAL_OUTPUT_FOLDER, secure_filename(output_filename))
            os.makedirs(os.path.dirname(final_output_path), exist_ok=True)
            logger.info(f"No post-processing needed. Moving concatenated file to final destination: {final_output_path} [{request_id}]")
            try:
                shutil.move(concatenated_video_path, final_output_path)
                final_processed_path = final_output_path  # Update to the final path
                logger.info(f"Successfully moved concatenated video to final path. [{request_id}]")
            except Exception as move_e:
                error_msg = f"Failed to move concatenated video to final destination: {move_e}"
                logger.error(error_msg + f" [{request_id}]")
                update_progress(request_id, "FAILED", "Failed to finalize video.", error=error_msg)
                # Cleanup handled in the outer finally
                return {'status': 'error', 'message': error_msg}

        logger.info(f"Plan Execution Successful [{request_id}]")
        update_progress(request_id, "COMPLETED", f"Video assembly complete: {os.path.basename(final_processed_path)}")
        return {'status': 'success', 'output_path': final_processed_path}

    except Exception as e:
        # Catch-all for unexpected errors during setup or flow control
        error_msg = f"An unexpected error occurred during video processing: {e}"
        logger.error(f"Unexpected error in execute_editing_plan [{request_id}]: {e}")
        logger.error(f"Error Type: {type(e).__name__}")
        traceback.print_exc()
        update_progress(request_id, "FAILED", "Unexpected processing error.", error=error_msg)
        return {'status': 'error', 'message': error_msg}

    finally:
        # --- Cleanup Temporary Files and Directory (always runs) ---
        if temp_dir and os.path.exists(temp_dir):
            logger.info(f"Cleaning up temporary directory: {temp_dir} [{request_id}]")
            try:
                shutil.rmtree(temp_dir)
                logger.info(f"Successfully removed temporary directory. [{request_id}]")
            except Exception as cleanup_e:
                logger.error(f"Error removing temporary directory {temp_dir} [{request_id}]: {cleanup_e}")
        # Note: MoviePy clip objects should have been closed within the loop or post-processing block
|
838 |
+
|
839 |
+
# --- Feature 2: Cache Cleanup Function ---
def cleanup_expired_cache():
    """Removes expired entries from the Gemini file cache.

    Entries older than CACHE_EXPIRY_SECONDS are dropped from the local
    cache only; the remote Gemini API files are intentionally NOT deleted
    (Feature 1 keeps them alive so later requests can reuse them).
    """
    global gemini_file_cache, cache_lock
    now = time.time()
    expired_hashes = []
    # Hold the lock across BOTH the scan and the deletions: request threads
    # mutate gemini_file_cache concurrently, and an unguarded `del` between
    # the two passes could race with another thread's eviction and raise
    # KeyError (or drop a just-refreshed entry).
    with cache_lock:
        for file_hash, data in gemini_file_cache.items():
            if now - data.get('timestamp', 0) > CACHE_EXPIRY_SECONDS:
                expired_hashes.append(file_hash)

        if expired_hashes:
            logger.info(f"Cleaning up {len(expired_hashes)} expired Gemini cache entries...")
            for file_hash in expired_hashes:
                # --- Feature 1: No Deletion from Gemini API ---
                # Remote genai.delete_file() calls are deliberately disabled;
                # only the local bookkeeping entry is removed.
                # ---------------------------------------------
                gemini_file_cache.pop(file_hash, None)  # pop() tolerates an already-removed key
                logger.info(f"Removed expired cache entry for hash: {file_hash[:8]}...")
            logger.info("Expired Gemini cache cleanup finished.")
# ------------------------------------
|
868 |
+
|
869 |
+
# --- Feature 3: Request Details Cache Cleanup ---
def cleanup_expired_request_details():
    """Removes old request details from the cache.

    Entries older than the 48-hour retention window are evicted so the
    in-memory request_details_cache does not grow without bound.
    """
    global request_details_cache
    cutoff = time.time() - (48 * 60 * 60)  # 48-hour retention window
    stale_ids = []
    for req_id, details in request_details_cache.items():
        if details.get('timestamp', 0) < cutoff:
            stale_ids.append(req_id)
    if stale_ids:
        logger.info(f"Cleaning up {len(stale_ids)} expired request details entries...")
        for req_id in stale_ids:
            # Re-check membership: another thread may have evicted it already.
            if req_id in request_details_cache:
                del request_details_cache[req_id]
                logger.info(f"Removed expired request details for ID: {req_id}")
        logger.info("Expired request details cleanup finished.")
# ------------------------------------------
|
886 |
+
|
887 |
+
def process_video_request(request_id: str, form_data: Dict, file_paths: Dict, app: Flask):
    """
    The main logic for processing a video request, run in a background thread.
    Handles file caching and avoids deleting files.

    Pipeline: (1) hash every media file and check the Gemini upload cache,
    (2) upload cache misses concurrently via upload_thread_worker,
    (3) ask the model for an editing plan (generate_editing_plan),
    (4) execute the plan (execute_editing_plan), then publish the download
    URL through update_progress(). Local uploads and remote Gemini files
    are intentionally NOT deleted afterwards (Feature 1), so a later HQ
    re-run can reuse them.

    Args:
        request_id: Unique ID keying progress updates and the task registry.
        form_data: Fields prepared by the /generate route ('style_desc',
            'duration', 'output', 'is_preview', 'mute_audio').
        file_paths: {'style_sample': <path or None>,
                     'sources': {'videos': [...], 'audios': [...], 'images': [...]}}.
        app: Flask app; an app context is required to call url_for() here.
    """
    global intermediate_files_registry, progress_updates, gemini_file_cache, cache_lock

    # Shared with uploader threads: each worker records its result/error here.
    uploaded_file_references: Dict[str, File] = {}
    upload_errors: Dict[str, str] = {}
    upload_threads = []

    try:
        # Extract data needed for processing
        style_description = form_data.get('style_desc', '')
        target_duration = form_data.get('duration')
        output_filename = form_data.get('output')
        is_preview = form_data.get('is_preview', False)
        # Global mute flag: when set, every source clip's own audio is dropped.
        mute_all_clips_flag = form_data.get('mute_audio', False)
        style_sample_path = file_paths.get('style_sample')
        source_media_paths = file_paths.get('sources', {})

        # --- 1. Identify files for Gemini API & Check Cache ---
        files_to_upload_api = []    # (path, hash) pairs that need a fresh upload
        files_requiring_api = []    # every file the model must be able to see
        if style_sample_path:
            files_requiring_api.append(style_sample_path)
        files_requiring_api.extend(source_media_paths.get('videos', []))
        files_requiring_api.extend(source_media_paths.get('audios', []))
        files_requiring_api.extend(source_media_paths.get('images', []))

        if not source_media_paths.get('videos'):
            raise ValueError("No source videos provided for processing.")

        logger.info(f"Checking cache for {len(files_requiring_api)} files potentially needing API processing [{request_id}]...")
        update_progress(request_id, "PREPARING", "Checking file cache...")

        # --- Feature 2: Cache Check ---
        cleanup_expired_cache()
        with cache_lock:
            for file_path in files_requiring_api:
                file_hash = get_file_hash(file_path)
                if not file_hash:
                    # Unhashable file (e.g. read failure): fall back to uploading.
                    logger.warning(f"Could not calculate hash for {file_path}. Will attempt upload. [{request_id}]")
                    files_to_upload_api.append((file_path, None))
                    continue

                cached_data = gemini_file_cache.get(file_hash)
                if cached_data:
                    cached_file = cached_data.get('file')
                    try:
                        # Verify the remote file still exists and is usable.
                        retrieved_file = genai.get_file(name=cached_file.name)
                        if retrieved_file.state.name == "ACTIVE":
                            logger.info(f"Cache HIT: Using cached Gemini file '{cached_file.name}' for {os.path.basename(file_path)} (Hash: {file_hash[:8]}...) [{request_id}]")
                            uploaded_file_references[file_path] = retrieved_file
                            # Refresh the timestamp so in-use entries don't expire.
                            gemini_file_cache[file_hash]['timestamp'] = time.time()
                        else:
                            logger.warning(f"Cache INVALID: Cached Gemini file '{cached_file.name}' for {os.path.basename(file_path)} is no longer ACTIVE (State: {retrieved_file.state.name}). Will re-upload. [{request_id}]")
                            files_to_upload_api.append((file_path, file_hash))
                            del gemini_file_cache[file_hash]
                    except Exception as get_err:
                        # Verification call failed (network, deleted file, ...): re-upload.
                        logger.warning(f"Cache CHECK FAILED: Error verifying cached Gemini file '{cached_file.name}' for {os.path.basename(file_path)}: {get_err}. Will re-upload. [{request_id}]")
                        files_to_upload_api.append((file_path, file_hash))
                        if file_hash in gemini_file_cache:
                            del gemini_file_cache[file_hash]
                else:
                    logger.info(f"Cache MISS: File {os.path.basename(file_path)} (Hash: {file_hash[:8]}...) not found in cache. Will upload. [{request_id}]")
                    files_to_upload_api.append((file_path, file_hash))
        # -----------------------------

        # --- 2. Upload necessary files to Gemini API concurrently ---
        if not files_to_upload_api:
            logger.info(f"All required files found in cache. No API uploads needed for request {request_id}.")
        else:
            update_progress(request_id, "UPLOADING", f"Uploading {len(files_to_upload_api)} files to processing service...")
            logger.info(f"Starting Gemini API uploads for {len(files_to_upload_api)} files [{request_id}]...")

            for file_path, file_hash in files_to_upload_api:
                if file_hash is None:
                    # Synthesize a unique key so the worker can still record results.
                    file_hash = f"no_hash_{uuid.uuid4()}"
                thread = threading.Thread(target=upload_thread_worker, args=(request_id, file_path, file_hash, uploaded_file_references, upload_errors))
                upload_threads.append(thread)
                thread.start()

            # Wait for every uploader, reporting progress after each finishes.
            for i, thread in enumerate(upload_threads):
                thread.join()
                update_progress(request_id, "UPLOADING", f"Processing uploaded files ({i+1}/{len(files_to_upload_api)} complete)...")

            logger.info(f"All upload threads finished for [{request_id}].")

            if upload_errors:
                error_summary = "; ".join(f"{os.path.basename(k)}: {v}" for k, v in upload_errors.items())
                for fp, err in upload_errors.items():
                    logger.error(f"Upload/Processing Error for {os.path.basename(fp)} [{request_id}]: {err}")
                # Partial failure is tolerated as long as at least one source
                # video made it through; only a total loss aborts the request.
                all_source_videos = source_media_paths.get('videos', [])
                available_source_videos = [p for p in all_source_videos if p in uploaded_file_references]
                if not available_source_videos:
                    raise ValueError(f"All source videos failed upload/processing or cache retrieval: {error_summary}")
                else:
                    logger.warning(f"Some files failed upload/processing, continuing if possible: {error_summary}")

        if not any(p in uploaded_file_references for p in source_media_paths.get('videos', [])):
            raise ValueError("API analysis requires source videos, but none are available via cache or successful upload.")

        logger.info(f"Proceeding to plan generation. API references available for {len(uploaded_file_references)} files. [{request_id}].")

        # --- 3. Generate Editing Plan ---
        plan_result = generate_editing_plan(
            request_id=request_id,
            uploaded_file_references=uploaded_file_references,
            source_media_paths=source_media_paths,
            style_description=style_description,
            sample_video_path=style_sample_path,
            target_duration=target_duration
        )

        if plan_result['status'] != 'success':
            raise ValueError(f"Failed to generate editing plan: {plan_result['message']}")

        editing_plan = plan_result['plan']
        logger.info(f"Generated Editing Plan Successfully [{request_id}]")

        # --- 4. Execute Editing Plan ---
        execution_result = execute_editing_plan(
            request_id=request_id,
            plan=editing_plan,
            output_filename=output_filename,
            is_preview=is_preview,
            mute_all_clips=mute_all_clips_flag  # Pass the flag
        )

        # --- Handle Execution Result ---
        if execution_result['status'] == 'success':
            final_output_path = execution_result['output_path']
            final_output_basename = os.path.basename(final_output_path)

            logger.info(f"Video editing successful. Output path: {final_output_path} [{request_id}]")
            logger.info(f"Preparing final result for request {request_id}. Filename: {final_output_basename}")

            # url_for() needs an application context because we are running
            # off the request thread.
            with app.app_context():
                try:
                    video_url = url_for('download_file', filename=final_output_basename, _external=False)
                    logger.info(f"Download URL generated within context: {video_url} [{request_id}]")

                    result_data = {
                        'status': 'success',
                        'message': f"Video {'preview ' if is_preview else ''}generated successfully!",
                        'video_url': video_url,
                        'output_filename': final_output_basename,
                        'is_preview': is_preview,
                        'request_id': request_id
                    }
                    update_progress(request_id, "COMPLETED", f"Video generation finished successfully {'(Preview)' if is_preview else ''}.", result=result_data)
                    logger.info(f"Final success status updated for request {request_id}.")

                except Exception as url_gen_e:
                    logger.error(f"Error generating download URL within context for request {request_id}: {url_gen_e}")
                    traceback.print_exc()
                    update_progress(request_id, "FAILED", "Video generated, but failed to create download link.", error=str(url_gen_e))

        else:
            # execute_editing_plan publishes its own FAILED status on error.
            logger.error(f"Video execution plan failed for request {request_id}. Status should already be FAILED.")

    except Exception as e:
        logger.error(f"--- Unhandled Error in process_video_request Thread [{request_id}] ---")
        logger.error(f"Error Type: {type(e).__name__}")
        logger.error(f"Error Message: {e}")
        traceback.print_exc()
        # Don't clobber a more specific FAILED status published earlier.
        current_status = progress_updates.get(request_id, {}).get('stage', 'UNKNOWN')
        if current_status != "FAILED":
            update_progress(request_id, "FAILED", "An unexpected error occurred during processing.", error=str(e))

    finally:
        # --- 5. Cleanup ---
        # Feature 1: keep both remote (Gemini) and local files so a later
        # HQ re-run of the same request can reuse them.
        logger.info(f"Initiating cleanup for request {request_id}...")
        logger.info(f"Skipping deletion of Gemini API files for request {request_id} (Feature 1).")
        logger.info(f"Skipping deletion of local intermediate files in '{UPLOAD_FOLDER}' for request {request_id} (Feature 1).")

        if request_id in background_tasks:
            try:
                del background_tasks[request_id]
                logger.info(f"Removed background task entry for completed/failed request {request_id}")
            except KeyError:
                # Another path may have removed it between the check and the del.
                logger.warning(f"Tried to remove background task entry for {request_id}, but it was already gone.")

        logger.info(f"Processing finished for request {request_id}.")
|
1076 |
+
|
1077 |
+
|
1078 |
+
# --- Flask Routes ---
|
1079 |
+
|
1080 |
+
# Default style description constant.
# Pre-fills the form in index_get() and serves as the fallback in
# generate_video_post() when the user submits an empty style description.
DEFAULT_STYLE_DESCRIPTION = """Project Goal: Create a fast-paced, energetic, and aesthetically beautiful promotional video showcasing the product for an Instagram home decor/kitchen channel.

Pacing: Fast, energetic, engaging.
Editing: Quick cuts.
Visuals: HIGHLY aesthetic, clean, beautiful shots ONLY. Focus on quality over quantity. Prioritize well-lit, well-composed footage. Do NOT use any mediocre or subpar shots, even if provided.

Pacing and Cuts:
Quick Cuts: Keep shot durations short (e.g., 0.5 seconds to 2 seconds max per clip).
Transitions: Mostly hard cuts will work best for this style. Avoid slow fades or complex wipes unless one specifically enhances the aesthetic (e.g., a very quick, clean wipe or maybe a smooth match-cut if the footage allows).

It's not a tutorial, it's a vibe."""
|
1092 |
+
|
1093 |
+
|
1094 |
+
@app.route('/', methods=['GET'])
def index_get():
    """Serves the main page, performing opportunistic cache housekeeping first."""
    current_time = time.time()
    # Progress entries older than 48h are candidates for eviction.
    stale_ids = []
    for rid, data in list(progress_updates.items()):
        if current_time - data.get('timestamp', 0) > 3600 * 48:
            stale_ids.append(rid)
    for rid in stale_ids:
        # Only drop entries whose background task has finished and gone away.
        if rid not in background_tasks and rid in progress_updates:
            del progress_updates[rid]
            logger.info(f"Cleaned up old progress entry: {rid}")

    cleanup_expired_request_details()
    cleanup_expired_cache()

    return render_template('index.html', default_style_desc=DEFAULT_STYLE_DESCRIPTION)
|
1109 |
+
|
1110 |
+
@app.route('/generate', methods=['POST'])
def generate_video_post():
    """Accepts the upload form, validates it, saves the media locally, and
    spawns a background thread running process_video_request().

    Returns immediately with JSON {'status': 'processing_started',
    'request_id': ...}; the client then polls /progress/<request_id>.
    Any validation failure returns a 400 with a human-readable message.
    """
    global background_tasks, intermediate_files_registry, request_details_cache

    request_id = str(uuid.uuid4())
    intermediate_files_registry[request_id] = []

    try:
        # --- Form Data Validation ---
        style_description = request.form.get('style_desc', '').strip()
        target_duration_str = request.form.get('duration')
        output_filename_base = secure_filename(request.form.get('output', f'ai_edited_video_{request_id[:8]}'))
        # Drop any user-supplied extension; the suffix and .mp4 are added below.
        output_filename_base = os.path.splitext(output_filename_base)[0]

        if not style_description:
            style_description = DEFAULT_STYLE_DESCRIPTION
            logger.info(f"Using default style description for request {request_id}")

        try:
            target_duration = float(target_duration_str)
            if target_duration <= 0: raise ValueError("Duration must be positive")
        except (ValueError, TypeError):
            return jsonify({'status': 'error', 'message': 'Invalid target duration. Please enter a positive number.'}), 400

        is_preview = request.form.get('generate_preview') == 'on'
        logger.info(f"Request {request_id} - Preview Mode: {is_preview}")

        # Mute flag: checkbox controlling whether source-clip audio is dropped.
        mute_audio_flag = request.form.get('mute_audio') == 'on'
        logger.info(f"Request {request_id} - Mute Original Audio: {mute_audio_flag}")

        output_suffix = "_preview" if is_preview else "_hq"
        output_filename = f"{output_filename_base}{output_suffix}.mp4"

        # --- File Handling ---
        saved_file_paths = {"sources": {"videos": [], "audios": [], "images": []}, "style_sample": None}
        request_files = request.files

        def save_file(file_storage, category, prefix="source"):
            # Saves one uploaded file under UPLOAD_FOLDER with a sanitized,
            # request-scoped name, recording it in intermediate_files_registry
            # and saved_file_paths. Returns None on success (or empty input),
            # or an error string for the caller to surface as a 400.
            if file_storage and file_storage.filename:
                if allowed_file(file_storage.filename):
                    base, ext = os.path.splitext(file_storage.filename)
                    safe_base = "".join(c if c.isalnum() or c in ('_','-','.') else '_' for c in base)
                    filename = secure_filename(f"{prefix}_{safe_base}_{request_id[:8]}{ext}")
                    save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
                    try:
                        file_storage.save(save_path)
                        intermediate_files_registry[request_id].append(save_path)
                        logger.info(f"Saved uploaded file [{request_id}]: {save_path}")

                        if category == "style_sample":
                            saved_file_paths["style_sample"] = save_path
                        elif category in saved_file_paths["sources"]:
                            saved_file_paths["sources"][category].append(save_path)
                        return None
                    except Exception as save_err:
                        # NOTE(review): this message logs the literal "(unknown)";
                        # it was presumably meant to include the original
                        # filename — confirm and fix in a behavior change.
                        logger.error(f"Failed to save file (unknown) to {save_path}: {save_err}")
                        return f"Error saving file: {file_storage.filename}"
                else:
                    return f"Invalid file type for {category}: {file_storage.filename}"
            return None

        style_sample_error = save_file(request_files.get('style_sample'), "style_sample", prefix="style")
        if style_sample_error: return jsonify({'status': 'error', 'message': style_sample_error}), 400

        # At least one non-empty source video is mandatory.
        videos = request_files.getlist('videos[]')
        if not videos or all(not f.filename for f in videos):
            return jsonify({'status': 'error', 'message': 'Please upload at least one source video.'}), 400

        for video in videos:
            video_error = save_file(video, "videos")
            if video_error:
                return jsonify({'status': 'error', 'message': video_error}), 400

        if not saved_file_paths["sources"]["videos"]:
            return jsonify({'status': 'error', 'message': 'Failed to save any source videos. Check file types and permissions.'}), 400

        # Audio tracks and still images are optional extras.
        for audio in request_files.getlist('audios[]'):
            audio_error = save_file(audio, "audios")
            if audio_error:
                return jsonify({'status': 'error', 'message': audio_error}), 400

        for image in request_files.getlist('images[]'):
            image_error = save_file(image, "images")
            if image_error:
                return jsonify({'status': 'error', 'message': image_error}), 400

        # --- Prepare Data for Background Thread ---
        form_data_for_thread = {
            'style_desc': style_description,
            'duration': target_duration,
            'output': output_filename,
            'is_preview': is_preview,
            'mute_audio': mute_audio_flag
        }

        # --- Feature 3: Store Request Details ---
        # Kept so /generate-hq/<id> can re-run the same inputs at full quality.
        request_details_cache[request_id] = {
            'form_data': form_data_for_thread.copy(),
            'file_paths': saved_file_paths.copy(),
            'timestamp': time.time()
        }
        logger.info(f"Stored request details for potential HQ generation. ID: {request_id}")
        # --------------------------------------

        # --- Start Background Thread ---
        update_progress(request_id, "RECEIVED", "Request received. Initializing processing...")
        thread = threading.Thread(target=process_video_request, args=(request_id, form_data_for_thread, saved_file_paths, app))
        background_tasks[request_id] = thread
        thread.start()

        logger.info(f"Started background processing thread for request ID: {request_id}")

        # --- Return Immediate Response ---
        return jsonify({
            'status': 'processing_started',
            'message': 'Video generation process started. You can monitor the progress.',
            'request_id': request_id
        })

    except Exception as e:
        logger.error(f"--- Error in /generate endpoint before starting thread [{request_id}] ---")
        traceback.print_exc()
        return jsonify({'status': 'error', 'message': f"An internal server error occurred during setup: {e}"}), 500
|
1237 |
+
|
1238 |
+
# --- Feature 3: New Route for High-Quality Generation ---
@app.route('/generate-hq/<preview_request_id>', methods=['POST'])
def generate_high_quality_video(preview_request_id):
    """Re-runs a previously generated preview at full quality.

    Looks up the form data / file paths that /generate cached under the
    preview's request ID, flips is_preview off, renames the output from
    "*_preview" to "*_hq", and starts a fresh background processing thread
    under a brand-new request ID. Returns 404 if the cached details expired.
    """
    global background_tasks, request_details_cache

    logger.info(f"Received request to generate High Quality video based on preview ID: {preview_request_id}")

    original_details = request_details_cache.get(preview_request_id)
    if not original_details:
        logger.error(f"Original request details not found for preview ID: {preview_request_id}")
        return jsonify({'status': 'error', 'message': 'Original request details not found. Cannot generate high-quality version.'}), 404

    hq_request_id = str(uuid.uuid4())
    logger.info(f"Generating HQ video with new request ID: {hq_request_id}")

    hq_form_data = original_details['form_data'].copy()
    hq_form_data['is_preview'] = False  # Set to HQ mode

    # Swap the "_preview" output suffix (added by /generate) for "_hq".
    base_output_name = os.path.splitext(hq_form_data['output'])[0]
    if base_output_name.endswith('_preview'):
        base_output_name = base_output_name[:-len('_preview')]
    hq_form_data['output'] = f"{base_output_name}_hq.mp4"

    # NOTE(review): .copy() is shallow — the nested 'sources' lists remain
    # shared with the original cache entry. Harmless as long as nothing
    # mutates them downstream; confirm before relying on isolation.
    hq_file_paths = original_details['file_paths'].copy()

    request_details_cache[hq_request_id] = {
        'form_data': hq_form_data.copy(),
        'file_paths': hq_file_paths.copy(),
        'timestamp': time.time(),
        'based_on_preview_id': preview_request_id
    }

    update_progress(hq_request_id, "RECEIVED", "High-Quality generation request received. Initializing...")
    thread = threading.Thread(target=process_video_request, args=(hq_request_id, hq_form_data, hq_file_paths, app))
    background_tasks[hq_request_id] = thread
    thread.start()

    logger.info(f"Started background processing thread for HQ request ID: {hq_request_id}")

    return jsonify({
        'status': 'processing_started',
        'message': 'High-Quality video generation process started.',
        'request_id': hq_request_id
    })
# ----------------------------------------------------
|
1283 |
+
|
1284 |
+
@app.route('/progress/<request_id>', methods=['GET'])
def get_progress(request_id):
    """Endpoint for the client to poll for progress updates."""
    # Known IDs return their stored progress dict; unknown/expired IDs get a
    # 404 with the same JSON shape so the client can handle both uniformly.
    status = progress_updates.get(request_id)
    if status:
        return jsonify(status)
    return jsonify({"stage": "UNKNOWN", "message": "Request ID not found or expired.", "error": None, "result": None}), 404
|
1293 |
+
|
1294 |
+
|
1295 |
+
@app.route('/output/<path:filename>')
def download_file(filename):
    """Serves the final generated video file from FINAL_OUTPUT_FOLDER.

    Rejects any filename that secure_filename() would alter or that contains
    path separators, preventing traversal outside the output directory.
    Returns 400 for unsafe names, 404 when the file does not exist.
    """
    safe_filename = secure_filename(filename)
    if safe_filename != filename or '/' in filename or '\\' in filename:
        # Bug fix: this f-string previously contained the literal "(unknown)"
        # with no placeholder, so the offending name never reached the log.
        # Log the raw requested filename (repr-escaped) for security auditing.
        logger.warning(f"Attempt to access potentially unsafe path rejected: {filename!r}")
        return "Invalid filename", 400

    file_path = os.path.join(app.config['FINAL_OUTPUT_FOLDER'], safe_filename)
    logger.info(f"Attempting to send file: {file_path}")

    if not os.path.exists(file_path):
        logger.error(f"Download request: File not found at {file_path}")
        return "File not found", 404

    # Served inline (not as an attachment) so the browser can play it directly.
    response = send_file(file_path, as_attachment=False)
    return response
|
1312 |
+
|
1313 |
+
|
1314 |
+
if __name__ == '__main__':
    # Bind on all interfaces at port 7860 (the Hugging Face Spaces convention,
    # matching the Docker deployment). threaded=True is required because
    # request processing runs in background threads that the server must not
    # block (progress polling happens while a generation thread is active).
    app.run(host='0.0.0.0', port=7860, debug=False, threaded=True)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
Flask
|
2 |
+
google-generativeai
|
3 |
+
moviepy
|
templates/index.html
ADDED
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html lang="en">
|
3 |
+
<head>
|
4 |
+
<meta charset="UTF-8">
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<title>AVE - AI Video Editor</title>
|
7 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
8 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
9 |
+
<link href="https://fonts.googleapis.com/css2?family=Crimson+Text:ital,wght@0,400;0,600;0,700;1,400;1,600;1,700&display=swap" rel="stylesheet">
|
10 |
+
<link href="https://fonts.googleapis.com/css2?family=Faculty+Glyphic&family=Funnel+Sans:ital,wght@0,300..800;1,300..800&display=swap" rel="stylesheet">
|
11 |
+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/css/all.min.css" integrity="sha512-SnH5WK+bZxgPHs44uWIX+LLJAJ9/2PkPKZ5QiAj6Ta86w+fsb2TkcmfRyVX3pBnMFcV7oQPJkl9QevSCWr3W6A==" crossorigin="anonymous" referrerpolicy="no-referrer" />
|
12 |
+
<style>
|
13 |
+
:root {
|
14 |
+
--primary-color: #20c997;
|
15 |
+
--secondary-color: #0dcaf0;
|
16 |
+
--dark-bg: #12141c;
|
17 |
+
--card-bg: #252a41;
|
18 |
+
--card-bg-transparent: rgba(37, 42, 65, 0.9);
|
19 |
+
--input-bg: rgba(0, 0, 0, 0.2);
|
20 |
+
--input-focus-bg: rgba(0, 0, 0, 0.3);
|
21 |
+
--input-border: rgba(255, 255, 255, 0.1);
|
22 |
+
--input-focus-border: var(--secondary-color);
|
23 |
+
--text-primary: #ffffff;
|
24 |
+
--text-secondary: #e5e7eb;
|
25 |
+
--text-muted: #9ca3af;
|
26 |
+
--success-color: #4ade80;
|
27 |
+
--success-bg: rgba(74, 222, 128, 0.1);
|
28 |
+
--error-color: #f87171;
|
29 |
+
--error-bg: rgba(248, 113, 113, 0.1);
|
30 |
+
--gradient-start: #0d6efd;
|
31 |
+
--gradient-mid: var(--primary-color);
|
32 |
+
--gradient-end: var(--secondary-color);
|
33 |
+
--secondary-color-rgb: 13, 202, 240;
|
34 |
+
--switch-bg-off: #4b5563;
|
35 |
+
--switch-bg-on: linear-gradient(135deg, var(--gradient-start), var(--gradient-mid));
|
36 |
+
--border-radius-lg: 16px;
|
37 |
+
--border-radius-md: 12px;
|
38 |
+
--border-radius-sm: 8px;
|
39 |
+
--border-radius-pill: 34px;
|
40 |
+
}
|
41 |
+
body { font-family: "Faculty Glyphic", serif; margin: 0; padding: 0; background-color: var(--dark-bg); color: var(--text-secondary); line-height: 1.7; overflow-x: hidden; overflow-y: auto; position: relative; }
|
42 |
+
#fluid-canvas { position: fixed; top: 0; left: 0; width: 100%; height: 100%; z-index: -1; }
|
43 |
+
.container { background-color: var(--card-bg-transparent); padding: 25px 30px; border-radius: var(--border-radius-lg); box-shadow: 0 15px 35px rgba(0, 0, 0, 0.4); width: 100%; max-width: 1500px; box-sizing: border-box; position: relative; overflow: hidden; z-index: 1; margin: 40px auto; }
|
44 |
+
.container::before { content: ""; position: absolute; top: 0; left: 0; right: 0; height: 6px; background: linear-gradient(to right, var(--gradient-start), var(--gradient-mid), var(--gradient-end)); border-radius: var(--border-radius-lg) var(--border-radius-lg) 0 0; opacity: 0.8; z-index: 2; }
|
45 |
+
.content-wrapper { position: relative; z-index: 1; }
|
46 |
+
h1, h2 { text-align: center; color: var(--text-primary); margin-bottom: 35px; font-family: "Faculty Glyphic", sans-serif; font-weight: 700; text-shadow: 0 2px 5px rgba(0, 0, 0, 0.3); }
|
47 |
+
h1 { font-size: 2.4rem; margin-bottom: 45px; }
|
48 |
+
h2 { font-size: 1.8rem; margin-top: 45px; }
|
49 |
+
h1 i { margin-right: 12px; background: linear-gradient(to right, var(--gradient-start), var(--gradient-mid), var(--gradient-end)); -webkit-background-clip: text; background-clip: text; -webkit-text-fill-color: transparent; text-shadow: none; font-size: 0.9em; vertical-align: middle; }
|
50 |
+
.main-card { background: linear-gradient(135deg, rgba(13, 110, 253, 0.1), rgba(32, 201, 151, 0.1), rgba(13, 202, 240, 0.1)); padding: 25px 35px; border-radius: var(--border-radius-md); margin-bottom: 45px; box-shadow: 0 5px 15px rgba(0, 0, 0, 0.15); position: relative; overflow: hidden; border: 1px solid rgba(255, 255, 255, 0.08); text-align: center; }
|
51 |
+
.main-title { color: var(--text-primary); font-size: 1.6rem; margin-bottom: 10px; font-family: "Funnel Sans", sans-serif; font-weight: 700; }
|
52 |
+
.main-subtitle { color: var(--text-secondary); font-size: 1.1rem; margin-bottom: 0; font-family: "Faculty Glyphic", serif; }
|
53 |
+
.form-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(180px, 1fr)); gap: 20px; margin-bottom: 35px; }
|
54 |
+
.form-group { margin-bottom: 0; background-color: rgba(255, 255, 255, 0.04); padding: 18px 18px 12px 18px; border-radius: var(--border-radius-md); transition: background-color 0.3s ease, transform 0.2s ease, box-shadow 0.3s ease; border: 1px solid transparent; box-sizing: border-box; display: flex; flex-direction: column; justify-content: space-between; min-height: 170px; }
|
55 |
+
.form-group-span-full { grid-column: 1 / -1; min-height: auto; margin-bottom: 20px; }
|
56 |
+
.form-group-span-full:last-of-type { margin-bottom: 0; }
|
57 |
+
.form-group:has(input[type="number"]), .form-group:has(select) { justify-content: center; text-align: center; min-height: auto; }
|
58 |
+
.form-group:focus-within:not(.switch-group-box) { background-color: rgba(255, 255, 255, 0.07); border-color: rgba(var(--secondary-color-rgb), 0.3); box-shadow: 0 0 15px rgba(var(--secondary-color-rgb), 0.1); }
|
59 |
+
.form-group:has(input[type="number"]) label, .form-group:has(select) label { justify-content: center; margin-bottom: 10px; }
|
60 |
+
.form-group:has(input[type="file"]) label:not(.file-input-label) { justify-content: flex-start; }
|
61 |
+
.form-group label:not(.switch-text-label):not(.file-input-label) { display: flex; align-items: center; margin-bottom: 12px; font-weight: 700; color: var(--text-secondary); font-size: 0.95rem; flex-shrink: 0; font-family: "Faculty Glyphic", serif; }
|
62 |
+
.form-group label:not(.switch-text-label):not(.file-input-label) i { margin-right: 10px; color: var(--text-muted); width: 1.1em; text-align: center; transition: color 0.3s ease; font-size: 1em; }
|
63 |
+
.form-group:focus-within:not(.switch-group-box) label:not(.switch-text-label):not(.file-input-label) i { color: var(--secondary-color); }
|
64 |
+
.form-group input[type="text"], .form-group input[type="number"], .form-group select, .form-group textarea { width: 100%; padding: 12px 14px; border: 1px solid var(--input-border); border-radius: var(--border-radius-sm); box-sizing: border-box; font-size: 1rem; transition: border-color 0.3s ease, box-shadow 0.3s ease, background-color 0.3s ease; background-color: var(--input-bg); color: var(--text-primary); flex-grow: 0; margin-bottom: 10px; font-family: "Faculty Glyphic", serif; }
|
65 |
+
.form-group input[type="number"] { text-align: center; }
|
66 |
+
.form-group select { appearance: none; background-image: url('data:image/svg+xml;charset=US-ASCII,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20width%3D%22292.4%22%20height%3D%22292.4%22%3E%3Cpath%20fill%3D%22%23cccccc%22%20d%3D%22M287%2069.4a17.6%2017.6%200%200%200-13-5.4H18.4c-5%200-9.3%201.8-12.9%205.4A17.6%2017.6%200%200%200%200%2082.2c0%205%201.8%209.3%205.4%2012.9l128%20127.9c3.6%203.6%207.8%205.4%2012.8%205.4s9.2-1.8%2012.8-5.4L287%2095c3.5-3.5%205.4-7.8%205.4-12.8%200-5-1.9-9.2-5.5-12.8z%22%2F%3E%3C/svg%3E'); background-repeat: no-repeat; background-position: right 14px center; background-size: 11px auto; padding-right: 40px; }
|
67 |
+
.form-group:has(select) select { text-align: center; padding-left: 40px; }
|
68 |
+
.form-group input::placeholder, .form-group textarea::placeholder { color: var(--text-muted); opacity: 0.7; font-family: "Faculty Glyphic", serif; }
|
69 |
+
.form-group input[type="text"]:focus, .form-group input[type="number"]:focus, .form-group select:focus, .form-group textarea:focus { border-color: var(--input-focus-border); outline: none; box-shadow: 0 0 0 3px rgba(var(--secondary-color-rgb), 0.25); background-color: var(--input-focus-bg); }
|
70 |
+
.form-group input:focus, .form-group select:focus { box-shadow: 0 0 8px rgba(var(--secondary-color-rgb), 0.8); }
|
71 |
+
.form-group textarea { resize: vertical; flex-grow: 1; min-height: 70px; }
|
72 |
+
.form-group input[type="file"] { opacity: 0; position: absolute; z-index: -1; left: 0; top: 0; width: 1px; height: 1px; }
|
73 |
+
.file-input-label { display: flex; flex-direction: column; align-items: center; justify-content: center; padding: 15px; border: 1px dashed var(--input-border); border-radius: var(--border-radius-sm); background-color: var(--input-bg); color: var(--text-muted); cursor: pointer; transition: all 0.3s ease; font-size: 0.9rem; margin-bottom: 10px; text-align: center; width: 100%; box-sizing: border-box; min-height: 75px; flex-grow: 1; font-family: "Faculty Glyphic", serif; }
|
74 |
+
.file-input-label i { margin-right: 0; margin-bottom: 8px; font-size: 1.2em; }
|
75 |
+
.file-input-label span { display: block; }
|
76 |
+
.file-input-label:hover { border-color: var(--secondary-color); color: var(--secondary-color); background-color: var(--input-focus-bg); }
|
77 |
+
.file-name-display { font-size: 0.85rem; color: var(--text-muted); margin-top: 0px; margin-bottom: 10px; display: block; min-height: 1.2em; word-break: break-all; text-align: center; font-family: "Faculty Glyphic", serif; }
|
78 |
+
.form-group small { font-size: 0.8rem; color: var(--text-muted); display: block; margin-top: auto; padding-top: 8px; font-family: 'Faculty Glyphic', serif; font-style: italic; opacity: 0.8; flex-shrink: 0; text-align: left; line-height: 1.3; }
|
79 |
+
.form-group:has(input[type="number"]) small, .form-group:has(select) small, .form-group:has(input[type="file"]) small { text-align: center; }
|
80 |
+
.form-group:has(.switch-wrapper) { background-color: rgba(255, 255, 255, 0.04); padding: 15px 20px; border-radius: var(--border-radius-md); border: 1px solid transparent; transition: background-color 0.3s ease, border-color 0.3s ease; min-height: auto; display: flex; flex-direction: column; gap: 10px; justify-content: center; }
|
81 |
+
.form-group:has(.switch-wrapper):focus-within { background-color: rgba(255, 255, 255, 0.07); border-color: rgba(var(--secondary-color-rgb), 0.3); }
|
82 |
+
.switch-wrapper { display: flex; align-items: center; justify-content: space-between; width: 100%; margin-bottom: 0; }
|
83 |
+
.switch-text-label { font-weight: 700; color: var(--text-secondary); font-size: 0.95rem; cursor: pointer; user-select: none; margin-right: 15px; font-family: "Faculty Glyphic", serif; }
|
84 |
+
.switch { position: relative; display: inline-block; width: 50px; height: 26px; flex-shrink: 0; }
|
85 |
+
.switch input { opacity: 0; width: 0; height: 0; }
|
86 |
+
.slider { position: absolute; cursor: pointer; top: 0; left: 0; right: 0; bottom: 0; background-color: var(--switch-bg-off); transition: .4s; border-radius: var(--border-radius-pill); }
|
87 |
+
.slider:before { position: absolute; content: ""; height: 18px; width: 18px; left: 4px; bottom: 4px; background-color: white; transition: .4s; border-radius: 50%; }
|
88 |
+
.switch input:checked + .slider { background: var(--switch-bg-on); }
|
89 |
+
.switch input:focus + .slider { box-shadow: 0 0 0 3px rgba(var(--secondary-color-rgb), 0.25); }
|
90 |
+
.switch input:checked + .slider:before { transform: translateX(24px); }
|
91 |
+
.button { display: flex; align-items: center; justify-content: center; width: 100%; margin-top: 35px; background: linear-gradient(135deg, var(--gradient-start), var(--gradient-mid), var(--gradient-end)); color: white; padding: 16px 28px; border: none; border-radius: var(--border-radius-sm); cursor: pointer; font-size: 1.15rem; font-family: "Funnel Sans", sans-serif; font-weight: 700; transition: all 0.3s ease; box-shadow: 0 5px 15px rgba(13, 110, 253, 0.3); position: relative; overflow: hidden; text-transform: uppercase; letter-spacing: 0.5px; }
|
92 |
+
.button::after { content: ''; position: absolute; top: -50%; left: -50%; width: 200%; height: 200%; background: radial-gradient(circle, rgba(255, 255, 255, 0.3) 0%, rgba(255, 255, 255, 0) 70%); transform: rotate(45deg); transition: opacity 0.5s ease; opacity: 0; }
|
93 |
+
.button:hover { transform: translateY(-5px) scale(1.05); box-shadow: 0 10px 25px rgba(13, 110, 253, 0.5); }
|
94 |
+
.button:hover::after { opacity: 1; }
|
95 |
+
.button:active { transform: translateY(-1px); box-shadow: 0 4px 12px rgba(13, 110, 253, 0.3); }
|
96 |
+
.button i { margin-right: 12px; font-size: 1em; }
|
97 |
+
.button:disabled { background: #4b5563; cursor: not-allowed; transform: none; box-shadow: none; color: var(--text-muted); }
|
98 |
+
.button:disabled::after { display: none; }
|
99 |
+
.button:disabled i { color: var(--text-muted); }
|
100 |
+
.message { margin-top: 30px; padding: 20px 28px; border-radius: var(--border-radius-md); display: flex; align-items: center; animation: fadeIn 0.4s ease-out; border: 1px solid transparent; font-size: 1.05rem; font-family: "Faculty Glyphic", serif; position: relative; z-index: 2; }
|
101 |
+
@keyframes fadeIn { from { opacity: 0; transform: translateY(10px); } to { opacity: 1; transform: translateY(0); } }
|
102 |
+
.message i { margin-right: 18px; font-size: 1.3em; flex-shrink: 0; }
|
103 |
+
.error { background-color: var(--error-bg); color: var(--error-color); border-color: rgba(248, 113, 113, 0.3); }
|
104 |
+
.error i { color: var(--error-color); }
|
105 |
+
.success { background-color: var(--success-bg); color: var(--success-color); border-color: rgba(74, 222, 128, 0.3); }
|
106 |
+
.success i { color: var(--success-color); }
|
107 |
+
#loading-indicator { display: none; text-align: center; margin: 25px 0 0 0; font-weight: 500; color: var(--text-muted); font-size: 0.95rem; font-family: "Faculty Glyphic", serif; position: relative; z-index: 2; }
|
108 |
+
#loading-indicator .spinner { display: inline-block; vertical-align: middle; border: 3px solid rgba(255, 255, 255, 0.1); border-top-color: var(--secondary-color); border-radius: 50%; width: 20px; height: 20px; animation: spin 0.8s linear infinite; margin-right: 10px; }
|
109 |
+
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
|
110 |
+
#progress-area { display: none; margin-top: 35px; padding: 28px 35px; background-color: rgba(0, 0, 0, 0.2); border-radius: var(--border-radius-md); border: 1px solid var(--input-border); animation: fadeIn 0.5s ease-out; text-align: center; position: relative; z-index: 2; }
|
111 |
+
#progress-area .progress-spinner { display: inline-block; vertical-align: middle; border: 4px solid rgba(255, 255, 255, 0.1); border-top-color: var(--gradient-start); border-right-color: var(--gradient-mid); border-bottom-color: var(--gradient-end); border-left-color: transparent; border-radius: 50%; width: 32px; height: 32px; animation: spin 0.8s linear infinite; margin-bottom: 18px; }
|
112 |
+
#progress-stage { display: block; font-weight: 700; color: var(--text-primary); font-size: 1.2rem; margin-bottom: 10px; font-family: "Funnel Sans", sans-serif; }
|
113 |
+
#progress-message { display: block; color: var(--text-secondary); font-size: 1rem; min-height: 1.2em; font-family: "Faculty Glyphic", serif; }
|
114 |
+
#video-modal { display: none; position: fixed; z-index: 1000; left: 0; top: 0; width: 100%; height: 100%; overflow: auto; background-color: rgba(0, 0, 0, 0.7); align-items: center; justify-content: center; padding: 20px; box-sizing: border-box; animation: fadeIn 0.3s ease-out; }
|
115 |
+
.modal-content { background-color: var(--card-bg); margin: auto; padding: 30px 35px; border-radius: var(--border-radius-lg); box-shadow: 0 10px 30px rgba(0, 0, 0, 0.4); width: 90%; max-width: 800px; position: relative; border-top: 6px solid; border-image: linear-gradient(to right, var(--gradient-start), var(--gradient-mid), var(--gradient-end)) 1; text-align: center; }
|
116 |
+
.modal-close { color: var(--text-muted); position: absolute; top: 15px; right: 25px; font-size: 28px; font-weight: bold; transition: color 0.3s ease; }
|
117 |
+
.modal-close:hover, .modal-close:focus { color: var(--text-primary); text-decoration: none; cursor: pointer; }
|
118 |
+
.modal-content h2 { font-size: 1.6rem; margin-top: 0; margin-bottom: 25px; color: var(--text-primary); font-family: "Funnel Sans", sans-serif; }
|
119 |
+
.modal-video-container { margin-bottom: 30px; background-color: rgba(0, 0, 0, 0.25); padding: 15px; border-radius: var(--border-radius-md); box-shadow: inset 0 1px 5px rgba(0, 0, 0, 0.2); border: 1px solid var(--input-border); }
|
120 |
+
.modal-video-container video { max-width: 100%; height: auto; display: block; border-radius: var(--border-radius-sm); background-color: #000; }
|
121 |
+
.modal-download-link { display: inline-flex; align-items: center; justify-content: center; margin-top: 15px; background: linear-gradient(135deg, var(--secondary-color), var(--primary-color)); color: white; padding: 14px 24px; text-decoration: none; border-radius: var(--border-radius-sm); font-weight: 700; transition: all 0.3s ease; box-shadow: 0 5px 15px rgba(var(--secondary-color-rgb), 0.3); position: relative; overflow: hidden; font-size: 1.05rem; text-transform: uppercase; letter-spacing: 0.5px; font-family: "Funnel Sans", sans-serif; }
|
122 |
+
.modal-download-link::after { content: ''; position: absolute; top: -50%; left: -50%; width: 200%; height: 200%; background: radial-gradient(circle, rgba(255, 255, 255, 0.2) 0%, rgba(255, 255, 255, 0) 70%); transform: rotate(45deg); transition: opacity 0.5s ease; opacity: 0; }
|
123 |
+
.modal-download-link:hover { transform: translateY(-3px); box-shadow: 0 8px 20px rgba(var(--secondary-color-rgb), 0.4); }
|
124 |
+
.modal-download-link:hover::after { opacity: 1; }
|
125 |
+
.modal-download-link:active { transform: translateY(-1px); box-shadow: 0 4px 12px rgba(var(--secondary-color-rgb), 0.3); }
|
126 |
+
.modal-download-link i { margin-right: 12px; font-size: 1em; }
|
127 |
+
#modal-hq-confirmation { display: none; margin-top: 30px; padding: 22px 28px; background-color: rgba(var(--secondary-color-rgb), 0.1); border: 1px solid rgba(var(--secondary-color-rgb), 0.3); border-radius: var(--border-radius-md); text-align: center; animation: fadeIn 0.4s ease-out; }
|
128 |
+
#modal-hq-confirmation p { margin: 0 0 18px 0; color: var(--text-secondary); font-size: 1rem; font-family: "Faculty Glyphic", serif; }
|
129 |
+
#modal-hq-confirmation .popup-buttons button { padding: 9px 20px; border-radius: var(--border-radius-sm); border: none; cursor: pointer; font-weight: 700; font-size: 0.95rem; transition: all 0.2s ease; margin: 0 10px; min-width: 90px; font-family: "Funnel Sans", sans-serif; }
|
130 |
+
#modal-hq-confirm-yes { background-color: var(--success-color); color: var(--dark-bg); }
|
131 |
+
#modal-hq-confirm-yes:hover { background-color: #6adf9a; transform: translateY(-1px); }
|
132 |
+
#modal-hq-confirm-no { background-color: var(--text-muted); color: var(--dark-bg); }
|
133 |
+
#modal-hq-confirm-no:hover { background-color: #b0b7c3; transform: translateY(-1px); }
|
134 |
+
</style>
|
135 |
+
</head>
|
136 |
+
<body>
|
137 |
+
<canvas id="fluid-canvas"></canvas>
|
138 |
+
<div class="container">
|
139 |
+
<div class="content-wrapper">
|
140 |
+
<h1><i class="fas fa-magic"></i>AI Video Editor</h1>
|
141 |
+
<div class="main-card">
|
142 |
+
<div class="main-title">Craft Your Vision Instantly</div>
|
143 |
+
<div class="main-subtitle">Let AI handle the edits. Just upload your files and describe the style.</div>
|
144 |
+
</div>
|
145 |
+
<form id="video-form">
|
146 |
+
<div class="form-grid">
|
147 |
+
<div class="form-group">
|
148 |
+
<label for="videos"><i class="fas fa-folder-open"></i>Source Videos</label>
|
149 |
+
<label for="videos" class="file-input-label"><i class="fas fa-upload"></i><span>Choose Files...</span></label>
|
150 |
+
<input type="file" id="videos" name="videos[]" accept="video/*" multiple required data-display-target="videos-display">
|
151 |
+
<span class="file-name-display" id="videos-display">No files selected</span>
|
152 |
+
<small>Select one or more video clips.</small>
|
153 |
+
</div>
|
154 |
+
<div class="form-group">
|
155 |
+
<label for="audios"><i class="fas fa-music"></i>Source Audios</label>
|
156 |
+
<label for="audios" class="file-input-label"><i class="fas fa-upload"></i><span>Choose Files...</span></label>
|
157 |
+
<input type="file" id="audios" name="audios[]" accept="audio/*" multiple data-display-target="audios-display">
|
158 |
+
<span class="file-name-display" id="audios-display">No files selected</span>
|
159 |
+
<small>Optional background audio tracks.</small>
|
160 |
+
</div>
|
161 |
+
<div class="form-group">
|
162 |
+
<label for="style_sample"><i class="fas fa-palette"></i>Style Sample</label>
|
163 |
+
<label for="style_sample" class="file-input-label"><i class="fas fa-upload"></i><span>Choose File...</span></label>
|
164 |
+
<input type="file" id="style_sample" name="style_sample" accept="video/*" data-display-target="style-sample-display">
|
165 |
+
<span class="file-name-display" id="style-sample-display">No file selected</span>
|
166 |
+
<small>Optional video to mimic style.</small>
|
167 |
+
</div>
|
168 |
+
<div class="form-group">
|
169 |
+
<label for="duration"><i class="fas fa-clock"></i>Target Duration (s)</label>
|
170 |
+
<input type="number" id="duration" name="duration" step="1" min="1" required placeholder="e.g., 30">
|
171 |
+
<small>Desired final video length.</small>
|
172 |
+
</div>
|
173 |
+
<div class="form-group">
|
174 |
+
<label for="variations"><i class="fas fa-random"></i>Variations</label>
|
175 |
+
<select id="variations" name="variations">
|
176 |
+
<option value="1" selected>1 Plan</option>
|
177 |
+
<option value="2">2 Plans</option>
|
178 |
+
<option value="3">3 Plans</option>
|
179 |
+
<option value="4">4 Plans</option>
|
180 |
+
</select>
|
181 |
+
<small>Number of edit plans to generate.</small>
|
182 |
+
</div>
|
183 |
+
<div class="form-group">
|
184 |
+
<div class="switch-wrapper">
|
185 |
+
<label for="mute_audio" class="switch-text-label">Mute Audio</label>
|
186 |
+
<label class="switch"><input type="checkbox" id="mute_audio" name="mute_audio"><span class="slider"></span></label>
|
187 |
+
</div>
|
188 |
+
<div class="switch-wrapper">
|
189 |
+
<label for="generate_preview" class="switch-text-label">Generate Preview</label>
|
190 |
+
<label class="switch"><input type="checkbox" id="generate_preview" name="generate_preview"><span class="slider"></span></label>
|
191 |
+
</div>
|
192 |
+
<small>Mute source audio / Generate low-res preview first.</small>
|
193 |
+
</div>
|
194 |
+
<div class="form-group">
|
195 |
+
<label for="model_name"><i class="fas fa-brain"></i>AI Model</label>
|
196 |
+
<select id="model_name" name="model_name">
|
197 |
+
{% if available_models %}
|
198 |
+
{% for model in available_models %}
|
199 |
+
<option value="{{ model }}" {% if model == default_model %}selected{% endif %}>{{ model }}</option>
|
200 |
+
{% endfor %}
|
201 |
+
{% else %}
|
202 |
+
<option value="{{ default_model | escape }}" selected>{{ default_model | escape }}</option>
|
203 |
+
{% endif %}
|
204 |
+
</select>
|
205 |
+
<small>Choose the AI editing model.</small>
|
206 |
+
</div>
|
207 |
+
<div class="form-group form-group-span-full">
|
208 |
+
<label for="style_desc"><i class="fas fa-pen-alt"></i>Style Description</label>
|
209 |
+
<textarea id="style_desc" name="style_desc" rows="2" required placeholder="Describe desired style, pacing, mood, effects, etc. Defaults to Instagram Reel style if left blank.">{{ default_style_desc | escape }}</textarea>
|
210 |
+
<small>Describe the desired look and feel (e.g., "fast-paced, energetic, like a travel vlog").</small>
|
211 |
+
</div>
|
212 |
+
<div class="form-group form-group-span-full">
|
213 |
+
<label for="output"><i class="fas fa-file-video"></i>Output File Name</label>
|
214 |
+
<input type="text" id="output" name="output" value="ai_edited_video.mp4" required>
|
215 |
+
<small>Name for the final generated video file.</small>
|
216 |
+
</div>
|
217 |
+
</div>
|
218 |
+
<button type="submit" id="submit-button" class="button"><i class="fas fa-cogs"></i>Generate Video</button>
|
219 |
+
</form>
|
220 |
+
<div id="loading-indicator" style="display: none;"><div class="spinner"></div><span>Submitting request...</span></div>
|
221 |
+
<div id="progress-area" style="display: none;"><div class="progress-spinner"></div><span id="progress-stage">Starting...</span><span id="progress-message">Please wait while we process your request.</span></div>
|
222 |
+
<div id="message-area"></div>
|
223 |
+
</div>
|
224 |
+
</div>
|
225 |
+
<div id="video-modal">
|
226 |
+
<div class="modal-content">
|
227 |
+
<span class="modal-close" id="modal-close-button">×</span>
|
228 |
+
<h2 id="modal-title">Your Generated Video</h2>
|
229 |
+
<div class="modal-video-container">
|
230 |
+
<video id="modal-video" controls controlsList="nodownload">Your browser does not support the video tag.</video>
|
231 |
+
</div>
|
232 |
+
<a id="modal-download-link" href="#" class="modal-download-link" download><i class="fas fa-download"></i>Download Video</a>
|
233 |
+
<div id="modal-hq-confirmation" style="display: none;">
|
234 |
+
<p id="modal-hq-popup-message">Preview generated! Generate the high-quality version now?</p>
|
235 |
+
<div class="popup-buttons">
|
236 |
+
<button id="modal-hq-confirm-yes">Yes, Generate HQ (Plan 1)</button>
|
237 |
+
<button id="modal-hq-confirm-no">No, Thanks</button>
|
238 |
+
</div>
|
239 |
+
</div>
|
240 |
+
</div>
|
241 |
+
</div>
|
242 |
+
|
243 |
+
<script src="./script.js"></script>
|
244 |
+
|
245 |
+
<script>
|
246 |
+
// --- Cached DOM element references (looked up once at script load) ---
// These identifiers are used throughout the inline script below; do not rename.

// Form and its submit control.
const form = document.getElementById('video-form');
const submitButton = document.getElementById('submit-button');

// Status / progress display areas.
const loadingIndicator = document.getElementById('loading-indicator');
const progressArea = document.getElementById('progress-area');
const progressStage = document.getElementById('progress-stage');
const progressMessage = document.getElementById('progress-message');
const messageArea = document.getElementById('message-area');

// All file pickers (source videos, source audios, style sample).
const fileInputs = document.querySelectorAll('input[type="file"]');

// Result modal and its children.
const videoModal = document.getElementById('video-modal');
const modalTitle = document.getElementById('modal-title');
const modalVideo = document.getElementById('modal-video');
const modalDownloadLink = document.getElementById('modal-download-link');
const modalCloseButton = document.getElementById('modal-close-button');

// High-quality re-render confirmation popup shown inside the modal.
const modalHqPopup = document.getElementById('modal-hq-confirmation');
const modalHqPopupMessage = document.getElementById('modal-hq-popup-message');
const modalHqConfirmYes = document.getElementById('modal-hq-confirm-yes');
const modalHqConfirmNo = document.getElementById('modal-hq-confirm-no');

// Misc form controls and the animated background canvas.
const variationsSelect = document.getElementById('variations');
const modelSelect = document.getElementById('model_name');
const fluidCanvasElement = document.getElementById('fluid-canvas');
|
266 |
+
|
267 |
+
document.addEventListener('DOMContentLoaded', () => {
|
268 |
+
let currentRequestId = null;
|
269 |
+
let lastPreviewRequestId = null;
|
270 |
+
let numPlansGeneratedForLastPreview = 1;
|
271 |
+
let progressInterval = null;
|
272 |
+
const POLLING_INTERVAL = 2000;
|
273 |
+
|
274 |
+
/**
 * Render a status banner (success or error) into a message area.
 *
 * Fix: `text` — which often echoes server responses and exception messages
 * (e.g. `error.message` from fetch handlers) — was interpolated straight into
 * `innerHTML`, allowing HTML/script injection. The message body is now added
 * as a text node so it is always displayed literally.
 *
 * @param {string} type - 'success' selects the check icon/styling; any other
 *   value (the callers pass 'error') selects the warning icon/styling.
 * @param {string} text - Plain-text message to display.
 * @param {HTMLElement} [area=messageArea] - Container element to render into.
 */
function showMessage(type, text, area = messageArea) {
    const iconClass = type === 'success' ? 'fa-check-circle' : 'fa-exclamation-triangle';
    const banner = document.createElement('div');
    banner.className = `message ${type}`;
    const icon = document.createElement('i');
    icon.className = `fas ${iconClass}`;
    banner.appendChild(icon);
    // Text node: cannot be parsed as markup, unlike the previous innerHTML string.
    banner.appendChild(document.createTextNode(text));
    area.innerHTML = '';
    area.appendChild(banner);
    area.style.display = 'block';
    area.scrollIntoView({ behavior: 'smooth', block: 'center' });
}
|
280 |
+
/**
 * Close the result modal and reset its media/download state so stale
 * content is not shown the next time it opens.
 */
function hideModal() {
    // Stop playback and drop the video source first.
    modalVideo.pause();
    modalVideo.src = '';
    // Neutralise the download link and hide the HQ confirmation popup.
    modalDownloadLink.href = '#';
    modalDownloadLink.removeAttribute('download');
    modalHqPopup.style.display = 'none';
    // Finally hide the dialog itself.
    videoModal.style.display = 'none';
}
|
284 |
+
// Reset all client-side request/progress state; optionally also reset the
// form back to its server-rendered defaults.
// NOTE: the backtick template literals below contain raw Jinja2 expressions
// ({{ default_style_desc | escape }} etc.) that the server substitutes before
// this script reaches the browser — their exact text is load-bearing.
function clearState(clearForm = true) {
// Hide every status surface: message banner, modal, progress panel, spinner.
messageArea.innerHTML = ''; messageArea.style.display = 'none'; hideModal(); progressArea.style.display = 'none'; loadingIndicator.style.display = 'none';
// Stop any in-flight polling loop.
if (progressInterval) { clearInterval(progressInterval); progressInterval = null; }
// Forget the active and last-preview request IDs and plan count.
currentRequestId = null; lastPreviewRequestId = null; numPlansGeneratedForLastPreview = 1;
// Re-enable the submit button with its default label.
submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video';
if (clearForm) {
form.reset();
// Restore the placeholder text next to each (visually hidden) file input.
fileInputs.forEach(input => {
const displayTargetId = input.dataset.displayTarget; const displaySpan = document.getElementById(displayTargetId);
if (displaySpan) { displaySpan.textContent = input.multiple ? 'No files selected' : 'No file selected'; displaySpan.style.color = 'var(--text-muted)'; }
});
// Decode the HTML-escaped server default via a throwaway <textarea>
// (innerHTML assignment un-escapes entities; .value yields the plain text).
const styleDescTextarea = document.getElementById('style_desc'); const tempEl = document.createElement('textarea'); tempEl.innerHTML = `{{ default_style_desc | escape }}`; const defaultDesc = tempEl.value; if (styleDescTextarea) styleDescTextarea.value = defaultDesc;
// Restore remaining controls to their initial values.
const outputInput = document.getElementById('output'); if (outputInput) outputInput.value = 'ai_edited_video.mp4';
if (variationsSelect) variationsSelect.value = '1';
// Reuse the same throwaway element to decode the default model name.
tempEl.innerHTML = `{{ default_model | escape }}`; const defaultModelValue = tempEl.value; if (modelSelect) modelSelect.value = defaultModelValue;
document.getElementById('mute_audio').checked = false; document.getElementById('generate_preview').checked = false;
}
}
|
302 |
+
/**
 * Show the backend's current stage/message in the progress panel.
 * Stage codes such as "STARTING_HQ" have underscores replaced by spaces and
 * the first letter of each word upper-cased (already-uppercase text is kept).
 */
function updateProgressDisplay(stage, message) {
    const spaced = stage.replace(/_/g, ' ');
    const titled = spaced.replace(/\b\w/g, ch => ch.toUpperCase());
    progressStage.textContent = titled;
    progressMessage.textContent = message || 'Processing...';
    // Swap the small "submitting" spinner for the full progress panel,
    // and make sure the result modal is out of the way.
    progressArea.style.display = 'block';
    loadingIndicator.style.display = 'none';
    hideModal();
}
|
305 |
+
// Keep each file input's companion label in sync with the current selection.
// (The native inputs are visually hidden; the <span> named by
// data-display-target shows the chosen file name(s).)
// Fix: removed the unused `event` parameter from the change handler and
// flattened the nesting with an early return.
fileInputs.forEach(input => {
    input.addEventListener('change', () => {
        const displaySpan = document.getElementById(input.dataset.displayTarget);
        if (!displaySpan) return; // no companion label wired up for this input

        const files = input.files;
        if (files.length === 1) {
            displaySpan.textContent = files[0].name;
            displaySpan.style.color = 'var(--text-secondary)';
        } else if (files.length > 1) {
            displaySpan.textContent = `${files.length} files selected`;
            displaySpan.style.color = 'var(--text-secondary)';
        } else {
            // Selection cleared: restore the muted placeholder text.
            displaySpan.textContent = input.multiple ? 'No files selected' : 'No file selected';
            displaySpan.style.color = 'var(--text-muted)';
        }
    });
});
|
316 |
+
/**
 * Kick off a high-quality re-render based on a previously completed preview.
 * POSTs to /generate-hq/<previewRequestId>, then starts the progress poller
 * on the new request ID returned by the server.
 *
 * @param {string} previewRequestId - Request ID of the finished preview run.
 */
async function triggerHqGeneration(previewRequestId) {
    // Guard: without the preview ID there is nothing to re-render.
    if (!previewRequestId) {
        showMessage('error', 'Cannot generate HQ: Original preview request ID is missing.');
        submitButton.disabled = false;
        submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video';
        return;
    }

    console.log(`Requesting HQ generation based on preview ID: ${previewRequestId}`);

    // Clear previous UI state and switch to the "processing" presentation.
    hideModal();
    messageArea.innerHTML = '';
    messageArea.style.display = 'none';
    updateProgressDisplay('STARTING_HQ', 'Initializing High-Quality generation (using Plan 1)...');
    submitButton.disabled = true;
    submitButton.innerHTML = '<i class="fas fa-hourglass-half"></i> Processing HQ...';

    try {
        const res = await fetch(`/generate-hq/${previewRequestId}`, { method: 'POST' });
        const payload = await res.json();

        if (!res.ok) {
            throw new Error(payload.message || `Server error: ${res.status}`);
        }

        if (payload.status === 'processing_started' && payload.request_id) {
            // Track the new HQ request and forget the preview bookkeeping.
            currentRequestId = payload.request_id;
            lastPreviewRequestId = null;
            numPlansGeneratedForLastPreview = 1;
            console.log("HQ Processing started with Request ID:", currentRequestId);
            updateProgressDisplay('RECEIVED_HQ', payload.message || 'HQ Processing started...');
            // Restart the polling loop against the new request ID.
            if (progressInterval) clearInterval(progressInterval);
            progressInterval = setInterval(pollProgress, POLLING_INTERVAL);
        } else {
            throw new Error(payload.message || 'Received an unexpected response when starting HQ generation.');
        }
    } catch (err) {
        console.error('HQ Generation Start Error:', err);
        progressArea.style.display = 'none';
        showMessage('error', `Failed to start HQ generation: ${err.message}`);
        submitButton.disabled = false;
        submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video';
    }
}
|
329 |
+
// Polls GET /progress/<id> for the active request and drives the UI state
// machine: progress display, completion modal (preview vs HQ), error banners,
// and the submit button label. Relies on closure state: currentRequestId,
// progressInterval, lastPreviewRequestId, numPlansGeneratedForLastPreview,
// plus the DOM handles (progressArea, submitButton, modal*, videoModal).
async function pollProgress() {
    // No request being tracked — stop the interval and bail out.
    if (!currentRequestId) { console.warn("Polling stopped: No active request ID."); if (progressInterval) clearInterval(progressInterval); progressInterval = null; return; }
    try {
        const response = await fetch(`/progress/${currentRequestId}`);
        if (!response.ok) {
            // 404 means the server no longer knows this request id (expired/restarted).
            if (response.status === 404) { console.error(`Polling failed: Request ID ${currentRequestId} not found.`); showMessage('error', `Polling error: Request ID not found or expired. Please start a new request.`); }
            // Any other HTTP error: try to surface the server's message, else a generic one.
            else { console.error(`Polling failed with status: ${response.status}`); let errorMsg = `Polling error: Server returned status ${response.status}. Please try again later.`; try { const errorData = await response.json(); errorMsg = errorData.message || errorMsg; } catch (e) { } showMessage('error', errorMsg); }
            // On any polling failure: stop polling and restore the form to its idle state.
            if (progressInterval) clearInterval(progressInterval); progressInterval = null; progressArea.style.display = 'none'; submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; return;
        }
        const data = await response.json(); updateProgressDisplay(data.stage, data.message);
        if (data.stage === 'COMPLETED') {
            // Terminal success: stop polling, hide progress, show result message.
            clearInterval(progressInterval); progressInterval = null; progressArea.style.display = 'none'; const resultData = data.result || {}; const isPreview = resultData.is_preview; const successMsg = data.message || `Video ${isPreview ? 'preview ' : ''}generated successfully!`; showMessage('success', successMsg, messageArea);
            if (resultData.video_url) {
                // Populate the playback/download modal with the produced video.
                modalTitle.textContent = `Your Generated Video ${isPreview ? '(Preview)' : '(HQ)'}`; modalVideo.src = resultData.video_url; modalDownloadLink.href = resultData.video_url; modalDownloadLink.download = resultData.output_filename || `ai_edited_video${isPreview ? '_preview' : '_hq'}.mp4`; modalVideo.load();
                if (isPreview && resultData.request_id) {
                    // Remember the preview's request id so the user can opt into HQ generation.
                    lastPreviewRequestId = resultData.request_id; numPlansGeneratedForLastPreview = resultData.num_plans_generated || 1;
                    // Tailor the HQ-confirmation prompt when multiple edit plans were generated.
                    if (numPlansGeneratedForLastPreview > 1) { modalHqPopupMessage.textContent = `Preview generated using Plan 1/${numPlansGeneratedForLastPreview}. Generate the high-quality version using Plan 1?`; modalHqConfirmYes.textContent = `Yes, Generate HQ (Plan 1)`; }
                    else { modalHqPopupMessage.textContent = `Preview generated! Generate the high-quality version now?`; modalHqConfirmYes.textContent = `Yes, Generate HQ`; }
                    // Keep the submit button locked while the HQ decision is pending.
                    modalHqPopup.style.display = 'block'; submitButton.disabled = true; submitButton.innerHTML = '<i class="fas fa-check"></i> Preview Ready';
                } else { modalHqPopup.style.display = 'none'; submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; }
                videoModal.style.display = 'flex';
            } else { showMessage('error', `Processing completed, but no video URL was returned.${isPreview ? ' (Preview)' : ''}`); submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; }
        } else if (data.stage === 'FAILED') {
            // Terminal failure reported by the server.
            clearInterval(progressInterval); progressInterval = null; progressArea.style.display = 'none'; showMessage('error', data.error || 'An unknown error occurred during processing.'); submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video';
        } else if (data.stage === 'UNKNOWN') {
            // Server lost track of the request (e.g. state expired) — treat as terminal.
            clearInterval(progressInterval); progressInterval = null; progressArea.style.display = 'none'; showMessage('error', data.message || 'Request status is unknown or has expired.'); submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video';
        }
    } catch (error) {
        // Network-level failure (fetch threw) — stop polling and reset the form.
        console.error('Polling Fetch Error:', error); if (progressInterval) clearInterval(progressInterval); progressInterval = null; progressArea.style.display = 'none'; showMessage('error', `Polling connection error: ${error.message}. Please check connection.`); submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video';
    }
}
|
360 |
+
// Handles the main generation form: validates inputs client-side, POSTs the
// multipart form to /generate, and on success starts the progress-polling loop.
form.addEventListener('submit', async (event) => {
    // Prevent the default page reload and clear prior messages/progress.
    event.preventDefault(); clearState(false);
    const videosInput = document.getElementById('videos'); const durationInput = document.getElementById('duration'); const styleDescInput = document.getElementById('style_desc'); const outputInput = document.getElementById('output');
    // Client-side validation: all four fields are required before submitting.
    if (!videosInput.files || videosInput.files.length === 0) { showMessage('error', 'Please select at least one source video file.'); return; }
    if (!durationInput.value || durationInput.value <= 0) { showMessage('error', 'Please enter a valid positive target duration.'); return; }
    if (!styleDescInput.value.trim()) { showMessage('error', 'Please provide a style description.'); return; }
    if (!outputInput.value.trim()) { showMessage('error', 'Please provide an output file name.'); return; }
    // Lock the UI while the upload/submission is in flight.
    loadingIndicator.style.display = 'block'; submitButton.disabled = true; submitButton.innerHTML = '<i class="fas fa-spinner fa-spin"></i> Submitting...'; const formData = new FormData(form);
    try {
        const response = await fetch('/generate', { method: 'POST', body: formData, }); const result = await response.json();
        if (!response.ok) { throw new Error(result.message || `Server error: ${response.status}`); }
        if (result.status === 'processing_started' && result.request_id) {
            // Server accepted the job: reset preview state and begin polling for progress.
            currentRequestId = result.request_id; lastPreviewRequestId = null; numPlansGeneratedForLastPreview = 1; console.log("Processing started with Request ID:", currentRequestId); loadingIndicator.style.display = 'none'; updateProgressDisplay('RECEIVED', result.message || 'Processing started...'); if (progressInterval) clearInterval(progressInterval); progressInterval = setInterval(pollProgress, POLLING_INTERVAL); submitButton.innerHTML = '<i class="fas fa-hourglass-half"></i> Processing...';
        } else { throw new Error(result.message || 'Received an unexpected response from the server.'); }
    } catch (error) {
        // Submission failed (validation, server, or network) — restore the idle form.
        console.error('Form Submission Error:', error); showMessage('error', `Submission Failed: ${error.message || 'Could not connect.'}`); submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; loadingIndicator.style.display = 'none';
    }
});
|
378 |
+
modalCloseButton.addEventListener('click', () => { hideModal(); if (lastPreviewRequestId) { submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; } });
|
379 |
+
videoModal.addEventListener('click', (event) => { if (event.target === videoModal) { hideModal(); if (lastPreviewRequestId) { submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; } } });
|
380 |
+
modalHqConfirmYes.addEventListener('click', () => { triggerHqGeneration(lastPreviewRequestId); });
|
381 |
+
modalHqConfirmNo.addEventListener('click', () => { hideModal(); submitButton.disabled = false; submitButton.innerHTML = '<i class="fas fa-cogs"></i> Generate Video'; console.log("User declined HQ generation."); lastPreviewRequestId = null; });
|
382 |
+
});
|
383 |
+
</script>
|
384 |
+
</body>
|
385 |
+
</html>
|
templates/script.js
ADDED
@@ -0,0 +1,1618 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/*
|
2 |
+
MIT License
|
3 |
+
|
4 |
+
Copyright (c) 2017 Pavel Dobryakov
|
5 |
+
|
6 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
7 |
+
of this software and associated documentation files (the "Software"), to deal
|
8 |
+
in the Software without restriction, including without limitation the rights
|
9 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 |
+
copies of the Software, and to permit persons to whom the Software is
|
11 |
+
furnished to do so, subject to the following conditions:
|
12 |
+
|
13 |
+
The above copyright notice and this permission notice shall be included in all
|
14 |
+
copies or substantial portions of the Software.
|
15 |
+
|
16 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
17 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
18 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
19 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
20 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
21 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
22 |
+
SOFTWARE.
|
23 |
+
*/
|
24 |
+
|
25 |
+
'use strict';
|
26 |
+
|
27 |
+
// Simulation section
|
28 |
+
|
29 |
+
const canvas = document.getElementsByTagName('canvas')[0];
|
30 |
+
resizeCanvas();
|
31 |
+
|
32 |
+
let config = {
|
33 |
+
SIM_RESOLUTION: 128,
|
34 |
+
DYE_RESOLUTION: 1024,
|
35 |
+
CAPTURE_RESOLUTION: 512,
|
36 |
+
DENSITY_DISSIPATION: 1,
|
37 |
+
VELOCITY_DISSIPATION: 0.2,
|
38 |
+
PRESSURE: 0.8,
|
39 |
+
PRESSURE_ITERATIONS: 20,
|
40 |
+
CURL: 30,
|
41 |
+
SPLAT_RADIUS: 0.25,
|
42 |
+
SPLAT_FORCE: 6000,
|
43 |
+
SHADING: true,
|
44 |
+
COLORFUL: true,
|
45 |
+
COLOR_UPDATE_SPEED: 10,
|
46 |
+
PAUSED: false,
|
47 |
+
BACK_COLOR: { r: 0, g: 0, b: 0 },
|
48 |
+
TRANSPARENT: false,
|
49 |
+
BLOOM: true,
|
50 |
+
BLOOM_ITERATIONS: 8,
|
51 |
+
BLOOM_RESOLUTION: 256,
|
52 |
+
BLOOM_INTENSITY: 0.8,
|
53 |
+
BLOOM_THRESHOLD: 0.6,
|
54 |
+
BLOOM_SOFT_KNEE: 0.7,
|
55 |
+
SUNRAYS: true,
|
56 |
+
SUNRAYS_RESOLUTION: 196,
|
57 |
+
SUNRAYS_WEIGHT: 1.0,
|
58 |
+
}
|
59 |
+
|
60 |
+
// Constructor for one pointer (mouse/touch) state record used by the
// simulation: identity, current/previous texture-space position, per-frame
// movement deltas, button state, and the splat color assigned to it.
function pointerPrototype () {
    const initialState = {
        id: -1,
        texcoordX: 0,
        texcoordY: 0,
        prevTexcoordX: 0,
        prevTexcoordY: 0,
        deltaX: 0,
        deltaY: 0,
        down: false,
        moved: false,
        // Fresh array per instance so pointers never share a color buffer.
        color: [30, 0, 300],
    };
    Object.assign(this, initialState);
}
|
72 |
+
|
73 |
+
let pointers = [];
|
74 |
+
let splatStack = [];
|
75 |
+
pointers.push(new pointerPrototype());
|
76 |
+
|
77 |
+
const { gl, ext } = getWebGLContext(canvas);
|
78 |
+
|
79 |
+
if (isMobile()) {
|
80 |
+
config.DYE_RESOLUTION = 512;
|
81 |
+
}
|
82 |
+
if (!ext.supportLinearFiltering) {
|
83 |
+
config.DYE_RESOLUTION = 512;
|
84 |
+
config.SHADING = false;
|
85 |
+
config.BLOOM = false;
|
86 |
+
config.SUNRAYS = false;
|
87 |
+
}
|
88 |
+
|
89 |
+
startGUI();
|
90 |
+
|
91 |
+
// Acquires a WebGL2 context (falling back to WebGL1 / experimental-webgl) and
// probes the half-float texture formats and extensions the simulation needs.
// Returns { gl, ext } where ext carries the chosen renderable formats,
// the half-float texture type constant, and linear-filtering support.
function getWebGLContext (canvas) {
    const params = { alpha: true, depth: false, stencil: false, antialias: false, preserveDrawingBuffer: false };

    let gl = canvas.getContext('webgl2', params);
    const isWebGL2 = !!gl;
    if (!isWebGL2)
        gl = canvas.getContext('webgl', params) || canvas.getContext('experimental-webgl', params);

    let halfFloat;
    let supportLinearFiltering;
    if (isWebGL2) {
        // WebGL2: rendering to float/half-float targets requires this extension.
        gl.getExtension('EXT_color_buffer_float');
        supportLinearFiltering = gl.getExtension('OES_texture_float_linear');
    } else {
        // WebGL1: half-float textures and their linear filtering are extensions.
        halfFloat = gl.getExtension('OES_texture_half_float');
        supportLinearFiltering = gl.getExtension('OES_texture_half_float_linear');
    }

    gl.clearColor(0.0, 0.0, 0.0, 1.0);

    // NOTE(review): on WebGL1, halfFloat may be null if OES_texture_half_float
    // is unsupported, which would make the next line throw — confirm target
    // browsers always expose it before hardening.
    const halfFloatTexType = isWebGL2 ? gl.HALF_FLOAT : halfFloat.HALF_FLOAT_OES;
    let formatRGBA;
    let formatRG;
    let formatR;

    if (isWebGL2)
    {
        // Prefer the narrowest sized formats; getSupportedFormat falls back
        // to wider ones (R16F -> RG16F -> RGBA16F) if a format isn't renderable.
        formatRGBA = getSupportedFormat(gl, gl.RGBA16F, gl.RGBA, halfFloatTexType);
        formatRG = getSupportedFormat(gl, gl.RG16F, gl.RG, halfFloatTexType);
        formatR = getSupportedFormat(gl, gl.R16F, gl.RED, halfFloatTexType);
    }
    else
    {
        // WebGL1 has no sized single/dual-channel formats; everything is RGBA.
        formatRGBA = getSupportedFormat(gl, gl.RGBA, gl.RGBA, halfFloatTexType);
        formatRG = getSupportedFormat(gl, gl.RGBA, gl.RGBA, halfFloatTexType);
        formatR = getSupportedFormat(gl, gl.RGBA, gl.RGBA, halfFloatTexType);
    }

    return {
        gl,
        ext: {
            formatRGBA,
            formatRG,
            formatR,
            halfFloatTexType,
            supportLinearFiltering
        }
    };
}
|
140 |
+
|
141 |
+
// Returns { internalFormat, format } if the requested format can be rendered
// to, otherwise recursively widens the format (R16F -> RG16F -> RGBA16F) and
// retries. Returns null when no candidate is renderable.
function getSupportedFormat (gl, internalFormat, format, type)
{
    // Early-return the happy path; fall through to the widening ladder otherwise.
    if (supportRenderTextureFormat(gl, internalFormat, format, type)) {
        return { internalFormat, format };
    }

    switch (internalFormat)
    {
        case gl.R16F:
            return getSupportedFormat(gl, gl.RG16F, gl.RG, type);
        case gl.RG16F:
            return getSupportedFormat(gl, gl.RGBA16F, gl.RGBA, type);
        default:
            // RGBA16F (or an unknown format) failed — nothing wider to try.
            return null;
    }
}
|
161 |
+
|
162 |
+
// Probes whether a texture of the given format can serve as a render target:
// builds a tiny 4x4 texture, attaches it to a framebuffer, and checks for
// FRAMEBUFFER_COMPLETE.
function supportRenderTextureFormat (gl, internalFormat, format, type) {
    const texture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, texture);
    const texParams = [
        [gl.TEXTURE_MIN_FILTER, gl.NEAREST],
        [gl.TEXTURE_MAG_FILTER, gl.NEAREST],
        [gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE],
        [gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE],
    ];
    for (const [pname, value] of texParams) {
        gl.texParameteri(gl.TEXTURE_2D, pname, value);
    }
    gl.texImage2D(gl.TEXTURE_2D, 0, internalFormat, 4, 4, 0, format, type, null);

    const fbo = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, fbo);
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);

    return gl.checkFramebufferStatus(gl.FRAMEBUFFER) === gl.FRAMEBUFFER_COMPLETE;
}
|
178 |
+
|
179 |
+
// Intentionally a no-op. NOTE(review): the upstream fluid-simulation demo
// builds its dat.GUI control panel here; this copy ships without one —
// confirm the removal is deliberate before re-adding controls.
function startGUI () {
}
|
181 |
+
|
182 |
+
// Heuristic mobile detection: true when the user-agent string contains
// "Mobi" or "Android" (case-insensitive).
function isMobile () {
    const mobilePattern = /Mobi|Android/i;
    return mobilePattern.test(navigator.userAgent);
}
|
185 |
+
|
186 |
+
// Renders the current simulation state into an off-screen FBO at
// CAPTURE_RESOLUTION, converts it to an 8-bit image, and triggers a
// download of 'fluid.png'.
function captureScreenshot () {
    let res = getResolution(config.CAPTURE_RESOLUTION);
    let target = createFBO(res.width, res.height, ext.formatRGBA.internalFormat, ext.formatRGBA.format, ext.halfFloatTexType, gl.NEAREST);
    render(target);

    // Read back float pixels, then clamp to 8-bit and flip vertically
    // (readPixels returns bottom-up rows).
    let texture = framebufferToTexture(target);
    texture = normalizeTexture(texture, target.width, target.height);

    let captureCanvas = textureToCanvas(texture, target.width, target.height);
    let datauri = captureCanvas.toDataURL();
    downloadURI('fluid.png', datauri);
    // Fix: the original called URL.revokeObjectURL(datauri) here, but
    // toDataURL() returns a 'data:' URI, not a blob object URL —
    // revokeObjectURL does not apply to data URIs, so the call was a no-op
    // at best and has been removed.
}
|
199 |
+
|
200 |
+
// Reads back the contents of an FBO as a Float32Array of RGBA values
// (4 floats per pixel, bottom-up row order as returned by readPixels).
function framebufferToTexture (target) {
    gl.bindFramebuffer(gl.FRAMEBUFFER, target.fbo);
    const pixelCount = target.width * target.height;
    const pixels = new Float32Array(pixelCount * 4);
    gl.readPixels(0, 0, target.width, target.height, gl.RGBA, gl.FLOAT, pixels);
    return pixels;
}
|
207 |
+
|
208 |
+
// Converts a bottom-up float RGBA buffer into a top-down 8-bit RGBA buffer:
// each channel is clamped to [0,1] and scaled to 0..255, and rows are
// written in reverse order so the image displays right-side up on a canvas.
function normalizeTexture (texture, width, height) {
    const result = new Uint8Array(texture.length);
    let src = 0;
    for (let row = height - 1; row >= 0; row--) {
        const rowStart = row * width * 4;
        for (let col = 0; col < width; col++) {
            const dst = rowStart + col * 4;
            for (let ch = 0; ch < 4; ch++) {
                result[dst + ch] = clamp01(texture[src + ch]) * 255;
            }
            src += 4;
        }
    }
    return result;
}
|
223 |
+
|
224 |
+
// Clamps a number to the [0, 1] range. NaN passes through unchanged,
// matching Math.min/Math.max semantics of the original implementation.
function clamp01 (input) {
    if (input < 0) return 0;
    if (input > 1) return 1;
    return input;
}
|
227 |
+
|
228 |
+
// Blits an 8-bit RGBA buffer into a freshly created off-screen canvas and
// returns the canvas (used for toDataURL() screenshot export).
function textureToCanvas (texture, width, height) {
    const captureCanvas = document.createElement('canvas');
    captureCanvas.width = width;
    captureCanvas.height = height;

    const ctx = captureCanvas.getContext('2d');
    const imageData = ctx.createImageData(width, height);
    imageData.data.set(texture);
    ctx.putImageData(imageData, 0, 0);

    return captureCanvas;
}
|
240 |
+
|
241 |
+
// Triggers a browser download of `uri` under `filename` by clicking a
// temporary anchor element appended to (and removed from) the document body.
function downloadURI (filename, uri) {
    const link = document.createElement('a');
    Object.assign(link, { download: filename, href: uri });
    document.body.appendChild(link);
    link.click();
    document.body.removeChild(link);
}
|
249 |
+
|
250 |
+
// A shader "material": one shared vertex shader plus a fragment-shader source
// that can be recompiled with different #define keyword sets. Compiled/linked
// program variants are cached keyed by a hash of the keyword list.
class Material {
    constructor (vertexShader, fragmentShaderSource) {
        this.vertexShader = vertexShader;
        this.fragmentShaderSource = fragmentShaderSource;
        this.programs = [];        // cache: keyword-hash -> linked WebGLProgram
        this.activeProgram = null; // currently selected program variant
        this.uniforms = [];        // uniform name -> location for activeProgram
    }

    // Selects (compiling + linking on first use) the program variant matching
    // the given #define keywords, and refreshes the cached uniform locations.
    setKeywords (keywords) {
        // NOTE(review): summing per-keyword hashCode values means different
        // keyword sets could collide — presumably acceptable for the small,
        // fixed keyword vocabulary used here.
        let hash = 0;
        for (let i = 0; i < keywords.length; i++)
            hash += hashCode(keywords[i]);

        let program = this.programs[hash];
        if (program == null)
        {
            let fragmentShader = compileShader(gl.FRAGMENT_SHADER, this.fragmentShaderSource, keywords);
            program = createProgram(this.vertexShader, fragmentShader);
            this.programs[hash] = program;
        }

        // Already active — skip the uniform re-query.
        if (program == this.activeProgram) return;

        this.uniforms = getUniforms(program);
        this.activeProgram = program;
    }

    // Makes the active program variant current on the GL context.
    bind () {
        gl.useProgram(this.activeProgram);
    }
}
|
282 |
+
|
283 |
+
// Thin wrapper around a fixed (non-keyword) shader program: links the two
// stages once and caches the uniform locations.
class Program {
    constructor (vertexShader, fragmentShader) {
        this.program = createProgram(vertexShader, fragmentShader);
        this.uniforms = getUniforms(this.program);
    }

    // Makes this program current on the GL context.
    bind () {
        gl.useProgram(this.program);
    }
}
|
294 |
+
|
295 |
+
// Links a vertex and fragment shader into a WebGL program. Link failures are
// logged via console.trace (not thrown), matching the demo's lenient style.
function createProgram (vertexShader, fragmentShader) {
    const program = gl.createProgram();
    for (const shader of [vertexShader, fragmentShader]) {
        gl.attachShader(program, shader);
    }
    gl.linkProgram(program);

    const linkedOk = gl.getProgramParameter(program, gl.LINK_STATUS);
    if (!linkedOk) {
        console.trace(gl.getProgramInfoLog(program));
    }

    return program;
}
|
306 |
+
|
307 |
+
/**
 * Collects the uniform locations of a linked program.
 * @param {WebGLProgram} program - a successfully linked GL program
 * @returns {Object<string, WebGLUniformLocation>} map of uniform name -> location
 */
function getUniforms (program) {
    // Idiom fix: the original used an Array ([]) as a string-keyed dictionary.
    // A plain object expresses the intent; callers only ever do uniforms[name],
    // so this is fully backward-compatible.
    let uniforms = {};
    let uniformCount = gl.getProgramParameter(program, gl.ACTIVE_UNIFORMS);
    for (let i = 0; i < uniformCount; i++) {
        let uniformName = gl.getActiveUniform(program, i).name;
        uniforms[uniformName] = gl.getUniformLocation(program, uniformName);
    }
    return uniforms;
}
|
316 |
+
|
317 |
+
/**
 * Compiles a shader of the given type, optionally prefixing '#define' lines.
 * Compile errors are logged via console.trace, not thrown (demo convention).
 * @param {number} type - gl.VERTEX_SHADER or gl.FRAGMENT_SHADER
 * @param {string} source - GLSL source text
 * @param {?string[]} keywords - optional #define keywords to prepend
 * @returns {WebGLShader} the (possibly failed) shader object
 */
function compileShader (type, source, keywords) {
    source = addKeywords(source, keywords);

    const shader = gl.createShader(type);
    gl.shaderSource(shader, source);
    gl.compileShader(shader);

    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS))
        console.trace(gl.getShaderInfoLog(shader));

    return shader;
}
// Fix: removed the stray ';' that terminated the original function
// declaration (function declarations take no trailing semicolon).
|
329 |
+
|
330 |
+
// Prepends one '#define <keyword>' line per keyword to a GLSL source string.
// When no keyword list is supplied (null/undefined), the source is returned
// unchanged.
function addKeywords (source, keywords) {
    if (keywords == null) return source;
    const defineBlock = keywords
        .map((keyword) => `#define ${keyword}\n`)
        .join('');
    return defineBlock + source;
}
|
338 |
+
|
339 |
+
const baseVertexShader = compileShader(gl.VERTEX_SHADER, `
|
340 |
+
precision highp float;
|
341 |
+
|
342 |
+
attribute vec2 aPosition;
|
343 |
+
varying vec2 vUv;
|
344 |
+
varying vec2 vL;
|
345 |
+
varying vec2 vR;
|
346 |
+
varying vec2 vT;
|
347 |
+
varying vec2 vB;
|
348 |
+
uniform vec2 texelSize;
|
349 |
+
|
350 |
+
void main () {
|
351 |
+
vUv = aPosition * 0.5 + 0.5;
|
352 |
+
vL = vUv - vec2(texelSize.x, 0.0);
|
353 |
+
vR = vUv + vec2(texelSize.x, 0.0);
|
354 |
+
vT = vUv + vec2(0.0, texelSize.y);
|
355 |
+
vB = vUv - vec2(0.0, texelSize.y);
|
356 |
+
gl_Position = vec4(aPosition, 0.0, 1.0);
|
357 |
+
}
|
358 |
+
`);
|
359 |
+
|
360 |
+
const blurVertexShader = compileShader(gl.VERTEX_SHADER, `
|
361 |
+
precision highp float;
|
362 |
+
|
363 |
+
attribute vec2 aPosition;
|
364 |
+
varying vec2 vUv;
|
365 |
+
varying vec2 vL;
|
366 |
+
varying vec2 vR;
|
367 |
+
uniform vec2 texelSize;
|
368 |
+
|
369 |
+
void main () {
|
370 |
+
vUv = aPosition * 0.5 + 0.5;
|
371 |
+
float offset = 1.33333333;
|
372 |
+
vL = vUv - texelSize * offset;
|
373 |
+
vR = vUv + texelSize * offset;
|
374 |
+
gl_Position = vec4(aPosition, 0.0, 1.0);
|
375 |
+
}
|
376 |
+
`);
|
377 |
+
|
378 |
+
const blurShader = compileShader(gl.FRAGMENT_SHADER, `
|
379 |
+
precision mediump float;
|
380 |
+
precision mediump sampler2D;
|
381 |
+
|
382 |
+
varying vec2 vUv;
|
383 |
+
varying vec2 vL;
|
384 |
+
varying vec2 vR;
|
385 |
+
uniform sampler2D uTexture;
|
386 |
+
|
387 |
+
void main () {
|
388 |
+
vec4 sum = texture2D(uTexture, vUv) * 0.29411764;
|
389 |
+
sum += texture2D(uTexture, vL) * 0.35294117;
|
390 |
+
sum += texture2D(uTexture, vR) * 0.35294117;
|
391 |
+
gl_FragColor = sum;
|
392 |
+
}
|
393 |
+
`);
|
394 |
+
|
395 |
+
const copyShader = compileShader(gl.FRAGMENT_SHADER, `
|
396 |
+
precision mediump float;
|
397 |
+
precision mediump sampler2D;
|
398 |
+
|
399 |
+
varying highp vec2 vUv;
|
400 |
+
uniform sampler2D uTexture;
|
401 |
+
|
402 |
+
void main () {
|
403 |
+
gl_FragColor = texture2D(uTexture, vUv);
|
404 |
+
}
|
405 |
+
`);
|
406 |
+
|
407 |
+
const clearShader = compileShader(gl.FRAGMENT_SHADER, `
|
408 |
+
precision mediump float;
|
409 |
+
precision mediump sampler2D;
|
410 |
+
|
411 |
+
varying highp vec2 vUv;
|
412 |
+
uniform sampler2D uTexture;
|
413 |
+
uniform float value;
|
414 |
+
|
415 |
+
void main () {
|
416 |
+
gl_FragColor = value * texture2D(uTexture, vUv);
|
417 |
+
}
|
418 |
+
`);
|
419 |
+
|
420 |
+
const colorShader = compileShader(gl.FRAGMENT_SHADER, `
|
421 |
+
precision mediump float;
|
422 |
+
|
423 |
+
uniform vec4 color;
|
424 |
+
|
425 |
+
void main () {
|
426 |
+
gl_FragColor = color;
|
427 |
+
}
|
428 |
+
`);
|
429 |
+
|
430 |
+
const checkerboardShader = compileShader(gl.FRAGMENT_SHADER, `
|
431 |
+
precision highp float;
|
432 |
+
precision highp sampler2D;
|
433 |
+
|
434 |
+
varying vec2 vUv;
|
435 |
+
uniform sampler2D uTexture;
|
436 |
+
uniform float aspectRatio;
|
437 |
+
|
438 |
+
#define SCALE 25.0
|
439 |
+
|
440 |
+
void main () {
|
441 |
+
vec2 uv = floor(vUv * SCALE * vec2(aspectRatio, 1.0));
|
442 |
+
float v = mod(uv.x + uv.y, 2.0);
|
443 |
+
v = v * 0.1 + 0.8;
|
444 |
+
gl_FragColor = vec4(vec3(v), 1.0);
|
445 |
+
}
|
446 |
+
`);
|
447 |
+
|
448 |
+
const displayShaderSource = `
|
449 |
+
precision highp float;
|
450 |
+
precision highp sampler2D;
|
451 |
+
|
452 |
+
varying vec2 vUv;
|
453 |
+
varying vec2 vL;
|
454 |
+
varying vec2 vR;
|
455 |
+
varying vec2 vT;
|
456 |
+
varying vec2 vB;
|
457 |
+
uniform sampler2D uTexture;
|
458 |
+
uniform sampler2D uBloom;
|
459 |
+
uniform sampler2D uSunrays;
|
460 |
+
uniform sampler2D uDithering;
|
461 |
+
uniform vec2 ditherScale;
|
462 |
+
uniform vec2 texelSize;
|
463 |
+
|
464 |
+
vec3 linearToGamma (vec3 color) {
|
465 |
+
color = max(color, vec3(0));
|
466 |
+
return max(1.055 * pow(color, vec3(0.416666667)) - 0.055, vec3(0));
|
467 |
+
}
|
468 |
+
|
469 |
+
void main () {
|
470 |
+
vec3 c = texture2D(uTexture, vUv).rgb;
|
471 |
+
|
472 |
+
#ifdef SHADING
|
473 |
+
vec3 lc = texture2D(uTexture, vL).rgb;
|
474 |
+
vec3 rc = texture2D(uTexture, vR).rgb;
|
475 |
+
vec3 tc = texture2D(uTexture, vT).rgb;
|
476 |
+
vec3 bc = texture2D(uTexture, vB).rgb;
|
477 |
+
|
478 |
+
float dx = length(rc) - length(lc);
|
479 |
+
float dy = length(tc) - length(bc);
|
480 |
+
|
481 |
+
vec3 n = normalize(vec3(dx, dy, length(texelSize)));
|
482 |
+
vec3 l = vec3(0.0, 0.0, 1.0);
|
483 |
+
|
484 |
+
float diffuse = clamp(dot(n, l) + 0.7, 0.7, 1.0);
|
485 |
+
c *= diffuse;
|
486 |
+
#endif
|
487 |
+
|
488 |
+
#ifdef BLOOM
|
489 |
+
vec3 bloom = texture2D(uBloom, vUv).rgb;
|
490 |
+
#endif
|
491 |
+
|
492 |
+
#ifdef SUNRAYS
|
493 |
+
float sunrays = texture2D(uSunrays, vUv).r;
|
494 |
+
c *= sunrays;
|
495 |
+
#ifdef BLOOM
|
496 |
+
bloom *= sunrays;
|
497 |
+
#endif
|
498 |
+
#endif
|
499 |
+
|
500 |
+
#ifdef BLOOM
|
501 |
+
float noise = texture2D(uDithering, vUv * ditherScale).r;
|
502 |
+
noise = noise * 2.0 - 1.0;
|
503 |
+
bloom += noise / 255.0;
|
504 |
+
bloom = linearToGamma(bloom);
|
505 |
+
c += bloom;
|
506 |
+
#endif
|
507 |
+
|
508 |
+
float a = max(c.r, max(c.g, c.b));
|
509 |
+
gl_FragColor = vec4(c, a);
|
510 |
+
}
|
511 |
+
`;
|
512 |
+
|
513 |
+
// --- Bloom pipeline fragment shaders ---------------------------------------

// Bloom pass 1 (prefilter): keep only pixels brighter than `threshold`.
// `curve` is set in applyBloom() to (threshold - knee, knee * 2, 0.25 / knee),
// giving a soft quadratic knee around the cutoff instead of a hard edge.
const bloomPrefilterShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying vec2 vUv;
    uniform sampler2D uTexture;
    uniform vec3 curve;
    uniform float threshold;

    void main () {
        vec3 c = texture2D(uTexture, vUv).rgb;
        float br = max(c.r, max(c.g, c.b));
        float rq = clamp(br - curve.x, 0.0, curve.y);
        rq = curve.z * rq * rq;
        c *= max(rq, br - threshold) / max(br, 0.0001);
        gl_FragColor = vec4(c, 0.0);
    }
`);

// Bloom pass 2 (blur): 4-tap box filter. vL/vR/vT/vB are offset UVs
// (presumably produced by the shared vertex shader defined earlier in the
// file — confirm against baseVertexShader). Used by applyBloom() both to
// downsample and, with additive blending, to upsample the mip chain.
const bloomBlurShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying vec2 vL;
    varying vec2 vR;
    varying vec2 vT;
    varying vec2 vB;
    uniform sampler2D uTexture;

    void main () {
        vec4 sum = vec4(0.0);
        sum += texture2D(uTexture, vL);
        sum += texture2D(uTexture, vR);
        sum += texture2D(uTexture, vT);
        sum += texture2D(uTexture, vB);
        sum *= 0.25;
        gl_FragColor = sum;
    }
`);

// Bloom pass 3 (final): same 4-tap blur, scaled by the `intensity` uniform
// (config.BLOOM_INTENSITY when bound in applyBloom()).
const bloomFinalShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying vec2 vL;
    varying vec2 vR;
    varying vec2 vT;
    varying vec2 vB;
    uniform sampler2D uTexture;
    uniform float intensity;

    void main () {
        vec4 sum = vec4(0.0);
        sum += texture2D(uTexture, vL);
        sum += texture2D(uTexture, vR);
        sum += texture2D(uTexture, vT);
        sum += texture2D(uTexture, vB);
        sum *= 0.25;
        gl_FragColor = sum * intensity;
    }
`);
|
574 |
+
|
575 |
+
// Sunrays pass 1 (mask): stores an occlusion factor in alpha.
// Bright dye (br * 20 capped at 0.8) lowers alpha, so dense fluid blocks
// light while empty areas pass it (alpha in [0.2, 1.0]).
const sunraysMaskShader = compileShader(gl.FRAGMENT_SHADER, `
    precision highp float;
    precision highp sampler2D;

    varying vec2 vUv;
    uniform sampler2D uTexture;

    void main () {
        vec4 c = texture2D(uTexture, vUv);
        float br = max(c.r, max(c.g, c.b));
        c.a = 1.0 - min(max(br * 20.0, 0.0), 0.8);
        gl_FragColor = c;
    }
`);

// Sunrays pass 2: radial "god rays" accumulation. Marches ITERATIONS samples
// of the mask's alpha from each pixel toward the screen center (0.5, 0.5),
// attenuating each step by Decay, weighted by the `weight` uniform
// (config.SUNRAYS_WEIGHT). Result is written into the red channel only.
const sunraysShader = compileShader(gl.FRAGMENT_SHADER, `
    precision highp float;
    precision highp sampler2D;

    varying vec2 vUv;
    uniform sampler2D uTexture;
    uniform float weight;

    #define ITERATIONS 16

    void main () {
        float Density = 0.3;
        float Decay = 0.95;
        float Exposure = 0.7;

        vec2 coord = vUv;
        vec2 dir = vUv - 0.5;

        dir *= 1.0 / float(ITERATIONS) * Density;
        float illuminationDecay = 1.0;

        float color = texture2D(uTexture, vUv).a;

        for (int i = 0; i < ITERATIONS; i++)
        {
            coord -= dir;
            float col = texture2D(uTexture, coord).a;
            color += col * illuminationDecay * weight;
            illuminationDecay *= Decay;
        }

        gl_FragColor = vec4(color * Exposure, 0.0, 0.0, 1.0);
    }
`);
|
624 |
+
|
625 |
+
// Splat shader: adds an aspect-corrected gaussian blob of `color` centered at
// `point` (normalized UV) on top of the existing uTarget contents. Used for
// both velocity impulses (color = (dx, dy, 0)) and dye injection.
const splatShader = compileShader(gl.FRAGMENT_SHADER, `
    precision highp float;
    precision highp sampler2D;

    varying vec2 vUv;
    uniform sampler2D uTarget;
    uniform float aspectRatio;
    uniform vec3 color;
    uniform vec2 point;
    uniform float radius;

    void main () {
        vec2 p = vUv - point.xy;
        p.x *= aspectRatio;
        vec3 splat = exp(-dot(p, p) / radius) * color;
        vec3 base = texture2D(uTarget, vUv).xyz;
        gl_FragColor = vec4(base + splat, 1.0);
    }
`);

// Semi-Lagrangian advection: backtraces each texel along the velocity field
// by dt and samples uSource there, then applies exponential decay
// (1 + dissipation * dt). When the hardware cannot linearly filter float
// textures (ext.supportLinearFiltering false), the MANUAL_FILTERING define
// enables the bilerp() software bilinear sampler instead.
const advectionShader = compileShader(gl.FRAGMENT_SHADER, `
    precision highp float;
    precision highp sampler2D;

    varying vec2 vUv;
    uniform sampler2D uVelocity;
    uniform sampler2D uSource;
    uniform vec2 texelSize;
    uniform vec2 dyeTexelSize;
    uniform float dt;
    uniform float dissipation;

    vec4 bilerp (sampler2D sam, vec2 uv, vec2 tsize) {
        vec2 st = uv / tsize - 0.5;

        vec2 iuv = floor(st);
        vec2 fuv = fract(st);

        vec4 a = texture2D(sam, (iuv + vec2(0.5, 0.5)) * tsize);
        vec4 b = texture2D(sam, (iuv + vec2(1.5, 0.5)) * tsize);
        vec4 c = texture2D(sam, (iuv + vec2(0.5, 1.5)) * tsize);
        vec4 d = texture2D(sam, (iuv + vec2(1.5, 1.5)) * tsize);

        return mix(mix(a, b, fuv.x), mix(c, d, fuv.x), fuv.y);
    }

    void main () {
        #ifdef MANUAL_FILTERING
            vec2 coord = vUv - dt * bilerp(uVelocity, vUv, texelSize).xy * texelSize;
            vec4 result = bilerp(uSource, coord, dyeTexelSize);
        #else
            vec2 coord = vUv - dt * texture2D(uVelocity, vUv).xy * texelSize;
            vec4 result = texture2D(uSource, coord);
        #endif
        float decay = 1.0 + dissipation * dt;
        gl_FragColor = result / decay;
    }`,
    ext.supportLinearFiltering ? null : ['MANUAL_FILTERING']
);
|
684 |
+
|
685 |
+
// Divergence of the velocity field via central differences on the four
// neighbours. Texels whose neighbour UV falls outside [0,1] use a reflected
// (no-slip) boundary value -C instead.
const divergenceShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying highp vec2 vUv;
    varying highp vec2 vL;
    varying highp vec2 vR;
    varying highp vec2 vT;
    varying highp vec2 vB;
    uniform sampler2D uVelocity;

    void main () {
        float L = texture2D(uVelocity, vL).x;
        float R = texture2D(uVelocity, vR).x;
        float T = texture2D(uVelocity, vT).y;
        float B = texture2D(uVelocity, vB).y;

        vec2 C = texture2D(uVelocity, vUv).xy;
        if (vL.x < 0.0) { L = -C.x; }
        if (vR.x > 1.0) { R = -C.x; }
        if (vT.y > 1.0) { T = -C.y; }
        if (vB.y < 0.0) { B = -C.y; }

        float div = 0.5 * (R - L + T - B);
        gl_FragColor = vec4(div, 0.0, 0.0, 1.0);
    }
`);

// Scalar curl (vorticity) of the velocity field, written to the red channel.
const curlShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying highp vec2 vUv;
    varying highp vec2 vL;
    varying highp vec2 vR;
    varying highp vec2 vT;
    varying highp vec2 vB;
    uniform sampler2D uVelocity;

    void main () {
        float L = texture2D(uVelocity, vL).y;
        float R = texture2D(uVelocity, vR).y;
        float T = texture2D(uVelocity, vT).x;
        float B = texture2D(uVelocity, vB).x;
        float vorticity = R - L - T + B;
        gl_FragColor = vec4(0.5 * vorticity, 0.0, 0.0, 1.0);
    }
`);

// Vorticity confinement: derives a force from the curl gradient (normalized
// with a 0.0001 epsilon to avoid division by zero), scales it by the `curl`
// strength uniform and local curl C, and adds it to the velocity, which is
// then clamped to [-1000, 1000] per component for stability.
const vorticityShader = compileShader(gl.FRAGMENT_SHADER, `
    precision highp float;
    precision highp sampler2D;

    varying vec2 vUv;
    varying vec2 vL;
    varying vec2 vR;
    varying vec2 vT;
    varying vec2 vB;
    uniform sampler2D uVelocity;
    uniform sampler2D uCurl;
    uniform float curl;
    uniform float dt;

    void main () {
        float L = texture2D(uCurl, vL).x;
        float R = texture2D(uCurl, vR).x;
        float T = texture2D(uCurl, vT).x;
        float B = texture2D(uCurl, vB).x;
        float C = texture2D(uCurl, vUv).x;

        vec2 force = 0.5 * vec2(abs(T) - abs(B), abs(R) - abs(L));
        force /= length(force) + 0.0001;
        force *= curl * C;
        force.y *= -1.0;

        vec2 velocity = texture2D(uVelocity, vUv).xy;
        velocity += force * dt;
        velocity = min(max(velocity, -1000.0), 1000.0);
        gl_FragColor = vec4(velocity, 0.0, 1.0);
    }
`);
|
766 |
+
|
767 |
+
// One Jacobi relaxation step of the pressure Poisson equation; iterated
// config.PRESSURE_ITERATIONS times per frame in step().
const pressureShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying highp vec2 vUv;
    varying highp vec2 vL;
    varying highp vec2 vR;
    varying highp vec2 vT;
    varying highp vec2 vB;
    uniform sampler2D uPressure;
    uniform sampler2D uDivergence;

    void main () {
        float L = texture2D(uPressure, vL).x;
        float R = texture2D(uPressure, vR).x;
        float T = texture2D(uPressure, vT).x;
        float B = texture2D(uPressure, vB).x;
        float C = texture2D(uPressure, vUv).x;
        float divergence = texture2D(uDivergence, vUv).x;
        float pressure = (L + R + B + T - divergence) * 0.25;
        gl_FragColor = vec4(pressure, 0.0, 0.0, 1.0);
    }
`);

// Projection: subtracts the pressure gradient from the velocity field,
// making it (approximately) divergence-free.
const gradientSubtractShader = compileShader(gl.FRAGMENT_SHADER, `
    precision mediump float;
    precision mediump sampler2D;

    varying highp vec2 vUv;
    varying highp vec2 vL;
    varying highp vec2 vR;
    varying highp vec2 vT;
    varying highp vec2 vB;
    uniform sampler2D uPressure;
    uniform sampler2D uVelocity;

    void main () {
        float L = texture2D(uPressure, vL).x;
        float R = texture2D(uPressure, vR).x;
        float T = texture2D(uPressure, vT).x;
        float B = texture2D(uPressure, vB).x;
        vec2 velocity = texture2D(uVelocity, vUv).xy;
        velocity.xy -= vec2(R - L, T - B);
        gl_FragColor = vec4(velocity, 0.0, 1.0);
    }
`);
|
813 |
+
|
814 |
+
// One-time setup of a fullscreen quad (two triangles indexed from 4 corner
// vertices, bound to attribute location 0), then returns the draw function
// used by every render pass:
//   blit(target, clear) — binds target.fbo (or the default framebuffer when
//   target is null), sets the viewport to the target's size, optionally
//   clears to opaque black, and draws the quad.
const blit = (() => {
    gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([-1, -1, -1, 1, 1, 1, 1, -1]), gl.STATIC_DRAW);
    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, gl.createBuffer());
    gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array([0, 1, 2, 0, 2, 3]), gl.STATIC_DRAW);
    gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
    gl.enableVertexAttribArray(0);

    return (target, clear = false) => {
        if (target == null)
        {
            gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);
            gl.bindFramebuffer(gl.FRAMEBUFFER, null);
        }
        else
        {
            gl.viewport(0, 0, target.width, target.height);
            gl.bindFramebuffer(gl.FRAMEBUFFER, target.fbo);
        }
        if (clear)
        {
            gl.clearColor(0.0, 0.0, 0.0, 1.0);
            gl.clear(gl.COLOR_BUFFER_BIT);
        }
        // Debug-only completeness check, disabled in the hot path:
        // CHECK_FRAMEBUFFER_STATUS();
        gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
    }
})();
|
842 |
+
|
843 |
+
// Debug helper: logs a stack trace when the currently bound framebuffer is
// not complete. Call sites are normally left commented out.
function CHECK_FRAMEBUFFER_STATUS () {
    const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
    if (status === gl.FRAMEBUFFER_COMPLETE) return;
    console.trace("Framebuffer error: " + status);
}
|
848 |
+
|
849 |
+
// --- Simulation render targets (created/resized in initFramebuffers) -------
let dye;                     // double FBO: color field that gets advected and displayed
let velocity;                // double FBO: 2D velocity field
let divergence;              // single FBO: divergence of velocity
let curl;                    // single FBO: vorticity scalar
let pressure;                // double FBO: pressure solve ping-pong
let bloom;                   // single FBO: bloom result sampled by the display shader
let bloomFramebuffers = [];  // bloom mip chain (progressively halved sizes)
let sunrays;                 // single FBO: sunrays result
let sunraysTemp;             // scratch FBO used when blurring sunrays

// Tiling noise texture sampled as uDithering by the display shader to
// dither the bloom contribution.
let ditheringTexture = createTextureAsync('LDR_LLL1_0.png');

// --- Shader programs (one per simulation/render pass) -----------------------
const blurProgram            = new Program(blurVertexShader, blurShader);
const copyProgram            = new Program(baseVertexShader, copyShader);
const clearProgram           = new Program(baseVertexShader, clearShader);
const colorProgram           = new Program(baseVertexShader, colorShader);
const checkerboardProgram    = new Program(baseVertexShader, checkerboardShader);
const bloomPrefilterProgram  = new Program(baseVertexShader, bloomPrefilterShader);
const bloomBlurProgram       = new Program(baseVertexShader, bloomBlurShader);
const bloomFinalProgram      = new Program(baseVertexShader, bloomFinalShader);
const sunraysMaskProgram     = new Program(baseVertexShader, sunraysMaskShader);
const sunraysProgram         = new Program(baseVertexShader, sunraysShader);
const splatProgram           = new Program(baseVertexShader, splatShader);
const advectionProgram       = new Program(baseVertexShader, advectionShader);
const divergenceProgram      = new Program(baseVertexShader, divergenceShader);
const curlProgram            = new Program(baseVertexShader, curlShader);
const vorticityProgram       = new Program(baseVertexShader, vorticityShader);
const pressureProgram        = new Program(baseVertexShader, pressureShader);
const gradienSubtractProgram = new Program(baseVertexShader, gradientSubtractShader);  // [sic] name kept for existing call sites

// Display material supports #define keywords (SHADING/BLOOM/SUNRAYS),
// toggled in updateKeywords().
const displayMaterial = new Material(baseVertexShader, displayShaderSource);
|
880 |
+
|
881 |
+
// (Re)creates all simulation render targets at the configured resolutions.
// dye/velocity are resized in place (preserving contents) when they already
// exist; divergence/curl/pressure are recreated from scratch each call.
function initFramebuffers () {
    let simRes = getResolution(config.SIM_RESOLUTION);
    let dyeRes = getResolution(config.DYE_RESOLUTION);

    const texType = ext.halfFloatTexType;
    const rgba    = ext.formatRGBA;
    const rg      = ext.formatRG;
    const r       = ext.formatR;
    // Fall back to NEAREST when float textures cannot be linearly filtered.
    const filtering = ext.supportLinearFiltering ? gl.LINEAR : gl.NEAREST;

    gl.disable(gl.BLEND);

    if (dye == null)
        dye = createDoubleFBO(dyeRes.width, dyeRes.height, rgba.internalFormat, rgba.format, texType, filtering);
    else
        dye = resizeDoubleFBO(dye, dyeRes.width, dyeRes.height, rgba.internalFormat, rgba.format, texType, filtering);

    if (velocity == null)
        velocity = createDoubleFBO(simRes.width, simRes.height, rg.internalFormat, rg.format, texType, filtering);
    else
        velocity = resizeDoubleFBO(velocity, simRes.width, simRes.height, rg.internalFormat, rg.format, texType, filtering);

    // Single-channel helper fields always use NEAREST sampling.
    divergence = createFBO      (simRes.width, simRes.height, r.internalFormat, r.format, texType, gl.NEAREST);
    curl       = createFBO      (simRes.width, simRes.height, r.internalFormat, r.format, texType, gl.NEAREST);
    pressure   = createDoubleFBO(simRes.width, simRes.height, r.internalFormat, r.format, texType, gl.NEAREST);

    initBloomFramebuffers();
    initSunraysFramebuffers();
}
|
910 |
+
|
911 |
+
// Builds the bloom target plus its mip chain: each level halves the previous
// resolution, stopping after BLOOM_ITERATIONS levels or once a side would
// drop below 2 pixels.
function initBloomFramebuffers () {
    const res = getResolution(config.BLOOM_RESOLUTION);

    const texType = ext.halfFloatTexType;
    const rgba = ext.formatRGBA;
    const filtering = ext.supportLinearFiltering ? gl.LINEAR : gl.NEAREST;

    bloom = createFBO(res.width, res.height, rgba.internalFormat, rgba.format, texType, filtering);

    bloomFramebuffers.length = 0;
    let level = 1;
    while (level <= config.BLOOM_ITERATIONS) {
        const w = res.width >> level;
        const h = res.height >> level;
        if (w < 2 || h < 2) break;
        bloomFramebuffers.push(createFBO(w, h, rgba.internalFormat, rgba.format, texType, filtering));
        level++;
    }
}
|
932 |
+
|
933 |
+
// Creates the sunrays target and its blur scratch buffer, both single-channel
// at the configured sunrays resolution.
function initSunraysFramebuffers () {
    const res = getResolution(config.SUNRAYS_RESOLUTION);

    const texType = ext.halfFloatTexType;
    const r = ext.formatR;
    const filtering = ext.supportLinearFiltering ? gl.LINEAR : gl.NEAREST;

    const makeTarget = () =>
        createFBO(res.width, res.height, r.internalFormat, r.format, texType, filtering);

    sunrays = makeTarget();
    sunraysTemp = makeTarget();
}
|
943 |
+
|
944 |
+
// Creates a single render target: a w×h texture with the given internal
// format/format/type, min+mag filter `param`, clamp-to-edge wrapping,
// attached as COLOR_ATTACHMENT0 of a fresh framebuffer, cleared once.
// Returns { texture, fbo, width, height, texelSizeX, texelSizeY, attach },
// where attach(id) binds the texture to texture unit `id` and returns `id`
// (convenient for passing straight into gl.uniform1i).
function createFBO (w, h, internalFormat, format, type, param) {
    gl.activeTexture(gl.TEXTURE0);
    let texture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, param);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, param);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    gl.texImage2D(gl.TEXTURE_2D, 0, internalFormat, w, h, 0, format, type, null);

    let fbo = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, fbo);
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
    gl.viewport(0, 0, w, h);
    gl.clear(gl.COLOR_BUFFER_BIT);

    let texelSizeX = 1.0 / w;
    let texelSizeY = 1.0 / h;

    return {
        texture,
        fbo,
        width: w,
        height: h,
        texelSizeX,
        texelSizeY,
        attach (id) {
            gl.activeTexture(gl.TEXTURE0 + id);
            gl.bindTexture(gl.TEXTURE_2D, texture);
            return id;
        }
    };
}
|
977 |
+
|
978 |
+
// Ping-pong pair of FBOs: shader passes sample from `read` and render into
// `write`, then call swap() to exchange the two.
function createDoubleFBO (w, h, internalFormat, format, type, param) {
    let front = createFBO(w, h, internalFormat, format, type, param);
    let back = createFBO(w, h, internalFormat, format, type, param);

    return {
        width: w,
        height: h,
        texelSizeX: front.texelSizeX,
        texelSizeY: front.texelSizeY,
        get read () {
            return front;
        },
        set read (value) {
            front = value;
        },
        get write () {
            return back;
        },
        set write (value) {
            back = value;
        },
        swap () {
            [front, back] = [back, front];
        }
    }
}
|
1006 |
+
|
1007 |
+
// Allocates a new FBO at the requested size and copies the old contents into
// it with the copy shader. Contents are preserved; the old FBO is abandoned.
function resizeFBO (target, w, h, internalFormat, format, type, param) {
    const resized = createFBO(w, h, internalFormat, format, type, param);
    copyProgram.bind();
    gl.uniform1i(copyProgram.uniforms.uTexture, target.attach(0));
    blit(resized);
    return resized;
}
|
1014 |
+
|
1015 |
+
// Resizes a ping-pong FBO pair. The read side is copied into the new size
// (preserving the field), the write side is simply reallocated.
function resizeDoubleFBO (target, w, h, internalFormat, format, type, param) {
    const sameSize = target.width === w && target.height === h;
    if (sameSize) return target;

    target.read = resizeFBO(target.read, w, h, internalFormat, format, type, param);
    target.write = createFBO(w, h, internalFormat, format, type, param);
    target.width = w;
    target.height = h;
    target.texelSizeX = 1.0 / w;
    target.texelSizeY = 1.0 / h;
    return target;
}
|
1026 |
+
|
1027 |
+
// Loads an image as a repeating, linearly-filtered RGB texture.
// Returns immediately with a 1×1 white placeholder; once the image loads,
// the real pixels and dimensions are swapped into the same texture object,
// so callers can use the returned handle right away.
function createTextureAsync (url) {
    let texture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT);
    // Placeholder: single white pixel until the real image arrives.
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, 1, 1, 0, gl.RGB, gl.UNSIGNED_BYTE, new Uint8Array([255, 255, 255]));

    let obj = {
        texture,
        width: 1,
        height: 1,
        attach (id) {
            gl.activeTexture(gl.TEXTURE0 + id);
            gl.bindTexture(gl.TEXTURE_2D, texture);
            return id;
        }
    };

    let image = new Image();
    image.onload = () => {
        obj.width = image.width;
        obj.height = image.height;
        gl.bindTexture(gl.TEXTURE_2D, texture);
        // 6-argument texImage2D overload: (target, level, internalformat, format, type, source).
        gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, image);
    };
    image.src = url;
    // NOTE(review): no onerror handler — a failed load silently keeps the
    // 1×1 white placeholder.

    return obj;
}
|
1058 |
+
|
1059 |
+
// Recompiles/selects the display shader variant matching the currently
// enabled post-effects (SHADING / BLOOM / SUNRAYS config flags become
// #define keywords on displayMaterial).
function updateKeywords () {
    const displayKeywords = [];
    for (const flag of ["SHADING", "BLOOM", "SUNRAYS"]) {
        if (config[flag]) displayKeywords.push(flag);
    }
    displayMaterial.setKeywords(displayKeywords);
}
|
1066 |
+
|
1067 |
+
// ---- Startup ---------------------------------------------------------------
updateKeywords();
initFramebuffers();

// Initial burst of 5..14 splats at slightly reduced intensity.
// Math.floor replaces the original parseInt: parseInt coerces its numeric
// argument through a string (fragile and needless for simple truncation).
multipleSplats(Math.floor(Math.random() * 10) + 5, 0.8);

// Configuration for ambient splats
const AMBIENT_INTERVAL = 1000;  // Time between ambient splats in milliseconds (1 second)
const MAX_AMBIENT_SPLATS = 5;   // Maximum number of random splats per interval
const AMBIENT_INTENSITY = 0.3;  // How strong the ambient splats are (30% of normal)

setInterval(() => {
    // Only add ambient splats if the simulation isn't paused by the user (P key)
    if (!config.PAUSED) {
        // Generate a small number of splats (1 to MAX_AMBIENT_SPLATS)
        const numSplats = Math.floor(Math.random() * MAX_AMBIENT_SPLATS) + 1;
        // Call multipleSplats with the low ambient intensity
        multipleSplats(numSplats, AMBIENT_INTENSITY);
    }
}, AMBIENT_INTERVAL);
|
1086 |
+
|
1087 |
+
|
1088 |
+
// Frame-timing state for the main loop.
let lastUpdateTime = Date.now();
// Accumulates dt * COLOR_UPDATE_SPEED; wraps at 1 to recolor pointers (see updateColors).
let colorUpdateTimer = 0.0;
update();  // kick off the requestAnimationFrame loop
|
1091 |
+
|
1092 |
+
// Main loop (re-scheduled via requestAnimationFrame): compute dt, rebuild
// render targets after a canvas resize, cycle pointer colors, apply queued
// input splats, advance the simulation unless paused, and composite to the
// default framebuffer (render(null)).
function update () {
    const dt = calcDeltaTime();
    if (resizeCanvas())
        initFramebuffers();
    updateColors(dt);
    applyInputs();
    if (!config.PAUSED)
        step(dt);
    render(null);
    requestAnimationFrame(update);
}
|
1103 |
+
|
1104 |
+
// Seconds elapsed since the previous frame, capped at ~1/60 s so a stalled
// tab or slow frame cannot make the simulation step explode.
function calcDeltaTime () {
    const now = Date.now();
    const elapsed = (now - lastUpdateTime) / 1000;
    lastUpdateTime = now;
    return Math.min(elapsed, 0.016666);
}
|
1111 |
+
|
1112 |
+
// Syncs the canvas backing-store size with its CSS size (scaled by device
// pixel ratio). Returns true when a resize actually happened, so the caller
// knows to rebuild the framebuffers.
function resizeCanvas () {
    const width = scaleByPixelRatio(canvas.clientWidth);
    const height = scaleByPixelRatio(canvas.clientHeight);
    const changed = canvas.width !== width || canvas.height !== height;
    if (changed) {
        canvas.width = width;
        canvas.height = height;
    }
    return changed;
}
|
1122 |
+
|
1123 |
+
// Periodically assigns every pointer a fresh random color. The timer
// accumulates dt scaled by COLOR_UPDATE_SPEED and wraps into [0, 1).
function updateColors (dt) {
    if (!config.COLORFUL) return;

    colorUpdateTimer += dt * config.COLOR_UPDATE_SPEED;
    if (colorUpdateTimer < 1) return;

    colorUpdateTimer = wrap(colorUpdateTimer, 0, 1);
    for (const p of pointers) {
        p.color = generateColor();
    }
}
|
1134 |
+
|
1135 |
+
// Drains one queued keyboard-triggered splat burst (if any) and converts
// each pointer's accumulated movement into a splat, clearing its flag.
function applyInputs () {
    if (splatStack.length > 0)
        multipleSplats(splatStack.pop());

    for (const pointer of pointers) {
        if (!pointer.moved) continue;
        pointer.moved = false;
        splatPointer(pointer);
    }
}
|
1146 |
+
|
1147 |
+
// Advances the fluid simulation by dt seconds. GPU passes, in order:
// curl -> vorticity confinement -> divergence -> pressure clear ->
// Jacobi pressure solve -> gradient subtraction (projection) ->
// velocity self-advection -> dye advection.
function step (dt) {
    gl.disable(gl.BLEND);

    // Vorticity (curl) of the current velocity field.
    curlProgram.bind();
    gl.uniform2f(curlProgram.uniforms.texelSize, velocity.texelSizeX, velocity.texelSizeY);
    gl.uniform1i(curlProgram.uniforms.uVelocity, velocity.read.attach(0));
    blit(curl);

    // Vorticity confinement: re-inject small-scale swirl scaled by config.CURL.
    vorticityProgram.bind();
    gl.uniform2f(vorticityProgram.uniforms.texelSize, velocity.texelSizeX, velocity.texelSizeY);
    gl.uniform1i(vorticityProgram.uniforms.uVelocity, velocity.read.attach(0));
    gl.uniform1i(vorticityProgram.uniforms.uCurl, curl.attach(1));
    gl.uniform1f(vorticityProgram.uniforms.curl, config.CURL);
    gl.uniform1f(vorticityProgram.uniforms.dt, dt);
    blit(velocity.write);
    velocity.swap();

    // Divergence of the (confined) velocity field.
    divergenceProgram.bind();
    gl.uniform2f(divergenceProgram.uniforms.texelSize, velocity.texelSizeX, velocity.texelSizeY);
    gl.uniform1i(divergenceProgram.uniforms.uVelocity, velocity.read.attach(0));
    blit(divergence);

    // Damp the previous frame's pressure by config.PRESSURE before solving.
    clearProgram.bind();
    gl.uniform1i(clearProgram.uniforms.uTexture, pressure.read.attach(0));
    gl.uniform1f(clearProgram.uniforms.value, config.PRESSURE);
    blit(pressure.write);
    pressure.swap();

    // Jacobi iterations of the pressure Poisson equation.
    pressureProgram.bind();
    gl.uniform2f(pressureProgram.uniforms.texelSize, velocity.texelSizeX, velocity.texelSizeY);
    gl.uniform1i(pressureProgram.uniforms.uDivergence, divergence.attach(0));
    for (let i = 0; i < config.PRESSURE_ITERATIONS; i++) {
        gl.uniform1i(pressureProgram.uniforms.uPressure, pressure.read.attach(1));
        blit(pressure.write);
        pressure.swap();
    }

    // Projection: subtract the pressure gradient to make velocity divergence-free.
    gradienSubtractProgram.bind();
    gl.uniform2f(gradienSubtractProgram.uniforms.texelSize, velocity.texelSizeX, velocity.texelSizeY);
    gl.uniform1i(gradienSubtractProgram.uniforms.uPressure, pressure.read.attach(0));
    gl.uniform1i(gradienSubtractProgram.uniforms.uVelocity, velocity.read.attach(1));
    blit(velocity.write);
    velocity.swap();

    // Self-advect velocity (uVelocity and uSource share texture unit 0).
    advectionProgram.bind();
    gl.uniform2f(advectionProgram.uniforms.texelSize, velocity.texelSizeX, velocity.texelSizeY);
    if (!ext.supportLinearFiltering)
        // MANUAL_FILTERING path: sampler texel size for the software bilerp.
        gl.uniform2f(advectionProgram.uniforms.dyeTexelSize, velocity.texelSizeX, velocity.texelSizeY);
    let velocityId = velocity.read.attach(0);
    gl.uniform1i(advectionProgram.uniforms.uVelocity, velocityId);
    gl.uniform1i(advectionProgram.uniforms.uSource, velocityId);
    gl.uniform1f(advectionProgram.uniforms.dt, dt);
    gl.uniform1f(advectionProgram.uniforms.dissipation, config.VELOCITY_DISSIPATION);
    blit(velocity.write);
    velocity.swap();

    // Advect the dye through the updated velocity field.
    if (!ext.supportLinearFiltering)
        gl.uniform2f(advectionProgram.uniforms.dyeTexelSize, dye.texelSizeX, dye.texelSizeY);
    gl.uniform1i(advectionProgram.uniforms.uVelocity, velocity.read.attach(0));
    gl.uniform1i(advectionProgram.uniforms.uSource, dye.read.attach(1));
    gl.uniform1f(advectionProgram.uniforms.dissipation, config.DENSITY_DISSIPATION);
    blit(dye.write);
    dye.swap();
}
|
1211 |
+
|
1212 |
+
// Composites the current dye field to `target` (null = default framebuffer),
// running the optional bloom / sunrays post passes first.
function render (target) {
    if (config.BLOOM)
        applyBloom(dye.read, bloom);
    if (config.SUNRAYS) {
        // Mask is rendered into dye.write (scratch), rays into `sunrays`,
        // which is then softened with one blur iteration.
        applySunrays(dye.read, dye.write, sunrays);
        blur(sunrays, sunraysTemp, 1);
    }

    // Premultiplied-alpha blending for the opaque/on-screen path; no blending
    // when rendering transparently to a texture.
    if (target == null || !config.TRANSPARENT) {
        gl.blendFunc(gl.ONE, gl.ONE_MINUS_SRC_ALPHA);
        gl.enable(gl.BLEND);
    }
    else {
        gl.disable(gl.BLEND);
    }

    if (!config.TRANSPARENT)
        drawColor(target, normalizeColor(config.BACK_COLOR));
    if (target == null && config.TRANSPARENT)
        drawCheckerboard(target);
    drawDisplay(target);
}
|
1234 |
+
|
1235 |
+
// Fills `target` with an opaque solid color (the background pass).
function drawColor (target, color) {
    const { r, g, b } = color;
    colorProgram.bind();
    gl.uniform4f(colorProgram.uniforms.color, r, g, b, 1);
    blit(target);
}
|
1240 |
+
|
1241 |
+
// Draws the transparency checkerboard background, aspect-corrected to the
// current canvas proportions.
function drawCheckerboard (target) {
    const aspect = canvas.width / canvas.height;
    checkerboardProgram.bind();
    gl.uniform1f(checkerboardProgram.uniforms.aspectRatio, aspect);
    blit(target);
}
|
1246 |
+
|
1247 |
+
// Final composite through displayMaterial (variant chosen by updateKeywords):
// dye on texture unit 0, plus optional bloom (1) with dither noise (2) and
// optional sunrays (3).
function drawDisplay (target) {
    let width = target == null ? gl.drawingBufferWidth : target.width;
    let height = target == null ? gl.drawingBufferHeight : target.height;

    displayMaterial.bind();
    if (config.SHADING)
        gl.uniform2f(displayMaterial.uniforms.texelSize, 1.0 / width, 1.0 / height);
    gl.uniform1i(displayMaterial.uniforms.uTexture, dye.read.attach(0));
    if (config.BLOOM) {
        gl.uniform1i(displayMaterial.uniforms.uBloom, bloom.attach(1));
        gl.uniform1i(displayMaterial.uniforms.uDithering, ditheringTexture.attach(2));
        // Tile the dither texture so one texel maps to one output pixel.
        let scale = getTextureScale(ditheringTexture, width, height);
        gl.uniform2f(displayMaterial.uniforms.ditherScale, scale.x, scale.y);
    }
    if (config.SUNRAYS)
        gl.uniform1i(displayMaterial.uniforms.uSunrays, sunrays.attach(3));
    blit(target);
}
|
1265 |
+
|
1266 |
+
// Renders the bloom effect from `source` into `destination`:
// prefilter (soft-knee threshold) -> progressive downsample through the
// bloomFramebuffers mip chain -> additive upsample back up the chain ->
// final intensity-scaled blur into `destination`.
function applyBloom (source, destination) {
    // The down/up chain needs at least two mip levels to do anything useful.
    if (bloomFramebuffers.length < 2)
        return;

    let last = destination;

    gl.disable(gl.BLEND);
    bloomPrefilterProgram.bind();
    // Soft-knee parameters derived from BLOOM_THRESHOLD and BLOOM_SOFT_KNEE;
    // the 0.0001 keeps `knee` (and 0.25 / knee) finite when soft knee is 0.
    let knee = config.BLOOM_THRESHOLD * config.BLOOM_SOFT_KNEE + 0.0001;
    let curve0 = config.BLOOM_THRESHOLD - knee;
    let curve1 = knee * 2;
    let curve2 = 0.25 / knee;
    gl.uniform3f(bloomPrefilterProgram.uniforms.curve, curve0, curve1, curve2);
    gl.uniform1f(bloomPrefilterProgram.uniforms.threshold, config.BLOOM_THRESHOLD);
    gl.uniform1i(bloomPrefilterProgram.uniforms.uTexture, source.attach(0));
    blit(last);

    // Downsample: blur each level into the next-smaller framebuffer.
    bloomBlurProgram.bind();
    for (let i = 0; i < bloomFramebuffers.length; i++) {
        let dest = bloomFramebuffers[i];
        gl.uniform2f(bloomBlurProgram.uniforms.texelSize, last.texelSizeX, last.texelSizeY);
        gl.uniform1i(bloomBlurProgram.uniforms.uTexture, last.attach(0));
        blit(dest);
        last = dest;
    }

    // Upsample with additive blending so each level accumulates glow.
    gl.blendFunc(gl.ONE, gl.ONE);
    gl.enable(gl.BLEND);

    for (let i = bloomFramebuffers.length - 2; i >= 0; i--) {
        let baseTex = bloomFramebuffers[i];
        gl.uniform2f(bloomBlurProgram.uniforms.texelSize, last.texelSizeX, last.texelSizeY);
        gl.uniform1i(bloomBlurProgram.uniforms.uTexture, last.attach(0));
        // Redundant with the viewport set inside blit(); kept as-is.
        gl.viewport(0, 0, baseTex.width, baseTex.height);
        blit(baseTex);
        last = baseTex;
    }

    // Final pass: blur the top of the chain into `destination`, scaled by
    // BLOOM_INTENSITY.
    gl.disable(gl.BLEND);
    bloomFinalProgram.bind();
    gl.uniform2f(bloomFinalProgram.uniforms.texelSize, last.texelSizeX, last.texelSizeY);
    gl.uniform1i(bloomFinalProgram.uniforms.uTexture, last.attach(0));
    gl.uniform1f(bloomFinalProgram.uniforms.intensity, config.BLOOM_INTENSITY);
    blit(destination);
}
|
1311 |
+
|
1312 |
+
// Two-pass sunrays: build an occlusion mask from `source` into `mask`, then
// radially accumulate the mask's alpha toward the screen center into
// `destination` (weighted by config.SUNRAYS_WEIGHT).
function applySunrays (source, mask, destination) {
    gl.disable(gl.BLEND);
    sunraysMaskProgram.bind();
    gl.uniform1i(sunraysMaskProgram.uniforms.uTexture, source.attach(0));
    blit(mask);

    sunraysProgram.bind();
    gl.uniform1f(sunraysProgram.uniforms.weight, config.SUNRAYS_WEIGHT);
    gl.uniform1i(sunraysProgram.uniforms.uTexture, mask.attach(0));
    blit(destination);
}
|
1323 |
+
|
1324 |
+
// Separable blur: each iteration does a horizontal pass into `temp` followed
// by a vertical pass back into `target` (texelSize selects the axis).
function blur (target, temp, iterations) {
    blurProgram.bind();
    let pass = 0;
    while (pass < iterations) {
        // Horizontal: target -> temp.
        gl.uniform2f(blurProgram.uniforms.texelSize, target.texelSizeX, 0.0);
        gl.uniform1i(blurProgram.uniforms.uTexture, target.attach(0));
        blit(temp);

        // Vertical: temp -> target.
        gl.uniform2f(blurProgram.uniforms.texelSize, 0.0, target.texelSizeY);
        gl.uniform1i(blurProgram.uniforms.uTexture, temp.attach(0));
        blit(target);
        pass++;
    }
}
|
1336 |
+
|
1337 |
+
// Converts a pointer's movement delta into a force splat at its current
// normalized position.
function splatPointer (pointer) {
    const force = config.SPLAT_FORCE;
    splat(pointer.texcoordX, pointer.texcoordY, pointer.deltaX * force, pointer.deltaY * force, pointer.color);
}
|
1342 |
+
|
1343 |
+
// Fires `amount` splats at random positions with random directions.
// intensityMultiplier (default 1.0) scales both the dye brightness and the
// injected velocity, letting ambient splats be much gentler than user input.
function multipleSplats (amount, intensityMultiplier = 1.0) {
    for (let n = 0; n < amount; n++) {
        const color = generateColor();
        const gain = 10.0 * intensityMultiplier;
        color.r *= gain;
        color.g *= gain;
        color.b *= gain;
        const x = Math.random();
        const y = Math.random();
        const dx = 1000 * (Math.random() - 0.5) * intensityMultiplier;
        const dy = 1000 * (Math.random() - 0.5) * intensityMultiplier;
        splat(x, y, dx, dy, color);
    }
}
|
1358 |
+
|
1359 |
+
|
1360 |
+
// Injects a gaussian impulse at normalized (x, y): first (dx, dy) is added
// to the velocity field (packed into the splat shader's color uniform), then
// `color` is added to the dye field. Both targets are ping-pong swapped.
function splat (x, y, dx, dy, color) {
    splatProgram.bind();
    gl.uniform1i(splatProgram.uniforms.uTarget, velocity.read.attach(0));
    gl.uniform1f(splatProgram.uniforms.aspectRatio, canvas.width / canvas.height);
    gl.uniform2f(splatProgram.uniforms.point, x, y);
    gl.uniform3f(splatProgram.uniforms.color, dx, dy, 0.0);
    gl.uniform1f(splatProgram.uniforms.radius, correctRadius(config.SPLAT_RADIUS / 100.0));
    blit(velocity.write);
    velocity.swap();

    // Same program/uniforms, only retargeted at the dye with the real color.
    gl.uniform1i(splatProgram.uniforms.uTarget, dye.read.attach(0));
    gl.uniform3f(splatProgram.uniforms.color, color.r, color.g, color.b);
    blit(dye.write);
    dye.swap();
}
|
1375 |
+
|
1376 |
+
// Widen the splat radius on landscape canvases so splats remain visually
// circular regardless of the canvas aspect ratio.
function correctRadius (radius) {
    const aspectRatio = canvas.width / canvas.height;
    return aspectRatio > 1 ? radius * aspectRatio : radius;
}
|
1382 |
+
|
1383 |
+
// Root UI container; pointer events that originate inside it are ignored by
// the fluid simulation so interacting with the form does not trigger splats.
const containerElement = document.querySelector('.container');

window.addEventListener('mousedown', e => {
    // Ignore clicks that start inside the container/form.
    if (containerElement && containerElement.contains(e.target)) {
        // If the target (or its ancestor) is the container, ignore for fluid.
        return;
    }

    const rect = canvas.getBoundingClientRect();
    // Convert client coords to canvas-relative device pixels.
    let posX = scaleByPixelRatio(e.clientX - rect.left);
    let posY = scaleByPixelRatio(e.clientY - rect.top);

    // Reuse the mouse pointer slot (id -1) if present, else allocate one.
    // NOTE(review): a newly created pointer is not pushed into `pointers`
    // here — presumably id -1 always exists from init; confirm upstream.
    let pointer = pointers.find(p => p.id == -1);
    if (pointer == null)
        pointer = new pointerPrototype();

    // Pass the SCALED PIXEL coordinates relative to the canvas.
    updatePointerDownData(pointer, -1, posX, posY);
});
|
1405 |
+
|
1406 |
+
window.addEventListener('mousemove', e => {
    let pointer = pointers[0]; // pointers[0] is reserved for the mouse
    if (!pointer.down) return; // only track while the button is held

    const rect = canvas.getBoundingClientRect();
    // Convert client coords to canvas-relative device pixels.
    let posX = scaleByPixelRatio(e.clientX - rect.left);
    let posY = scaleByPixelRatio(e.clientY - rect.top);

    // Pass the SCALED PIXEL coordinates relative to the canvas.
    updatePointerMoveData(pointer, posX, posY);
});
|
1418 |
+
|
1419 |
+
// window.addEventListener('mouseup', ...) // Keep this listener as is
|
1420 |
+
|
1421 |
+
window.addEventListener('touchstart', e => {
    // Note: preventDefault is deliberately NOT called on window-level
    // touchstart/touchmove, since that can break page scrolling.

    const touches = e.targetTouches;
    const rect = canvas.getBoundingClientRect();
    let didProcessTouchOutside = false; // set but only consumed by the disabled preventDefault below

    for (let i = 0; i < touches.length; i++) {
        // Ignore touches that start inside the container/form.
        if (containerElement && containerElement.contains(touches[i].target)) {
            continue; // Ignore this specific touch for fluid
        }

        didProcessTouchOutside = true; // Mark that at least one touch outside occurred

        // Grow the pointers array so index identifier+1 exists
        // (pointers[0] is reserved for the mouse).
        while (pointers.length <= touches[i].identifier + 1)
            pointers.push(new pointerPrototype());

        // Convert client coords to canvas-relative device pixels.
        let relativeX = touches[i].clientX - rect.left;
        let relativeY = touches[i].clientY - rect.top;
        let posX = scaleByPixelRatio(relativeX);
        let posY = scaleByPixelRatio(relativeY);

        // Map touch identifier N to pointers[N + 1].
        // NOTE(review): per the Touch Events spec, identifiers are only
        // guaranteed unique, not small sequential ints — the find-by-id
        // lookup used in touchmove is the safer pattern; confirm on target
        // browsers before relying on this index mapping.
        let pointerIndex = touches[i].identifier + 1;
        if(pointerIndex >= pointers.length) pointerIndex = pointers.length -1; // Safety check

        // Pass the SCALED PIXEL coordinates relative to the canvas.
        updatePointerDownData(pointers[pointerIndex], touches[i].identifier, posX, posY);
    }
    // if (didProcessTouchOutside) { e.preventDefault(); } // Avoid if possible
});
|
1460 |
+
|
1461 |
+
window.addEventListener('touchmove', e => {
    const touches = e.targetTouches;
    const rect = canvas.getBoundingClientRect();

    for (let i = 0; i < touches.length; i++) {
        // Find the pointer tracking this touch identifier.
        let pointer = pointers.find(p => p.id == touches[i].identifier);
        if (!pointer || !pointer.down) continue; // not tracked, or already lifted

        // Convert client coords to canvas-relative device pixels.
        let relativeX = touches[i].clientX - rect.left;
        let relativeY = touches[i].clientY - rect.top;
        let posX = scaleByPixelRatio(relativeX);
        let posY = scaleByPixelRatio(relativeY);

        // Pass the SCALED PIXEL coordinates relative to the canvas.
        updatePointerMoveData(pointer, posX, posY);
    }
}, false); // useCapture = false is the default, stated explicitly
|
1480 |
+
|
1481 |
+
window.addEventListener('touchend', e => {
    const touches = e.changedTouches;
    for (let i = 0; i < touches.length; i++)
    {
        let pointer = pointers.find(p => p.id == touches[i].identifier);
        if (pointer == null) continue; // touch was never tracked (e.g. started on the UI)
        updatePointerUpData(pointer);
    }
});
|
1490 |
+
|
1491 |
+
// Keyboard shortcuts: 'P' toggles the simulation pause flag; Space queues a
// burst of 5-24 random splats, consumed on the next update tick.
window.addEventListener('keydown', e => {
    if (e.code === 'KeyP')
        config.PAUSED = !config.PAUSED;
    if (e.key === ' ')
        // Math.floor instead of parseInt: parseInt coerces the number to a
        // string first, which misparses values rendered in exponential
        // notation and is the wrong tool for numeric truncation.
        splatStack.push(Math.floor(Math.random() * 20) + 5);
});
|
1497 |
+
|
1498 |
+
// Begin tracking a press: record its id, convert the device-pixel position
// into GL texture coordinates (y flipped), zero the motion deltas, and
// assign a fresh random dye color for the strokes this pointer will emit.
function updatePointerDownData (pointer, id, posX, posY) {
    const tx = posX / canvas.width;
    const ty = 1.0 - posY / canvas.height;
    pointer.id = id;
    pointer.down = true;
    pointer.moved = false;
    pointer.texcoordX = tx;
    pointer.texcoordY = ty;
    pointer.prevTexcoordX = tx;
    pointer.prevTexcoordY = ty;
    pointer.deltaX = 0;
    pointer.deltaY = 0;
    pointer.color = generateColor();
}
|
1510 |
+
|
1511 |
+
// Advance a tracked pointer to a new device-pixel position: the current
// texcoords become prev*, new texcoords are computed (y flipped for GL),
// and aspect-corrected deltas are derived. `moved` records whether this
// update produced any displacement at all.
function updatePointerMoveData (pointer, posX, posY) {
    const prevX = pointer.texcoordX;
    const prevY = pointer.texcoordY;
    const nextX = posX / canvas.width;
    const nextY = 1.0 - posY / canvas.height;
    pointer.prevTexcoordX = prevX;
    pointer.prevTexcoordY = prevY;
    pointer.texcoordX = nextX;
    pointer.texcoordY = nextY;
    pointer.deltaX = correctDeltaX(nextX - prevX);
    pointer.deltaY = correctDeltaY(nextY - prevY);
    pointer.moved = Math.abs(pointer.deltaX) > 0 || Math.abs(pointer.deltaY) > 0;
}
|
1520 |
+
|
1521 |
+
// Mark a pointer as released; it stops emitting splats until pressed again.
function updatePointerUpData (pointer) {
    Object.assign(pointer, { down: false });
}
|
1524 |
+
|
1525 |
+
// On portrait canvases, shrink horizontal deltas so pointer speed in
// texture space is independent of the canvas aspect ratio.
function correctDeltaX (delta) {
    const aspectRatio = canvas.width / canvas.height;
    return aspectRatio < 1 ? delta * aspectRatio : delta;
}
|
1530 |
+
|
1531 |
+
// On landscape canvases, shrink vertical deltas so pointer speed in
// texture space is independent of the canvas aspect ratio.
function correctDeltaY (delta) {
    const aspectRatio = canvas.width / canvas.height;
    return aspectRatio > 1 ? delta / aspectRatio : delta;
}
|
1536 |
+
|
1537 |
+
// Produce a random fully-saturated hue, dimmed to 15% brightness so that
// repeated splats accumulate in the dye field without saturating at once.
function generateColor () {
    const dim = 0.15;
    const c = HSVtoRGB(Math.random(), 1.0, 1.0);
    c.r *= dim;
    c.g *= dim;
    c.b *= dim;
    return c;
}
|
1544 |
+
|
1545 |
+
/**
 * Convert an HSV triple (all components in [0, 1]) to an {r, g, b} object
 * with components in [0, 1]. Standard sextant-based conversion: the hue
 * selects one of six linear ramps between the value, p, q and t terms.
 */
function HSVtoRGB (h, s, v) {
    const i = Math.floor(h * 6);
    const f = h * 6 - i;
    const p = v * (1 - s);
    const q = v * (1 - f * s);
    const t = v * (1 - (1 - f) * s);

    let r, g, b;
    switch (i % 6) {
        case 0: [r, g, b] = [v, t, p]; break;
        case 1: [r, g, b] = [q, v, p]; break;
        case 2: [r, g, b] = [p, v, t]; break;
        case 3: [r, g, b] = [p, q, v]; break;
        case 4: [r, g, b] = [t, p, v]; break;
        case 5: [r, g, b] = [v, p, q]; break;
    }

    return { r, g, b };
}
|
1568 |
+
|
1569 |
+
// Convert an 8-bit RGB color ({r, g, b} in 0-255) into normalized
// floating-point components in [0, 1].
function normalizeColor ({ r, g, b }) {
    return {
        r: r / 255,
        g: g / 255,
        b: b / 255
    };
}
|
1577 |
+
|
1578 |
+
/**
 * Wrap `value` into the half-open interval [min, max).
 * Uses a double modulo so values below `min` also wrap correctly: a plain
 * `%` in JavaScript keeps the sign of the dividend, so the original
 * one-step form returned results below `min` for such inputs. Behavior is
 * unchanged for values >= min.
 * Returns `min` when the interval is empty (max == min).
 */
function wrap (value, min, max) {
    const range = max - min;
    if (range == 0) return min;
    return ((value - min) % range + range) % range + min;
}
|
1583 |
+
|
1584 |
+
// Compute a framebuffer size whose smaller dimension equals `resolution`
// and whose proportions match the drawing buffer's aspect ratio.
function getResolution (resolution) {
    const bufW = gl.drawingBufferWidth;
    const bufH = gl.drawingBufferHeight;
    let aspectRatio = bufW / bufH;
    if (aspectRatio < 1)
        aspectRatio = 1.0 / aspectRatio;

    const min = Math.round(resolution);
    const max = Math.round(resolution * aspectRatio);

    return bufW > bufH
        ? { width: max, height: min }
        : { width: min, height: max };
}
|
1597 |
+
|
1598 |
+
// Per-axis ratio between a target size and a texture's own size.
function getTextureScale (texture, width, height) {
    const scaleX = width / texture.width;
    const scaleY = height / texture.height;
    return { x: scaleX, y: scaleY };
}
|
1604 |
+
|
1605 |
+
// Convert a CSS-pixel length to device pixels, truncated to an integer.
// Falls back to a ratio of 1 when devicePixelRatio is unavailable.
function scaleByPixelRatio (input) {
    const ratio = window.devicePixelRatio || 1;
    return Math.floor(input * ratio);
}
|
1609 |
+
|
1610 |
+
// Java-style 32-bit string hash: hash = hash * 31 + charCode over each
// UTF-16 code unit, truncated to a signed 32-bit integer each step.
// Returns 0 for the empty string.
function hashCode (s) {
    let hash = 0;
    for (let idx = 0; idx < s.length; idx++) {
        hash = ((hash << 5) - hash + s.charCodeAt(idx)) | 0;
    }
    return hash;
};
|