Update app.py
app.py CHANGED
@@ -693,64 +693,18 @@ def main():
     if 'chat_history' not in st.session_state:
         st.session_state.chat_history = []
 
-    #
-
-    with st.expander("Hugging Face Spaces Debugging Information", expanded=True):
-        st.markdown("""
-        ### Common Issues with Hugging Face Spaces
-
-        1. **403/404 Errors**: Often caused by permission issues when accessing files or external resources.
-
-        2. **Memory Limits**: Free spaces have limited memory (16GB). Large models may cause OOM errors.
-
-        3. **Disk Space**: Limited to 10GB for persistent storage.
-
-        4. **Network Restrictions**: Some external URLs might be blocked or restricted.
-
-        ### Accessing Logs
-        To see detailed error logs in Hugging Face Spaces:
-
-        1. Go to your Space dashboard
-        2. Click on "Logs" in the left sidebar
-        3. Check both "Build logs" and "Running logs" tabs
-
-        In the running logs, look for Python tracebacks or error messages.
-
-        ### This App's Setup
-        - All image processing is now done in-memory to avoid file permission issues
-        - Debug logging is available through this interface
-        - For large model loading issues, try using smaller models or increasing RAM allocation
-        """)
-
-        # Add a test connection button
-        if st.button("Test Network Connection"):
-            try:
-                import requests
-                test_urls = [
-                    "https://huggingface.co/",
-                    "https://www.google.com/",
-                    "https://jsonplaceholder.typicode.com/todos/1"
-                ]
-
-                for url in test_urls:
-                    try:
-                        response = requests.get(url, timeout=5)
-                        st.write(f"✅ {url}: Status {response.status_code}")
-                    except Exception as e:
-                        st.write(f"❌ {url}: Error - {str(e)}")
-            except Exception as e:
-                st.error(f"Could not perform connection test: {str(e)}")
+    # Create multi-tab interface
+    tab1, tab2, tab3 = st.tabs(["Deepfake Detection", "Image Captions", "LLM Analysis"])
 
-    #
-    with
-    st.
+    # Tab 1: Deepfake Detection with Model Loading and Image Upload
+    with tab1:
+        st.header("Deepfake Detection")
 
-        #
-
-
-        with xception_col:
+        # Model Loading section
+        with st.expander("Load Detection Model", expanded=True):
+            st.write("Please load the Xception model for deepfake detection:")
             if not st.session_state.xception_model_loaded:
-                if st.button("📥 Load Xception Model
+                if st.button("📥 Load Xception Model", type="primary"):
                     # Load Xception model
                     model, device = load_detection_model_xception()
                     if model is not None:
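The core of this change: the old sequential "Stage" expanders (and the column layout) are replaced by three `st.tabs`, with model readiness tracked in `st.session_state` flags. A minimal sketch of that pattern — the loader here is a stand-in, not the app's `load_detection_model_xception`:

```python
# Minimal sketch of the tab + session-state pattern this commit adopts.
import streamlit as st

def load_model_somehow():
    # Stand-in for the app's real model loader; returns a dummy object.
    return object()

# Flag survives reruns, so the model is only loaded once per session.
if 'xception_model_loaded' not in st.session_state:
    st.session_state.xception_model_loaded = False

tab1, tab2, tab3 = st.tabs(["Deepfake Detection", "Image Captions", "LLM Analysis"])

with tab1:
    st.header("Deepfake Detection")
    if not st.session_state.xception_model_loaded:
        if st.button("Load model", type="primary"):
            st.session_state.xception_model = load_model_somehow()
            st.session_state.xception_model_loaded = True
            st.rerun()  # re-render so the loaded state shows immediately
    else:
        st.success("Model loaded and ready!")
```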
@@ -763,7 +717,226 @@ def main():
             else:
                 st.success("✅ Xception model loaded and ready!")
 
-
+        # Image upload section
+        with st.expander("Upload and Analyze Image", expanded=True):
+            st.subheader("Upload an Image")
+
+            # Add alternative upload methods
+            upload_tab1, upload_tab2 = st.tabs(["File Upload", "URL Input"])
+
+            uploaded_image = None
+
+            with upload_tab1:
+                uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+                if uploaded_file is not None:
+                    try:
+                        # Simple direct approach - load the image directly
+                        image = Image.open(uploaded_file).convert("RGB")
+                        uploaded_image = image
+                        st.session_state.upload_method = "file"
+                    except Exception as e:
+                        st.error(f"Error loading image: {str(e)}")
+                        import traceback
+                        st.error(traceback.format_exc())
+
+            with upload_tab2:
+                url = st.text_input("Enter image URL:")
+                if url and url.strip():
+                    try:
+                        import requests
+                        # Simplified URL handling with more robust approach
+                        headers = {
+                            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
+                            'Accept': 'image/jpeg, image/png, image/*, */*',
+                            'Referer': 'https://huggingface.co/'
+                        }
+
+                        # Try three different methods to handle various API restrictions
+                        try_methods = True
+
+                        # Method 1: Direct requests
+                        if try_methods:
+                            try:
+                                response = requests.get(url, stream=True, headers=headers, timeout=10)
+                                if response.status_code == 200 and 'image' in response.headers.get('Content-Type', ''):
+                                    try:
+                                        image = Image.open(io.BytesIO(response.content)).convert("RGB")
+                                        uploaded_image = image
+                                        st.session_state.upload_method = "url_direct"
+                                        try_methods = False
+                                        st.success("✅ Image loaded via direct request")
+                                    except Exception as e:
+                                        st.warning(f"Direct method received data but couldn't process as image: {str(e)}")
+                                else:
+                                    st.info(f"Direct method failed: Status {response.status_code}, trying alternative method...")
+                            except Exception as e:
+                                st.info(f"Direct method error: {str(e)}, trying alternative method...")
+
+                        # Method 2: Use Python's urllib as fallback
+                        if try_methods:
+                            try:
+                                import urllib.request
+                                from urllib.error import HTTPError
+
+                                opener = urllib.request.build_opener()
+                                opener.addheaders = [('User-agent', headers['User-Agent'])]
+                                urllib.request.install_opener(opener)
+
+                                with urllib.request.urlopen(url, timeout=10) as response:
+                                    image_data = response.read()
+                                    image = Image.open(io.BytesIO(image_data)).convert("RGB")
+                                    uploaded_image = image
+                                    st.session_state.upload_method = "url_urllib"
+                                    try_methods = False
+                                    st.success("✅ Image loaded via urllib")
+                            except HTTPError as e:
+                                st.info(f"urllib method failed: HTTP error {e.code}, trying next method...")
+                            except Exception as e:
+                                st.info(f"urllib method error: {str(e)}, trying next method...")
+
+                        # Method 3: Use a proxy service as last resort
+                        if try_methods:
+                            try:
+                                # This uses an image proxy service to bypass CORS issues
+                                # Only as last resort since it depends on external service
+                                proxy_url = f"https://images.weserv.nl/?url={url}"
+                                response = requests.get(proxy_url, stream=True, timeout=10)
+                                if response.status_code == 200:
+                                    image = Image.open(io.BytesIO(response.content)).convert("RGB")
+                                    uploaded_image = image
+                                    st.session_state.upload_method = "url_proxy"
+                                    try_methods = False
+                                    st.success("✅ Image loaded via proxy service")
+                                else:
+                                    st.error(f"All methods failed to load the image from URL. Last status: {response.status_code}")
+                            except Exception as e:
+                                st.error(f"All methods failed. Final error: {str(e)}")
+
+                        if not uploaded_image:
+                            st.error("Failed to load image using all available methods.")
+
+                    except Exception as e:
+                        st.error(f"Error processing URL: {str(e)}")
+                        if st.session_state.debug:
+                            import traceback
+                            st.error(traceback.format_exc())
+
+            # If we have an uploaded image, process it
+            if uploaded_image is not None:
+                # Display the image
+                image = uploaded_image
+                col1, col2 = st.columns([1, 2])
+                with col1:
+                    st.image(image, caption="Uploaded Image", width=300)
+
+                # Continue with Xception model analysis
+                if st.session_state.xception_model_loaded:
+                    try:
+                        with st.spinner("Analyzing image with Xception model..."):
+                            # Preprocess image for Xception
+                            input_tensor, original_image, face_box = preprocess_image_xception(image)
+
+                            if input_tensor is None:
+                                st.error("Failed to preprocess image. Please try another image.")
+                                st.stop()
+
+                            # Get device and model
+                            device = st.session_state.device
+                            model = st.session_state.xception_model
+
+                            # Ensure model is in eval mode
+                            model.eval()
+
+                            # Move tensor to device
+                            input_tensor = input_tensor.to(device)
+
+                            # Forward pass with proper error handling
+                            try:
+                                with torch.no_grad():
+                                    logits = model(input_tensor)
+                                    probabilities = torch.softmax(logits, dim=1)[0]
+                                    pred_class = torch.argmax(probabilities).item()
+                                    confidence = probabilities[pred_class].item()
+
+                                    # Explicit class mapping - adjust if needed based on your model
+                                    pred_label = "Fake" if pred_class == 0 else "Real"
+                            except Exception as e:
+                                st.error(f"Error in model inference: {str(e)}")
+                                import traceback
+                                st.error(traceback.format_exc())
+                                # Set default values
+                                pred_class = 0
+                                confidence = 0.5
+                                pred_label = "Error in prediction"
+
+                            # Display results
+                            with col2:
+                                st.markdown("### Detection Result")
+                                st.markdown(f"**Classification:** {pred_label} (Confidence: {confidence:.2%})")
+
+                                # Display face box on image if detected
+                                if face_box:
+                                    img_to_show = original_image.copy()
+                                    img_draw = np.array(img_to_show)
+                                    x, y, w, h = face_box
+                                    cv2.rectangle(img_draw, (x, y), (x + w, y + h), (0, 255, 0), 2)
+                                    st.image(Image.fromarray(img_draw), caption="Detected Face", width=300)
+
+                                # GradCAM visualization with error handling
+                                st.subheader("GradCAM Visualization")
+                                try:
+                                    cam, overlay, comparison, detected_face_box = process_image_with_xception_gradcam(
+                                        image, model, device, pred_class
+                                    )
+
+                                    if comparison:
+                                        # Display GradCAM results (controlled size)
+                                        st.image(comparison, caption="Original | CAM | Overlay", width=700)
+
+                                        # Save for later use
+                                        st.session_state.comparison_image = comparison
+                                    else:
+                                        st.error("GradCAM visualization failed - comparison image not generated")
+
+                                    # Generate caption for GradCAM overlay image if BLIP model is loaded
+                                    if st.session_state.blip_model_loaded and overlay:
+                                        with st.spinner("Analyzing GradCAM visualization..."):
+                                            gradcam_caption = generate_gradcam_caption(
+                                                overlay,
+                                                st.session_state.finetuned_processor,
+                                                st.session_state.finetuned_model
+                                            )
+                                            st.session_state.gradcam_caption = gradcam_caption
+
+                                            # Display the caption directly here as well for immediate feedback
+                                            st.markdown("### GradCAM Analysis")
+                                            st.markdown(gradcam_caption)
+                                except Exception as e:
+                                    st.error(f"Error generating GradCAM: {str(e)}")
+                                    import traceback
+                                    st.error(traceback.format_exc())
+
+                            # Save results in session state for use in other tabs
+                            st.session_state.current_image = image
+                            st.session_state.current_overlay = overlay if 'overlay' in locals() else None
+                            st.session_state.current_face_box = detected_face_box if 'detected_face_box' in locals() else None
+                            st.session_state.current_pred_label = pred_label
+                            st.session_state.current_confidence = confidence
+
+                            st.success("✅ Initial detection and GradCAM visualization complete!")
+                    except Exception as e:
+                        st.error(f"Overall error in Xception processing: {str(e)}")
+                        import traceback
+                        st.error(traceback.format_exc())
+                else:
+                    st.warning("⚠️ Please load the Xception model first to perform initial detection.")
+
+    # Tab 2: Image Captions with BLIP models
+    with tab2:
+        st.header("Image Captions")
+
+        # Model Loading section
+        with st.expander("Load Captioning Models", expanded=True):
             if not st.session_state.blip_model_loaded:
                 if st.button("📥 Load BLIP for Captioning", type="primary"):
                     # Load BLIP models
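The URL tab's three-step fallback (direct `requests` with browser-like headers, then `urllib` with a spoofed User-Agent, then the images.weserv.nl proxy) reads more easily factored into one function. A sketch — the wrapper, its signature, and the `(image, method)` return pair are ours; the individual steps mirror the diff:

```python
# Sketch of the three-method URL fallback used in the new upload tab.
import io
import urllib.request

import requests
from PIL import Image

HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'image/jpeg, image/png, image/*, */*',
}

def load_image_from_url(url: str, timeout: int = 10):
    """Return (PIL.Image, method_name), or (None, None) if every method fails."""
    # Method 1: plain requests with browser-like headers.
    try:
        r = requests.get(url, headers=HEADERS, timeout=timeout)
        if r.status_code == 200 and 'image' in r.headers.get('Content-Type', ''):
            return Image.open(io.BytesIO(r.content)).convert("RGB"), "url_direct"
    except Exception:
        pass
    # Method 2: urllib with a spoofed User-Agent, in case requests is blocked.
    try:
        req = urllib.request.Request(url, headers={'User-Agent': HEADERS['User-Agent']})
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return Image.open(io.BytesIO(resp.read())).convert("RGB"), "url_urllib"
    except Exception:
        pass
    # Method 3: external image proxy as a last resort (adds a dependency).
    try:
        r = requests.get(f"https://images.weserv.nl/?url={url}", timeout=timeout)
        if r.status_code == 200:
            return Image.open(io.BytesIO(r.content)).convert("RGB"), "url_proxy"
    except Exception:
        pass
    return None, None
```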
@@ -779,10 +952,65 @@ def main():
                         st.error("❌ Failed to load BLIP models.")
             else:
                 st.success("✅ BLIP captioning models loaded and ready!")
 
-
+        # Image Caption Display
+        if hasattr(st.session_state, 'current_image'):
+            col1, col2 = st.columns([1, 2])
+
+            with col1:
+                st.image(st.session_state.current_image, caption="Image", width=300)
+
+                if hasattr(st.session_state, 'current_overlay'):
+                    st.image(st.session_state.current_overlay, caption="GradCAM Visualization", width=300)
+
+            with col2:
+                if not st.session_state.blip_model_loaded:
+                    st.warning("⚠️ Please load the BLIP models first to see captions.")
+                else:
+                    # Button to generate captions if not already generated
+                    if not hasattr(st.session_state, 'image_caption') or st.button("Regenerate Image Caption"):
+                        with st.spinner("Generating image description..."):
+                            caption = generate_image_caption(
+                                st.session_state.current_image,
+                                st.session_state.original_processor,
+                                st.session_state.original_model
+                            )
+                            st.session_state.image_caption = caption
+
+                    # Display original image caption
+                    if hasattr(st.session_state, 'image_caption'):
+                        st.markdown("### Image Description")
+                        st.markdown(st.session_state.image_caption)
+                        st.markdown("---")
+
+                    # Display GradCAM caption if available
+                    if hasattr(st.session_state, 'gradcam_caption'):
+                        st.markdown("### GradCAM Analysis")
+                        st.markdown(st.session_state.gradcam_caption)
+
+                        # Button to regenerate GradCAM caption
+                        if hasattr(st.session_state, 'current_overlay') and st.button("Regenerate GradCAM Caption"):
+                            with st.spinner("Reanalyzing GradCAM visualization..."):
+                                gradcam_caption = generate_gradcam_caption(
+                                    st.session_state.current_overlay,
+                                    st.session_state.finetuned_processor,
+                                    st.session_state.finetuned_model
+                                )
+                                st.session_state.gradcam_caption = gradcam_caption
+                                st.rerun()
+                    else:
+                        st.info("GradCAM caption not available. Visit the Detection tab to generate it.")
+        else:
+            st.info("Please upload and analyze an image in the Detection tab first.")
+
+    # Tab 3: LLM Analysis
+    with tab3:
+        st.header("LLM Analysis")
+
+        # Model Loading section
+        with st.expander("Load LLM Model", expanded=True):
             if not st.session_state.llm_model_loaded:
-                if st.button("📥 Load Vision LLM
+                if st.button("📥 Load Vision LLM", type="primary"):
                    # Load LLM model
                    model, tokenizer = load_llm_model()
                    if model is not None and tokenizer is not None:
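Tab 2 introduces a compute-once / regenerate-on-demand pattern: captions are cached in `st.session_state`, and a button plus `st.rerun()` forces a refresh. A stripped-down sketch with a stand-in captioner; note that `'key' in st.session_state` is the documented membership test, while the diff's `hasattr` check also works because session state supports attribute access:

```python
# Sketch of the compute-once / regenerate-on-demand caching pattern in Tab 2.
import streamlit as st

def expensive_caption() -> str:
    # Stand-in for generate_image_caption(...) / generate_gradcam_caption(...).
    return "a placeholder caption"

# First run computes and stores; later reruns reuse the cached value.
if 'image_caption' not in st.session_state:
    st.session_state.image_caption = expensive_caption()

# The button forces a fresh computation and an immediate re-render.
if st.button("Regenerate Image Caption"):
    with st.spinner("Generating image description..."):
        st.session_state.image_caption = expensive_caption()
        st.rerun()  # safe: on the rerun the key exists and the button is unpressed

st.markdown(st.session_state.image_caption)
```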
@@ -794,263 +1022,23 @@ def main():
                        st.error("❌ Failed to load Vision LLM.")
            else:
                st.success("✅ Vision LLM loaded and ready!")
-
-    # Image upload section
-    with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
-        st.subheader("Upload an Image")
-
-        # Add alternative upload methods
-        upload_tab1, upload_tab2 = st.tabs(["File Upload", "URL Input"])
-
-        uploaded_image = None
 
-
-
-
-            try:
-                # Simple direct approach - load the image directly
-                image = Image.open(uploaded_file).convert("RGB")
-                uploaded_image = image
-                st.session_state.upload_method = "file"
-            except Exception as e:
-                st.error(f"Error loading image: {str(e)}")
-                import traceback
-                st.error(traceback.format_exc())
-
-        with upload_tab2:
-            url = st.text_input("Enter image URL:")
-            if url and url.strip():
-                try:
-                    import requests
-                    # Simplified URL handling with more robust approach
-                    headers = {
-                        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
-                        'Accept': 'image/jpeg, image/png, image/*, */*',
-                        'Referer': 'https://huggingface.co/'
-                    }
-
-                    # Try three different methods to handle various API restrictions
-                    try_methods = True
-
-                    # Method 1: Direct requests
-                    if try_methods:
-                        try:
-                            response = requests.get(url, stream=True, headers=headers, timeout=10)
-                            if response.status_code == 200 and 'image' in response.headers.get('Content-Type', ''):
-                                try:
-                                    image = Image.open(io.BytesIO(response.content)).convert("RGB")
-                                    uploaded_image = image
-                                    st.session_state.upload_method = "url_direct"
-                                    try_methods = False
-                                    st.success("✅ Image loaded via direct request")
-                                except Exception as e:
-                                    st.warning(f"Direct method received data but couldn't process as image: {str(e)}")
-                            else:
-                                st.info(f"Direct method failed: Status {response.status_code}, trying alternative method...")
-                        except Exception as e:
-                            st.info(f"Direct method error: {str(e)}, trying alternative method...")
-
-                    # Method 2: Use Python's urllib as fallback
-                    if try_methods:
-                        try:
-                            import urllib.request
-                            from urllib.error import HTTPError
-
-                            opener = urllib.request.build_opener()
-                            opener.addheaders = [('User-agent', headers['User-Agent'])]
-                            urllib.request.install_opener(opener)
-
-                            with urllib.request.urlopen(url, timeout=10) as response:
-                                image_data = response.read()
-                                image = Image.open(io.BytesIO(image_data)).convert("RGB")
-                                uploaded_image = image
-                                st.session_state.upload_method = "url_urllib"
-                                try_methods = False
-                                st.success("✅ Image loaded via urllib")
-                        except HTTPError as e:
-                            st.info(f"urllib method failed: HTTP error {e.code}, trying next method...")
-                        except Exception as e:
-                            st.info(f"urllib method error: {str(e)}, trying next method...")
-
-                    # Method 3: Use a proxy service as last resort
-                    if try_methods:
-                        try:
-                            # This uses an image proxy service to bypass CORS issues
-                            # Only as last resort since it depends on external service
-                            proxy_url = f"https://images.weserv.nl/?url={url}"
-                            response = requests.get(proxy_url, stream=True, timeout=10)
-                            if response.status_code == 200:
-                                image = Image.open(io.BytesIO(response.content)).convert("RGB")
-                                uploaded_image = image
-                                st.session_state.upload_method = "url_proxy"
-                                try_methods = False
-                                st.success("✅ Image loaded via proxy service")
-                            else:
-                                st.error(f"All methods failed to load the image from URL. Last status: {response.status_code}")
-                        except Exception as e:
-                            st.error(f"All methods failed. Final error: {str(e)}")
-
-                    if not uploaded_image:
-                        st.error("Failed to load image using all available methods.")
-
-                except Exception as e:
-                    st.error(f"Error processing URL: {str(e)}")
-                    if st.session_state.debug:
-                        import traceback
-                        st.error(traceback.format_exc())
-
-        # If we have an uploaded image, process it
-        if uploaded_image is not None:
-            # Display the image
-            image = uploaded_image
-            col1, col2 = st.columns([1, 2])
-            with col1:
-                st.image(image, caption="Uploaded Image", width=300)
-
-            # Generate detailed caption for original image if BLIP model is loaded
-            if st.session_state.blip_model_loaded:
-                with st.spinner("Generating image description..."):
-                    caption = generate_image_caption(
-                        image,
-                        st.session_state.original_processor,
-                        st.session_state.original_model
-                    )
-                    st.session_state.image_caption = caption
+        # Chat Interface
+        if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
+            st.subheader("Deepfake Analysis Chat")
 
-            #
-            if st.session_state.xception_model_loaded:
-                try:
-                    with st.spinner("Analyzing image with Xception model..."):
-                        # Preprocess image for Xception
-                        input_tensor, original_image, face_box = preprocess_image_xception(image)
-
-                        if input_tensor is None:
-                            st.error("Failed to preprocess image. Please try another image.")
-                            st.stop()
-
-                        # Get device and model
-                        device = st.session_state.device
-                        model = st.session_state.xception_model
-
-                        # Ensure model is in eval mode
-                        model.eval()
-
-                        # Move tensor to device
-                        input_tensor = input_tensor.to(device)
-
-                        # Forward pass with proper error handling
-                        try:
-                            with torch.no_grad():
-                                logits = model(input_tensor)
-                                probabilities = torch.softmax(logits, dim=1)[0]
-                                pred_class = torch.argmax(probabilities).item()
-                                confidence = probabilities[pred_class].item()
-
-                                # Explicit class mapping - adjust if needed based on your model
-                                pred_label = "Fake" if pred_class == 0 else "Real"
-                        except Exception as e:
-                            st.error(f"Error in model inference: {str(e)}")
-                            import traceback
-                            st.error(traceback.format_exc())
-                            # Set default values
-                            pred_class = 0
-                            confidence = 0.5
-                            pred_label = "Error in prediction"
-
-                        # Display results
-                        with col2:
-                            st.markdown("### Detection Result")
-                            st.markdown(f"**Classification:** {pred_label} (Confidence: {confidence:.2%})")
-
-                            # Display face box on image if detected
-                            if face_box:
-                                img_to_show = original_image.copy()
-                                img_draw = np.array(img_to_show)
-                                x, y, w, h = face_box
-                                cv2.rectangle(img_draw, (x, y), (x + w, y + h), (0, 255, 0), 2)
-                                st.image(Image.fromarray(img_draw), caption="Detected Face", width=300)
-
-                            # GradCAM visualization with error handling
-                            st.subheader("GradCAM Visualization")
-                            try:
-                                cam, overlay, comparison, detected_face_box = process_image_with_xception_gradcam(
-                                    image, model, device, pred_class
-                                )
-
-                                if comparison:
-                                    # Display GradCAM results (controlled size)
-                                    st.image(comparison, caption="Original | CAM | Overlay", width=700)
-
-                                    # Save for later use
-                                    st.session_state.comparison_image = comparison
-                                else:
-                                    st.error("GradCAM visualization failed - comparison image not generated")
-
-                                # Generate caption for GradCAM overlay image if BLIP model is loaded
-                                if st.session_state.blip_model_loaded and overlay:
-                                    with st.spinner("Analyzing GradCAM visualization..."):
-                                        gradcam_caption = generate_gradcam_caption(
-                                            overlay,
-                                            st.session_state.finetuned_processor,
-                                            st.session_state.finetuned_model
-                                        )
-                                        st.session_state.gradcam_caption = gradcam_caption
-
-                                        # Display the caption directly here as well for immediate feedback
-                                        st.markdown("### GradCAM Analysis")
-                                        st.markdown(gradcam_caption)
-                            except Exception as e:
-                                st.error(f"Error generating GradCAM: {str(e)}")
-                                import traceback
-                                st.error(traceback.format_exc())
-
-                        # Save results in session state for LLM analysis
-                        st.session_state.current_image = image
-                        st.session_state.current_overlay = overlay if 'overlay' in locals() else None
-                        st.session_state.current_face_box = detected_face_box if 'detected_face_box' in locals() else None
-                        st.session_state.current_pred_label = pred_label
-                        st.session_state.current_confidence = confidence
-
-                        st.success("✅ Initial detection and GradCAM visualization complete!")
-                except Exception as e:
-                    st.error(f"Overall error in Xception processing: {str(e)}")
-                    import traceback
-                    st.error(traceback.format_exc())
-            else:
-                st.warning("⚠️ Please load the Xception model first to perform initial detection.")
-
-    # Image Analysis Summary section - AFTER Stage 2
-    if hasattr(st.session_state, 'current_image') and (hasattr(st.session_state, 'image_caption') or hasattr(st.session_state, 'gradcam_caption')):
-        with st.expander("Image Analysis Summary", expanded=True):
-            # Display images and analysis in organized layout
+            # Display images
             col1, col2 = st.columns([1, 2])
-
             with col1:
-                # Display original image
                 st.image(st.session_state.current_image, caption="Original Image", width=300)
-                # Display GradCAM overlay
                 if hasattr(st.session_state, 'current_overlay'):
                     st.image(st.session_state.current_overlay, caption="GradCAM Visualization", width=300)
 
             with col2:
-                #
-                if hasattr(st.session_state, '
-                st.markdown("###
-                st.markdown(st.session_state.
-                st.markdown("---")
-
-                # GradCAM analysis
-                if hasattr(st.session_state, 'gradcam_caption'):
-                    st.markdown("### GradCAM Analysis")
-                    st.markdown(st.session_state.gradcam_caption)
-                    st.markdown("---")
-                else:
-                    st.warning("GradCAM caption not found in session state.")
-
-    # LLM Analysis section - AFTER Image Analysis Summary
-    with st.expander("Stage 3: Detailed Analysis with Vision LLM", expanded=False):
-        if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
-            st.subheader("Detailed Deepfake Analysis")
+                # Display detection result if available
+                if hasattr(st.session_state, 'current_pred_label'):
+                    st.markdown("### Detection Result")
+                    st.markdown(f"**Classification:** {st.session_state.current_pred_label} (Confidence: {st.session_state.current_confidence:.2%})")
 
            # Display chat history
            for i, (question, answer) in enumerate(st.session_state.chat_history):
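The detection branch re-added in Tab 1 boils down to a standard softmax inference step. A self-contained sketch with a dummy two-class network standing in for the real Xception model; 299×299 is Xception's usual input size, though `preprocess_image_xception` may differ, and the index-0-means-Fake mapping is whatever the checkpoint was trained with:

```python
# Sketch of the inference step at the heart of Tab 1's detection branch.
import torch
import torch.nn as nn

# Dummy stand-in for the Xception classifier; shape assumptions are ours.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 299 * 299, 2))
model.eval()  # freeze dropout/batch-norm behavior before inference

# Stand-in for the preprocessed face crop tensor.
input_tensor = torch.randn(1, 3, 299, 299)

with torch.no_grad():  # no gradients needed for a forward-only pass
    logits = model(input_tensor)
    probabilities = torch.softmax(logits, dim=1)[0]
    pred_class = torch.argmax(probabilities).item()
    confidence = probabilities[pred_class].item()

# Class-index mapping is a property of how the model was trained;
# this app maps index 0 to "Fake".
pred_label = "Fake" if pred_class == 0 else "Real"
print(f"{pred_label} ({confidence:.2%})")
```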
@@ -1058,6 +1046,17 @@ def main():
                st.markdown(f"**Answer:** {answer}")
                st.markdown("---")
 
+            # Custom instruction
+            use_custom_instructions = st.toggle("Enable Custom Instructions", value=False, help="Toggle to enable/disable custom instructions")
+            if use_custom_instructions:
+                custom_instruction = st.text_area(
+                    "Custom Instructions (Advanced)",
+                    value="Specify your preferred style of explanation (e.g., 'Provide technical, detailed explanations' or 'Use simple, non-technical language'). You can also specify what aspects of the image to focus on.",
+                    help="Add specific instructions for the analysis"
+                )
+            else:
+                custom_instruction = ""
+
            # Include both captions in the prompt if available
            caption_text = ""
            if hasattr(st.session_state, 'image_caption'):
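The new custom-instruction toggle presumably feeds the LLM prompt alongside the two captions; the actual prompt assembly sits between this hunk and the next and is not shown. A hypothetical sketch of the general shape, with made-up caption values:

```python
# Hypothetical sketch of combining captions and the custom instruction into
# one prompt. The app's real prompt format is not visible in this diff.
image_caption = "a close-up of a man's face under studio lighting"   # made up
gradcam_caption = "activation concentrated around the mouth region"  # made up
custom_instruction = "Use simple, non-technical language."

caption_text = ""
if image_caption:
    caption_text += f"Image description: {image_caption}\n"
if gradcam_caption:
    caption_text += f"GradCAM analysis: {gradcam_caption}\n"

prompt = f"{caption_text}Question: Does this image show signs of manipulation?"
if custom_instruction:
    prompt += f"\nInstructions: {custom_instruction}"
print(prompt)
```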
@@ -1142,11 +1141,11 @@ def main():
 
            except Exception as e:
                st.error(f"Error during LLM analysis: {str(e)}")
-
-        elif not hasattr(st.session_state, 'current_image'):
-            st.warning("⚠️ Please upload an image and complete the initial detection first.")
        else:
-            st.
+            if not hasattr(st.session_state, 'current_image'):
+                st.warning("⚠️ Please upload an image in the Detection tab first.")
+            else:
+                st.warning("⚠️ Please load the Vision LLM to perform detailed analysis.")
 
    # Footer
    st.markdown("---")
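All three tabs gate on the `xception_model_loaded`, `blip_model_loaded`, and `llm_model_loaded` flags. Their initialization happens earlier in `main()`, outside this diff; a typical guard would look like:

```python
# Assumed initialization guard for the session-state flags the tabs rely on;
# the app's actual initialization code is not part of this diff.
import streamlit as st

for flag in ("xception_model_loaded", "blip_model_loaded", "llm_model_loaded"):
    if flag not in st.session_state:
        st.session_state[flag] = False
```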