saakshigupta committed
Commit 388d161 · verified · 1 Parent(s): e05ed33

Update app.py

Files changed (1):
  1. app.py (+111 -116)
app.py CHANGED
@@ -197,17 +197,8 @@ This tool detects deepfakes using three AI models:
 temperature = 0.7
 max_tokens = 500
 
-# Custom instruction text area in sidebar
-use_custom_instructions = st.sidebar.toggle("Enable Custom Instructions", value=False, help="Toggle to enable/disable custom instructions")
-
-if use_custom_instructions:
-    custom_instruction = st.sidebar.text_area(
-        "Custom Instructions (Advanced)",
-        value="Specify your preferred style of explanation (e.g., 'Provide technical, detailed explanations' or 'Use simple, non-technical language'). You can also specify what aspects of the image to focus on.",
-        help="Add specific instructions for the analysis"
-    )
-else:
-    custom_instruction = ""
+# Define empty custom_instruction to maintain compatibility
+custom_instruction = ""
 
 # ----- GradCAM Implementation for Xception -----
 class ImageDataset(torch.utils.data.Dataset):
@@ -1027,120 +1018,124 @@ def main():
     if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
         st.subheader("Deepfake Analysis Chat")
 
-        # Display images
-        col1, col2 = st.columns([1, 2])
-        with col1:
-            st.image(st.session_state.current_image, caption="Original Image", width=300)
-            if hasattr(st.session_state, 'current_overlay'):
-                st.image(st.session_state.current_overlay, caption="GradCAM Visualization", width=300)
-
-        with col2:
-            # Display detection result if available
-            if hasattr(st.session_state, 'current_pred_label'):
-                st.markdown("### Detection Result")
-                st.markdown(f"**Classification:** {st.session_state.current_pred_label} (Confidence: {st.session_state.current_confidence:.2%})")
+        # Display reference images in a sidebar-like column
+        col_images, col_chat = st.columns([1, 3])
 
-        # Display chat history
-        for i, (question, answer) in enumerate(st.session_state.chat_history):
-            st.markdown(f"**Question {i+1}:** {question}")
-            st.markdown(f"**Answer:** {answer}")
-            st.markdown("---")
-
-        # Custom instruction
-        use_custom_instructions = st.toggle("Enable Custom Instructions", value=False, help="Toggle to enable/disable custom instructions")
-        if use_custom_instructions:
-            custom_instruction = st.text_area(
-                "Custom Instructions (Advanced)",
-                value="Specify your preferred style of explanation (e.g., 'Provide technical, detailed explanations' or 'Use simple, non-technical language'). You can also specify what aspects of the image to focus on.",
-                help="Add specific instructions for the analysis"
-            )
-        else:
-            custom_instruction = ""
-
-        # Include both captions in the prompt if available
-        caption_text = ""
-        if hasattr(st.session_state, 'image_caption'):
-            caption_text += f"\n\nImage Description:\n{st.session_state.image_caption}"
+        with col_images:
+            st.write("#### Reference Images")
+            st.image(st.session_state.current_image, caption="Original", use_column_width=True)
 
-        if hasattr(st.session_state, 'gradcam_caption'):
-            caption_text += f"\n\nGradCAM Analysis:\n{st.session_state.gradcam_caption}"
-
-        # Default question with option to customize
-        default_question = f"This image has been classified as {{pred_label}}. Analyze all the provided images (original, GradCAM visualization, and comparison) to determine if this is a deepfake. Focus on highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."
-
-        # User input for new question
-        new_question = st.text_area("Ask a question about the image:", value=default_question if not st.session_state.chat_history else "", height=100)
-
-        # Analyze button and Clear Chat button in the same row
-        col1, col2 = st.columns([3, 1])
-        with col1:
-            analyze_button = st.button("🔍 Send Question", type="primary")
-        with col2:
-            clear_button = st.button("🗑️ Clear Chat History")
-
-        if clear_button:
-            st.session_state.chat_history = []
-            st.rerun()
+            if hasattr(st.session_state, 'current_overlay'):
+                st.image(st.session_state.current_overlay, caption="GradCAM", use_column_width=True)
 
-        if analyze_button and new_question:
-            try:
-                # Add caption info if it's the first question
-                if not st.session_state.chat_history:
-                    full_question = new_question + caption_text
-                else:
-                    full_question = new_question
+            if hasattr(st.session_state, 'comparison_image'):
+                st.image(st.session_state.comparison_image, caption="Comparison", use_column_width=True)
 
-                result = analyze_image_with_llm(
-                    st.session_state.current_image,
-                    st.session_state.current_overlay,
-                    st.session_state.current_face_box,
-                    st.session_state.current_pred_label,
-                    st.session_state.current_confidence,
-                    full_question,
-                    st.session_state.llm_model,
-                    st.session_state.tokenizer,
-                    temperature=temperature,
-                    max_tokens=max_tokens,
-                    custom_instruction=custom_instruction
+            if hasattr(st.session_state, 'current_pred_label'):
+                st.info(f"**Classification:** {st.session_state.current_pred_label} (Confidence: {st.session_state.current_confidence:.2%})")
+
+        with col_chat:
+            # Display chat history
+            for i, (question, answer) in enumerate(st.session_state.chat_history):
+                st.markdown(f"**Question {i+1}:** {question}")
+                st.markdown(f"**Answer:** {answer}")
+                st.markdown("---")
+
+            # Custom instruction in the chat column
+            use_custom_instructions = st.toggle("Enable Custom Instructions", key="llm_custom_instructions", value=False)
+            if use_custom_instructions:
+                custom_instruction = st.text_area(
+                    "Custom Instructions (Advanced)",
+                    value="Specify your preferred style of explanation (e.g., 'Provide technical, detailed explanations' or 'Use simple, non-technical language'). You can also specify what aspects of the image to focus on.",
+                    help="Add specific instructions for the analysis"
                 )
+            else:
+                custom_instruction = ""
+
+            # Include both captions in the prompt if available
+            caption_text = ""
+            if hasattr(st.session_state, 'image_caption'):
+                caption_text += f"\n\nImage Description:\n{st.session_state.image_caption}"
 
-                # Add to chat history
-                st.session_state.chat_history.append((new_question, result))
-
-                # Display the latest result too
-                st.success("✅ Analysis complete!")
+            if hasattr(st.session_state, 'gradcam_caption'):
+                caption_text += f"\n\nGradCAM Analysis:\n{st.session_state.gradcam_caption}"
+
+            # Default question with option to customize
+            default_question = f"This image has been classified as {{pred_label}}. Analyze all the provided images (original, GradCAM visualization, and comparison) to determine if this is a deepfake. Focus on highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."
+
+            # User input for new question
+            new_question = st.text_area("Ask a question about the image:", value=default_question if not st.session_state.chat_history else "", height=100)
+
+            # Analyze button and Clear Chat button in the same row
+            col1, col2 = st.columns([3, 1])
+            with col1:
+                analyze_button = st.button("🔍 Send Question", type="primary")
+            with col2:
+                clear_button = st.button("🗑️ Clear Chat History")
+
+            if clear_button:
+                st.session_state.chat_history = []
+                st.rerun()
 
-                # Check if the result contains both technical and non-technical explanations
-                if "Technical" in result and "Non-Technical" in result:
-                    try:
-                        # Split the result into technical and non-technical sections
-                        parts = result.split("Non-Technical")
-                        technical = parts[0]
-                        non_technical = "Non-Technical" + parts[1]
-
-                        # Display in two columns
-                        tech_col, simple_col = st.columns(2)
-                        with tech_col:
-                            st.subheader("Technical Analysis")
-                            st.markdown(technical)
-
-                        with simple_col:
-                            st.subheader("Simple Explanation")
-                            st.markdown(non_technical)
-                    except Exception as e:
-                        # Fallback if splitting fails
+            if analyze_button and new_question:
+                try:
+                    # Add caption info if it's the first question
+                    if not st.session_state.chat_history:
+                        full_question = new_question + caption_text
+                    else:
+                        full_question = new_question
+
+                    result = analyze_image_with_llm(
+                        st.session_state.current_image,
+                        st.session_state.current_overlay,
+                        st.session_state.current_face_box,
+                        st.session_state.current_pred_label,
+                        st.session_state.current_confidence,
+                        full_question,
+                        st.session_state.llm_model,
+                        st.session_state.tokenizer,
+                        temperature=temperature,
+                        max_tokens=max_tokens,
+                        custom_instruction=custom_instruction
+                    )
+
+                    # Add to chat history
+                    st.session_state.chat_history.append((new_question, result))
+
+                    # Display the latest result too
+                    st.success("✅ Analysis complete!")
+
+                    # Check if the result contains both technical and non-technical explanations
+                    if "Technical" in result and "Non-Technical" in result:
+                        try:
+                            # Split the result into technical and non-technical sections
+                            parts = result.split("Non-Technical")
+                            technical = parts[0]
+                            non_technical = "Non-Technical" + parts[1]
+
+                            # Display in two columns
+                            tech_col, simple_col = st.columns(2)
+                            with tech_col:
+                                st.subheader("Technical Analysis")
+                                st.markdown(technical)
+
+                            with simple_col:
+                                st.subheader("Simple Explanation")
+                                st.markdown(non_technical)
+                        except Exception as e:
+                            # Fallback if splitting fails
+                            st.subheader("Analysis Result")
+                            st.markdown(result)
+                    else:
+                        # Just display the whole result
                         st.subheader("Analysis Result")
                         st.markdown(result)
-                else:
-                    # Just display the whole result
-                    st.subheader("Analysis Result")
-                    st.markdown(result)
-
-                # Rerun to update the chat history display
-                st.rerun()
-
-            except Exception as e:
-                st.error(f"Error during LLM analysis: {str(e)}")
+
+                    # Rerun to update the chat history display
+                    st.rerun()
+
+                except Exception as e:
+                    st.error(f"Error during LLM analysis: {str(e)}")
     else:
         if not hasattr(st.session_state, 'current_image'):
             st.warning("⚠️ Please upload an image in the Detection tab first.")
 
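For context, the layout change in the second hunk can be reproduced in isolation. The sketch below (a hypothetical demo_layout.py, not part of this commit) shows the same pattern the new code adopts: a narrow column of reference images beside a wide chat column, a keyed st.toggle for optional custom instructions, and a chat history kept in st.session_state. The placeholder image and canned answer stand in for the app's GradCAM overlay and its analyze_image_with_llm call.

# demo_layout.py: minimal sketch of the two-column chat layout adopted above (hypothetical, not part of this commit)
import streamlit as st
from PIL import Image

st.subheader("Deepfake Analysis Chat")

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Narrow image column on the left, wide chat column on the right, as in the new code
col_images, col_chat = st.columns([1, 3])

with col_images:
    st.write("#### Reference Images")
    placeholder = Image.new("RGB", (300, 300), color=(90, 90, 90))  # stands in for st.session_state.current_image
    st.image(placeholder, caption="Original", use_column_width=True)

with col_chat:
    # Replay the chat history stored in session state
    for i, (question, answer) in enumerate(st.session_state.chat_history):
        st.markdown(f"**Question {i+1}:** {question}")
        st.markdown(f"**Answer:** {answer}")
        st.markdown("---")

    # Keyed toggle, as in the updated code, so the widget has a stable identity
    use_custom = st.toggle("Enable Custom Instructions", key="llm_custom_instructions", value=False)
    custom_instruction = st.text_area("Custom Instructions (Advanced)") if use_custom else ""

    new_question = st.text_area("Ask a question about the image:", height=100)
    if st.button("🔍 Send Question", type="primary") and new_question:
        # A real app would call its analysis function here (e.g. analyze_image_with_llm in app.py)
        answer = f"(placeholder answer; custom_instruction={custom_instruction!r})"
        st.session_state.chat_history.append((new_question, answer))
        st.rerun()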