GuglielmoTor committed
Commit 3332e5b · verified · 1 Parent(s): feaf9aa

Update insight_and_tasks/agents/mentions_agent.py

insight_and_tasks/agents/mentions_agent.py CHANGED
@@ -0,0 +1,390 @@
+ # agents/mentions_agent.py
+ import os  # Needed by the __main__ demo block below (os.environ)
+ import pandas as pd
+ from typing import Dict, List, Any, Optional, Mapping
+ import logging
+ import pandasai as pai  # Assuming pandasai is imported as pai globally or configured
+
+ from google.adk.agents import LlmAgent  # Assuming this is the correct import path
+
+ # Project-specific imports
+ from utils.retry_mechanism import RetryMechanism
+ from data_models.metrics import AgentMetrics, TimeSeriesMetric
+
+ # Configure logger for this module
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_AGENT_MODEL = "gemini-2.5-flash-preview-05-20"
+
+ class EnhancedMentionsAnalysisAgent:
+     """
+     Enhanced mentions analysis agent with time-series metric extraction and sentiment processing.
+     """
+     AGENT_NAME = "mentions_analyst"
+     AGENT_DESCRIPTION = "Expert analyst specializing in brand mention trends and sentiment patterns."
+     AGENT_INSTRUCTION = """
+     You are a specialized LinkedIn brand mentions expert focused on sentiment trends and mention patterns over time.
+
+     Your role includes:
+
+     1. MENTION TREND ANALYSIS (monthly, using 'date' column):
+        - Analyze mention volume trends over time.
+        - Identify periods with significant spikes or dips in mention activity.
+
+     2. SENTIMENT PATTERN ANALYSIS (monthly, using 'date' and 'sentiment_label'):
+        - Track the evolution of sentiment (e.g., positive, negative, neutral) associated with mentions.
+        - Calculate and analyze the average sentiment score over time (if sentiment can be quantified).
+        - Identify shifts in overall sentiment and potential drivers for these changes.
+
+     3. CORRELATION (Conceptual):
+        - Consider whether mention spikes/dips or sentiment shifts correlate with any known company activities, campaigns, or external events (this data may not be in the input DataFrame; note the need to investigate it).
+
+     4. METRIC EXTRACTION (for AgentMetrics):
+        - Extract time-series data for monthly mention volume.
+        - Extract time-series data for monthly sentiment distribution (e.g., count of positive/negative/neutral mentions) and average sentiment score.
+        - Provide aggregate metrics like total mentions, overall sentiment distribution, and average sentiment score for the period.
+        - Include categorical metrics like the distribution of sentiment labels.
+
+     Focus on identifying actionable insights from mention data. How is the brand being perceived? Are there emerging reputational risks or opportunities?
+     Use the provided DataFrame columns: 'date' (for mentions), 'sentiment_label' (e.g., 'Positive 👍', 'Negative 👎', 'Neutral 😐'), and potentially 'mention_source' or 'mention_content' if available and relevant for deeper analysis (though focus on 'date' and 'sentiment_label' for core metrics).
+     """
+
+     # Standardized sentiment mapping (can be expanded).
+     # This mapping is crucial for converting labels to scores.
+     SENTIMENT_MAPPING = {
+         'Positive 👍': 1,
+         'Positive': 1,         # Adding common variations
+         'Very Positive': 1.5,  # Example for more granular sentiment
+         'Negative 👎': -1,
+         'Negative': -1,
+         'Very Negative': -1.5,
+         'Neutral 😐': 0,
+         'Neutral': 0,
+         'Mixed': 0,            # Or handle mixed sentiment differently
+         'Unknown': 0           # Default score for unmapped or unknown sentiments
+     }
+
+
+     def __init__(self, api_key: str, model_name: Optional[str] = None):
+         self.api_key = api_key
+         self.model_name = model_name or DEFAULT_AGENT_MODEL
+         self.agent = LlmAgent(
+             name=self.AGENT_NAME,
+             model=self.model_name,
+             description=self.AGENT_DESCRIPTION,
+             instruction=self.AGENT_INSTRUCTION
+         )
+         self.retry_mechanism = RetryMechanism()
+         logger.info(f"{self.AGENT_NAME} initialized with model {self.model_name}.")
+
+     def _get_sentiment_score(self, sentiment_label: Optional[str]) -> float:
+         """Maps a sentiment label to a numerical score using SENTIMENT_MAPPING."""
+         if sentiment_label is None:
+             return float(self.SENTIMENT_MAPPING.get('Unknown', 0))
+         # Exact matching is kept because the current keys include emojis;
+         # case-insensitive matching could be added later if needed.
+         return float(self.SENTIMENT_MAPPING.get(str(sentiment_label).strip(), self.SENTIMENT_MAPPING.get('Unknown', 0)))
+
+
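+     # Illustrative behavior of _get_sentiment_score (assuming the mapping above):
+     #   'Positive 👍'  -> 1.0
+     #   ' Negative '   -> -1.0  (surrounding whitespace is stripped)
+     #   'positive'     -> 0.0   (lookup is case-sensitive, so this falls back to 'Unknown')
+     #   None           -> 0.0
+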
+     def _preprocess_mentions_data(self, df: pd.DataFrame) -> pd.DataFrame:
+         """Cleans and prepares mentions data for analysis."""
+         if df is None or df.empty:
+             return pd.DataFrame()
+
+         df_processed = df.copy()
+
+         # Convert 'date' to datetime
+         if 'date' in df_processed.columns:
+             df_processed['date'] = pd.to_datetime(df_processed['date'], errors='coerce')
+             # df_processed.dropna(subset=['date'], inplace=True)  # Keep rows for other metrics even if date is NaT
+         else:
+             logger.warning("'date' column not found in mentions data. Time-series analysis will be limited.")
+             # df_processed['date'] = pd.NaT  # Add placeholder if critical
+
+         # Process 'sentiment_label' and create 'sentiment_score'
+         if 'sentiment_label' in df_processed.columns:
+             # Fill missing labels before casting to str, so NaN does not become the string 'nan'.
+             df_processed['sentiment_label'] = df_processed['sentiment_label'].fillna('Unknown').astype(str)
+             df_processed['sentiment_score'] = df_processed['sentiment_label'].apply(self._get_sentiment_score)
+         else:
+             logger.info("'sentiment_label' column not found. Sentiment analysis will be limited.")
+             df_processed['sentiment_label'] = 'Unknown'
+             df_processed['sentiment_score'] = self._get_sentiment_score('Unknown')
+
+         return df_processed
+
+     def _extract_time_series_metrics(self, df_processed: pd.DataFrame) -> List[TimeSeriesMetric]:
+         """Extracts monthly time-series metrics from processed mentions data."""
+         ts_metrics = []
+         if df_processed.empty or 'date' not in df_processed.columns or df_processed['date'].isnull().all():
+             logger.info("Cannot extract time-series metrics for mentions: 'date' is missing or all null.")
+             return ts_metrics
+
+         df_ts = df_processed.dropna(subset=['date']).copy()
+         if df_ts.empty:
+             logger.info("No valid 'date' values for mentions time-series metrics after filtering NaT.")
+             return ts_metrics
+
+         df_ts['year_month'] = df_ts['date'].dt.strftime('%Y-%m')
+
+         # Monthly mention volume
+         monthly_volume = df_ts.groupby('year_month').size().reset_index(name='mention_count')
+         if not monthly_volume.empty:
+             ts_metrics.append(TimeSeriesMetric(
+                 metric_name="monthly_mention_volume",
+                 values=monthly_volume['mention_count'].tolist(),
+                 timestamps=monthly_volume['year_month'].tolist(),
+                 metric_type="time_series",
+                 time_granularity="monthly",
+                 unit="count"
+             ))
+
+         # Monthly average sentiment score
+         if 'sentiment_score' in df_ts.columns:
+             monthly_avg_sentiment = df_ts.groupby('year_month')['sentiment_score'].mean().reset_index()
+             if not monthly_avg_sentiment.empty:
+                 ts_metrics.append(TimeSeriesMetric(
+                     metric_name="avg_monthly_sentiment_score",
+                     values=monthly_avg_sentiment['sentiment_score'].tolist(),
+                     timestamps=monthly_avg_sentiment['year_month'].tolist(),
+                     metric_type="time_series",
+                     time_granularity="monthly",
+                     unit="score"  # Score range depends on SENTIMENT_MAPPING
+                 ))
+
+         # Monthly distribution of sentiment labels
+         if 'sentiment_label' in df_ts.columns and df_ts['sentiment_label'].nunique() > 1:
+             # Ensure 'sentiment_label' is not all 'Unknown'
+             if not (df_ts['sentiment_label'] == 'Unknown').all():
+                 sentiment_counts_by_month = df_ts.groupby(['year_month', 'sentiment_label']).size().unstack(fill_value=0)
+                 for sentiment_val in sentiment_counts_by_month.columns:
+                     if sentiment_val == 'Unknown' and (sentiment_counts_by_month[sentiment_val] == 0).all():
+                         continue
+                     clean_sentiment_key = (str(sentiment_val).lower().replace(' ', '_')
+                                            .replace('👍', 'positive').replace('👎', 'negative').replace('😐', 'neutral'))
+                     ts_metrics.append(TimeSeriesMetric(
+                         metric_name=f"monthly_mention_count_sentiment_{clean_sentiment_key}",
+                         values=sentiment_counts_by_month[sentiment_val].tolist(),
+                         timestamps=sentiment_counts_by_month.index.tolist(),  # year_month is the index
+                         metric_type="time_series",
+                         time_granularity="monthly",
+                         unit="count"
+                     ))
+             else:
+                 logger.info("Sentiment label data is all 'Unknown', skipping sentiment distribution time series.")
+
+         return ts_metrics
+
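+     # Illustrative output of _extract_time_series_metrics (using the sample data
+     # from the __main__ block below: six mentions across 2023-01..2023-03, two per month):
+     #   monthly_mention_volume      -> values [2, 2, 2],       timestamps ['2023-01', '2023-02', '2023-03']
+     #   avg_monthly_sentiment_score -> values [0.0, 0.5, 0.5], for the same months
+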
+     def _calculate_aggregate_metrics(self, df_processed: pd.DataFrame) -> Dict[str, float]:
+         """Calculates aggregate metrics for mentions."""
+         agg_metrics = {}
+         if df_processed.empty:
+             return agg_metrics
+
+         agg_metrics['total_mentions_analyzed'] = float(len(df_processed))
+
+         if 'sentiment_score' in df_processed.columns and not df_processed['sentiment_score'].empty:
+             agg_metrics['overall_avg_sentiment_score'] = float(df_processed['sentiment_score'].mean())
+
+         if 'sentiment_label' in df_processed.columns:
+             total_valid_sentiments = len(df_processed.dropna(subset=['sentiment_label']))  # Count non-NaN labels
+             if total_valid_sentiments > 0:
+                 # Iterate through the defined sentiment mapping to count occurrences
+                 sentiment_counts = df_processed['sentiment_label'].value_counts()
+                 for label in self.SENTIMENT_MAPPING:
+                     # Use a clean key for the metric name
+                     clean_label_key = (str(label).lower().replace(' ', '_')
+                                        .replace('👍', 'positive').replace('👎', 'negative').replace('😐', 'neutral'))
+                     count = sentiment_counts.get(label, 0)
+                     # Report labels that occur; the 'Unknown' bucket is always reported, even at zero.
+                     if count > 0 or label == 'Unknown':
+                         agg_metrics[f'{clean_label_key}_mention_ratio'] = float(count / total_valid_sentiments)
+                         agg_metrics[f'{clean_label_key}_mention_count'] = float(count)
+
+         # Mentions per day/week (if 'date' column is valid)
+         if 'date' in df_processed.columns and not df_processed['date'].isnull().all():
+             df_dated = df_processed.dropna(subset=['date']).sort_values('date')
+             if len(df_dated) > 1:
+                 duration_days = (df_dated['date'].max() - df_dated['date'].min()).days
+                 if duration_days > 0:
+                     agg_metrics['avg_mentions_per_day'] = float(len(df_dated) / duration_days)
+                     agg_metrics['avg_mentions_per_week'] = float(len(df_dated) / (duration_days / 7.0))
+             elif len(df_dated) == 1:  # Single day with mentions
+                 agg_metrics['avg_mentions_per_day'] = float(len(df_dated))
+                 agg_metrics['avg_mentions_per_week'] = float(len(df_dated) * 7)  # Extrapolate
+
+         return agg_metrics
+
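+     # Illustrative output of _calculate_aggregate_metrics (sample data from the
+     # __main__ block below: 6 mentions between 2023-01-05 and 2023-03-12, a 66-day span):
+     #   avg_mentions_per_day  -> 6 / 66       ≈ 0.091
+     #   avg_mentions_per_week -> 6 / (66 / 7) ≈ 0.64
+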
+     def _extract_categorical_metrics(self, df_processed: pd.DataFrame) -> Dict[str, Any]:
+         """Extracts categorical distributions for mentions."""
+         cat_metrics = {}
+         if df_processed.empty:
+             return cat_metrics
+
+         # Sentiment label distribution (counts and percentages)
+         if 'sentiment_label' in df_processed.columns and df_processed['sentiment_label'].nunique() > 0:
+             cat_metrics['sentiment_label_distribution_percentage'] = df_processed['sentiment_label'].value_counts(normalize=True).apply(lambda x: f"{x:.2%}").to_dict()
+             cat_metrics['sentiment_label_counts'] = df_processed['sentiment_label'].value_counts().to_dict()
+
+         # Example: if a 'mention_source' column existed:
+         # if 'mention_source' in df_processed.columns:
+         #     cat_metrics['mention_source_distribution'] = df_processed['mention_source'].value_counts(normalize=True).to_dict()
+         #     cat_metrics['mention_source_counts'] = df_processed['mention_source'].value_counts().to_dict()
+
+         return cat_metrics
+
+     def _extract_time_periods(self, df_processed: pd.DataFrame) -> List[str]:
+         """Extracts unique year-month time periods covered by the mentions data."""
+         if df_processed.empty or 'date' not in df_processed.columns or df_processed['date'].isnull().all():
+             return ["Data period not available or N/A"]
+
+         if 'year_month' in df_processed.columns:  # If already created during TS extraction
+             periods = sorted(df_processed['year_month'].dropna().unique().tolist(), reverse=True)
+         elif 'date' in df_processed.columns:  # Derive if not present
+             dates = df_processed['date'].dropna()
+             if not dates.empty:
+                 periods = sorted(dates.dt.strftime('%Y-%m').unique().tolist(), reverse=True)
+             else:
+                 return ["N/A"]
+         else:
+             return ["N/A"]
+
+         return periods[:12]  # Return up to the last 12 months
+
+     def analyze_mentions_data(self, mentions_df: pd.DataFrame) -> AgentMetrics:
+         """
+         Generates comprehensive mentions analysis.
+         """
+         if mentions_df is None or mentions_df.empty:
+             logger.warning("Mentions DataFrame is empty. Returning empty metrics.")
+             return AgentMetrics(
+                 agent_name=self.AGENT_NAME,
+                 analysis_summary="No mentions data provided for analysis.",
+                 time_periods_covered=["N/A"]
+             )
+
+         # 1. Preprocess data
+         df_processed = self._preprocess_mentions_data(mentions_df)
+         if df_processed.empty and not mentions_df.empty:
+             logger.warning("Mentions DataFrame became empty after preprocessing.")
+             return AgentMetrics(
+                 agent_name=self.AGENT_NAME,
+                 analysis_summary="Mentions data could not be processed.",
+                 time_periods_covered=["N/A"]
+             )
+         elif df_processed.empty and mentions_df.empty:
+             return AgentMetrics(agent_name=self.AGENT_NAME, analysis_summary="No mentions data provided.")
+
+         # 2. Generate textual analysis using PandasAI
+         df_description_for_pandasai = "LinkedIn brand mentions data. Key columns: 'date' (date of mention), 'sentiment_label' (e.g., 'Positive 👍', 'Negative 👎', 'Neutral 😐'), 'sentiment_score' (numeric score from -1.5 to 1.5)."
+
+         analysis_result_text = "PandasAI analysis for mentions could not be performed."
+         try:
+             pandas_ai_df = pai.DataFrame(df_processed, description=df_description_for_pandasai)
+             analysis_query = """
+             Analyze the provided LinkedIn brand mentions data. Focus on:
+             1. Monthly trends in mention volume.
+             2. Monthly trends in sentiment (average 'sentiment_score' and distribution of 'sentiment_label').
+             3. Identify any significant spikes/dips in mentions or shifts in sentiment.
+             Provide a concise summary of brand perception based on this data.
+             """
+
+             def chat_operation():
+                 if not pai.config.llm:
+                     logger.warning("PandasAI LLM not configured for mentions agent. Attempting to configure.")
+                     from utils.pandasai_setup import configure_pandasai
+                     configure_pandasai(self.api_key, self.model_name)
+                     if not pai.config.llm:
+                         raise RuntimeError("PandasAI LLM could not be configured for mentions chat operation.")
+                 logger.info(f"Executing PandasAI chat for mentions analysis with LLM: {pai.config.llm}")
+                 return pandas_ai_df.chat(analysis_query)
+
+             analysis_result_raw = self.retry_mechanism.retry_with_backoff(
+                 func=chat_operation, max_retries=2, base_delay=2.0, exceptions=(Exception,)
+             )
+             analysis_result_text = str(analysis_result_raw) if analysis_result_raw else "No textual analysis for mentions generated by PandasAI."
+             logger.info("Mentions analysis via PandasAI completed.")
+
+         except Exception as e:
+             logger.error(f"Mentions analysis with PandasAI failed: {e}", exc_info=True)
+             analysis_result_text = f"Mentions analysis using PandasAI failed. Error: {str(e)[:200]}"
+
+         # 3. Extract structured metrics
+         time_series_metrics = self._extract_time_series_metrics(df_processed)
+         aggregate_metrics = self._calculate_aggregate_metrics(df_processed)
+         categorical_metrics = self._extract_categorical_metrics(df_processed)
+         time_periods = self._extract_time_periods(df_processed)
+
+         return AgentMetrics(
+             agent_name=self.AGENT_NAME,
+             analysis_summary=analysis_result_text[:2000],
+             time_series_metrics=time_series_metrics,
+             aggregate_metrics=aggregate_metrics,
+             categorical_metrics=categorical_metrics,
+             time_periods_covered=time_periods,
+             data_sources_used=[f"mentions_df (shape: {mentions_df.shape}) -> df_processed (shape: {df_processed.shape})"]
+         )
+
+ if __name__ == '__main__':
+     try:
+         from utils.logging_config import setup_logging
+         setup_logging()
+         logger.info("Logging setup for EnhancedMentionsAnalysisAgent test.")
+     except ImportError:
+         logging.basicConfig(level=logging.INFO)
+         logger.warning("Could not import setup_logging. Using basicConfig.")
+
+     MOCK_API_KEY = os.environ.get("GOOGLE_API_KEY", "test_api_key_mentions")
+     MODEL_NAME = DEFAULT_AGENT_MODEL
+
+     try:
+         from utils.pandasai_setup import configure_pandasai
+         if MOCK_API_KEY != "test_api_key_mentions":
+             configure_pandasai(MOCK_API_KEY, MODEL_NAME)
+             logger.info("PandasAI configured for testing EnhancedMentionsAnalysisAgent.")
+         else:
+             logger.warning("Using mock API key for mentions. PandasAI chat will likely fail or use a mock.")
+             class MockPandasAIDataFrame:
+                 def __init__(self, df, description): self.df = df; self.description = description
+                 def chat(self, query): return f"Mock PandasAI mentions response to: {query}"
+             pai.DataFrame = MockPandasAIDataFrame
+     except ImportError:
+         logger.error("utils.pandasai_setup not found. PandasAI will not be configured for mentions.")
+         class MockPandasAIDataFrame:
+             def __init__(self, df, description): self.df = df; self.description = description
+             def chat(self, query): return f"Mock PandasAI mentions response to: {query}"
+         pai.DataFrame = MockPandasAIDataFrame
+
+     sample_mentions_data = {
+         'date': pd.to_datetime(['2023-01-05', '2023-01-15', '2023-02-02', '2023-02-20', '2023-03-10', '2023-03-12']),
+         'sentiment_label': ['Positive 👍', 'Negative 👎', 'Neutral 😐', 'Positive 👍', 'Positive 👍', 'Unknown'],
+         # 'mention_content': ['Great product!', 'Service was slow.', 'Just a mention.', 'Love the new feature!', 'Highly recommend.', 'Seen this around.']
+     }
+     sample_df_mentions = pd.DataFrame(sample_mentions_data)
+
+     mentions_agent = EnhancedMentionsAnalysisAgent(api_key=MOCK_API_KEY, model_name=MODEL_NAME)
+
+     logger.info("Analyzing sample mentions data...")
+     mentions_metrics_result = mentions_agent.analyze_mentions_data(sample_df_mentions)
+
+     print("\n--- EnhancedMentionsAnalysisAgent Results ---")
+     print(f"Agent Name: {mentions_metrics_result.agent_name}")
+     print(f"Analysis Summary: {mentions_metrics_result.analysis_summary}")
+     print("\nTime Series Metrics (Mentions):")
+     for ts_metric in mentions_metrics_result.time_series_metrics:
+         print(f"  - {ts_metric.metric_name}: {len(ts_metric.values)} data points, e.g., {ts_metric.values[:3]} for ts {ts_metric.timestamps[:3]} (Unit: {ts_metric.unit})")
+     print("\nAggregate Metrics (Mentions):")
+     for key, value in mentions_metrics_result.aggregate_metrics.items():
+         print(f"  - {key}: {value}")
+     print("\nCategorical Metrics (Mentions):")
+     for key, value in mentions_metrics_result.categorical_metrics.items():
+         print(f"  - {key}:")
+         if isinstance(value, dict):
+             for sub_key, sub_value in list(value.items())[:2]:  # Print first 2 for brevity
+                 print(f"    - {sub_key}: {sub_value}")
+         else:
+             print(f"    {value}")
+     print(f"\nTime Periods Covered (Mentions): {mentions_metrics_result.time_periods_covered}")
+
+     # Test with empty DataFrame
+     logger.info("\n--- Testing Mentions Agent with empty DataFrame ---")
+     empty_mentions_metrics = mentions_agent.analyze_mentions_data(pd.DataFrame())
+     print(f"Empty Mentions DF Analysis Summary: {empty_mentions_metrics.analysis_summary}")
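Note: the module imports project-local helpers (utils.retry_mechanism, utils.pandasai_setup, data_models.metrics) plus google.adk and pandasai, none of which are included in this commit, so the file cannot be run in isolation. The snippet below is a minimal, illustrative sketch (not part of the commit) that reproduces only the monthly aggregation logic behind _extract_time_series_metrics and _calculate_aggregate_metrics with plain pandas, using the sentiment mapping and sample rows shown above, as a quick sanity check of the expected numbers.

# sanity_check_mentions_aggregation.py -- illustrative only, not part of the commit
import pandas as pd

SENTIMENT_MAPPING = {'Positive 👍': 1, 'Negative 👎': -1, 'Neutral 😐': 0, 'Unknown': 0}

df = pd.DataFrame({
    'date': pd.to_datetime(['2023-01-05', '2023-01-15', '2023-02-02',
                            '2023-02-20', '2023-03-10', '2023-03-12']),
    'sentiment_label': ['Positive 👍', 'Negative 👎', 'Neutral 😐',
                        'Positive 👍', 'Positive 👍', 'Unknown'],
})

# Label -> numeric score, as in _preprocess_mentions_data / _get_sentiment_score.
df['sentiment_score'] = df['sentiment_label'].map(SENTIMENT_MAPPING).fillna(0)
df['year_month'] = df['date'].dt.strftime('%Y-%m')

# Monthly volume and average sentiment, as in _extract_time_series_metrics.
print(df.groupby('year_month').size().to_dict())
# {'2023-01': 2, '2023-02': 2, '2023-03': 2}
print(df.groupby('year_month')['sentiment_score'].mean().to_dict())
# {'2023-01': 0.0, '2023-02': 0.5, '2023-03': 0.5}

# Mentions per day over the covered span, as in _calculate_aggregate_metrics.
duration_days = (df['date'].max() - df['date'].min()).days  # 66
print(len(df) / duration_days)  # ~0.091 mentions per day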