Update app.py
app.py CHANGED
@@ -4,55 +4,63 @@ import pandas as pd
 import os
 import logging
 import matplotlib
-matplotlib.use('Agg') # Set backend for Matplotlib
-
-import time
-
-
-
-import asyncio

 # --- Module Imports ---
 from utils.gradio_utils import get_url_user_token

-#
 from config import (
     LINKEDIN_CLIENT_ID_ENV_VAR, BUBBLE_APP_NAME_ENV_VAR,
     BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR,
-
 )
-# from formulas import PLOT_FORMULAS # Used in analytics_handlers
-
-# Services (assuming these exist and are correctly defined)
 from services.state_manager import process_and_store_bubble_token
 from services.sync_logic import sync_all_linkedin_data_orchestrator

-#
-from

-#
-
-from
-from ui.
-

-# Handler Classes
-from services.dashboard_sync_handlers import DashboardSyncHandlers
-from services.analytics_handlers import AnalyticsHandlers
-from services.agentic_handlers import AgenticHandlers, AGENTIC_MODULES_LOADED as AGENTIC_HANDLERS_MODULES_LOADED

-#
-
-# The one from AgenticHandlers is based on its own try-except for its specific imports.
-# We might need a global one if app.py itself tries to import agentic modules directly.
-# For now, using the one from AgenticHandlers as it's most relevant to agentic functionality.
-APP_AGENTIC_MODULES_LOADED = AGENTIC_HANDLERS_MODULES_LOADED

 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')

 # API Key Setup
-os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "False"
 user_provided_api_key = os.environ.get("GEMINI_API_KEY")
 if user_provided_api_key:
     os.environ["GOOGLE_API_KEY"] = user_provided_api_key
 import os
 import logging
 import matplotlib
+matplotlib.use('Agg') # Set backend for Matplotlib
+import matplotlib.pyplot as plt
+import time
+from datetime import datetime, timedelta
+import numpy as np
+from collections import OrderedDict, defaultdict # Added defaultdict
+import asyncio

 # --- Module Imports ---
 from utils.gradio_utils import get_url_user_token

+# Functions from newly created/refactored modules
 from config import (
     LINKEDIN_CLIENT_ID_ENV_VAR, BUBBLE_APP_NAME_ENV_VAR,
     BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR,
+    PLOT_ID_TO_FORMULA_KEY_MAP # Keep this if used by AnalyticsTab
 )
 from services.state_manager import process_and_store_bubble_token
 from services.sync_logic import sync_all_linkedin_data_orchestrator
+from ui.ui_generators import (
+    display_main_dashboard,
+    build_analytics_tab_plot_area, # This will be passed to AnalyticsTab
+    BOMB_ICON, EXPLORE_ICON, FORMULA_ICON, ACTIVE_ICON # These will be passed
+)
+from ui.analytics_plot_generator import update_analytics_plots_figures, create_placeholder_plot # Pass these
+from formulas import PLOT_FORMULAS # Keep this if used by AnalyticsTab

+# --- EXISTING CHATBOT MODULE IMPORTS ---
+from features.chatbot.chatbot_prompts import get_initial_insight_prompt_and_suggestions # Pass this
+from features.chatbot.chatbot_handler import generate_llm_response # Pass this

+# --- NEW AGENTIC PIPELINE IMPORTS ---
+try:
+    from run_agentic_pipeline import run_full_analytics_orchestration
+    from ui.insights_ui_generator import (
+        format_report_to_markdown,
+        extract_key_results_for_selection,
+        format_single_okr_for_display
+    )
+    AGENTIC_MODULES_LOADED = True
+except ImportError as e:
+    logging.error(f"Could not import agentic pipeline modules: {e}. Tabs 3 and 4 will be disabled.")
+    AGENTIC_MODULES_LOADED = False
+    async def run_full_analytics_orchestration(*args, **kwargs): return None # Placeholder
+    def format_report_to_markdown(report_string): return "Agentic modules not loaded. Report unavailable." # Placeholder
+    def extract_key_results_for_selection(okrs_dict): return [] # Placeholder
+    def format_single_okr_for_display(okr_data, **kwargs): return "Agentic modules not loaded. OKR display unavailable." # Placeholder


+# --- IMPORT THE NEW ANALYTICS TAB MODULE ---
+from services.analytics_tab_module import AnalyticsTab # Assuming analytics_tab_module.py is in the services directory

 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(module)s - %(message)s')

 # API Key Setup
+os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "False"
 user_provided_api_key = os.environ.get("GEMINI_API_KEY")
 if user_provided_api_key:
     os.environ["GOOGLE_API_KEY"] = user_provided_api_key
@@ -60,212 +68,349 @@ if user_provided_api_key:
 else:
     logging.error("CRITICAL ERROR: The API key environment variable 'GEMINI_API_KEY' was not found.")

-
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
                title="LinkedIn Organization Dashboard") as app:
-
-    # --- Global States ---
     token_state = gr.State(value={
         "token": None, "client_id": None, "org_urn": None,
         "bubble_posts_df": pd.DataFrame(), "bubble_post_stats_df": pd.DataFrame(),
-        "bubble_mentions_df": pd.DataFrame(),
         "bubble_follower_stats_df": pd.DataFrame(),
         "fetch_count_for_api": 0, "url_user_token_temp_storage": None,
         "config_date_col_posts": "published_at", "config_date_col_mentions": "date",
         "config_date_col_followers": "date", "config_media_type_col": "media_type",
         "config_eb_labels_col": "li_eb_label"
     })
-
-    # States for analytics tab chatbot
-    chat_histories_st = gr.State({}) # Stores chat histories for each plot_id {plot_id: [{"role":"user",...}]}
-    current_chat_plot_id_st = gr.State(None) # ID of the plot currently active in chat
-    plot_data_for_chatbot_st = gr.State({}) # Stores summaries for plots {plot_id: "summary text"}

-    # States for analytics tab
-
-

-    #
-    orchestration_raw_results_st = gr.State(None) # Stores raw output from
-    key_results_for_selection_st = gr.State([]) # Stores list of dicts for KR
-    selected_key_result_ids_st = gr.State([]) # Stores

-    # --- Hidden Components for URL Params ---
     gr.Markdown("# 🚀 LinkedIn Organization Dashboard")
     url_user_token_display = gr.Textbox(label="User Token (Nascosto)", interactive=False, visible=False)
     status_box = gr.Textbox(label="Stato Generale Token LinkedIn", interactive=False, value="Inizializzazione...")
     org_urn_display = gr.Textbox(label="URN Organizzazione (Nascosto)", interactive=False, visible=False)

-    # --- Load URL parameters ---
-    # This runs on app load to fetch params from the URL query string
     app.load(fn=get_url_user_token, inputs=None, outputs=[url_user_token_display, org_urn_display], api_name="get_url_params", show_progress=False)

-
     with gr.Tabs() as tabs:
         with gr.TabItem("1️⃣ Dashboard & Sync", id="tab_dashboard_sync"):
-

-
-

-            # Agentic
-
-

-
-
-
-
-

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
         )

-
-
-
-
-    if APP_AGENTIC_MODULES_LOADED and agentic_handler:
-        agentic_handler.setup_event_handlers() # Sets up internal events for agentic tabs (e.g., KR selection)

-
-
-
-
     agentic_pipeline_outputs_list = [
-
-
-
         orchestration_raw_results_st,
-        selected_key_result_ids_st,
         key_results_for_selection_st,
-
     ]
-
-
-    #
-
-
-    # 4. Run agentic pipeline (agentic_handler.run_agentic_pipeline_autonomously_on_update)
-
-    initial_load_event = org_urn_display.change( # Triggers after app.load populates org_urn_display
-        fn=dashboard_sync_handler.initial_load_sequence,
         inputs=[url_user_token_display, org_urn_display, token_state],
-        outputs=[
-
-            token_state,
-            dashboard_sync_components['sync_data_btn'],
-            dashboard_sync_components['dashboard_display_html']
-        ],
-        show_progress="full" # For the initial data processing part
     )

-    # Chain analytics refresh after initial load
     initial_load_event.then(
-        fn=
         inputs=[
-            token_state,
-
-
-
-
         ],
-        outputs=
-        show_progress="full"
     )
-
-    # Chain agentic pipeline run after initial analytics refresh (if modules loaded)
-    if APP_AGENTIC_MODULES_LOADED and agentic_handler:
-        initial_load_event.then( # Chaining from initial_load_event ensures it uses the updated token_state
-            fn=agentic_handler.run_agentic_pipeline_autonomously_on_update,
-            inputs=[token_state], # Depends on the updated token_state
-            outputs=agentic_pipeline_outputs_list,
-            show_progress="minimal" # For agentic pipeline
-        )
-
-    # Sync Data Sequence:
-    # 1. sync_all_linkedin_data_orchestrator
-    # 2. process_and_store_bubble_token (to update state based on sync results)
-    # 3. display_main_dashboard
-    # 4. refresh_analytics_graphs_ui
-    # 5. run_agentic_pipeline_autonomously_on_update

-    sync_event_part1 =
-        fn=sync_all_linkedin_data_orchestrator,
         inputs=[token_state],
-        outputs=[
-        show_progress="full"
     )
-
     sync_event_part2 = sync_event_part1.then(
-        fn=process_and_store_bubble_token,
-        inputs=[url_user_token_display, org_urn_display, token_state],
-        outputs=[status_box, token_state,
-        show_progress=False
     )
-
-
-
-
-
-        inputs=[token_state], # Uses the token_state updated by process_and_store_bubble_token
-        outputs=agentic_pipeline_outputs_list,
-        show_progress="minimal"
-    )
-
-    sync_event_part3 = sync_event_part2.then( # Continues from token processing
-        fn=display_main_dashboard, # From ui.ui_generators
-        inputs=[token_state], # Uses the updated token_state
-        outputs=[dashboard_sync_components['dashboard_display_html']],
-        show_progress=False # Quick UI update
     )
-
-
-
-
         inputs=[
-            token_state,
-
-
-
         ],
-        outputs=
-        show_progress="full"
     )

-
-    # --- Launch ---
 if __name__ == "__main__":
-
-    if not os.environ.get(LINKEDIN_CLIENT_ID_ENV_VAR):
         logging.warning(f"ATTENZIONE: '{LINKEDIN_CLIENT_ID_ENV_VAR}' non impostata.")
     if not all(os.environ.get(var) for var in [BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR]):
-        logging.warning("ATTENZIONE:
-
-
-
-
-    if not os.environ.get("GEMINI_API_KEY") and APP_AGENTIC_MODULES_LOADED:
-        logging.warning("ATTENZIONE: 'GEMINI_API_KEY' non impostata. La pipeline AI per le tab 3 e 4 potrebbe non funzionare.")

-    try:
-        logging.info(f"
-
-        logging.
-
-
 else:
     logging.error("CRITICAL ERROR: The API key environment variable 'GEMINI_API_KEY' was not found.")

+
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"),
                title="LinkedIn Organization Dashboard") as app:
     token_state = gr.State(value={
         "token": None, "client_id": None, "org_urn": None,
         "bubble_posts_df": pd.DataFrame(), "bubble_post_stats_df": pd.DataFrame(),
+        "bubble_mentions_df": pd.DataFrame(),
         "bubble_follower_stats_df": pd.DataFrame(),
         "fetch_count_for_api": 0, "url_user_token_temp_storage": None,
         "config_date_col_posts": "published_at", "config_date_col_mentions": "date",
         "config_date_col_followers": "date", "config_media_type_col": "media_type",
         "config_eb_labels_col": "li_eb_label"
     })

+    # States for existing analytics tab chatbot - these are passed to AnalyticsTab
+    chat_histories_st = gr.State({})
+    current_chat_plot_id_st = gr.State(None)
+    plot_data_for_chatbot_st = gr.State({}) # This will be populated by the analytics module's refresh

+    # --- STATES FOR AGENTIC PIPELINE ---
+    orchestration_raw_results_st = gr.State(None) # Stores the full raw output from the agentic pipeline
+    key_results_for_selection_st = gr.State([]) # Stores the list of dicts for KR selection (label, id, etc.)
+    selected_key_result_ids_st = gr.State([]) # Stores the unique_kr_ids selected in the CheckboxGroup

     gr.Markdown("# 🚀 LinkedIn Organization Dashboard")
     url_user_token_display = gr.Textbox(label="User Token (Nascosto)", interactive=False, visible=False)
     status_box = gr.Textbox(label="Stato Generale Token LinkedIn", interactive=False, value="Inizializzazione...")
     org_urn_display = gr.Textbox(label="URN Organizzazione (Nascosto)", interactive=False, visible=False)

     app.load(fn=get_url_user_token, inputs=None, outputs=[url_user_token_display, org_urn_display], api_name="get_url_params", show_progress=False)

+    def initial_load_sequence(url_token, org_urn_val, current_state):
+        status_msg, new_state, btn_update = process_and_store_bubble_token(url_token, org_urn_val, current_state)
+        dashboard_content = display_main_dashboard(new_state)
+        return status_msg, new_state, btn_update, dashboard_content
+
+    # --- Instantiate the AnalyticsTab module ---
+    analytics_icons = {
+        'bomb': BOMB_ICON, 'explore': EXPLORE_ICON,
+        'formula': FORMULA_ICON, 'active': ACTIVE_ICON
+    }
+    analytics_tab_instance = AnalyticsTab(
+        token_state=token_state,
+        chat_histories_st=chat_histories_st,
+        current_chat_plot_id_st=current_chat_plot_id_st,
+        plot_data_for_chatbot_st=plot_data_for_chatbot_st,
+        plot_id_to_formula_map=PLOT_ID_TO_FORMULA_KEY_MAP,
+        plot_formulas_data=PLOT_FORMULAS,
+        icons=analytics_icons,
+        fn_build_plot_area=build_analytics_tab_plot_area,
+        fn_update_plot_figures=update_analytics_plots_figures,
+        fn_create_placeholder_plot=create_placeholder_plot,
+        fn_get_initial_insight=get_initial_insight_prompt_and_suggestions,
+        fn_generate_llm_response=generate_llm_response
+    )
+
     with gr.Tabs() as tabs:
         with gr.TabItem("1️⃣ Dashboard & Sync", id="tab_dashboard_sync"):
+            gr.Markdown("Il sistema controlla i dati esistenti da Bubble. 'Sincronizza' si attiva se sono necessari nuovi dati.")
+            sync_data_btn = gr.Button("🔄 Sincronizza Dati LinkedIn", variant="primary", visible=False, interactive=False)
+            sync_status_html_output = gr.HTML("<p style='text-align:center;'>Stato sincronizzazione...</p>")
+            dashboard_display_html = gr.HTML("<p style='text-align:center;'>Caricamento dashboard...</p>")

+        # --- Use the AnalyticsTab module to create Tab 2 ---
+        analytics_tab_instance.create_tab_ui()

+        # --- Tab 3: Agentic Analysis Report ---
+        with gr.TabItem("3️⃣ Agentic Analysis Report", id="tab_agentic_report", visible=AGENTIC_MODULES_LOADED):
+            gr.Markdown("## 🤖 Comprehensive Analysis Report (AI Generated)")
+            agentic_pipeline_status_md = gr.Markdown("Stato Pipeline AI (filtro 'Sempre'): In attesa...", visible=True)
+            gr.Markdown("Questo report è generato da un agente AI con filtro 'Sempre' sui dati disponibili. Rivedi criticamente.")
+            agentic_report_display_md = gr.Markdown("La pipeline AI si avvierà automaticamente dopo il caricamento iniziale dei dati o dopo una sincronizzazione.")
+            if not AGENTIC_MODULES_LOADED:
+                gr.Markdown("🔴 **Error:** Agentic pipeline modules could not be loaded. This tab is disabled.")

+        # --- Tab 4: Agentic OKRs & Tasks ---
+        with gr.TabItem("4️⃣ Agentic OKRs & Tasks", id="tab_agentic_okrs", visible=AGENTIC_MODULES_LOADED):
+            gr.Markdown("## 🎯 AI Generated OKRs and Actionable Tasks (filtro 'Sempre')")
+            gr.Markdown("Basato sull'analisi AI (filtro 'Sempre'), l'agente ha proposto i seguenti OKR e task. Seleziona i Key Results per dettagli.")
+            if not AGENTIC_MODULES_LOADED:
+                gr.Markdown("🔴 **Error:** Agentic pipeline modules could not be loaded. This tab is disabled.")
+            with gr.Row():
+                with gr.Column(scale=1):
+                    gr.Markdown("### Suggested Key Results (da analisi 'Sempre')")
+                    key_results_cbg = gr.CheckboxGroup(label="Select Key Results", choices=[], value=[], interactive=True)
+                with gr.Column(scale=3):
+                    gr.Markdown("### Detailed OKRs and Tasks for Selected Key Results")
+                    okr_detail_display_md = gr.Markdown("I dettagli OKR appariranno qui dopo l'esecuzione della pipeline AI.")

+            def update_okr_display_on_selection(selected_kr_unique_ids: list, raw_orchestration_results: dict, all_krs_for_selection: list):
+                if not raw_orchestration_results or not AGENTIC_MODULES_LOADED:
+                    return gr.update(value="Nessun dato dalla pipeline AI o moduli non caricati.")
+
+                actionable_okrs_dict = raw_orchestration_results.get("actionable_okrs_and_tasks")
+                if not actionable_okrs_dict or not isinstance(actionable_okrs_dict.get("okrs"), list):
+                    return gr.update(value="Nessun OKR trovato nei risultati della pipeline.")
+
+                okrs_list = actionable_okrs_dict["okrs"]
+                # Ensure all_krs_for_selection is a list of dicts with expected keys
+                if not all_krs_for_selection or not isinstance(all_krs_for_selection, list) or \
+                   not all(isinstance(kr, dict) and 'unique_kr_id' in kr and 'okr_index' in kr and 'kr_index' in kr for kr in all_krs_for_selection):
+                    logging.error("all_krs_for_selection is not in the expected format.")
+                    return gr.update(value="Errore interno: formato dati KR non valido.")
+
+
+                kr_id_to_indices = {kr_info['unique_kr_id']: (kr_info['okr_index'], kr_info['kr_index']) for kr_info in all_krs_for_selection}
+
+                selected_krs_by_okr_idx = defaultdict(list)
+                if selected_kr_unique_ids:
+                    for kr_unique_id in selected_kr_unique_ids:
+                        if kr_unique_id in kr_id_to_indices:
+                            okr_idx, kr_idx = kr_id_to_indices[kr_unique_id]
+                            selected_krs_by_okr_idx[okr_idx].append(kr_idx)
+
+                output_md_parts = []
+                if not okrs_list:
+                    output_md_parts.append("Nessun OKR generato.")
+                else:
+                    for okr_idx, okr_data in enumerate(okrs_list):
+                        accepted_indices_for_this_okr = selected_krs_by_okr_idx.get(okr_idx)
+                        # If specific KRs are selected, only show OKRs that have at least one of the selected KRs
+                        # OR if no KRs are selected at all, show all OKRs.
+                        if selected_kr_unique_ids: # User has made a selection
+                            if accepted_indices_for_this_okr is not None: # This OKR has some of the selected KRs
+                                output_md_parts.append(format_single_okr_for_display(okr_data, accepted_kr_indices=accepted_indices_for_this_okr, okr_main_index=okr_idx))
+                        else: # No KRs selected, show all OKRs with all their KRs
+                            output_md_parts.append(format_single_okr_for_display(okr_data, accepted_kr_indices=None, okr_main_index=okr_idx))
+
+                if not output_md_parts and selected_kr_unique_ids:
+                    final_md = "Nessun OKR corrisponde alla selezione corrente o i KR selezionati non hanno task dettagliati."
+                elif not output_md_parts and not selected_kr_unique_ids: # Should be covered by "Nessun OKR generato."
+                    final_md = "Nessun OKR generato."
+                else:
+                    final_md = "\n\n---\n\n".join(output_md_parts)
+                return gr.update(value=final_md)
+
+            if AGENTIC_MODULES_LOADED:
+                key_results_cbg.change(
+                    fn=update_okr_display_on_selection,
+                    inputs=[key_results_cbg, orchestration_raw_results_st, key_results_for_selection_st],
+                    outputs=[okr_detail_display_md],
+                    api_name="update_okr_display_on_selection_module"
+                )
+
+    async def run_agentic_pipeline_autonomously(current_token_state_val):
+        logging.info(f"Agentic pipeline check triggered for token_state update. Current token: {'Set' if current_token_state_val.get('token') else 'Not Set'}")
+        # Initial state before pipeline runs or if skipped
+        initial_yield = (
+            gr.update(value="Pipeline AI: In attesa dei dati necessari..."), # agentic_report_display_md
+            gr.update(choices=[], value=[], interactive=False), # key_results_cbg
+            gr.update(value="Pipeline AI: In attesa dei dati necessari..."), # okr_detail_display_md
+            orchestration_raw_results_st.value, # Preserve current raw results
+            selected_key_result_ids_st.value, # Preserve current selection
+            key_results_for_selection_st.value, # Preserve current options
+            "Pipeline AI: In attesa dei dati..." # agentic_pipeline_status_md
         )

+        if not current_token_state_val or not current_token_state_val.get("token"):
+            logging.info("Agentic pipeline: Token not available in token_state. Skipping.")
+            yield initial_yield
+            return

+        logging.info("Agentic pipeline starting autonomously with 'Sempre' filter.")
+        # Update status to indicate processing
+        yield (
+            gr.update(value="Analisi AI (Sempre) in corso..."),
+            gr.update(choices=[], value=[], interactive=False), # Keep CBG disabled during run
+            gr.update(value="Dettagli OKR (Sempre) in corso di generazione..."),
+            orchestration_raw_results_st.value, # Preserve
+            selected_key_result_ids_st.value, # Preserve
+            key_results_for_selection_st.value, # Preserve
+            "Esecuzione pipeline AI (Sempre)..."
+        )
+
+        if not AGENTIC_MODULES_LOADED:
+            logging.warning("Agentic modules not loaded. Skipping autonomous pipeline.")
+            yield (
+                gr.update(value="Moduli AI non caricati. Report non disponibile."),
+                gr.update(choices=[], value=[], interactive=False),
+                gr.update(value="Moduli AI non caricati. OKR non disponibili."),
+                None, [], [], "Pipeline AI: Moduli non caricati."
+            )
+            return
+
+        try:
+            # Parameters for 'Sempre' filter for the agentic pipeline
+            date_filter_val_agentic = "Sempre"
+            custom_start_val_agentic = None
+            custom_end_val_agentic = None
+
+            orchestration_output = await run_full_analytics_orchestration(
+                current_token_state_val,
+                date_filter_val_agentic,
+                custom_start_val_agentic,
+                custom_end_val_agentic
+            )
+            agentic_status_text = "Pipeline AI (Sempre) completata."
+            logging.info(f"Autonomous agentic pipeline finished. Output keys: {orchestration_output.keys() if orchestration_output else 'None'}")
+
+            if orchestration_output:
+                orchestration_results_update = orchestration_output # Store full results in state
+                report_str = orchestration_output.get('comprehensive_analysis_report', "Nessun report dettagliato fornito.")
+                agentic_report_md_update = gr.update(value=format_report_to_markdown(report_str))
+
+                actionable_okrs = orchestration_output.get('actionable_okrs_and_tasks') # This is the dict containing 'okrs' list
+                krs_for_ui_selection_list = extract_key_results_for_selection(actionable_okrs) # Expects the dict
+
+                krs_for_selection_update = krs_for_ui_selection_list # Update state with list of KR dicts
+
+                # Choices for CheckboxGroup: list of (label, value) tuples
+                kr_choices_for_cbg = [(kr['kr_description'], kr['unique_kr_id']) for kr in krs_for_ui_selection_list]
+                key_results_cbg_update = gr.update(choices=kr_choices_for_cbg, value=[], interactive=True) # Reset selection
+
+                # Display all OKRs by default after pipeline run
+                all_okrs_md_parts = []
+                if actionable_okrs and isinstance(actionable_okrs.get("okrs"), list):
+                    for okr_idx, okr_item in enumerate(actionable_okrs["okrs"]):
+                        all_okrs_md_parts.append(format_single_okr_for_display(okr_item, accepted_kr_indices=None, okr_main_index=okr_idx))
+
+                if not all_okrs_md_parts:
+                    okr_detail_display_md_update = gr.update(value="Nessun OKR generato o trovato (Sempre).")
+                else:
+                    okr_detail_display_md_update = gr.update(value="\n\n---\n\n".join(all_okrs_md_parts))
+
+                selected_krs_update = [] # Reset selected KRs state
+            else:
+                agentic_report_md_update = gr.update(value="Nessun report generato dalla pipeline AI (Sempre).")
+                key_results_cbg_update = gr.update(choices=[], value=[], interactive=False)
+                okr_detail_display_md_update = gr.update(value="Nessun OKR generato o errore nella pipeline AI (Sempre).")
+                orchestration_results_update = None
+                selected_krs_update = []
+                krs_for_selection_update = []
+
+            yield (
+                agentic_report_md_update,
+                key_results_cbg_update,
+                okr_detail_display_md_update,
+                orchestration_results_update, # state
+                selected_krs_update, # state
+                krs_for_selection_update, # state
+                agentic_status_text
+            )
+        except Exception as e:
+            logging.error(f"Error during autonomous agentic pipeline execution: {e}", exc_info=True)
+            agentic_status_text = f"Errore pipeline AI (Sempre): {str(e)}"
+            yield (
+                gr.update(value=f"Errore generazione report AI (Sempre): {str(e)}"),
+                gr.update(choices=[], value=[], interactive=False),
+                gr.update(value=f"Errore generazione OKR AI (Sempre): {str(e)}"),
+                None, [], [], agentic_status_text # Reset states on error
+            )
+
+    # Define the output list for the agentic pipeline callbacks
+    # Order: Report MD, KR CBG, OKR Detail MD, RawResults State, SelectedKRIDs State, KRList State, Status MD
     agentic_pipeline_outputs_list = [
+        agentic_report_display_md,
+        key_results_cbg,
+        okr_detail_display_md,
         orchestration_raw_results_st,
+        selected_key_result_ids_st,
         key_results_for_selection_st,
+        agentic_pipeline_status_md
     ]
+    agentic_pipeline_inputs = [token_state] # Input for the autonomous run
+
+    # --- Event Handling ---
+    initial_load_event = org_urn_display.change(
+        fn=initial_load_sequence,
         inputs=[url_user_token_display, org_urn_display, token_state],
+        outputs=[status_box, token_state, sync_data_btn, dashboard_display_html],
+        show_progress="full"
     )

     initial_load_event.then(
+        fn=analytics_tab_instance._refresh_analytics_graphs_ui,
         inputs=[
+            token_state,
+            analytics_tab_instance.date_filter_selector,
+            analytics_tab_instance.custom_start_date_picker,
+            analytics_tab_instance.custom_end_date_picker,
+            chat_histories_st
         ],
+        outputs=analytics_tab_instance.graph_refresh_outputs_list,
+        show_progress="full"
+    ).then(
+        fn=run_agentic_pipeline_autonomously, # Generator function
+        inputs=agentic_pipeline_inputs,
+        outputs=agentic_pipeline_outputs_list,
+        show_progress="minimal" # Use minimal for generators that yield status
     )

+    sync_event_part1 = sync_data_btn.click(
+        fn=sync_all_linkedin_data_orchestrator,
         inputs=[token_state],
+        outputs=[sync_status_html_output, token_state],
+        show_progress="full"
     )
     sync_event_part2 = sync_event_part1.then(
+        fn=process_and_store_bubble_token,
+        inputs=[url_user_token_display, org_urn_display, token_state],
+        outputs=[status_box, token_state, sync_data_btn],
+        show_progress=False
     )
+    sync_event_part2.then(
+        fn=run_agentic_pipeline_autonomously, # Generator function
+        inputs=agentic_pipeline_inputs,
+        outputs=agentic_pipeline_outputs_list,
+        show_progress="minimal"
     )
+    sync_event_part3 = sync_event_part2.then(
+        fn=display_main_dashboard,
+        inputs=[token_state],
+        outputs=[dashboard_display_html],
+        show_progress=False
+    )
+    sync_event_graphs_after_sync = sync_event_part3.then(
+        fn=analytics_tab_instance._refresh_analytics_graphs_ui,
         inputs=[
+            token_state,
+            analytics_tab_instance.date_filter_selector,
+            analytics_tab_instance.custom_start_date_picker,
+            analytics_tab_instance.custom_end_date_picker,
+            chat_histories_st
         ],
+        outputs=analytics_tab_instance.graph_refresh_outputs_list,
+        show_progress="full"
     )

 if __name__ == "__main__":
+    if not os.environ.get(LINKEDIN_CLIENT_ID_ENV_VAR):
         logging.warning(f"ATTENZIONE: '{LINKEDIN_CLIENT_ID_ENV_VAR}' non impostata.")
     if not all(os.environ.get(var) for var in [BUBBLE_APP_NAME_ENV_VAR, BUBBLE_API_KEY_PRIVATE_ENV_VAR, BUBBLE_API_ENDPOINT_ENV_VAR]):
+        logging.warning("ATTENZIONE: Una o più variabili d'ambiente Bubble (BUBBLE_APP_NAME, BUBBLE_API_KEY_PRIVATE, BUBBLE_API_ENDPOINT) non sono impostate.")
+    if not AGENTIC_MODULES_LOADED:
+        logging.warning("CRITICAL: Agentic pipeline modules failed to load. Tabs 3 and 4 (Agentic Report & OKRs) will be non-functional.")
+    if not os.environ.get("GEMINI_API_KEY"): # Check GEMINI_API_KEY directly as GOOGLE_API_KEY is derived
+        logging.warning("ATTENZIONE: 'GEMINI_API_KEY' non impostata. Questo è necessario per le funzionalità AI, incluse le tab agentiche e il chatbot dei grafici.")

+    try:
+        logging.info(f"Gradio version: {gr.__version__}")
+        logging.info(f"Pandas version: {pd.__version__}")
+        logging.info(f"Matplotlib version: {matplotlib.__version__}, Backend: {matplotlib.get_backend()}")
+    except Exception as e:
+        logging.warning(f"Could not log library versions: {e}")

+    app.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)), debug=True)
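Note: app.py touches the new AnalyticsTab module only through the constructor arguments shown above plus create_tab_ui(), _refresh_analytics_graphs_ui(...), date_filter_selector, custom_start_date_picker, custom_end_date_picker and graph_refresh_outputs_list. services/analytics_tab_module.py itself is not part of this diff, so the following is only a rough sketch of the interface this file appears to assume; the tab label and the concrete component types (Dropdown/Textbox) are guesses, not the real module.

import gradio as gr

class AnalyticsTab:
    """Sketch of the surface app.py relies on; the real module will differ."""

    def __init__(self, token_state, chat_histories_st, current_chat_plot_id_st,
                 plot_data_for_chatbot_st, plot_id_to_formula_map, plot_formulas_data,
                 icons, fn_build_plot_area, fn_update_plot_figures,
                 fn_create_placeholder_plot, fn_get_initial_insight,
                 fn_generate_llm_response):
        # Keep references to the shared gr.State objects and injected helpers.
        self.token_state = token_state
        self.chat_histories_st = chat_histories_st
        self.current_chat_plot_id_st = current_chat_plot_id_st
        self.plot_data_for_chatbot_st = plot_data_for_chatbot_st
        self.plot_id_to_formula_map = plot_id_to_formula_map
        self.plot_formulas_data = plot_formulas_data
        self.icons = icons
        self.fn_build_plot_area = fn_build_plot_area
        self.fn_update_plot_figures = fn_update_plot_figures
        self.fn_create_placeholder_plot = fn_create_placeholder_plot
        self.fn_get_initial_insight = fn_get_initial_insight
        self.fn_generate_llm_response = fn_generate_llm_response
        # Attributes app.py wires into events; created in create_tab_ui().
        self.date_filter_selector = None
        self.custom_start_date_picker = None
        self.custom_end_date_picker = None
        self.graph_refresh_outputs_list = []

    def create_tab_ui(self):
        # Called inside `with gr.Tabs():` in app.py, so it opens its own TabItem.
        with gr.TabItem("2️⃣ Analytics", id="tab_analytics"):
            self.date_filter_selector = gr.Dropdown(
                choices=["Sempre", "Intervallo personalizzato"],
                value="Sempre", label="Filtro data")
            self.custom_start_date_picker = gr.Textbox(label="Data inizio (YYYY-MM-DD)")
            self.custom_end_date_picker = gr.Textbox(label="Data fine (YYYY-MM-DD)")
            plots_html = gr.HTML("Caricamento grafici...")
        # One entry per value returned by _refresh_analytics_graphs_ui.
        self.graph_refresh_outputs_list = [plots_html, self.plot_data_for_chatbot_st]

    def _refresh_analytics_graphs_ui(self, token_state_val, date_filter_val,
                                     custom_start_val, custom_end_val, chat_histories_val):
        # Must return one value per component in graph_refresh_outputs_list.
        return "Grafici aggiornati (sketch).", {}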
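The autonomous pipeline handler reads only a handful of keys from the orchestration result. Inferring from the accesses in run_agentic_pipeline_autonomously and update_okr_display_on_selection, the assumed shape is roughly the following; the values, and any field not among those accesses, are invented purely for illustration.

# Shape assumed by run_agentic_pipeline_autonomously / update_okr_display_on_selection.
example_orchestration_output = {
    "comprehensive_analysis_report": "## Markdown report produced by the agent...",
    "actionable_okrs_and_tasks": {
        "okrs": [
            # Each entry is passed opaquely to format_single_okr_for_display(),
            # so its internal fields are illustrative only.
            {"objective": "Increase organic engagement", "key_results": []},
        ],
    },
}

# extract_key_results_for_selection() is expected to flatten the OKRs into dicts
# carrying at least these keys, which feed the CheckboxGroup choices and the
# regrouping logic in update_okr_display_on_selection():
example_krs_for_selection = [
    {
        "unique_kr_id": "okr0_kr0",                              # CheckboxGroup value
        "kr_description": "Raise average reactions per post",    # CheckboxGroup label
        "okr_index": 0,
        "kr_index": 0,
    },
]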
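run_agentic_pipeline_autonomously is an async generator wired with show_progress="minimal", so each yield pushes one tuple of values to the listed outputs instead of returning once at the end. A minimal, self-contained Gradio example of that pattern (independent of this Space's data and modules) looks like this:

import asyncio
import gradio as gr

with gr.Blocks() as demo:
    status_md = gr.Markdown("Idle")
    result_md = gr.Markdown("")
    run_btn = gr.Button("Run")

    async def long_job():
        # Each yield must provide one value per output component, in order.
        yield "Working...", ""
        await asyncio.sleep(1)  # stand-in for the real orchestration call
        yield "Done", "Final result"

    run_btn.click(fn=long_job, inputs=None, outputs=[status_md, result_md],
                  show_progress="minimal")

if __name__ == "__main__":
    demo.launch()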