# insights_ui_generator.py
import logging
from typing import Dict, Any, List, Optional
# Configure logger for this module. Assumes logging is configured in app.py or main entry point.
logger = logging.getLogger(__name__)


def format_report_to_markdown(report_string: Optional[str]) -> str:
    """
    Formats the comprehensive analysis report string into a displayable Markdown format.
    This can be enhanced to add more structure if the report has implicit sections.

    Args:
        report_string: The raw text report from the orchestrator.

    Returns:
        A Markdown formatted string.
    """
    if not report_string or not report_string.strip():
        return "## Comprehensive Analysis Report\n\n*No analysis report was generated, or an error occurred during its generation.*"

    # Simple formatting for now. Could be enhanced (e.g., looking for patterns like "Section X:" to make them H3)
    # Ensure paragraphs are separated. Replace multiple newlines with double newlines for Markdown paragraphs.
    # report_string_cleaned = re.sub(r'\n\s*\n', '\n\n', report_string.strip())
    formatted_report = f"## Comprehensive Analysis Report\n\n{report_string.strip()}"

    # You might add more sophisticated parsing here if your LLM output for the report
    # has a consistent structure that can be converted to richer Markdown.
    return formatted_report


def extract_key_results_for_selection(
    actionable_okrs_and_tasks_dict: Optional[Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """
    Extracts Key Results from the OKR structure for UI selection in Gradio.
    Each Key Result is given a unique ID for state management in the Gradio app.

    Args:
        actionable_okrs_and_tasks_dict: The dictionary representation of TaskExtractionOutput,
            typically `orchestration_results["actionable_okrs_and_tasks"]`.
            Expected structure: {'okrs': List[OKR_dict], ...}

    Returns:
        A list of dictionaries, where each dictionary represents a Key Result:
        {'okr_index': int, 'kr_index': int, 'okr_objective': str,
         'kr_description': str, 'unique_kr_id': str}
    """
    key_results_for_ui: List[Dict[str, Any]] = []
    if not actionable_okrs_and_tasks_dict or not isinstance(actionable_okrs_and_tasks_dict.get('okrs'), list):
        logger.warning("No 'okrs' list found or it's not a list in the provided task extraction output.")
        return key_results_for_ui

    okrs_list = actionable_okrs_and_tasks_dict['okrs']
    for okr_idx, okr_data in enumerate(okrs_list):
        if not isinstance(okr_data, dict):
            logger.warning(f"OKR item at index {okr_idx} is not a dictionary, skipping.")
            continue

        okr_objective = okr_data.get('objective_description', f"Objective {okr_idx + 1} (Unnamed)")
        key_results_list = okr_data.get('key_results', [])
        if not isinstance(key_results_list, list):
            logger.warning(f"Expected 'key_results' in OKR '{okr_objective}' (index {okr_idx}) to be a list, got {type(key_results_list)}.")
            continue

        for kr_idx, kr_data in enumerate(key_results_list):
            if not isinstance(kr_data, dict):
                logger.warning(f"Key Result item for OKR '{okr_objective}' at KR index {kr_idx} is not a dictionary, skipping.")
                continue

            kr_description = kr_data.get('key_result_description', f"Key Result {kr_idx + 1} (No description provided)")
            key_results_for_ui.append({
                'okr_index': okr_idx,  # Index of the parent OKR in the original list
                'kr_index': kr_idx,  # Index of this KR within its parent OKR
                'okr_objective': okr_objective,
                'kr_description': kr_description,
                'unique_kr_id': f"okr{okr_idx}_kr{kr_idx}"  # Unique ID for Gradio component linking
            })

    if not key_results_for_ui:
        logger.info("No Key Results were extracted for selection from the OKR data.")
    return key_results_for_ui
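
# Illustrative example (hypothetical input, not real orchestrator output) of the
# shape returned by extract_key_results_for_selection:
#
#   extract_key_results_for_selection(
#       {"okrs": [{"objective_description": "Grow newsletter audience",
#                  "key_results": [{"key_result_description": "Reach 10k subscribers"}]}]}
#   )
#   # -> [{'okr_index': 0, 'kr_index': 0,
#   #      'okr_objective': 'Grow newsletter audience',
#   #      'kr_description': 'Reach 10k subscribers',
#   #      'unique_kr_id': 'okr0_kr0'}]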


def format_single_okr_for_display(
    okr_data: Dict[str, Any],
    accepted_kr_indices: Optional[List[int]] = None,
    okr_main_index: Optional[int] = None  # For titling if needed
) -> str:
    """
    Formats a single complete OKR object (with its Key Results and Tasks) into a
    detailed Markdown string for display. Optionally filters to show only accepted Key Results.

    Args:
        okr_data: A dictionary representing a single OKR from the TaskExtractionOutput.
        accepted_kr_indices: Optional list of indices of Key Results within this OKR
            that were accepted by the user. If None, all KRs are displayed.
        okr_main_index: Optional index of this OKR in the main list, for titling.

    Returns:
        A Markdown formatted string representing the OKR.
    """
    if not okr_data or not isinstance(okr_data, dict):
        return "*Invalid OKR data provided for display.*\n"

    md_parts = []
    objective_title_num = f" {okr_main_index + 1}" if okr_main_index is not None else ""
    objective = okr_data.get('objective_description', f"Unnamed Objective{objective_title_num}")
    objective_timeline = okr_data.get('objective_timeline', '')
    objective_owner = okr_data.get('objective_owner', 'N/A')

    md_parts.append(f"### Objective{objective_title_num}: {objective}")
    if objective_timeline:
        md_parts.append(f"**Overall Timeline:** {objective_timeline}")
    if objective_owner and objective_owner != 'N/A':
        md_parts.append(f"**Overall Owner:** {objective_owner}")
    md_parts.append("\n---")

    key_results_list = okr_data.get('key_results', [])
    displayed_kr_count = 0
    if not isinstance(key_results_list, list) or not key_results_list:
        md_parts.append("\n*No Key Results defined for this objective.*")
    else:
        for kr_idx, kr_data in enumerate(key_results_list):
            if accepted_kr_indices is not None and kr_idx not in accepted_kr_indices:
                continue  # Skip this KR if a filter is applied and it's not in the accepted list
            displayed_kr_count += 1

            if not isinstance(kr_data, dict):
                md_parts.append(f"\n**Key Result {kr_idx+1}:** *Invalid data format for this Key Result.*")
                continue

            kr_desc = kr_data.get('key_result_description') or kr_data.get('description') or f"Key Result {kr_idx+1} (No description)"
            target_metric = kr_data.get('target_metric')
            target_value = kr_data.get('target_value')
            kr_data_subj = kr_data.get('data_subject')
            kr_type = kr_data.get('key_result_type')

            md_parts.append(f"\n#### Key Result {displayed_kr_count} (Original Index: {kr_idx+1}): {kr_desc}")
            if target_metric and target_value:
                md_parts.append(f"  - **Target:** Measure `{target_metric}` to achieve/reach `{target_value}`")
            if kr_type and kr_data_subj:
                md_parts.append(f"  **Key result type**: {kr_type}, for **data subject** {kr_data_subj}")

            tasks_list = kr_data.get('tasks', [])
            if tasks_list and isinstance(tasks_list, list):
                md_parts.append("  **Associated Tasks:**")
                for task_idx, task_data in enumerate(tasks_list):
                    if not isinstance(task_data, dict):
                        md_parts.append(f"  - Task {task_idx+1}: *Invalid data format for this task.*")
                        continue

                    task_desc = task_data.get('task_description') or task_data.get('description') or f"Task {task_idx+1} (No description)"
                    task_cat = task_data.get('task_category') or task_data.get('category') or 'N/A'
                    task_effort = task_data.get('effort', 'N/A')
                    task_timeline = task_data.get('timeline', 'N/A')
                    task_priority = task_data.get('priority', 'N/A')
                    task_responsible = task_data.get('responsible_party', 'N/A')
                    task_type = task_data.get('task_type', 'N/A')
                    data_subject_val = task_data.get('data_subject')
                    data_subject_str = f", Data Subject: `{data_subject_val}`" if data_subject_val and task_type == 'tracking' else ""

                    md_parts.append(f"  - **{task_idx+1}. {task_desc}**")
                    md_parts.append(f"    - *Category:* {task_cat} | *Type:* {task_type}{data_subject_str}")
                    md_parts.append(f"    - *Priority:* **{task_priority}** | *Effort:* {task_effort} | *Timeline:* {task_timeline}")
                    md_parts.append(f"    - *Responsible:* {task_responsible}")

                    obj_deliv = task_data.get('objective_deliverable')
                    if obj_deliv:
                        md_parts.append(f"    - *Objective/Deliverable:* {obj_deliv}")
                    success_crit = task_data.get('success_criteria_metrics')
                    if success_crit:
                        md_parts.append(f"    - *Success Metrics:* {success_crit}")
                    why_prop = task_data.get('why_proposed')
                    if why_prop:
                        md_parts.append(f"    - *Rationale:* {why_prop}")
                    priority_just = task_data.get('priority_justification')
                    if priority_just:
                        md_parts.append(f"    - *Priority Justification:* {priority_just}")
                    dependencies = task_data.get('dependencies_prerequisites')
                    if dependencies:
                        md_parts.append(f"    - *Dependencies:* {dependencies}")
                    md_parts.append("")  # Extra newline for spacing between task details
            else:
                md_parts.append("  *No tasks defined for this Key Result.*")
            md_parts.append("\n---\n")  # Separator between Key Results

    if displayed_kr_count == 0 and accepted_kr_indices is not None:
        md_parts.append("\n*No Key Results matching the 'accepted' filter for this objective.*")

    return "\n".join(md_parts)
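

if __name__ == "__main__":
    # Minimal manual smoke test for the helpers above. The dictionary below is
    # hypothetical sample data shaped like TaskExtractionOutput; it is not produced
    # by the real orchestrator and only exists to eyeball the generated Markdown.
    logging.basicConfig(level=logging.INFO)

    sample_okrs = {
        "okrs": [
            {
                "objective_description": "Improve content engagement",
                "objective_timeline": "Q3",
                "objective_owner": "Content Team",
                "key_results": [
                    {
                        "key_result_description": "Increase average post comments",
                        "target_metric": "comments_per_post",
                        "target_value": "25",
                        "tasks": [
                            {
                                "task_description": "Publish two discussion-prompt posts per week",
                                "task_category": "Content",
                                "priority": "High",
                                "effort": "Medium",
                                "timeline": "Ongoing",
                                "responsible_party": "Content Team",
                                "task_type": "initiative",
                            }
                        ],
                    }
                ],
            }
        ]
    }

    print(format_report_to_markdown("Engagement is trending upward; comments lag behind reactions."))
    for kr in extract_key_results_for_selection(sample_okrs):
        print(kr["unique_kr_id"], "->", kr["kr_description"])
    print(format_single_okr_for_display(sample_okrs["okrs"][0], okr_main_index=0))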