import pandas as pd
from typing import Dict, List, Tuple
from .metrics import compute_all_metrics
from .semantic_embedding import get_model_and_device, train_fasttext_model, FASTTEXT_MODEL_ID
from .tokenize import tokenize_texts
import logging
from itertools import combinations
logger = logging.getLogger(__name__)
def process_texts(
text_data: Dict[str, str],
filenames: List[str],
enable_semantic: bool = True,
model_name: str = "buddhist-nlp/buddhist-sentence-similarity",
use_stopwords: bool = True,
use_lite_stopwords: bool = False,
progress_callback = None
) -> Tuple[pd.DataFrame, pd.DataFrame, str]:
"""
Processes uploaded texts, segments them by chapter marker, and computes metrics between chapters of different files.
Args:
text_data (Dict[str, str]): A dictionary mapping filenames to their content.
filenames (List[str]): A list of filenames that were uploaded.
        enable_semantic (bool, optional): Whether to compute semantic similarity metrics.
            Requires loading an embedding model, which can be time-consuming. Defaults to True.
        model_name (str, optional): The embedding model to use for semantic similarity: either a valid
            sentence transformer identifier on Hugging Face or FASTTEXT_MODEL_ID for the FastText model.
            Defaults to "buddhist-nlp/buddhist-sentence-similarity".
use_stopwords (bool, optional): Whether to use stopwords in the metrics calculation. Defaults to True.
use_lite_stopwords (bool, optional): Whether to use the lite stopwords list (common particles only)
instead of the comprehensive list. Only applies if use_stopwords is True. Defaults to False.
progress_callback (callable, optional): A callback function for reporting progress updates.
Should accept a float between 0 and 1 and a description string. Defaults to None.
Returns:
Tuple[pd.DataFrame, pd.DataFrame, str]:
- metrics_df: DataFrame with similarity metrics between corresponding chapters of file pairs.
Contains columns: 'Text Pair', 'Chapter', 'Jaccard Similarity (%)', 'Normalized LCS',
'Semantic Similarity' (if enable_semantic=True), and 'TF-IDF Cosine Sim'.
- word_counts_df: DataFrame with word counts for each segment (chapter) in each file.
Contains columns: 'Filename', 'ChapterNumber', 'SegmentID', 'WordCount'.
- warning: A string containing any warnings generated during processing (e.g., missing chapter markers).
Raises:
RuntimeError: If the botok tokenizer fails to initialize.
ValueError: If the input files cannot be processed or if metrics computation fails.
"""
    # Initialize model, device, and model-type defaults
    st_model, st_device = None, None
    model_type = "sentence_transformer"  # default; overwritten when a model is loaded
    model_warning = ""
# Update progress if callback provided
if progress_callback is not None:
try:
progress_callback(0.25, desc="Preparing for text analysis...")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
# Continue processing even if progress reporting fails
# Load semantic model if enabled
if enable_semantic:
logger.info("Semantic similarity enabled. Loading embedding model...")
try:
logger.info("Using model: %s", model_name)
# Check if this is a FastText model request
if model_name == FASTTEXT_MODEL_ID:
# Try to load the official Facebook FastText Tibetan model first
if progress_callback is not None:
try:
progress_callback(0.25, desc="Loading official Facebook FastText Tibetan model...")
except Exception as e:
logger.warning("Progress callback error (non-critical): %s", str(e))
st_model, st_device, model_type = get_model_and_device(model_id=model_name)
# If model is None, we need to train a fallback model
if st_model is None:
if progress_callback is not None:
try:
progress_callback(0.25, desc="Official model unavailable. Training fallback FastText model...")
except Exception as e:
logger.warning("Progress callback error (non-critical): %s", str(e))
# Collect all text data for training
all_texts = list(text_data.values())
# Train the model with standard parameters for stability
st_model = train_fasttext_model(all_texts, dim=100, epoch=5)
if progress_callback is not None:
try:
progress_callback(0.3, desc="Fallback FastText model trained successfully")
except Exception as e:
logger.warning("Progress callback error (non-critical): %s", str(e))
else:
if progress_callback is not None:
try:
progress_callback(0.3, desc="Official Facebook FastText Tibetan model loaded successfully")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
else:
# For sentence transformers
st_model, st_device, model_type = get_model_and_device(model_id=model_name)
logger.info(f"Model {model_name} loaded successfully on {st_device}.")
if progress_callback is not None:
try:
progress_callback(0.3, desc="Model loaded successfully")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
except Exception as e:
error_msg = str(e)
logger.error(f"Failed to load sentence transformer model: {error_msg}. Semantic similarity will not be available.")
# Create a user-friendly warning message
if "is not a valid model identifier" in error_msg:
model_warning = f"The model '{model_name}' could not be found on Hugging Face. Semantic similarity will not be available."
elif "CUDA out of memory" in error_msg:
model_warning = "Not enough GPU memory to load the semantic model. Try using a smaller model or disable semantic similarity."
else:
model_warning = f"Failed to load semantic model: {error_msg}. Semantic similarity will not be available."
if progress_callback is not None:
try:
progress_callback(0.3, desc="Continuing without semantic model")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
else:
logger.info("Semantic similarity disabled. Skipping model loading.")
if progress_callback is not None:
try:
progress_callback(0.3, desc="Processing text segments")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
# Detect chapter marker and segment texts
if progress_callback is not None:
try:
progress_callback(0.35, desc="Segmenting texts by chapters...")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
chapter_marker = "༈"
fallback = False
segment_texts = {}
# Process each file
for i, fname in enumerate(filenames):
if progress_callback is not None and len(filenames) > 1:
try:
progress_callback(0.35 + (0.05 * (i / len(filenames))),
desc=f"Segmenting file {i+1}/{len(filenames)}: {fname}")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
content = text_data[fname]
# Check if content is empty
if not content.strip():
logger.warning(f"File '{fname}' is empty or contains only whitespace.")
continue
# Split by chapter marker if present
if chapter_marker in content:
segments = [
seg.strip() for seg in content.split(chapter_marker) if seg.strip()
]
# Check if we have valid segments after splitting
if not segments:
logger.warning(f"File '{fname}' contains chapter markers but no valid text segments.")
continue
for idx, seg in enumerate(segments):
seg_id = f"{fname}|chapter {idx+1}"
segment_texts[seg_id] = seg
else:
# No chapter markers found, treat entire file as one segment
seg_id = f"{fname}|chapter 1"
segment_texts[seg_id] = content.strip()
fallback = True
# Generate warning if no chapter markers found
warning = model_warning # Include any model warnings
if fallback:
chapter_warning = (
"No chapter marker found in one or more files. "
"Each file will be treated as a single segment. "
"For best results, add a unique marker (e.g., ༈) to separate chapters or sections."
)
warning = warning + " " + chapter_warning if warning else chapter_warning
# Check if we have any valid segments
if not segment_texts:
logger.error("No valid text segments found in any of the uploaded files.")
return pd.DataFrame(), pd.DataFrame(), "No valid text segments found in the uploaded files. Please check your files and try again."
# Group chapters by filename (preserving order)
if progress_callback is not None:
try:
progress_callback(0.4, desc="Organizing text segments...")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
file_to_chapters = {}
for seg_id in segment_texts:
fname = seg_id.split("|")[0]
file_to_chapters.setdefault(fname, []).append(seg_id)
# For each pair of files, compare corresponding chapters (by index)
if progress_callback is not None:
try:
progress_callback(0.45, desc="Computing similarity metrics...")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
results = []
files = list(file_to_chapters.keys())
# Check if we have at least two files to compare
if len(files) < 2:
logger.warning("Need at least two files to compute similarity metrics.")
return pd.DataFrame(), pd.DataFrame(), "Need at least two files to compute similarity metrics."
# Track total number of comparisons for progress reporting
total_comparisons = 0
for file1, file2 in combinations(files, 2):
chaps1 = file_to_chapters[file1]
chaps2 = file_to_chapters[file2]
total_comparisons += min(len(chaps1), len(chaps2))
# Process each file pair
comparison_count = 0
for file1, file2 in combinations(files, 2):
chaps1 = file_to_chapters[file1]
chaps2 = file_to_chapters[file2]
min_chaps = min(len(chaps1), len(chaps2))
if progress_callback is not None:
try:
progress_callback(0.45, desc=f"Comparing {file1} with {file2}...")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
for idx in range(min_chaps):
seg1 = chaps1[idx]
seg2 = chaps2[idx]
# Update progress
comparison_count += 1
if progress_callback is not None and total_comparisons > 0:
try:
progress_percentage = 0.45 + (0.25 * (comparison_count / total_comparisons))
progress_callback(progress_percentage,
desc=f"Computing metrics for chapter {idx+1} ({comparison_count}/{total_comparisons})")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
try:
# Compute metrics for this chapter pair
pair_metrics = compute_all_metrics(
{seg1: segment_texts[seg1], seg2: segment_texts[seg2]},
model=st_model,
device=st_device,
enable_semantic=enable_semantic,
                    model_type=model_type,
use_stopwords=use_stopwords,
use_lite_stopwords=use_lite_stopwords
)
                # Label the pair with both filenames and record the chapter number
pair_metrics.loc[:, "Text Pair"] = f"{file1} vs {file2}"
pair_metrics.loc[:, "Chapter"] = idx + 1
results.append(pair_metrics)
except Exception as e:
logger.error(f"Error computing metrics for {seg1} vs {seg2}: {e}")
# Continue with other comparisons instead of failing completely
continue
# Create the metrics DataFrame
if results:
metrics_df = pd.concat(results, ignore_index=True)
else:
metrics_df = pd.DataFrame()
warning += " No valid metrics could be computed. Please check your files and try again."
# Calculate word counts
if progress_callback is not None:
try:
progress_callback(0.75, desc="Calculating word counts...")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
word_counts_data = []
# Process each segment
for i, (seg_id, text_content) in enumerate(segment_texts.items()):
# Update progress
if progress_callback is not None and len(segment_texts) > 0:
try:
progress_percentage = 0.75 + (0.15 * (i / len(segment_texts)))
progress_callback(progress_percentage, desc=f"Counting words in segment {i+1}/{len(segment_texts)}")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
fname, chapter_info = seg_id.split("|", 1)
chapter_num = int(chapter_info.replace("chapter ", ""))
try:
# Use botok for accurate word count for raw Tibetan text
tokenized_segments = tokenize_texts([text_content]) # Returns a list of lists
if tokenized_segments and tokenized_segments[0]:
word_count = len(tokenized_segments[0])
else:
word_count = 0
word_counts_data.append(
{
"Filename": fname.replace(".txt", ""),
"ChapterNumber": chapter_num,
"SegmentID": seg_id,
"WordCount": word_count,
}
)
except Exception as e:
logger.error(f"Error calculating word count for segment {seg_id}: {e}")
# Add entry with 0 word count to maintain consistency
word_counts_data.append(
{
"Filename": fname.replace(".txt", ""),
"ChapterNumber": chapter_num,
"SegmentID": seg_id,
"WordCount": 0,
}
)
# Create and sort the word counts DataFrame
word_counts_df = pd.DataFrame(word_counts_data)
if not word_counts_df.empty:
word_counts_df = word_counts_df.sort_values(
by=["Filename", "ChapterNumber"]
).reset_index(drop=True)
if progress_callback is not None:
try:
progress_callback(0.95, desc="Analysis complete!")
except Exception as e:
logger.warning(f"Progress callback error (non-critical): {e}")
return metrics_df, word_counts_df, warning
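# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The filenames and the short Tibetan
# sample strings below are hypothetical; semantic similarity is disabled so no
# embedding model has to be downloaded. Because of the relative imports above,
# run this via the package (e.g. `python -m <package>.<module>`), not as a
# standalone script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample_texts = {
        "text_a.txt": "བཀྲ་ཤིས་བདེ་ལེགས། ༈ ཆོས་ཀྱི་རྣམ་གྲངས་བཤད་པ།",
        "text_b.txt": "བཀྲ་ཤིས་བདེ་ལེགས་ཞུ། ༈ ཆོས་ཀྱི་རྣམ་གྲངས།",
    }
    metrics, word_counts, warn = process_texts(
        text_data=sample_texts,
        filenames=list(sample_texts.keys()),
        enable_semantic=False,  # skip embedding-model loading for this quick check
    )
    print(metrics)
    print(word_counts)
    if warn:
        print(f"Warning: {warn}")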