#!/usr/bin/env python3
"""
Demonstration of the response_formatter.py utility.

This script shows how to integrate the ResponseFormatter with BasicAgent
to ensure HF evaluation format compliance.
"""

import sys
import os

# Make the project root importable so `utils.response_formatter` resolves
# when this demo is run directly from its subdirectory.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from utils.response_formatter import (
    ResponseFormatter,
    ResponseType,
    FormatStandard,
    FormatConfig,
    format_for_hf_evaluation,
    validate_answer_format,
    BasicAgentFormatter
)


def demonstrate_basic_formatting():
    """Demonstrate basic response formatting capabilities."""
    print("๐Ÿ”ง Basic Response Formatting Demo")
    print("=" * 50)

    # Sample problematic responses that need formatting.
    samples = [
        "FINAL ANSWER: The capital of France is Paris",
        "**RESULT:** 25 + 37 = 62",
        "## Answer\n\nThe temperature is 212ยฐF",
        "`Answer:` The solar system has 8 planets",
        "CONCLUSION: Machine learning is a subset of AI",
    ]

    for raw in samples:
        cleaned = format_for_hf_evaluation(raw)
        print(f"๐Ÿ“ Original: '{raw}'")
        print(f"โœ… Formatted: '{cleaned}'")
        print()


def demonstrate_validation():
    """Demonstrate response validation capabilities."""
    print("๐Ÿ” Response Validation Demo")
    print("=" * 50)

    # Pairs of (candidate answer, human-readable label for the case).
    cases = [
        ("Paris", "Valid simple answer"),
        ("FINAL ANSWER: 42", "Contains forbidden prefix"),
        ("The result is 212 degrees Fahrenheit", "Good quality with units"),
        ("", "Empty answer"),
        ("I don't know", "Uncertain response"),
    ]

    for candidate, label in cases:
        ok, problems, score = validate_answer_format(candidate)
        print(f"๐Ÿ“ Testing: {label}")
        print(f" Answer: '{candidate}'")
        print(f" Valid: {ok}")
        print(f" Quality Score: {score:.2f}")
        if problems:
            print(f" Issues: {', '.join(problems)}")
        print()


def demonstrate_agent_integration():
    """Demonstrate BasicAgent integration."""
    print("๐Ÿค– BasicAgent Integration Demo")
    print("=" * 50)

    fmt = BasicAgentFormatter()

    # Simulate responses from BasicAgent with metadata.
    simulated = [
        {
            "answer": "FINAL ANSWER: 25 + 37 = 62",
            "metadata": {"question_type": "mathematical"},
            "description": "Mathematical calculation"
        },
        {
            "answer": "**Research Result:** Paris is the capital of France because it's the political center.",
            "metadata": {"use_web_search": True},
            "description": "Web research response"
        },
        {
            "answer": "ANSWER: The human heart has four chambers.",
            "metadata": {"question_type": "simple_factual"},
            "description": "Simple factual answer"
        }
    ]

    for item in simulated:
        result = fmt.format_agent_response(item["answer"], item["metadata"])
        print(f"๐Ÿ“ Scenario: {item['description']}")
        print(f" Original: '{item['answer']}'")
        print(f" Metadata: {item['metadata']}")
        print(f" Formatted: '{result}'")
        print()


def demonstrate_advanced_features():
    """Demonstrate advanced formatting features."""
    print("โšก Advanced Features Demo")
    print("=" * 50)

    # Build a formatter with an explicit, non-default configuration.
    cfg = FormatConfig(
        format_standard=FormatStandard.HF_EVALUATION,
        remove_markdown=True,
        remove_prefixes=True,
        max_length=1000,
        ensure_period=True
    )
    rf = ResponseFormatter(cfg)

    # Inputs for the batch-processing demo, paired element-wise with types.
    raw_answers = [
        "FINAL ANSWER: The speed of light is 299,792,458 m/s",
        "**Result:** Converting 100ยฐC to Fahrenheit: (100 ร— 9/5) + 32 = 212ยฐF",
        "## Conclusion\n\nThe Earth orbits the Sun",
        "ANSWER: Machine learning algorithms learn from data",
    ]
    kinds = [
        ResponseType.SIMPLE_ANSWER,
        ResponseType.CALCULATION,
        ResponseType.SIMPLE_ANSWER,
        ResponseType.EXPLANATION,
    ]

    print("๐Ÿ“Š Batch Processing Results:")
    outcomes = rf.batch_format(raw_answers, kinds)
    for idx, (src, res) in enumerate(zip(raw_answers, outcomes), start=1):
        print(f"\n{idx}. Original: '{src[:50]}...'")
        print(f" Formatted: '{res.answer}'")
        print(f" Type: {res.response_type.value}")
        print(f" Valid: {res.validation.is_valid}")
        print(f" Quality: {res.validation.quality_score:.2f}")

    # Aggregate statistics over the batch results.
    stats = rf.get_format_statistics(outcomes)
    print(f"\n๐Ÿ“ˆ Statistics:")
    print(f" Total Responses: {stats['total_responses']}")
    print(f" Valid Responses: {stats['valid_responses']}")
    print(f" Validity Rate: {stats['validity_rate']:.2f}")
    print(f" Avg Quality Score: {stats['average_quality_score']:.2f}")


def demonstrate_integration_example():
    """Show how to integrate with existing BasicAgent code."""
    print("๐Ÿ”— Integration Example")
    print("=" * 50)

    # Example of how to modify BasicAgent to use ResponseFormatter.
    example_code = '''
# In your BasicAgent class:
from utils.response_formatter import BasicAgentFormatter

class BasicAgent:
    def __init__(self):
        self.response_formatter = BasicAgentFormatter()
        # ... other initialization

    def __call__(self, question):
        # ... existing processing logic
        raw_answer = self.process_question(question)

        # Format for HF evaluation compliance
        metadata = {
            "question_type": self.classify_question(question),
            "use_web_search": self.used_web_search,
        }

        formatted_answer = self.response_formatter.format_agent_response(
            raw_answer, metadata
        )

        return formatted_answer
'''

    print("๐Ÿ“ Integration Code Example:")
    print(example_code)

    print("\nโœ… Benefits of Integration:")
    benefits = [
        "โœ“ Automatic removal of 'FINAL ANSWER:' prefixes",
        "โœ“ Clean markdown formatting removal",
        "โœ“ Response quality validation and scoring",
        "โœ“ Consistent HF evaluation format compliance",
        "โœ“ Comprehensive logging and debugging support",
        "โœ“ Configurable formatting options",
        "โœ“ Batch processing capabilities for testing"
    ]
    for benefit in benefits:
        print(f" {benefit}")


if __name__ == "__main__":
    print("๐Ÿงช Response Formatter Comprehensive Demo")
    print("=" * 60)
    print()
    demonstrate_basic_formatting()
    print()
    demonstrate_validation()
    print()
    demonstrate_agent_integration()
    print()
    demonstrate_advanced_features()
    print()
    demonstrate_integration_example()
    print()
    print("๐ŸŽ‰ Demo completed! The ResponseFormatter is ready for Phase 2A integration.")
    print("๐Ÿ“ Files created:")
    print(" - utils/response_formatter.py (Main utility)")
    print(" - utils/test_response_formatter.py (Test suite)")
    print(" - utils/demo_response_formatter.py (This demo)")