# TestingAssist: api/routes/automation_routes.py
from fastapi import APIRouter, HTTPException
from typing import List, Dict, Any
from loguru import logger
from pydantic import BaseModel
from services.automation_service import automation_service
from services.test_service import test_service
router = APIRouter()
class TestScriptRequest(BaseModel):
test_cases: List[Dict[str, Any]]
framework: str = "pytest"
language: str = "python"
browser: str = "chrome"
class TestPlanRequest(BaseModel):
requirements: List[Dict[str, Any]]
test_cases: List[Dict[str, Any]]
project_info: Dict[str, Any]
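# Example TestPlanRequest payload (an illustrative sketch only; "id", "title", and "priority"
# are the requirement keys read by the plan helpers below, while project_info is passed
# through unchanged, so its keys here are an assumption):
#
# {
#     "requirements": [
#         {"id": "REQ-1", "title": "User authentication", "priority": "High"}
#     ],
#     "test_cases": [
#         {"id": "TC-1", "title": "Login with valid credentials"}
#     ],
#     "project_info": {"name": "TestingAssist", "release": "1.0"}
# }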
@router.post("/generate-scripts")
async def generate_test_scripts(request: TestScriptRequest) -> Dict[str, Any]:
"""
Generate test automation scripts.
Parameters:
- request: Test script generation request
"""
try:
# Generate test scripts
scripts = await automation_service.generate_test_scripts(
test_cases=request.test_cases,
framework=request.framework,
language=request.language,
browser=request.browser
)
# Generate Gherkin feature file
feature = await automation_service.generate_gherkin_feature(
test_cases=request.test_cases
)
return {
"status": "success",
"scripts": scripts,
"feature": feature
}
except Exception as e:
logger.error(f"Error generating test scripts: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
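# Example request (a sketch; the host, port, and "/automation" prefix are assumptions
# that depend on where this router is mounted in the FastAPI application):
#
#   curl -X POST http://localhost:8000/automation/generate-scripts \
#        -H "Content-Type: application/json" \
#        -d '{"test_cases": [{"id": "TC-1", "title": "Login with valid credentials"}]}'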
@router.post("/generate-test-plan")
async def generate_test_plan(request: TestPlanRequest) -> Dict[str, Any]:
"""
Generate comprehensive test plan.
Parameters:
- request: Test plan generation request
"""
try:
# Calculate coverage metrics
coverage = await test_service.validate_test_cases(
test_cases=request.test_cases,
requirements=request.requirements
)
# Prioritize test cases
prioritized_cases = await test_service.prioritize_test_cases(
test_cases=request.test_cases,
requirements=request.requirements
)
# Generate test plan sections
test_plan = {
"project_info": request.project_info,
"scope": _generate_scope(request.requirements),
"approach": _generate_approach(request.test_cases),
"resources": _generate_resources(),
"schedule": _generate_schedule(request.test_cases),
"risk_assessment": _generate_risk_assessment(prioritized_cases),
"requirement_traceability": coverage["coverage_matrix"],
"coverage_metrics": {
"percentage": coverage["coverage_percentage"],
"total_requirements": coverage["total_requirements"],
"covered_requirements": coverage["covered_requirements"]
}
}
return {
"status": "success",
"test_plan": test_plan
}
except Exception as e:
logger.error(f"Error generating test plan: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
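# Example client call with httpx (a sketch; the base URL and "/automation" prefix are
# assumptions, and the payload mirrors the TestPlanRequest example above):
#
#   import httpx
#
#   payload = {
#       "requirements": [{"id": "REQ-1", "title": "User authentication", "priority": "High"}],
#       "test_cases": [{"id": "TC-1", "title": "Login with valid credentials"}],
#       "project_info": {"name": "TestingAssist"},
#   }
#   response = httpx.post("http://localhost:8000/automation/generate-test-plan", json=payload)
#   coverage = response.json()["test_plan"]["coverage_metrics"]["percentage"]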
def _generate_scope(requirements: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Generate test scope section."""
return {
"in_scope": [
{
"id": req["id"],
"title": req["title"],
"priority": req.get("priority", "Medium")
}
for req in requirements
],
"out_of_scope": [],
"assumptions": [
"Test environment is properly configured",
"Test data is available",
"Dependencies are stable"
]
}
def _generate_approach(test_cases: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Generate test approach section."""
return {
"strategy": "Risk-based testing approach",
"test_levels": [
"Unit Testing",
"Integration Testing",
"System Testing",
"Acceptance Testing"
],
"test_types": [
"Functional Testing",
"Non-functional Testing",
"Regression Testing"
],
"automation_approach": {
"framework": "pytest",
"tools": [
"Selenium WebDriver",
"Playwright",
"pytest-html"
],
"coverage_goal": "80%"
}
}
def _generate_resources() -> Dict[str, Any]:
"""Generate resources section."""
return {
"team": [
{
"role": "Test Lead",
"responsibilities": [
"Test plan creation",
"Resource allocation",
"Progress tracking"
]
},
{
"role": "Test Engineer",
"responsibilities": [
"Test case execution",
"Defect reporting",
"Test automation"
]
}
],
"tools": [
"Test Management Tool",
"Automation Framework",
"CI/CD Pipeline",
"Version Control System"
],
"environments": [
"Development",
"Testing",
"Staging",
"Production"
]
}
def _generate_schedule(test_cases: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Generate schedule section."""
return {
"phases": [
{
"name": "Planning",
"duration": "1 week",
"activities": [
"Test plan creation",
"Resource allocation",
"Tool setup"
]
},
{
"name": "Design",
"duration": "2 weeks",
"activities": [
"Test case design",
"Automation framework setup",
"Test data preparation"
]
},
{
"name": "Execution",
"duration": "3 weeks",
"activities": [
"Test case execution",
"Defect reporting",
"Regression testing"
]
},
{
"name": "Closure",
"duration": "1 week",
"activities": [
"Test summary report",
"Lessons learned",
"Knowledge transfer"
]
}
],
"milestones": [
"Test plan approval",
"Test case design completion",
"Automation framework ready",
"Test execution completion",
"Test closure"
]
}
def _generate_risk_assessment(
prioritized_cases: List[Dict[str, Any]]
) -> Dict[str, Any]:
"""Generate risk assessment section."""
return {
"high_risk_areas": [
{
"test_case": case["test_case"],
"risk_level": case["risk_level"],
"justification": case["justification"]
}
for case in prioritized_cases
if case["risk_level"] == "High"
],
"mitigation_strategies": [
"Early testing of high-risk areas",
"Additional test coverage for critical features",
"Regular risk reassessment",
"Automated regression testing"
],
"contingency_plans": [
"Resource reallocation if needed",
"Schedule adjustment for high-risk areas",
"Additional testing cycles if required"
]
}