# NOTE: Hugging Face Space page metadata (status line, file size, commit
# hashes, rendered line-number gutter) was captured by the extraction and
# has been removed — it was never part of the Python source.
import unittest
import streamlit as st
import requests
from fpdf import FPDF
import os
import time
from datetime import datetime
def call_mistral_api(prompt):
    """Stand-in for the real Mistral API call.

    Always returns the canned string ``"Functional"`` regardless of the
    prompt, so the unit tests below can run without network access.
    Replace with a genuine HTTP call for actual usage.

    Args:
        prompt: The full prompt text that would be sent to the model.

    Returns:
        The fixed mock response ``"Functional"``.
    """
    return "Functional"
class RequirementTestCase(unittest.TestCase):
    """Unit tests exercising the requirement-analysis prompts.

    Each test sends a prompt for the same sample requirement to
    ``call_mistral_api`` and compares the reply with a hand-picked
    expectation. With the mock API in place only the classification
    test can match; the others document the intended real behavior.
    """

    # Sample requirement shared by every test method.
    SAMPLE = "The system shall allow users to login."

    def test_requirement_type(self):
        """The model should label the sample as a functional requirement."""
        requirement = self.SAMPLE
        expected = "Functional"
        actual = call_mistral_api(f"Classify the following requirement as Functional or Non-Functional:\n\n{requirement}\n\nType:")
        self.assertEqual(actual, expected, f"Test failed for requirement type. Expected: {expected}, but got: {actual}")

    def test_stakeholders_identification(self):
        """The model should list the stakeholders touched by the requirement."""
        requirement = self.SAMPLE
        expected = "User, System Administrator"
        actual = call_mistral_api(f"Identify the stakeholders for the following requirement:\n\n{requirement}\n\nStakeholders:")
        self.assertEqual(actual, expected, f"Test failed for stakeholders. Expected: {expected}, but got: {actual}")

    def test_defects_identification(self):
        """The model should find no major defects in the sample requirement."""
        requirement = self.SAMPLE
        expected = "No defects"
        actual = call_mistral_api(f"Analyze the following requirement and identify ONLY MAJOR defects:\n\n{requirement}\n\nDefects:")
        self.assertEqual(actual, expected, f"Test failed for defects. Expected: {expected}, but got: {actual}")

    # Add more test cases as needed...
class DetailedTextTestRunner(unittest.TextTestRunner):
    """``TextTestRunner`` variant that mirrors results into the Streamlit UI."""

    def getDescription(self, test):
        """Describe a test as ``ClassName (method_name)`` for readable output."""
        return f"{test.__class__.__name__} ({test._testMethodName})"

    def run(self, test):
        """Run the suite via the parent runner, then render a Streamlit summary."""
        outcome = super().run(test)
        self.display_results(outcome)
        return outcome

    def display_results(self, result):
        """Render per-test failures/errors (or a success banner) plus totals.

        Args:
            result: The ``unittest.TestResult`` produced by ``run``.
        """
        if not (result.errors or result.failures):
            st.success("All tests passed successfully!")
        else:
            st.subheader("Test Results")
            # Errors and failures carry the same (test, traceback) shape,
            # so they can be reported through one loop.
            for case, traceback in result.errors + result.failures:
                st.write(f"**Test Case:** {case}")
                st.write(f"**Error/Failure:** {traceback}")
        st.subheader("Test Summary")
        st.write(f"Total Tests Run: {result.testsRun}")
        st.write(f"Failures: {len(result.failures)}")
        st.write(f"Errors: {len(result.errors)}")
        st.write(f"Skipped: {len(result.skipped)}")
def main():
    """Streamlit entry point: collect requirements, analyze, report, self-test.

    Workflow:
      1. Read free-form requirements text and split it into individual items.
      2. On button press, run ``analyze_requirement`` on each item and show
         the per-requirement analysis.
      3. Offer the combined results as a downloadable PDF via
         ``generate_pdf_report``.
      4. Always finish by executing the module's unit tests in the UI.
    """
    st.title("AI Powered Requirement Analysis and Defect Detection using Large Language Model Mistral")
    st.markdown("**Team Name:** Sadia, Areeba, Rabbia, Tesmia")
    st.markdown("**Model:** Mistral")

    # Free-form input; items may be separated by newlines or periods.
    raw_text = st.text_area("Enter your requirements (one per line or separated by periods):")
    parsed = []
    if raw_text:
        # Normalize newlines to periods, then keep each non-empty fragment.
        parsed = [item.strip() for item in raw_text.replace("\n", ".").split(".") if item.strip()]

    if st.button("Analyze Requirements"):
        if not parsed:
            st.warning("Please enter requirements.")
        else:
            # Every entry in `parsed` is already stripped and non-empty.
            analyses = [analyze_requirement(item) for item in parsed]

            st.subheader("Analysis Results")
            for idx, analysis in enumerate(analyses, start=1):
                st.write(f"### Requirement R{idx}: {analysis['Requirement']}")
                st.write(f"**Type:** {analysis['Type']}")
                st.write(f"**Stakeholders:** {analysis['Stakeholders']}")
                st.write(f"**Domain:** {analysis['Domain']}")
                st.write(f"**Defects:** {analysis['Defects']}")
                st.write(f"**Rewritten:** {analysis['Rewritten']}")
                st.write("---")

            # Build the PDF on disk, then stream it through a download button.
            report_path = generate_pdf_report(analyses)
            with open(report_path, "rb") as report_file:
                st.download_button(
                    label="Download PDF Report",
                    data=report_file,
                    file_name="requirements_report.pdf",
                    mime="application/pdf"
                )

    # Surface the unit-test outcome on every rerun of the app.
    st.subheader("Running Unit Tests...")
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(RequirementTestCase)
    DetailedTextTestRunner(verbosity=2).run(suite)
# Run the app only when executed as a script (not when imported for tests).
if __name__ == "__main__":
    main()