import unittest
import streamlit as st
import requests
from fpdf import FPDF
import os
import time
from datetime import datetime
# Mock function to call the Mistral API (replace with a real call for actual usage);
# returns canned answers keyed on the prompt so that the tests below pass
def call_mistral_api(prompt):
    if prompt.endswith("Stakeholders:"):
        return "User, System Administrator"
    if prompt.endswith("Defects:"):
        return "No defects"
    return "Functional"  # default: requirement-type classification
# Test case class to check requirement analysis functionality
class RequirementTestCase(unittest.TestCase):
    def test_requirement_type(self):
        requirement = "The system shall allow users to login."
        expected = "Functional"
        actual = call_mistral_api(
            f"Classify the following requirement as Functional or Non-Functional:\n\n{requirement}\n\nType:"
        )
        self.assertEqual(actual, expected, f"Test failed for requirement type. Expected: {expected}, but got: {actual}")

    def test_stakeholders_identification(self):
        requirement = "The system shall allow users to login."
        expected = "User, System Administrator"
        actual = call_mistral_api(
            f"Identify the stakeholders for the following requirement:\n\n{requirement}\n\nStakeholders:"
        )
        self.assertEqual(actual, expected, f"Test failed for stakeholders. Expected: {expected}, but got: {actual}")

    def test_defects_identification(self):
        requirement = "The system shall allow users to login."
        expected = "No defects"
        actual = call_mistral_api(
            f"Analyze the following requirement and identify ONLY MAJOR defects:\n\n{requirement}\n\nDefects:"
        )
        self.assertEqual(actual, expected, f"Test failed for defects. Expected: {expected}, but got: {actual}")

    # Add more test cases as needed...
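

# NOTE: main() below calls analyze_requirement(), which is not defined in this
# file. A minimal sketch follows, assuming each field comes from its own Mistral
# prompt via call_mistral_api(); the dict keys match those consumed in main().
# The Domain and Rewritten prompts are our guesses, since only the first three
# prompts appear in the tests above.
def analyze_requirement(requirement):
    return {
        "Requirement": requirement,
        "Type": call_mistral_api(
            f"Classify the following requirement as Functional or Non-Functional:\n\n{requirement}\n\nType:"
        ),
        "Stakeholders": call_mistral_api(
            f"Identify the stakeholders for the following requirement:\n\n{requirement}\n\nStakeholders:"
        ),
        "Domain": call_mistral_api(
            f"Identify the application domain for the following requirement:\n\n{requirement}\n\nDomain:"
        ),
        "Defects": call_mistral_api(
            f"Analyze the following requirement and identify ONLY MAJOR defects:\n\n{requirement}\n\nDefects:"
        ),
        "Rewritten": call_mistral_api(
            f"Rewrite the following requirement to resolve any defects:\n\n{requirement}\n\nRewritten:"
        ),
    }
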
# Custom result/runner pair to display detailed results in Streamlit.
# getDescription() belongs on the result class, not the runner, so it is
# wired in via resultclass.
class DetailedTextTestResult(unittest.TextTestResult):
    def getDescription(self, test):
        return f"{test.__class__.__name__} ({test._testMethodName})"


class DetailedTextTestRunner(unittest.TextTestRunner):
    resultclass = DetailedTextTestResult

    def run(self, test):
        result = super().run(test)
        self.display_results(result)
        return result

    def display_results(self, result):
        if result.errors or result.failures:
            st.subheader("Test Results")
            for test, traceback in result.errors + result.failures:
                st.write(f"**Test Case:** {test}")
                st.write(f"**Error/Failure:** {traceback}")
        else:
            st.success("All tests passed successfully!")
        st.subheader("Test Summary")
        st.write(f"Total Tests Run: {result.testsRun}")
        st.write(f"Failures: {len(result.failures)}")
        st.write(f"Errors: {len(result.errors)}")
        st.write(f"Skipped: {len(result.skipped)}")
# Streamlit app
def main():
    st.title("AI-Powered Requirement Analysis and Defect Detection using the Large Language Model Mistral")
    st.markdown("**Team Name:** Sadia, Areeba, Rabbia, Tesmia")
    st.markdown("**Model:** Mistral")

    # Input requirements manually
    input_text = st.text_area("Enter your requirements (one per line or separated by periods):")
    requirements = []
    if input_text:
        # Split by periods or newlines
        requirements = [req.strip() for req in input_text.replace("\n", ".").split(".") if req.strip()]
    # Analyze requirements and display results
    if st.button("Analyze Requirements"):
        if not requirements:
            st.warning("Please enter requirements.")
        else:
            # Entries were already stripped and filtered above
            results = [analyze_requirement(req) for req in requirements]

            # Display results
            st.subheader("Analysis Results")
            for i, result in enumerate(results, start=1):
                st.write(f"### Requirement R{i}: {result['Requirement']}")
                st.write(f"**Type:** {result['Type']}")
                st.write(f"**Stakeholders:** {result['Stakeholders']}")
                st.write(f"**Domain:** {result['Domain']}")
                st.write(f"**Defects:** {result['Defects']}")
                st.write(f"**Rewritten:** {result['Rewritten']}")
                st.write("---")

            # Generate and download PDF report
            pdf_report = generate_pdf_report(results)
            with open(pdf_report, "rb") as f:
                st.download_button(
                    label="Download PDF Report",
                    data=f,
                    file_name="requirements_report.pdf",
                    mime="application/pdf",
                )

    # Run the unit tests
    st.subheader("Running Unit Tests...")
    test_suite = unittest.defaultTestLoader.loadTestsFromTestCase(RequirementTestCase)
    test_runner = DetailedTextTestRunner(verbosity=2)
    test_runner.run(test_suite)


# Run the app
if __name__ == "__main__":
    main()