JaishnaCodz committed
Commit 4f4b697 · verified · 1 Parent(s): 768c740

Update app.py

Files changed (1)
  1. app.py +7 -19
app.py CHANGED

@@ -6,7 +6,7 @@ from transformers import pipeline
  import re
  import nltk
  from nltk.tokenize import sent_tokenize
- from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
+ from autogen import AssistantAgent, UserProxyAgent

  nltk.download('punkt')

@@ -14,14 +14,6 @@ nltk.download('punkt')
  toxicity_classifier = pipeline("text-classification", model="unitary/toxic-bert")
  summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-6-6")

- # AutoGen configuration
- config_list = [
-     {
-         "model": "local",
-         "api_key": "none"
-     }
- ]
-
  # Define AutoGen Agents (for modularity, but we'll call functions directly)
  user_proxy = UserProxyAgent(
      name="UserProxy",
@@ -32,32 +24,27 @@ user_proxy = UserProxyAgent(

  text_extraction_agent = AssistantAgent(
      name="TextExtractor",
-     system_message="Extracts text from URLs or processes raw text.",
-     llm_config={"config_list": config_list}
+     system_message="Extracts text from URLs or processes raw text."
  )

  grammar_check_agent = AssistantAgent(
      name="GrammarChecker",
-     system_message="Identifies spelling and grammar errors using LanguageTool.",
-     llm_config={"config_list": config_list}
+     system_message="Identifies spelling and grammar errors using LanguageTool."
  )

  sensitive_content_agent = AssistantAgent(
      name="SensitiveContentDetector",
-     system_message="Detects toxic or sensitive content (e.g., racism, gender bias).",
-     llm_config={"config_list": config_list}
+     system_message="Detects toxic or sensitive content (e.g., racism, gender bias)."
  )

  suggestion_agent = AssistantAgent(
      name="SuggestionGenerator",
-     system_message="Generates suggestions to fix grammar and rephrase sensitive content.",
-     llm_config={"config_list": config_list}
+     system_message="Generates suggestions to fix grammar and rephrase sensitive content."
  )

  coordinator_agent = AssistantAgent(
      name="Coordinator",
-     system_message="Combines results, highlights issues, and formats outputs.",
-     llm_config={"config_list": config_list}
+     system_message="Combines results, highlights issues, and formats outputs."
  )

  # Task functions
@@ -141,6 +128,7 @@ def review_blog(input_type, text_input, url_input):

      # Step 1: Text Extraction
      text = extract_text(input_type, text_input, url_input)
+     print(f"Processed text: {text}")  # Debug print to check text processing
      if text.startswith("Error"):
          return text, "", []
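Net effect of the change: the dummy config_list ("model": "local", "api_key": "none") and every llm_config argument are removed, so the AutoGen agents are purely declarative labels while the transformers pipelines and task functions do the real work, as the inline comment already notes. If the no-LLM intent should be made explicit, pyautogen also accepts llm_config=False; the sketch below shows that variant as an alternative, not what this commit does, and the UserProxy arguments are assumptions since the diff truncates that block.

from autogen import AssistantAgent, UserProxyAgent

# Sketch only: same agents as app.py, with LLM calls explicitly disabled.
# llm_config=False and the UserProxy settings are assumptions, not part of the commit.
user_proxy = UserProxyAgent(
    name="UserProxy",
    human_input_mode="NEVER",
    code_execution_config=False,
    llm_config=False,
)

text_extraction_agent = AssistantAgent(
    name="TextExtractor",
    system_message="Extracts text from URLs or processes raw text.",
    llm_config=False,  # agent kept only for modular structure; functions are called directly
)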
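Separately, the new print in review_blog only surfaces whatever extract_text returned before the models run. A quick standalone check of the two pipelines named in the diff (a sketch with an invented sample string, independent of the Gradio app) could look like:

from transformers import pipeline

toxicity_classifier = pipeline("text-classification", model="unitary/toxic-bert")
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-6-6")

sample = "This short, friendly paragraph exists only to exercise both models locally."
print(toxicity_classifier(sample))  # list of {'label': ..., 'score': ...} dicts
print(summarizer(sample, max_length=30, min_length=5, do_sample=False))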