Mridul2003 committed (verified)
Commit 2e85bfc · 1 Parent(s): f260ed0

Upload llm_text_filter.py

Files changed (1)
  1. services/llm_text_filter.py +40 -0
services/llm_text_filter.py ADDED
@@ -0,0 +1,40 @@
+ import google.generativeai as genai
+ from dotenv import load_dotenv
+ import os
+ import json
+
+ load_dotenv()
+ api_key = os.getenv("GOOGLE_API_KEY")
+
+ class ArticleClassifier:
+     def __init__(self, article):
+         self.article = article
+
+     def classify(self):
+         prompt = f"""You are an AI assistant. Your task is to classify the article on the labels: toxic, severe_toxic, obscene, threat, insult, identity_hate.
+ Output Format (JSON only, no explanation):
+ {{
+     "toxic": float,
+     "severe_toxic": float,
+     "obscene": float,
+     "threat": float,
+     "insult": float,
+     "identity_hate": float
+ }}
+ ONLY output the JSON object. Do NOT include any explanation, thoughts, or extra text.
+ Given article: {self.article}
+ """
+         genai.configure(api_key=api_key)
+         model = genai.GenerativeModel('gemini-1.5-flash')
+         response = model.generate_content(prompt)
+         # Strip any markdown code fences the model wraps around the JSON
+         text = response.text.strip().replace("```json", "").replace("```", "").strip()
+         try:
+             result = json.loads(text)
+         except json.JSONDecodeError:
+             # Parsing failed: surface the raw output and do not mark the article safe
+             return {"error": "Invalid JSON format", "raw": text, "safe": False}
+         # Safe only when every label score falls below the 0.5 threshold
+         result["safe"] = all(
+             result.get(label, 0) < 0.5
+             for label in ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
+         )
+         return result
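
For context, a minimal usage sketch of the uploaded module (not part of the commit; the article text is a placeholder, and a .env file providing GOOGLE_API_KEY is assumed):

# Assumes this runs from the project root, with services/llm_text_filter.py on the path
# and a .env file containing GOOGLE_API_KEY=<your key>.
from services.llm_text_filter import ArticleClassifier

classifier = ArticleClassifier("Example article text to be screened.")
scores = classifier.classify()
print(scores)
# Expected shape: {"toxic": 0.02, ..., "identity_hate": 0.01, "safe": True},
# or {"error": "...", "raw": "...", "safe": False} if the model output was not valid JSON.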