File size: 4,593 Bytes
5b744f4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import streamlit as st
import requests
import nltk
from transformers import pipeline
from rake_nltk import Rake
from nltk.corpus import stopwords
from fuzzywuzzy import fuzz
from openai import OpenAI
import os
from dotenv import load_dotenv

# Load environment variables for OpenAI
# (reads a local .env file into os.environ; expects an 'HFSecret' entry)
load_dotenv()

# Initialize OpenAI client for Llama 3 model
# NOTE(review): points the OpenAI SDK at Hugging Face's OpenAI-compatible
# inference endpoint; auth is the HF token from the HFSecret env var.
client = OpenAI(
  base_url="https://api-inference.huggingface.co/v1",
  api_key=os.environ.get('HFSecret')  # Replace with your token
)
# Hugging Face model repo used for all chat-completion calls below.
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"

st.title("Parallel Sentiment Analysis: Transformers vs. Llama 3")

# Preset options for the dropdown; each entry except 'None' is the key of a
# remote .txt file (see `urls`) whose content pre-fills the text area.
# Fix: 'Appreciation' was misspelled 'Apprecitation' in both the label and
# the matching urls key (the remote filename was already spelled correctly).
options = ['None', 'Appreciation Letter', 'Regret Letter', 'Kindness Tale', 'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']

# Create a dropdown menu to select options
selected_option = st.selectbox("Select a preset option", options)

# Raw-text URLs for each preset option; keys must match `options` exactly.
urls = {
    "Appreciation Letter": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
    "Regret Letter": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
    "Kindness Tale": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
    "Lost Melody Tale": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
    "Twitter Example 1": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
    "Twitter Example 2": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
}

# Function to fetch text content based on selected option
def fetch_text_content(selected_option, url_map=None):
    """Return the remote text for *selected_option*, or "" when unavailable.

    Parameters
    ----------
    selected_option : str
        One of the preset option names (a key of the URL map).
    url_map : dict[str, str] | None
        Mapping of option name -> raw-text URL. Defaults to the module-level
        ``urls`` table; overridable for testing or alternate sources.

    Returns
    -------
    str
        The fetched text, or "" if the option is unknown or the HTTP
        request fails (preserves the original "empty text" contract
        instead of crashing the Streamlit app).
    """
    mapping = urls if url_map is None else url_map
    url = mapping.get(selected_option)
    if not url:
        return ""
    try:
        # Timeout keeps the UI from hanging indefinitely on a dead host.
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # treat 4xx/5xx as "no content" too
        return response.text
    except requests.RequestException:
        return ""

# Fetch text content
# jd holds the preset's remote text ("" for 'None'); used to pre-fill the text area.
jd = fetch_text_content(selected_option)

# Download NLTK resources
# (tokenizer + stopword list required by Rake / the keyword extractor below;
# no-ops after the first download)
nltk.download('punkt')
nltk.download('stopwords')

# Initialize transformer sentiment analysis pipeline
# NOTE(review): no model is pinned, so HF's default sentiment model is used —
# this downloads weights on first run.
pipe_sent = pipeline('sentiment-analysis')

# Function to extract keywords
def extract_keywords(text):
    """Extract up to 10 top-ranked RAKE keyword phrases from *text*.

    Near-duplicate phrases (fuzzy ratio > 70) are merged, keeping the
    longest variant. Returns a list of (score, phrase) tuples sorted by
    score, highest first.
    """
    r = Rake()
    r.extract_keywords_from_text(text)
    phrases_with_scores = r.get_ranked_phrases_with_scores()
    stop_words = set(stopwords.words('english'))
    # Drop phrases that are themselves a bare stopword.
    keywords = [
        (score, phrase)
        for score, phrase in phrases_with_scores
        if phrase.lower() not in stop_words
    ]
    keywords.sort(key=lambda x: x[0], reverse=True)
    unique_keywords = []
    seen_phrases = set()   # every phrase examined so far (merge candidates)
    emitted = set()        # phrases already placed in the result
    for score, phrase in keywords:
        if phrase in seen_phrases:
            continue
        similar_phrases = [p for p in seen_phrases if fuzz.ratio(phrase, p) > 70]
        # Merge with near-duplicates by keeping the longest variant.
        merged = max([phrase] + similar_phrases, key=len) if similar_phrases else phrase
        # Fix: previously the same merged phrase could be appended multiple
        # times when several new phrases all merged to one earlier phrase.
        if merged not in emitted:
            unique_keywords.append((score, merged))
            emitted.add(merged)
        seen_phrases.add(phrase)
    return unique_keywords[:10]

# Text input (pre-filled with the selected preset's remote content, if any)
text = st.text_area('Enter the text to analyze', jd)

if st.button("Start Analysis"):
    col1, col2 = st.columns(2)

    # Transformers (Column 1)
    with col1:
        st.header("Transformers Model")
        with st.spinner("Analyzing with Transformers..."):
            out_sentiment = pipe_sent(text)
            sentiment_score = out_sentiment[0]['score']
            sentiment_label = out_sentiment[0]['label']
            sentiment_emoji = '😊' if sentiment_label == 'POSITIVE' else '😞'
            st.write(f"Sentiment Score: {sentiment_score}, Sentiment Label: {sentiment_label.capitalize()} {sentiment_emoji}")

        st.subheader("Keywords")
        keywords = extract_keywords(text)
        st.write([keyword[1] for keyword in keywords])

    # Llama 3 Model (Column 2)
    with col2:
        st.header("Llama 3 Model")
        with st.spinner("Analyzing with Llama 3..."):
            try:
                stream = client.chat.completions.create(
                    model=repo_id,
                    messages=[{"role": "user", "content": text}],
                    temperature=0.5,
                    stream=True,
                    max_tokens=3000
                )
                # Fix: streamed chat-completion chunks are ChatCompletionChunk
                # objects — the text lives at chunk.choices[0].delta.content
                # (which may be None on role/finish chunks). The previous
                # chunk['choices'][0]['text'] raised TypeError on every chunk.
                response = ''.join(
                    chunk.choices[0].delta.content or ''
                    for chunk in stream
                    if chunk.choices
                )
                st.write(response)
            except Exception as e:
                # Surface the actual failure instead of a generic message.
                st.error(f"Error occurred while fetching response from Llama 3: {e}")