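# Scratchpad of Streamlit + Hugging Face experiments. Earlier attempts are kept
# below as commented-out blocks; only the BlenderBot chat app at the bottom runs.

# --- Experiment 1 (commented out): sentiment-analysis / summarization UI ---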
# import streamlit as st
# from transformers import pipeline

# pipe = pipeline("sentiment-analysis")
# summarizer = pipeline("summarization")  # was never defined, but is used below
# col1, col2 = st.columns(2)

# with col1:
#     x = st.button("Sentiment Analysis")
# with col2:
#     y = st.button("Text Summarization")

# if x:
#     t = st.text_input("Enter the Text")
#     st.write(pipe(t))
# if y:
#     t1 = st.text_input("Enter the Text for Summarization")
#     st.write(summarizer(t1))

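# --- Experiment 2 (commented out): insurance-domain BERT embeddings, then a summarization pipeline ---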
# from transformers import AutoTokenizer, AutoModel, pipeline
# import streamlit as st

# tokenizer = AutoTokenizer.from_pretrained("llmware/industry-bert-insurance-v0.1")
# model = AutoModel.from_pretrained("llmware/industry-bert-insurance-v0.1")

# # Use a pipeline as a high-level helper
# # pipe = pipeline("feature-extraction")

# t = st.text_input("Enter the Text")
# pipe = pipeline("summarization")
# st.write(pipe(t))


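# --- Experiment 3 (commented out): CTGAN synthetic-data generator (ydata_synthetic) with CSV download ---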
# import pandas as pd
# import numpy as np
# import streamlit as st
# from os import getcwd
# from ydata_synthetic.synthesizers.regular import RegularSynthesizer
# from ydata_synthetic.synthesizers import ModelParameters, TrainParameters

# text_file = st.file_uploader("Upload the Data File")
# st.write("-------------------------")

# if text_file is not None:
#     df = pd.read_csv(text_file)
#     dd_list = df.columns
#     cat_cols = st.multiselect("Select the Categorical Columns", dd_list)
#     num_cols = st.multiselect("Select the Numerical Columns", dd_list)
#     Output_file = st.text_input('Enter Output File Name')
#     s = st.number_input('Enter the Sample Size', value=1000)  # positional 1000 was min_value, not the default
#     OP = Output_file + '.csv'
#     sub = st.button('Submit')
#     if sub:
#         batch_size = 50
#         epochs = 3
#         learning_rate = 2e-4
#         beta_1 = 0.5
#         beta_2 = 0.9

#         ctgan_args = ModelParameters(batch_size=batch_size,
#                                      lr=learning_rate,
#                                      betas=(beta_1, beta_2))

#         train_args = TrainParameters(epochs=epochs)
#         synth = RegularSynthesizer(modelname='ctgan', model_parameters=ctgan_args)
#         synth.fit(data=df, train_arguments=train_args, num_cols=num_cols, cat_cols=cat_cols)
#         df_syn = synth.sample(s)
#         df_syn.to_csv(OP)
#         c = getcwd() + '/' + OP
#         with open(c, "rb") as file:
#             st.download_button(label=':blue[Download]', data=file, file_name=OP, mime="text/csv")  # was image/png for a CSV
#         st.success("Thanks for using the app !!!")

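# --- Experiment 4 (commented out): insurance QA with a phi-2 LoRA fine-tune ---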
# import torch
# import streamlit as st
# from transformers import AutoModelForCausalLM, AutoTokenizer

# # torch.set_default_device("cuda")  # enable when a GPU is available

# model = AutoModelForCausalLM.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", torch_dtype="auto", trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", trust_remote_code=True)
# i = st.text_input('Prompt', 'What Does Basic Homeowners Insurance Cover?')
# # Instruction-style prompt variant that was also tried:
# # inputs = tokenizer('''### Instruction: What Does Basic Homeowners Insurance Cover?\n### Response: ''', return_tensors="pt", return_attention_mask=False)
# inputs = tokenizer(i, return_tensors="pt", return_attention_mask=False)
# outputs = model.generate(**inputs, max_length=1024)
# text = tokenizer.batch_decode(outputs)[0]
# st.write(text)  # print() only reaches the console, not the Streamlit page

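# --- Active app: minimal BlenderBot chat ---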
import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "facebook/blenderbot-400M-distill"

model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Streamlit reruns the whole script on every interaction, so a module-level
# list would be reset each time; keep the chat history in st.session_state.
if "history" not in st.session_state:
    st.session_state.history = []

def chat():
    user_input = st.text_input("Enter your message")  # st.input() does not exist
    if not user_input:
        return None
    # Feed the running conversation plus the new message as a single string.
    context = "\n".join(st.session_state.history + [user_input])
    inputs = tokenizer(context, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs, max_length=60)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    st.session_state.history.append(user_input)
    st.session_state.history.append(response)
    st.write(response)
    return response

if __name__ == "__main__":
    chat()
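
# To try the app locally (assuming this file is saved as app.py):
#   streamlit run app.py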