Update ff.py
ff.py
CHANGED
@@ -4,21 +4,37 @@ import math as matha
 from g4f.Provider import (
 Bard)
 import g4f
+from llama_index.llms.gemini import Gemini
 #from bardapi import Bard as Bd
 import os
 token=os.environ.get("TOKEN")
 toto = os.environ.get("BARD")
-
+safe = [
+    {
+        "category": "HARM_CATEGORY_HARASSMENT",
+        "threshold": "BLOCK_NONE",
+    },
+    {
+        "category": "HARM_CATEGORY_HATE_SPEECH",
+        "threshold": "BLOCK_NONE",
+    },
+    {
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "threshold": "BLOCK_NONE",
+    },
+    {
+        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "threshold": "BLOCK_NONE",
+    },
+]

 #bard = Bd(token=toto)
-
-
-
-
-)
-
-model_name='gemini-pro'
-)
+
+
+os.environ["GOOGLE_API_KEY"] = token
+
+llm = Gemini(model="models/gemini-pro")
+
 model_vision = genai.GenerativeModel('gemini-pro-vision')
 from g4f import Provider, models
 from langchain.llms.base import LLM
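Taken together, the added lines replace what appears to be the old gemini-pro text client (the removed fragments end in model_name='gemini-pro') with llama_index's Gemini wrapper: the four standard harm categories are set to BLOCK_NONE, the TOKEN secret is re-exported as GOOGLE_API_KEY, and a module-level llm handle is created, while genai is still used for the vision model. A minimal standalone sketch of that setup, assuming the llama-index-llms-gemini package is installed and that keyword arguments such as safety_settings are forwarded to the underlying Gemini call:

import os
from llama_index.llms.gemini import Gemini

# All four standard harm categories disabled, mirroring the committed `safe` list.
safe = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

# The commit reuses the TOKEN secret as the Google API key.
os.environ["GOOGLE_API_KEY"] = os.environ["TOKEN"]

llm = Gemini(model="models/gemini-pro")

# Blocking sanity check; llama_index's CompletionResponse exposes the text as .text.
print(llm.complete("Dis bonjour.", safety_settings=safe).text)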
@@ -46,15 +62,14 @@ def rees(input_text):
     return re


-def
-response =
-
-messages=[{"role": "user", "content":input_text}],
-) # Alternative model setting
+async def greet(tt):
+    response = await llm.acomplete(tt,safety_settings=safe)
+    print(response)
     return response



+
 def infer(im):
     im.save("converted.png")
     url = "https://ajax.thehive.ai/api/demo/classify?endpoint=text_recognition"
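greet is a coroutine, since llm.acomplete must be awaited, while the callers patched in the hunks below (gpt, the gpt_francais block, and gpt_hist) are plain synchronous functions; as committed they therefore return the un-awaited coroutine object rather than text. A minimal sketch of bridging the two, assuming no event loop is already running in the calling thread; run_greet is an illustrative helper, not part of the commit:

import asyncio

def run_greet(tt):
    # Drive the async greet() defined above to completion from synchronous code.
    # asyncio.run raises RuntimeError if an event loop is already running in this thread.
    return asyncio.run(greet(tt))

response = run_greet("Explique la photosynthèse en une phrase.")
print(response.text)  # CompletionResponse; str(response) yields the same text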
@@ -92,9 +107,8 @@ def gpt(prompt):
     #answer = res(prompt)

     print(prompt)
-response =
-
-return response.text
+    response = greet(prompt)
+    return response

 def gpt_francais(french_prompt,choix,autheur):

@@ -193,8 +207,8 @@ CONCLUSION
     """answer = rees(haha)
     print(answer)
     return answer"""
-answer =
-return answer
+    answer = greet(haha)
+    return answer


 def gpt_hist(hist_prompt,p_1,p_2,p_3):
@@ -218,9 +232,9 @@ CONCLUSION
 2. question douverture du sujet.
 """

-answer =
+    answer = greet(histt_prompt)
     print(answer)
-return answer
+    return answer



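Two details worth noting in the last hunk: gpt_hist declares hist_prompt but the new call passes histt_prompt, and, as in gpt, the function returns whatever greet hands back, which once awaited is a llama_index CompletionResponse rather than a plain string. A small sketch of normalising that value, with to_text as an illustrative helper and the parameter name taken from the signature:

import asyncio

def to_text(resp):
    # CompletionResponse carries the generated string in .text; fall back to str()
    # for anything else a provider might return.
    return resp.text if hasattr(resp, "text") else str(resp)

answer = to_text(asyncio.run(greet("Rédige une conclusion sur la Résistance.")))
print(answer)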