Update ff.py
ff.py CHANGED
@@ -29,7 +29,9 @@ g4f.check_version = False # Disable automatic version checking
 #print(g4f.version) # Check version
 print(g4f.Provider.Ails.params)
 
+g4f.debug.version_check = False # Disable automatic version checking
 
+"""
 def res(input_text):
     llm: LLM = G4FLLM(
         model=models.gpt_35_turbo,
@@ -42,9 +44,14 @@ def res(input_text):
     re = llm(input_texxt)
     print(re)
     return re
+"""
 
-
-
+def res(input_text):
+    response = g4f.ChatCompletion.create(
+        model=g4f.models.gpt_35_turbo,
+        messages=[{"role": "user", "content":input_text}],
+    ) # Alternative model setting
+    return response
 
 
 
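
For context, a minimal, self-contained sketch of how the updated code in ff.py would be exercised after this change. It only uses the calls shown in the diff above (g4f.debug.version_check, g4f.ChatCompletion.create, g4f.models.gpt_35_turbo); the example prompt and the __main__ guard are illustrative additions, not part of ff.py.

import g4f

g4f.debug.version_check = False  # Disable automatic version checking

def res(input_text):
    # Call the g4f chat completion API directly instead of the G4FLLM wrapper
    response = g4f.ChatCompletion.create(
        model=g4f.models.gpt_35_turbo,
        messages=[{"role": "user", "content": input_text}],
    )
    return response

if __name__ == "__main__":
    # Illustrative prompt; any user-supplied string works the same way
    print(res("Say hello in one sentence."))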