Upload 38 files
- Read +0 -0
- app.py +6 -1
- models/.DS_Store +0 -0
- models/fetch.py +2 -2
- models/text/deepinfra/main.py +99 -0
- models/text/together/main.py +1 -1
- runchecks.py +38 -0
- test.py +54 -46
- test2.py +40 -40
- test3.py +29 -0
- tools/__pycache__/fetch.cpython-311.pyc +0 -0
- tools/fetch.py +2 -0
- tools/googlesearch/__pycache__/gettyimages.cpython-311.pyc +0 -0
Read
ADDED
File without changes
app.py
CHANGED
@@ -5,6 +5,7 @@ from models.text.together.main import TogetherAPI
 from models.text.vercel.main import XaiAPI, GroqAPI, DeepinfraAPI
 from models.image.vercel.main import FalAPI
 from models.image.together.main import TogetherImageAPI
+from models.text.deepinfra.main import OFFDeepInfraAPI
 from models.fetch import FetchModel
 from auth.key import NimbusAuthKey
 from tools.googlesearch.main import search
@@ -78,7 +79,8 @@ async def get_models():
             'together': TogetherAPI().get_model_list(),
             'xai': XaiAPI().get_model_list(),
             'groq': GroqAPI().get_model_list(),
-            'deepinfra': DeepinfraAPI().get_model_list()
+            'deepinfra': DeepinfraAPI().get_model_list(),
+            "official_deepinfra": OFFDeepInfraAPI().get_model_list()
         },
         'image': {
             'fal': FalAPI().get_model_list(),
@@ -162,6 +164,7 @@ async def text_generate(request: Request):
     xai_models = XaiAPI().get_model_list()
     groq_models = GroqAPI().get_model_list()
     deepinfra_models = DeepinfraAPI().get_model_list()
+    official_deepinfra_models = OFFDeepInfraAPI().get_model_list()

     if model in together_models:
         streamModel = TogetherAPI()
@@ -171,6 +174,8 @@ async def text_generate(request: Request):
         streamModel = GroqAPI()
     elif model in deepinfra_models:
         streamModel = DeepinfraAPI()
+    elif model in official_deepinfra_models:
+        streamModel = OFFDeepInfraAPI()
     else:
         return {"error": f"Model '{model}' is not supported."}
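The routing added here extends an elif chain: each provider's get_model_list() is queried and the first match wins. As more providers accumulate, the same dispatch can be expressed as a lookup over a list of provider classes. The sketch below is illustrative only and not part of this commit; PROVIDERS and resolve_stream_model are hypothetical names, while the imports and get_model_list() interface are the ones shown in the diff above.

```python
# Illustrative table-driven alternative to the elif chain in text_generate (not in the commit).
from models.text.together.main import TogetherAPI
from models.text.vercel.main import XaiAPI, GroqAPI, DeepinfraAPI
from models.text.deepinfra.main import OFFDeepInfraAPI

# Hypothetical registry; order matters because the first provider listing the model wins.
PROVIDERS = [TogetherAPI, XaiAPI, GroqAPI, DeepinfraAPI, OFFDeepInfraAPI]

def resolve_stream_model(model: str):
    """Return an instance of the first provider whose model list contains `model`, else None."""
    for provider_cls in PROVIDERS:
        if model in provider_cls().get_model_list():
            return provider_cls()
    return None
```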
models/.DS_Store
CHANGED
Binary files a/models/.DS_Store and b/models/.DS_Store differ
models/fetch.py
CHANGED
@@ -107,11 +107,11 @@ class FetchModel:
             model = random.choice(options)
             return model
         elif id == "llama-3.3-70b":
-            options = ['meta-llama/Llama-3.3-70B-Instruct-Turbo', 'llama-3.3-70b-versatile']
+            options = ['meta-llama/Llama-3.3-70B-Instruct-Turbo', 'llama-3.3-70b-versatile', 'meta-llama/Llama-3.3-70B-Instruct-Turbo']
             model = random.choice(options)
             return model
         elif id == "deepseek-r1":
-            options = ['deepseek-ai/DeepSeek-R1', 'deepseek-r1-distill-llama-70b']
+            options = ['deepseek-ai/DeepSeek-R1', 'deepseek-r1-distill-llama-70b', 'deepseek-ai/DeepSeek-R1-Turbo', 'deepseek-ai/DeepSeek-R1-Distill-Llama-70B', 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B']
             model = random.choice(options)
             return model
         elif id == "deepseek-v3":
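Note that the new llama-3.3-70b options list contains 'meta-llama/Llama-3.3-70B-Instruct-Turbo' twice, so random.choice returns it with probability 2/3 instead of splitting evenly with 'llama-3.3-70b-versatile'. If that weighting is intentional, the same bias can be stated explicitly; the snippet below is a standalone sketch, not part of the commit.

```python
import random

# Equivalent 2:1 selection with the bias made explicit instead of duplicating the list entry.
options = ['meta-llama/Llama-3.3-70B-Instruct-Turbo', 'llama-3.3-70b-versatile']
model = random.choices(options, weights=[2, 1], k=1)[0]
```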
models/text/deepinfra/main.py
ADDED
@@ -0,0 +1,99 @@
import random
import httpx
import asyncio
import json

class OFFDeepInfraAPI:

    headers = {
        'Accept-Language': 'en-US,en;q=0.9,ja;q=0.8',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json',
        'Origin': 'https://deepinfra.com',
        'Referer': 'https://deepinfra.com/',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-site',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Mobile Safari/537.36',
        'X-Deepinfra-Source': 'web-embed',
        'accept': 'text/event-stream',
        'sec-ch-ua': '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        'sec-ch-ua-mobile': '?1',
        'sec-ch-ua-platform': '"Android"',
    }

    def __init__(self):
        self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"

    def get_model_list(self):
        models = ['meta-llama/Llama-3.3-70B-Instruct-Turbo', 'deepseek-ai/DeepSeek-R1-Turbo', 'deepseek-ai/DeepSeek-R1-Distill-Llama-70B', 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B']
        return models


    async def generate(self, json_data: dict):
        messages = json_data

        request_data = {
            "id": "".join(random.choices("0123456789abcdef", k=16)),
            "messages": messages,
            "selectedModel": json_data.get("model", "deepseek-r1-distill-llama-70b"),
        }

        chunk_id = "chipling-deepinfraoff-" + "".join(random.choices("0123456789abcdef", k=32))
        created = int(asyncio.get_event_loop().time())
        total_tokens = 0

        try:
            async with httpx.AsyncClient(timeout=None) as client:
                async with client.stream(
                    "POST",
                    "https://api.deepinfra.com/v1/openai/chat/completions",
                    headers=OFFDeepInfraAPI.headers,
                    json=request_data
                ) as request_ctx:
                    print(request_ctx.status_code)
                    if request_ctx.status_code == 200:
                        async for line in request_ctx.aiter_lines():
                            if line:
                                if line.startswith('0:'):
                                    # Clean up the text and properly escape JSON characters
                                    text = line[2:].strip()
                                    if text.startswith('"') and text.endswith('"'):
                                        text = text[1:-1]
                                    text = text.replace('\\n', '\n').replace('\\', '')

                                    response = {
                                        "id": chunk_id,
                                        "object": "chat.completion.chunk",
                                        "created": created,
                                        "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
                                        "choices": [{
                                            "index": 0,
                                            "text": text,
                                            "logprobs": None,
                                            "finish_reason": None
                                        }],
                                        "usage": None
                                    }
                                    yield f"data: {json.dumps(response)}\n\n"
                                    total_tokens += 1
                                elif line.startswith('d:'):
                                    final = {
                                        "id": chunk_id,
                                        "object": "chat.completion.chunk",
                                        "created": created,
                                        "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
                                        "choices": [],
                                        "usage": {
                                            "prompt_tokens": len(messages),
                                            "completion_tokens": total_tokens,
                                            "total_tokens": len(messages) + total_tokens
                                        }
                                    }
                                    yield f"data: {json.dumps(final)}\n\n"
                                    yield "data: [DONE]\n\n"
                                    return
                    else:
                        yield f"data: [Unexpected status code: {request_ctx.status_code}]\n\n"
        except Exception as e:
            yield f"data: [Connection error: {str(e)}]\n\n"
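OFFDeepInfraAPI.generate is an async generator that yields pre-formatted SSE "data: ..." chunks. A minimal local driver might look like the sketch below; it is not part of the commit, and it assumes generate() receives the parsed request body as a dict, since the method both forwards json_data as the "messages" field and reads "model" from it. The payload values are placeholders.

```python
# Sketch: drive the new OFFDeepInfraAPI async generator locally (not in the commit).
import asyncio
from models.text.deepinfra.main import OFFDeepInfraAPI

async def main():
    # Placeholder request body; generate() forwards this dict as "messages"
    # and picks the model via json_data.get("model", ...).
    payload = {
        "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    }
    async for chunk in OFFDeepInfraAPI().generate(payload):
        print(chunk, end="")  # each yielded chunk is already an SSE "data: ..." line

if __name__ == "__main__":
    asyncio.run(main())
```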
models/text/together/main.py
CHANGED
@@ -40,7 +40,7 @@ class TogetherAPI:
         self.base_url = "https://api.together.ai/inference"

     def get_model_list(self):
-        models = ['meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8', 'meta-llama/Llama-4-Scout-17B-16E-Instruct', 'deepseek-ai/DeepSeek-R1', 'deepseek-ai/DeepSeek-V3', 'Qwen/Qwen2.5-VL-72B-Instruct', 'google/gemma-2-27b-it']
+        models = ['meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8', 'meta-llama/Llama-4-Scout-17B-16E-Instruct', 'deepseek-ai/DeepSeek-R1', 'deepseek-ai/DeepSeek-V3', 'Qwen/Qwen2.5-VL-72B-Instruct', 'google/gemma-2-27b-it', 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo']
         return models

     async def generate(self, json_data: dict):
runchecks.py
ADDED
@@ -0,0 +1,38 @@
import json
import requests


def check(loop, model, messages):
    model = "deepseek-r1"

    url = "https://chipling-api.hf.space/api/v1/text/generate"

    payload = {
        "messages": messages,
        "model": model,
        "api_key":"test"
    }

    response = requests.post(url, json=payload, stream=True)

    if response.status_code == 200:
        for line in response.iter_lines():
            if line:
                print(line)
                decoded_line = line.decode('utf-8')
                if decoded_line.startswith('data: [DONE]'):
                    break
                elif decoded_line.startswith('data: '):
                    try:
                        json_data = json.loads(decoded_line[6:])
                        if json_data["choices"] and "text" in json_data["choices"][0]:
                            print(json_data["choices"][0]["text"], end='')
                    except json.JSONDecodeError:
                        continue
    else:
        print(f"Request failed with status code {response.status_code}")
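Note that check(loop, model, messages) ignores its first two arguments: loop is never used and model is immediately overwritten with "deepseek-r1". A call therefore only needs a message list, as in this illustrative invocation (the values are placeholders, not part of the commit):

```python
# Hypothetical invocation of runchecks.check(); loop and model are ignored
# because check() hard-codes model = "deepseek-r1" internally.
from runchecks import check

messages = [{"role": "user", "content": "hello"}]
check(None, "deepseek-r1", messages)
```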
test.py
CHANGED
@@ -1,53 +1,61 @@
The previous contents, an image-generation request against /api/v1/generate-images, were removed and survive only as the commented block at the bottom; the new test.py streams a text-generation request:

import requests
import json

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "Depending on this image Create tell me a image generation prompt to create this:"
            },
        ]
    },
]

model = "deepseek-r1"

url = "https://chipling-api.hf.space/api/v1/text/generate"

payload = {
    "messages": messages,
    "model": model,
    "api_key":"test"
}

response = requests.post(url, json=payload, stream=True)

if response.status_code == 200:
    for line in response.iter_lines():
        if line:
            print(line)
            decoded_line = line.decode('utf-8')
            if decoded_line.startswith('data: [DONE]'):
                break
            elif decoded_line.startswith('data: '):
                try:
                    json_data = json.loads(decoded_line[6:])
                    if json_data["choices"] and "text" in json_data["choices"][0]:
                        print(json_data["choices"][0]["text"], end='')
                except json.JSONDecodeError:
                    continue
else:
    print(f"Request failed with status code {response.status_code}")


# import requests

# url = 'https://chipling-api.hf.space/api/v1/generate-images'

# query = {
#     'prompt': 'a beautiful landscape',
#     'model': 'fal-ai/fast-sdxl',
#     'api_key': 'your_api_key_here',
# }

# response = requests.post(url, json=query)
# if response.status_code == 200:
#     data = response.json()['image']
#     print(data)
# else:
#     print(f"Error: {response.status_code} - {response.text}")
test2.py
CHANGED
@@ -1,50 +1,50 @@
The previous contents, a google-search request against /api/v1/tools/google-search, were removed and survive only as the commented block at the bottom; the new test2.py streams a text-generation request against a local server:

import requests
import json

messages = [
    {"role": "user", "content": "helo"},
    {"role": "assistant", "content": "Hello! How can I assist you today?"},
    {"role": "user", "content": "who are you and give me a breif description of who created you "}
]

model = "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo"

url = "http://127.0.0.1:8000/api/v1/text/generate"

payload = {
    "messages": messages,
    "model": model,
    "api_key": ""
}

response = requests.post(url, json=payload, stream=True)

if response.status_code == 200:
    for line in response.iter_lines():
        if line:
            print(line)
            decoded_line = line.decode('utf-8')
            if decoded_line.startswith('data: [DONE]'):
                break
            elif decoded_line.startswith('data: '):
                try:
                    json_data = json.loads(decoded_line[6:])
                    if json_data["choices"] and "text" in json_data["choices"][0]:
                        print(json_data["choices"][0]["text"], end='')
                except json.JSONDecodeError:
                    continue
else:
    print(f"Request failed with status code {response.status_code}")

# import requests

# url = "http://127.0.0.1:8000/api/v1/tools/google-search"

# payload = {
#     "query": "who are you and give me a breif description of who created you",
#     "num_results": 5,
# }

# response = requests.get(url, json=payload)

# print(response.json())
test3.py
ADDED
@@ -0,0 +1,29 @@
{
    "model": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Depending on this image Create tell me a image generation prompt to create this:"
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://together-ai-uploaded-user-images-prod.s3.us-west-2.amazonaws.com/7106a4de-0635-4c07-a27e-f2678db81227.webp?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=ASIAYWZW4HVCFP2GC22O%2F20250502%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Date=20250502T213211Z&X-Amz-Expires=3600&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEEYaCXVzLXdlc3QtMiJIMEYCIQDrhm91rv4lvFFpewlhfHc5eQo0ecttb0YYfpdJle%2BrBQIhAMUiHP2IlXFjlFGZXGKoDPSpw%2BfvhZUqIre9xvIb1s7RKo8FCN%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEQABoMNTk4NzI2MTYzNzgwIgy5YGbDhU%2BNUpTvRZ8q4wTeYh%2FUIV5VVQehrmbFs5D%2FBxsWjCnY2qHMmn3OzHjPPzLWVHGDfIDvOTV%2FMgmUczCBts0vN6qaRFWw8URe066v%2B7o9wOd5Q6PTGThr9%2BGZVm5eyJsDVV0YBpz8jdCB3lzDcKnXu9L075MKP2macxHZLA%2BqVqHG9V6u77MH3C7jHgEkA13cBZikPWOTYIdQoJPaICLz1svZUixikAcVHpiqn4iXB3D12d8BPr1HDXf1JwYzwcvM3VoBh2KM7I3EwH%2Bo6%2BR2b0TwyzoUprH5qStp8Bh1f9fRlPvelnoEIrgu5ExTzFf4A%2B3wNXnQC4dJjTAr9Xc4frOXZCXUm1aCTBwLcBa%2B70AHyWsiSJps34MwwUyJxPlaULgbrZhTQCTreNcWT6pounWBjVPAnsvkJJqVYJ6J7%2FvGKmoi%2Bh6%2F%2Boo2MlhPH9FoeWel9rOCPpDv0jN4%2BYaXgAp82JaSfIO4oF7Gwa1co7hARReAgy7ZJ9T03IzAqFX20%2FDdDAmrwSQhnPcKkHwhQAtbWANPR69mvH6XDyJ1U0azsXU5MURVDaLqe%2FzAsTqmFHFzF6r2Qc2RhdRmU98f4rxx2a3fKhCNkCfPI8K0ghjN9L%2BIa8xaZcd2B6YQQ6IcYgro2%2BlBXLtC%2FJ%2FipKxx2msTz3JpNvbA9wE7tURi7nsta2cn4mNNXgALWSznuUCB5%2F%2BSYePKxdGS1%2BqVWNl89VdibVP8RGILmysKDJa8xJp9RALA8GZgCBlXoD6hQKxvKeMvla4s8wRSSu1mRyvzlWLIGNucS0kFGlp%2BHtQ2QZlpyFeJ4gZ68CiOSdN5BTDb89TABjqZAd7%2FMVEES0SB%2Faqz2k7MLmmOlxYimpU%2B1A%2FEVUzTNMM4xiQjBWcguYMantTXqvmklQkQXeVMFg48LQpz7Q2uGo0mNGwBg%2FJDRD8bX76Nge3fhs%2FDilM8tNN%2FKBI%2BEw8iIn%2BJl6j2nNqCgLWcUmcgI9ErMbqxE93Y4mDegI2FnElXvGo%2FSiQJ9fzBSvWvUdzqkEFdkqjLPtlovw%3D%3D&X-Amz-Signature=0204b843552605e28d4f9481402ae283ad5cb552212d92583f3bc3dab639e7eb&X-Amz-SignedHeaders=host&x-id=GetObject"
                    }
                }
            ]
        },
        {
            "role": "assistant",
            "content": "The image depicts a man standing in front of a carpeted floor, with the words \"Clickbait!\" displayed prominently. The purpose of this image is likely to grab the viewer's attention and entice them to click on a related article or video.\n\nTo create an image generation prompt based on this input, I would suggest the following:\n\n* **Image Description:** A man standing in front of a carpeted floor with the words \"Clickbait!\" written in bold letters.\n* **Key Elements:**\n * Man\n * Carpeted floor\n * Words \"Clickbait!\"\n* **Color Scheme:**\n * Main color palette: Gray, white, and red\n * Accent colors: Black and orange\n* **Style:**\n * Realistic\n * Detailed\n* **Additional Elements:**\n * A red arrow pointing to the words \"Clickbait!\"\n * A black border around the image\n\nWith these details, an AI image generator can create an image that accurately represents the original input."
        },
        {
            "role": "user",
            "content": "nic"
        }
    ],
    "stream": true
}
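Despite its .py extension, test3.py holds a raw JSON request body (note the lowercase true), so it can be loaded with json.load and replayed against the local endpoint used in test2.py. The following is only a sketch under that assumption; the api_key value is a placeholder mirroring the field test.py and test2.py send.

```python
# Sketch: replay the JSON payload stored in test3.py against the local endpoint from test2.py.
import json
import requests

with open("test3.py") as f:
    payload = json.load(f)  # the file contains JSON, not Python source

payload["api_key"] = ""  # placeholder; assumed to match the field used in test.py/test2.py

response = requests.post("http://127.0.0.1:8000/api/v1/text/generate",
                         json=payload, stream=True)
for line in response.iter_lines():
    if line:
        print(line.decode("utf-8"))
```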
tools/__pycache__/fetch.cpython-311.pyc
CHANGED
Binary files a/tools/__pycache__/fetch.cpython-311.pyc and b/tools/__pycache__/fetch.cpython-311.pyc differ
tools/fetch.py
CHANGED
@@ -35,3 +35,5 @@ class Tools:
         ]

         return data
+
+
tools/googlesearch/__pycache__/gettyimages.cpython-311.pyc
CHANGED
Binary files a/tools/googlesearch/__pycache__/gettyimages.cpython-311.pyc and b/tools/googlesearch/__pycache__/gettyimages.cpython-311.pyc differ