Upload 36 files

- __pycache__/app.cpython-311.pyc +0 -0
- app.py +18 -1
- models/.DS_Store +0 -0
- models/__pycache__/fetch.cpython-311.pyc +0 -0
- requirements.txt +3 -1
- test.py +37 -36
- test2.py +43 -30
- tools/__pycache__/fetch.cpython-311.pyc +0 -0
- tools/fetch.py +37 -0
- tools/googlesearch/__pycache__/gettyimages.cpython-311.pyc +0 -0
- tools/googlesearch/__pycache__/main.cpython-311.pyc +0 -0
- tools/googlesearch/__pycache__/useragentka.cpython-311.pyc +0 -0
- tools/googlesearch/gettyimages.py +21 -0
- tools/googlesearch/main.py +163 -0
- tools/googlesearch/useragentka.py +20 -0
__pycache__/app.cpython-311.pyc
CHANGED
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ
app.py
CHANGED
@@ -7,6 +7,8 @@ from models.image.vercel.main import FalAPI
 from models.image.together.main import TogetherImageAPI
 from models.fetch import FetchModel
 from auth.key import NimbusAuthKey
+from tools.googlesearch.main import search
+from tools.fetch import Tools
 
 app = FastAPI()
 
@@ -177,4 +179,19 @@ async def text_generate(request: Request):
         return StreamingResponse(response, media_type="text/event-stream")
 
     except Exception as e:
-        return {"error": f"An error occurred: {str(e)}"}
+        return {"error": f"An error occurred: {str(e)}"}
+
+@app.get('/api/v1/tools')
+async def tools():
+    return Tools.fetch_tools()
+
+
+@app.get('/api/v1/tools/google-search')
+async def searchtool(request: Request):
+    data = await request.json()
+    query = data['query']
+    num_results = data.get('num_results', 10)
+
+    response = search(term=query, num_results=num_results, advanced=True, unique=False)
+
+    return response
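For orientation, the two new routes can be exercised with a small client sketch like the one below (the host and port are illustrative; test2.py in this commit uses the same local address). Note that the google-search route is declared as GET but reads its parameters from a JSON body, so the client has to send one.

import requests

base = "http://127.0.0.1:8000"

# List the tool manifest served by the new /api/v1/tools route.
print(requests.get(f"{base}/api/v1/tools").json())

# Call the Google search tool; the handler pulls query/num_results out of the JSON body.
resp = requests.get(f"{base}/api/v1/tools/google-search",
                    json={"query": "chipling", "num_results": 3})
print(resp.json())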
models/.DS_Store
CHANGED
Binary files a/models/.DS_Store and b/models/.DS_Store differ
models/__pycache__/fetch.cpython-311.pyc
CHANGED
Binary files a/models/__pycache__/fetch.cpython-311.pyc and b/models/__pycache__/fetch.cpython-311.pyc differ
requirements.txt
CHANGED
@@ -7,4 +7,6 @@ asyncio
 jinja2
 aiofiles
 curl_cffi
-firebase_admin
+firebase_admin
+beautifulsoup4
+curl_cffi

test.py
CHANGED
@@ -1,38 +1,38 @@
-import requests
-import json
-
-messages = [
-    {"role": "user", "content": "helo"},
-    {"role": "assistant", "content": "Hello! How can I assist you today?"},
-    {"role": "user", "content": "who are you and give me a breif description of who created you "}
-]
-
-model = "Qwen/Qwen2.5-72B-Instruct"
-
-url = "https://chipling-api.hf.space/api/v1/generate"
-
-payload = {
-    "messages": messages,
-    "model": model
-}
-
-response = requests.post(url, json=payload, stream=True)
-
-if response.status_code == 200:
-    for line in response.iter_lines():
-        if line:
-            decoded_line = line.decode('utf-8')
-            if decoded_line.startswith('data: [DONE]'):
-                break
-            elif decoded_line.startswith('data: '):
-                try:
-                    json_data = json.loads(decoded_line[6:])
-                    if json_data["choices"] and "text" in json_data["choices"][0]:
-                        print(json_data["choices"][0]["text"], end='')
-                except json.JSONDecodeError:
-                    continue
-else:
-    print(f"Request failed with status code {response.status_code}")
+# import requests
+# import json
+
+# messages = [
+#     {"role": "user", "content": "helo"},
+#     {"role": "assistant", "content": "Hello! How can I assist you today?"},
+#     {"role": "user", "content": "who are you and give me a breif description of who created you "}
+# ]
+
+# model = "Qwen/Qwen2.5-72B-Instruct"
+
+# url = "https://chipling-api.hf.space/api/v1/generate"
+
+# payload = {
+#     "messages": messages,
+#     "model": model
+# }
+
+# response = requests.post(url, json=payload, stream=True)
+
+# if response.status_code == 200:
+#     for line in response.iter_lines():
+#         if line:
+#             decoded_line = line.decode('utf-8')
+#             if decoded_line.startswith('data: [DONE]'):
+#                 break
+#             elif decoded_line.startswith('data: '):
+#                 try:
+#                     json_data = json.loads(decoded_line[6:])
+#                     if json_data["choices"] and "text" in json_data["choices"][0]:
+#                         print(json_data["choices"][0]["text"], end='')
+#                 except json.JSONDecodeError:
+#                     continue
+# else:
+#     print(f"Request failed with status code {response.status_code}")
 
 
 import requests
@@ -42,11 +42,12 @@ url = 'https://chipling-api.hf.space/api/v1/generate-images'
 query = {
     'prompt': 'a beautiful landscape',
     'model': 'fal-ai/fast-sdxl',
+    'api_key': 'your_api_key_here',
 }
 
 response = requests.post(url, json=query)
 if response.status_code == 200:
-    data = response.json()
+    data = response.json()['image']
     print(data)
 else:
     print(f"Error: {response.status_code} - {response.text}")

test2.py
CHANGED
@@ -1,37 +1,50 @@
-import requests
-import json
+# import requests
+# import json
+
+# messages = [
+#     {"role": "user", "content": "helo"},
+#     {"role": "assistant", "content": "Hello! How can I assist you today?"},
+#     {"role": "user", "content": "who are you and give me a breif description of who created you "}
+# ]
+
+# model = "llama-4-scout-17b"
+
+# url = "http://127.0.0.1:8000/api/v1/text/generate"
 
-messages = [
-    {"role": "user", "content": "helo"},
-    {"role": "assistant", "content": "Hello! How can I assist you today?"},
-    {"role": "user", "content": "who are you and give me a breif description of who created you "}
-]
+# payload = {
+#     "messages": messages,
+#     "model": model,
+#     "api_key": ""
+# }
 
-model = "llama-4-scout-17b"
+# response = requests.post(url, json=payload, stream=True)
 
-url = "http://127.0.0.1:8000/api/v1/text/generate"
+# if response.status_code == 200:
+#     for line in response.iter_lines():
+#         if line:
+#             print(line)
+#             decoded_line = line.decode('utf-8')
+#             if decoded_line.startswith('data: [DONE]'):
+#                 break
+#             elif decoded_line.startswith('data: '):
+#                 try:
+#                     json_data = json.loads(decoded_line[6:])
+#                     if json_data["choices"] and "text" in json_data["choices"][0]:
+#                         print(json_data["choices"][0]["text"], end='')
+#                 except json.JSONDecodeError:
+#                     continue
+# else:
+#     print(f"Request failed with status code {response.status_code}")
+
+import requests
+
+url = "http://127.0.0.1:8000/api/v1/tools/google-search"
 
 payload = {
-    "messages": messages,
-    "model": model,
-    "api_key": ""
+    "query": "who are you and give me a breif description of who created you",
+    "num_results": 5,
 }
 
-response = requests.post(url, json=payload, stream=True)
-
-if response.status_code == 200:
-    for line in response.iter_lines():
-        if line:
-            print(line)
-            decoded_line = line.decode('utf-8')
-            if decoded_line.startswith('data: [DONE]'):
-                break
-            elif decoded_line.startswith('data: '):
-                try:
-                    json_data = json.loads(decoded_line[6:])
-                    if json_data["choices"] and "text" in json_data["choices"][0]:
-                        print(json_data["choices"][0]["text"], end='')
-                except json.JSONDecodeError:
-                    continue
-else:
-    print(f"Request failed with status code {response.status_code}")
+response = requests.get(url, json=payload)
+
+print(response.json())

tools/__pycache__/fetch.cpython-311.pyc
ADDED
Binary file (994 Bytes).
tools/fetch.py
ADDED
@@ -0,0 +1,37 @@
+class Tools:
+
+    def fetch_tools():
+        data = [
+            {
+                "name": "Google Search",
+                "description": "A tool to perform Google searches and retrieve results.",
+                "type": "search",
+                "url": "/api/v1/tools/google-search",
+                "method": "GET",
+                "payload": {
+                    "query": "string",
+                    "num_results": 5,
+                    "api_key": "string",
+                },
+                "response": {
+                    "results": [
+                        {
+                            "title": "string",
+                            "url": "string",
+                            "description": "string",
+                            'link': 'string',
+                            'page_text': 'string'
+                        }
+                    ],
+                    "images": [
+                        {
+                            "src": "string",
+                            "alt": "string",
+                            "class": ["string"]
+                        }
+                    ]
+                }
+            }
+        ]
+
+        return data
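For reference, the /api/v1/tools route in app.py returns this list verbatim; a quick standalone check of the helper might look like this sketch:

from tools.fetch import Tools

# fetch_tools() takes no arguments and simply returns the static manifest above.
tools = Tools.fetch_tools()
print(tools[0]["name"])  # "Google Search"
print(tools[0]["url"])   # "/api/v1/tools/google-search"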
tools/googlesearch/__pycache__/gettyimages.cpython-311.pyc
ADDED
Binary file (1.19 kB).
tools/googlesearch/__pycache__/main.cpython-311.pyc
ADDED
Binary file (8.08 kB).
tools/googlesearch/__pycache__/useragentka.cpython-311.pyc
ADDED
Binary file (1.73 kB).
tools/googlesearch/gettyimages.py
ADDED
@@ -0,0 +1,21 @@
+from curl_cffi import requests
+from bs4 import BeautifulSoup
+
+def get_images(query):
+    res = requests.get(f'https://www.gettyimages.in/search/2/image?phrase={query}=editorial', impersonate='chrome110')
+
+    soup = BeautifulSoup(res.text, 'html.parser')
+
+    images = soup.find_all('img')
+
+    results = []
+
+    for image in images:
+        print(image['src'])
+        if image['src'].startswith('https://media.gettyimages.com'):
+            results.append({'src': image['src'], 'alt': image['alt'], 'class':''})
+        else:
+            continue
+
+    return results
+
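A rough standalone usage sketch (the query is illustrative, and the results depend on Getty Images continuing to serve plain <img> tags with src/alt attributes):

from tools.googlesearch.gettyimages import get_images

# Only images hosted on media.gettyimages.com are kept; each entry is {'src', 'alt', 'class'}.
for img in get_images("mountains")[:3]:
    print(img["src"], "-", img["alt"])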
tools/googlesearch/main.py
ADDED
@@ -0,0 +1,163 @@
+"""googlesearch is a Python library for searching Google, easily."""
+from time import sleep
+from bs4 import BeautifulSoup
+from requests import get
+from urllib.parse import unquote # to decode the url
+from tools.googlesearch.useragentka import get_useragent
+from curl_cffi import requests as curlreq
+from tools.googlesearch.gettyimages import get_images
+
+def _req(term, results, lang, start, proxies, timeout, safe, ssl_verify, region):
+    resp = get(
+        url="https://www.google.com/search",
+        headers={
+            "User-Agent": get_useragent(),
+            "Accept": "*/*"
+        },
+        params={
+            "q": term,
+            "num": results + 2,  # Prevents multiple requests
+            "hl": lang,
+            "start": start,
+            "safe": safe,
+            "gl": region,
+        },
+        proxies=proxies,
+        timeout=timeout,
+        verify=ssl_verify,
+        cookies = {
+            'CONSENT': 'PENDING+987', # Bypasses the consent page
+            'SOCS': 'CAESHAgBEhIaAB',
+        }
+    )
+    resp.raise_for_status()
+    return resp
+
+
+class SearchResult:
+    def __init__(self, url, title, description):
+        self.url = url
+        self.title = title
+        self.description = description
+
+    def __repr__(self):
+        return f"SearchResult(url={self.url}, title={self.title}, description={self.description})"
+
+
+def search(term, num_results=10, lang="en", proxy=None, advanced=False, sleep_interval=0, timeout=5, safe="active", ssl_verify=None, region=None, start_num=0, unique=False):
+    """Search the Google search engine"""
+
+    # Proxy setup
+    proxies = {"https": proxy, "http": proxy} if proxy and (proxy.startswith("https") or proxy.startswith("http")) else None
+
+    start = start_num
+    fetched_results = 0
+    fetched_links = set()
+    results_list = []
+    image_results = []  # New list for image results
+
+    while fetched_results < num_results:
+        # Send request
+        resp = _req(term, num_results - start,
+                    lang, start, proxies, timeout, safe, ssl_verify, region)
+
+        # Parse
+        soup = BeautifulSoup(resp.text, "html.parser")
+        result_block = soup.find_all("div", class_="ezO2md")
+        new_results = 0
+
+        # Find all images on the page
+        try:
+            all_images = soup.find_all("img")  # Google's image class
+            for img in all_images:
+                img_src = img.get("src") or img.get("data-src")
+                if img_src:
+                    # Handle base64 images
+                    if img_src.startswith("data:image"):
+                        image_results.append({
+                            "src": img_src,  # Already base64 encoded
+                            "alt": img.get("alt", ""),
+                            "class": img.get("class", []),
+                        })
+                    # Handle regular image URLs
+                    elif img_src.startswith("http"):
+                        image_results.append({
+                            "src": img_src,
+                            "alt": img.get("alt", ""),
+                            "class": img.get("class", []),
+                        })
+        except Exception as e:
+            print(f"Error parsing images: {str(e)}")
+
+        for result in result_block:
+            link_tag = result.find("a", href=True)
+            title_tag = link_tag.find("span", class_="CVA68e") if link_tag else None
+            description_tag = result.find("span", class_="FrIlee")
+
+            if link_tag and title_tag and description_tag:
+                link = unquote(link_tag["href"].split("&")[0].replace("/url?q=", ""))
+                if link in fetched_links and unique:
+                    continue
+                fetched_links.add(link)
+                title = title_tag.text if title_tag else ""
+                description = description_tag.text if description_tag else ""
+
+                # Only get page_text if advanced mode and we haven't gotten any yet
+                if advanced and not any('page_text' in result for result in results_list):
+                    try:
+                        page_scrape = curlreq.get(link, impersonate='chrome110')
+                        page_scrape.encoding = 'utf-8'
+                        page_soup = BeautifulSoup(page_scrape.text, "html.parser")
+
+                        # Try multiple strategies to find main content
+                        main_content = (
+                            page_soup.find(['article', 'main']) or
+                            page_soup.find('div', {'id': ['content', 'main-content', 'body-content']}) or
+                            page_soup.find('div', {'class': ['content', 'main', 'article', 'post']}) or
+                            page_soup.find('div', {'role': 'main'}) or
+                            page_soup.body
+                        )
+                        if main_content:
+                            # Remove unwanted elements
+                            for element in main_content(['script', 'style', 'noscript', 'svg', 'header', 'footer', 'nav']):
+                                element.decompose()
+                            # Extract text with better cleaning
+                            text = main_content.get_text(separator=' ', strip=True)
+                            text = ' '.join(line.strip() for line in text.splitlines() if line.strip())
+                            page_text = ' '.join(word for word in text.split() if len(word) > 1)[:3000]
+                        else:
+                            page_text = ""
+                    except Exception as e:
+                        print(f"Error scraping {link}: {str(e)}")
+                        page_text = ""
+                else:
+                    page_text = ""
+
+
+                fetched_results += 1
+                new_results += 1
+
+                if advanced:
+                    results_list.append({
+                        "link": link,
+                        "title": title,
+                        "description": description,
+                        "page_text": page_text,
+                    })
+                else:
+                    results_list.append(link)
+
+                if fetched_results >= num_results:
+                    break
+
+        if new_results == 0:
+            break
+
+        start += 10
+        sleep(sleep_interval)
+
+    if image_results == []:
+        images = get_images(term)
+        return {"results": results_list, "images": images}
+    else:
+        return {"results": results_list, "images": image_results}
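A minimal sketch of calling search() directly, mirroring what the /api/v1/tools/google-search route does (the term and count are illustrative, and the output depends on Google's current result markup):

from tools.googlesearch.main import search

out = search(term="fastapi streaming", num_results=3, advanced=True, unique=False)
for r in out["results"]:
    # With advanced=True each result is a dict with link/title/description/page_text.
    print(r["title"], "->", r["link"])
print(len(out["images"]), "images collected")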
tools/googlesearch/useragentka.py
ADDED
@@ -0,0 +1,20 @@
+import random
+
+def get_useragent():
+    """
+    Generates a random user agent string mimicking the format of various software versions.
+
+    The user agent string is composed of:
+    - Lynx version: Lynx/x.y.z where x is 2-3, y is 8-9, and z is 0-2
+    - libwww version: libwww-FM/x.y where x is 2-3 and y is 13-15
+    - SSL-MM version: SSL-MM/x.y where x is 1-2 and y is 3-5
+    - OpenSSL version: OpenSSL/x.y.z where x is 1-3, y is 0-4, and z is 0-9
+
+    Returns:
+        str: A randomly generated user agent string.
+    """
+    lynx_version = f"Lynx/{random.randint(2, 3)}.{random.randint(8, 9)}.{random.randint(0, 2)}"
+    libwww_version = f"libwww-FM/{random.randint(2, 3)}.{random.randint(13, 15)}"
+    ssl_mm_version = f"SSL-MM/{random.randint(1, 2)}.{random.randint(3, 5)}"
+    openssl_version = f"OpenSSL/{random.randint(1, 3)}.{random.randint(0, 4)}.{random.randint(0, 9)}"
+    return f"{lynx_version} {libwww_version} {ssl_mm_version} {openssl_version}"
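For illustration, a generated value looks roughly like the comment below (the numbers are random, so output varies per call):

from tools.googlesearch.useragentka import get_useragent

print(get_useragent())  # e.g. Lynx/2.8.1 libwww-FM/2.14 SSL-MM/1.4 OpenSSL/1.2.7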