Upload 4 files
- app.py +48 -0
- arch.py +39 -0
- full_inference.py +190 -0
- requirements.txt +147 -0
app.py
ADDED
@@ -0,0 +1,48 @@
import gradio as gr
import torch
import numpy as np
from arch import SegFormerUNet
from albumentations import Compose, Resize, Normalize
from albumentations.pytorch import ToTensorV2

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SegFormerUNet().to(device)
checkpoint_path = "model/segformer_unet_focal_loss_97_63.pth"
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint)
model.eval()
print("Model weights loaded successfully!")

# Image transformation (must match the preprocessing used at training time)
transform = Compose([
    Resize(256, 256),
    Normalize(mean=[0.5], std=[0.5]),
    ToTensorV2()
])

def process_image(image):
    """Process an uploaded image, perform segmentation, and compute energy output."""
    # Gradio already delivers RGB numpy arrays, so no BGR->RGB conversion is needed here.
    transformed = transform(image=image)['image'].unsqueeze(0).to(device)

    with torch.no_grad():
        output = model(transformed)
    pred_mask = torch.sigmoid(output).squeeze().cpu().numpy()
    pred_mask = (pred_mask > 0.5).astype(np.uint8)

    # 0.125 m/pixel ground resolution; 19% efficiency, GTI 1676.2 kWh/m²/yr, PR 0.935.
    # Note: unlike compute_solar_area in full_inference.py, no tilt-angle correction is applied here.
    area_m2 = np.sum(pred_mask) * (0.125 ** 2)
    energy_mwh = area_m2 * 0.19 * 1676.2 * 0.935 / 1000

    return (
        pred_mask * 255,
        f"Estimated Solar Panel Area: {area_m2:.2f} m²",
        f"Estimated Energy Output: {energy_mwh:.2f} MWh per year",
    )

demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy"),
    outputs=[gr.Image(type="numpy"), gr.Text(), gr.Text()],
    title="Solar Panel Segmentation",
    description="Upload an image to detect solar panels and estimate energy output.",
)

demo.launch()
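Once demo.launch() is running, the interface can also be queried programmatically with the gradio_client package pinned in requirements.txt. A minimal sketch, assuming the app is running locally on Gradio's default port and using a placeholder input file roof.png (gr.Interface exposes a single "/predict" endpoint by default; the image output comes back as a file path):

from gradio_client import Client, handle_file

# Connect to the locally running Gradio app (default port 7860)
client = Client("http://127.0.0.1:7860/")

# Returns the three outputs of process_image: mask image path and two text strings
mask_path, area_text, energy_text = client.predict(handle_file("roof.png"), api_name="/predict")
print(area_text)
print(energy_text)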
arch.py
ADDED
@@ -0,0 +1,39 @@
import torch
import torch.nn as nn
from transformers import SegformerForSemanticSegmentation

class SegFormerUNet(nn.Module):
    def __init__(self, model_name="nvidia/segformer-b2-finetuned-ade-512-512", num_classes=1):
        super().__init__()

        # Load pretrained SegFormer and keep only its hierarchical encoder
        self.segformer = SegformerForSemanticSegmentation.from_pretrained(model_name)
        self.encoder = self.segformer.segformer.encoder

        # U-Net style decoder: five 2x transposed convolutions upsample the final
        # encoder stage (stride 32) back to the input resolution,
        # e.g. 8x8 -> 256x256 for a 256x256 input, or 16x16 -> 512x512 for 512x512.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2),  # H/32 -> H/16
            nn.ReLU(),
            nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2),  # H/16 -> H/8
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),   # H/8 -> H/4
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),    # H/4 -> H/2
            nn.ReLU(),
            nn.ConvTranspose2d(32, num_classes, kernel_size=2, stride=2)  # H/2 -> H
        )

    def forward(self, x):
        # Encoder: the final hidden state is already in (B, 512, H/32, W/32) layout
        encoder_output = self.encoder(x).last_hidden_state

        # Decoder: upsample back to the input resolution
        return self.decoder(encoder_output)  # (B, num_classes, H, W) mask logits
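As a quick sanity check on the encoder/decoder wiring, the shapes can be verified with a random tensor. A minimal sketch (downloads the pretrained SegFormer-B2 weights on first run):

import torch
from arch import SegFormerUNet

model = SegFormerUNet().eval()
x = torch.randn(1, 3, 256, 256)  # matches the 256x256 preprocessing used in app.py
with torch.no_grad():
    features = model.encoder(x).last_hidden_state
    out = model(x)
print(features.shape)  # torch.Size([1, 512, 8, 8])   -> final encoder stage, stride 32
print(out.shape)       # torch.Size([1, 1, 256, 256]) -> mask logits at input resolution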
full_inference.py
ADDED
@@ -0,0 +1,190 @@
from arch import SegFormerUNet
import os
import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
import albumentations as A
from albumentations.pytorch import ToTensorV2

# Define transformations.
# Note: the geometric augmentations below (flips, shifts, rotations, distortion)
# are normally reserved for training; for a deterministic evaluation, keep only
# Resize, Normalize and ToTensorV2.
transform = A.Compose([
    A.Resize(256, 256),                        # Resize to the model input size
    A.HorizontalFlip(p=0.5),                   # Randomly flip horizontally
    A.VerticalFlip(p=0.2),                     # Randomly flip vertically
    A.RandomBrightnessContrast(p=0.2),         # Adjust brightness and contrast
    A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=20, p=0.5),  # Small shifts, scaling, rotation
    A.GaussianBlur(blur_limit=(3, 5), p=0.2),  # Slight blurring for robustness
    A.GridDistortion(num_steps=5, distort_limit=0.3, p=0.2),  # Slight grid distortion
    A.Normalize(mean=[0.5], std=[0.5]),        # Normalize
    ToTensorV2()                               # Convert to tensor
])

# Custom dataset class
class SolarPanelDataset(Dataset):
    def __init__(self, image_dir, mask_dir, transform=None):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transform = transform
        self.images = sorted(os.listdir(image_dir))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_path = os.path.join(self.image_dir, self.images[idx])
        mask_path = os.path.join(self.mask_dir, self.images[idx].replace(".bmp", "_label.bmp"))

        # Load image & mask
        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        mask = (mask > 0).astype("uint8")  # Convert to binary mask

        # Apply transformations
        if self.transform:
            augmented = self.transform(image=image, mask=mask)
            image = augmented["image"]
            mask = augmented["mask"]

        return image, mask.unsqueeze(0)  # Add channel dimension

# Load dataset
val_dataset = SolarPanelDataset("dataset/val/images", "dataset/val/labels", transform=transform)

def compute_solar_area(mask, PTM=0.125, OPTA=34):
    """
    Compute solar panel area (m²) from a binary segmentation mask.
    PTM is the pixel-to-metre ratio; OPTA is the optimal panel tilt angle in
    degrees, used to correct the top-down projected area.
    """
    if isinstance(mask, torch.Tensor):
        mask = mask.cpu().detach().numpy()  # Convert to NumPy if tensor
    if mask.ndim == 3:
        mask = mask.squeeze(0)  # Remove extra channel

    mask = (mask > 0.5).astype(np.float32)  # Ensure binary mask
    panel_pixels = mask.sum()  # Count solar panel pixels
    area_m2 = (panel_pixels * (PTM ** 2)) / np.cos(np.radians(OPTA))  # Convert to m²
    return area_m2

def compute_accuracy_metrics(segmented_mask, actual_mask, PTM=0.125):
    """
    Compare segmented area vs. actual area using MAPE and IoU.
    """
    # Compute solar panel areas
    segmented_area = compute_solar_area(segmented_mask, PTM)
    actual_area = compute_solar_area(actual_mask, PTM)

    # Mean Absolute Percentage Error (MAPE)
    mape_error = np.abs((segmented_area - actual_area) / actual_area) * 100 if actual_area != 0 else 0

    # Intersection over Union (IoU)
    intersection = ((segmented_mask > 0.5) & (actual_mask > 0.5)).sum()
    union = ((segmented_mask > 0.5) | (actual_mask > 0.5)).sum()
    iou_score = intersection / union if union != 0 else 0

    return {
        "Segmented Area (m²)": segmented_area,
        "Actual Area (m²)": actual_area,
        "MAPE (%)": mape_error,
        "IoU Score": iou_score
    }

def compute_energy_output(area_m2, efficiency=0.19, GTI=1676.2, PR=0.935):
    """
    Estimate annual solar energy output in kWh:
    area (m²) × panel efficiency × global tilted irradiation (kWh/m²/yr) × performance ratio.
    """
    return area_m2 * efficiency * GTI * PR

def calculate_solar_energy(val_dataset, model, idx=0):
    model.eval()

    # Load image and mask from the validation set
    image, mask = val_dataset[idx]
    orig_image = np.moveaxis(image.numpy(), 0, -1)  # (C, H, W) -> (H, W, C)

    # Move image to the model's device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image = image.unsqueeze(0).to(device)

    with torch.no_grad():
        output = model(image)  # Raw logits
    pred_mask = torch.sigmoid(output).squeeze().cpu().numpy()  # Logits -> probabilities
    pred_mask = (pred_mask > 0.5).astype(np.uint8)  # Binary mask

    # Ground truth mask for comparison and plotting
    mask = mask.squeeze().numpy()

    difference = np.sum(mask != pred_mask)
    print(f"Number of different pixels: {difference}")

    print("-" * 20)
    # Area (m²) and energy output from the ground truth mask
    area_m2 = compute_solar_area(mask)
    print("ORIGINAL MASK ENERGY OUTPUT")
    print(f"Estimated Solar Panel Area: {area_m2:.2f} m²")
    energy_kwh = compute_energy_output(area_m2)
    print(f"Estimated Energy Output: {(energy_kwh / 1000):.2f} MWh per year")
    print("-" * 20)

    print("-" * 20)
    # Area (m²) and energy output from the predicted mask
    area_m2 = compute_solar_area(pred_mask)
    print("PREDICTED MASK ENERGY OUTPUT")
    print(f"Estimated Solar Panel Area: {area_m2:.2f} m²")
    energy_kwh = compute_energy_output(area_m2)
    print(f"Estimated Energy Output: {(energy_kwh / 1000):.2f} MWh per year")
    print("-" * 20)

    # Plot the results
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    plt.imshow(orig_image)
    plt.title("Original Image")
    plt.axis("off")

    plt.subplot(1, 3, 2)
    plt.imshow(mask, cmap="gray")
    plt.title("Ground Truth Mask")
    plt.axis("off")

    plt.subplot(1, 3, 3)
    plt.imshow(pred_mask, cmap="gray")
    plt.title("Predicted Mask")
    plt.axis("off")

    plt.show()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SegFormerUNet().to(device)
model.eval()
checkpoint_path = "model/segformer_unet_focal_loss_97_63.pth"
checkpoint = torch.load(checkpoint_path, map_location=device)

# Load model state dict
model.load_state_dict(checkpoint)

print("Model weights loaded successfully!")

# Run visualization for a validation sample
calculate_solar_energy(val_dataset, model, idx=21)
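To make the unit handling in compute_solar_area and compute_energy_output concrete, here is a worked example with the script's default constants and a hypothetical mask of 10,000 panel pixels (all values below are illustrative, not from the dataset):

import numpy as np

# 10,000 panel pixels at PTM = 0.125 m/pixel:
#   projected area  = 10,000 × 0.125² = 156.25 m²
#   tilt-corrected  = 156.25 / cos(34°) ≈ 188.5 m²
panel_pixels = 10_000
area_m2 = panel_pixels * 0.125 ** 2 / np.cos(np.radians(34))

# energy = area × efficiency × GTI × PR, in kWh/year
energy_kwh = area_m2 * 0.19 * 1676.2 * 0.935
print(f"{area_m2:.1f} m² -> {energy_kwh / 1000:.2f} MWh/year")  # ≈ 188.5 m² -> 56.13 MWh/year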
requirements.txt
ADDED
@@ -0,0 +1,147 @@
aiofiles==23.2.1
albucore==0.0.23
albumentations==2.0.5
annotated-types==0.7.0
anyio==4.8.0
apturl==0.5.2
bcrypt==3.2.0
blinker==1.4
Brlapi==0.8.3
certifi==2020.6.20
chardet==4.0.0
click==8.0.3
colorama==0.4.4
command-not-found==0.3
contourpy==1.3.1
cryptography==3.4.8
cupshelpers==1.0
cycler==0.12.1
dbus-python==1.2.18
defer==1.0.6
distro==1.7.0
distro-info==1.1+ubuntu0.2
duplicity==0.8.21
exceptiongroup==1.2.2
fastapi==0.115.11
fasteners==0.14.1
ffmpy==0.5.0
filelock==3.18.0
fonttools==4.56.0
fsspec==2025.3.0
future==0.18.2
gradio==5.21.0
gradio_client==1.7.2
groovy==0.1.2
gyp==0.1
h11==0.14.0
httpcore==1.0.7
httplib2==0.20.2
httpx==0.28.1
huggingface-hub==0.29.3
idna==3.3
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.6
keyring==23.5.0
kiwisolver==1.4.8
language-selector==0.1
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lockfile==0.12.2
louis==3.20.0
macaroonbakery==1.3.1
Mako==1.1.3
markdown-it-py==3.0.0
MarkupSafe==2.0.1
matplotlib==3.10.1
mdurl==0.1.2
monotonic==1.6
more-itertools==8.10.0
mpmath==1.3.0
netifaces==0.11.0
networkx==3.4.2
numpy==2.2.3
nvidia-cublas-cu12==12.4.5.8
nvidia-cuda-cupti-cu12==12.4.127
nvidia-cuda-nvrtc-cu12==12.4.127
nvidia-cuda-runtime-cu12==12.4.127
nvidia-cudnn-cu12==9.1.0.70
nvidia-cufft-cu12==11.2.1.3
nvidia-curand-cu12==10.3.5.147
nvidia-cusolver-cu12==11.6.1.9
nvidia-cusparse-cu12==12.3.1.170
nvidia-cusparselt-cu12==0.6.2
nvidia-nccl-cu12==2.21.5
nvidia-nvjitlink-cu12==12.4.127
nvidia-nvtx-cu12==12.4.127
oauthlib==3.2.0
olefile==0.46
opencv-python==4.11.0.86
opencv-python-headless==4.11.0.86
orjson==3.10.15
packaging==24.2
pandas==2.2.3
paramiko==2.9.3
pexpect==4.8.0
Pillow==9.0.1
protobuf==3.12.4
ptyprocess==0.7.0
pycairo==1.20.1
pycups==2.0.1
pydantic==2.10.6
pydantic_core==2.27.2
pydub==0.25.1
Pygments==2.19.1
PyGObject==3.42.1
PyJWT==2.3.0
pymacaroons==0.13.0
PyNaCl==1.5.0
pyparsing==2.4.7
pyRFC3339==1.1
python-apt==2.4.0+ubuntu4
python-dateutil==2.9.0.post0
python-debian==0.1.43+ubuntu1.1
python-multipart==0.0.20
pytz==2022.1
pyxdg==0.27
PyYAML==5.4.1
regex==2024.11.6
reportlab==3.6.8
requests==2.25.1
rich==13.9.4
ruff==0.11.0
safehttpx==0.1.6
safetensors==0.5.3
scipy==1.15.2
SecretStorage==3.3.1
semantic-version==2.10.0
shellingham==1.5.4
simsimd==6.2.1
six==1.16.0
sniffio==1.3.1
starlette==0.46.1
stringzilla==3.12.3
sympy==1.13.1
systemd-python==234
tokenizers==0.21.1
tomlkit==0.13.2
torch==2.6.0
tqdm==4.67.1
transformers==4.49.0
triton==3.2.0
typer==0.15.2
typing_extensions==4.12.2
tzdata==2025.1
ubuntu-drivers-common==0.0.0
ubuntu-pro-client==8001
ufw==0.36.1
unattended-upgrades==0.1
urllib3==1.26.5
usb-creator==0.3.7
uvicorn==0.34.0
wadllib==1.3.6
websockets==15.0.1
xdg==5
xkit==0.0.0
zipp==1.0.0