Clean start: purge history, track binaries with Git LFS
ThibaultGROUEIX committed · Commit bf62248
Files changed:
- .gitattributes +2 -0
- .gitignore +4 -0
- README.md +35 -0
- app.py +20 -0
- copaint/cli.py +173 -0
- copaint/copaint.py +574 -0
- copaint/gradio_ui.py +262 -0
- copaint/presets.yaml +0 -0
- pyproject.toml +34 -0
- requirements.txt +13 -0
.gitattributes
ADDED
@@ -0,0 +1,2 @@
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,4 @@
+.DS_Store
+poetry.lock
+/.gradio
+/.copaint/__pycache__
README.md
ADDED
@@ -0,0 +1,35 @@
+# COPAINT
+TL;DR: from a generated image to a Copaint PDF.
+<!-- insert image data/demo.png -->
+[](data/demo.jpg)
+
+
+## Usage and Install
+```
+# Install dependencies
+pip install torch torchvision reportlab PyPDF2 Pillow argparse gradio_pdf
+
+# Run
+python copaint.py --input_image data/bear.jpg --copaint_logo data/logo_copaint.jpg --outputfolder output
+
+# Using the CLI
+pip install --upgrade pip
+pip install -e .
+
+# Case 1: generate all the presets
+copaint --input_image data/bear.jpg --copaint_logo data/logo_copaint.jpg --outputfolder output --use_presets
+
+# Case 2: provide a number of participants; the program will generate the best grid
+copaint --input_image data/bear.jpg --copaint_logo data/logo_copaint.jpg --outputfolder output --nparticipants 45
+
+# Case 3: provide the number of cells in the grid
+copaint --input_image data/bear.jpg --copaint_logo data/logo_copaint.jpg --outputfolder output --h_cells 3 --w_cells 4
+```
+
+## Build and deploy
+```
+# Build
+poetry build
+# Deploy
+poetry publish
+```
app.py
ADDED
@@ -0,0 +1,20 @@
+import argparse
+from copaint.gradio_ui import build_gradio_ui
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-s", "--share",
+        action=argparse.BooleanOptionalAction,  # enables --share / --no-share
+        default=True,
+        help="Share the app publicly (default: true)"
+    )
+    args = parser.parse_args()
+
+    demo = build_gradio_ui()
+    demo.launch(share=args.share)
+
+
+if __name__ == "__main__":
+    main()
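A minimal usage sketch of `app.py`, assuming the package and its UI dependencies (gradio, gradio_pdf) are installed: because the flag is declared with `argparse.BooleanOptionalAction`, running `python app.py --no-share` is equivalent to the following Python snippet (illustrative only, not part of the commit).

```python
# Sketch: equivalent of `python app.py --no-share`.
from copaint.gradio_ui import build_gradio_ui

demo = build_gradio_ui()
demo.launch(share=False)  # keep the app local instead of creating a public Gradio link
```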
copaint/cli.py
ADDED
@@ -0,0 +1,173 @@
+import argparse
+from copaint.copaint import image_to_pdf
+# from copaint import image_to_copaint_pdf
+from PIL import Image
+import numpy as np
+import random
+actual_participants_over_participants = 0.7  # how many people actually show up compared to the number of participants
+
+# Default list of identifiers
+default_identifiers = [
+    "sunshine",
+    "bliss",
+    "smile",
+    "serenity",
+    "laughter",
+    "breeze",
+    "harmony",
+    "glee",
+    "cheer",
+    "delight",
+    "hope",
+    "sparkle",
+    "kindness",
+    "charm",
+    "grace",
+    "radiance",
+    "jubilee",
+    "flutter",
+    "playful",
+    "whimsy",
+    "gleam",
+    "glow",
+    "twinkle",
+    "love",
+    "joy",
+    "peace",
+    "cheeky",
+    "amity",
+    "blissful",
+    "grateful"
+]
+
+# Function to get a default identifier (random or specified)
+def default_identifier(index=None):
+    """
+    Get a default identifier from the list.
+
+    Args:
+        index: Optional index to get a specific identifier.
+               If None, returns a random identifier.
+
+    Returns:
+        str: A default identifier
+    """
+    if index is not None and 0 <= index < len(default_identifiers):
+        return default_identifiers[index]
+    return random.choice(default_identifiers)
+
+def get_grid_size(nparticipants, input_image):
+    """ Takes the number of participants and the input image and returns the grid size, with the objective of making each cell as square as possible."""
+    # get the dimensions of the input image, load with PIL
+    input_image = Image.open(input_image)
+    w, h = input_image.size
+    n_cell = nparticipants * actual_participants_over_participants
+    aspect_ratio = w/h
+    h_cells = np.sqrt(aspect_ratio * n_cell)
+    w_cells = aspect_ratio * h_cells
+
+    """
+    We have the following equations:
+    (1) w_cells/h_cells = aspect_ratio (as close as possible up to w_cells and h_cells being integers)
+    (2) h_cells * w_cells = n_cell
+
+    Solving to
+    (1) w_cells = aspect_ratio * h_cells
+    # replace in (2)
+    (2) h_cells * aspect_ratio * h_cells = n_cell
+    Leads to (3) h_cells^2 * aspect_ratio = n_cell
+    Leads to h_cells = sqrt(n_cell / aspect_ratio)
+    """
+    h_cells = np.round(np.sqrt(n_cell / aspect_ratio))
+    w_cells = np.round(aspect_ratio * h_cells)
+
+    # convert to integers
+    h_cells = int(h_cells)
+    w_cells = int(w_cells)
+
+
+    print(f"Using {h_cells} x {w_cells} = {h_cells*w_cells} grid size for a canvas of size {h}*{w} and {nparticipants} participants (actual {n_cell})")
+    return int(h_cells), int(w_cells)
+
+def main():
+    parser = argparse.ArgumentParser(description='CoPaint')
+    parser.add_argument('--input_image', type=str, default='./data/bear.png', help='input image')
+    parser.add_argument('--copaint_logo', type=str, default='./data/logo_copaint.png', help='copaint logo')
+    parser.add_argument('--outputfolder', type=str, default='output/', help='output image')
+    parser.add_argument('--nparticipants', type=int, help='number of participants', default=None)
+    parser.add_argument('--h_cells', type=int, help='number of cells in height', default=None)
+    parser.add_argument('--w_cells', type=int, help='number of cells in width', default=None)
+    parser.add_argument('--unique_identifier', type=str, help='unique identifier like Mauricette, in case users are running multiple copaints at the same time, to avoid mixing the tiles', default=None)
+    parser.add_argument('--cell_size_in_cm', type=float, default=None, help='size of a cell in cm, for printing purposes')
+    parser.add_argument('--use_presets', action='store_true', help='use a couple of preset cuts based on the number of people attending')
+    parser.add_argument('--a4', action='store_true', help='use A4 format for the pdf')
+    parser.add_argument('--high_res', action='store_true', help='save in high resolution mode (PNG format without resizing)')
+    parser.add_argument('--min_cell_size_in_cm', type=int, default=2, help='minimum size of cells in cm')
+    parser.add_argument('--debug', action='store_true', help='debug mode')
+
+    # done adding arguments
+    args = parser.parse_args()
+
+
+    if args.unique_identifier is None:
+        # select one at random
+        idx = np.random.choice(len(default_identifiers), 1)
+        args.unique_identifier = default_identifiers[idx[0]]
+
+    presets = [
+        [2, 3],   # 6 people
+        [3, 3],   # 9 people
+        [3, 4],   # 12 people
+        [4, 4],   # 16 people
+        [4, 5],   # 20 people
+        [4, 6],   # 24 people
+        [5, 6],   # 30 people
+        [6, 8],   # 48 people
+        [7, 9],   # 63 people
+        [7, 10],  # 70 people
+        [8, 10],  # 80 people
+        [8, 12],  # 96 people
+    ]
+    preset_number_of_guests = [presets[i][0]*presets[i][1] for i in range(len(presets))]
+
+    # generate all presets
+    if args.use_presets:
+        # disregard other parameters and use the presets
+        # assert other parameters are not set
+        assert(args.h_cells is None), "When using presets, the number of H cells can't be set"
+        assert(args.w_cells is None), "When using presets, the number of W cells can't be set"
+        assert(args.nparticipants is None), "When using presets, the number of participants can't be set"
+
+        for preset in presets:
+            image_to_pdf(args.input_image, args.copaint_logo, args.outputfolder, preset[0], preset[1],
+                         unique_identifier=args.unique_identifier, cell_size_in_cm=args.cell_size_in_cm,
+                         a4=args.a4, high_res=args.high_res, min_cell_size_in_cm=args.min_cell_size_in_cm, debug=args.debug)
+
+    # generate a copaint pdf based on the number of participants
+    elif args.nparticipants:
+        # assert other parameters are not set
+        assert(args.h_cells is None), "When choosing via number of participants, the number of H cells can't be set"
+        assert(args.w_cells is None), "When choosing via number of participants, the number of W cells can't be set"
+
+        # get the grid size based on the number of participants
+        h_cells, w_cells = get_grid_size(args.nparticipants, args.input_image)
+        image_to_pdf(args.input_image, args.copaint_logo, args.outputfolder, h_cells, w_cells,
+                     unique_identifier=args.unique_identifier, cell_size_in_cm=args.cell_size_in_cm,
+                     a4=args.a4, high_res=args.high_res, min_cell_size_in_cm=args.min_cell_size_in_cm, debug=args.debug)
+
+        # # Deprecated: find the first preset that can accommodate the number of participants
+        # preset_number_of_guests_inflated_by_losers = actual_participants_over_participants*args.nparticipants
+        # for i, preset in enumerate(presets):
+        #     if preset_number_of_guests_inflated_by_losers <= preset_number_of_guests[i]:
+        #         print(f"Using preset {preset} for {args.nparticipants} participants")
+        #         image_to_copaint_pdf(args.input_image, args.copaint_logo, args.outputfolder, preset[0], preset[1])
+        #         break
+
+    # Generate the copaint pdf using the specified number of cells
+    else:
+        image_to_pdf(args.input_image, args.copaint_logo, args.outputfolder, args.h_cells, args.w_cells,
+                     unique_identifier=args.unique_identifier, cell_size_in_cm=args.cell_size_in_cm,
+                     a4=args.a4, high_res=args.high_res, min_cell_size_in_cm=args.min_cell_size_in_cm, debug=args.debug)
+
+if __name__ == '__main__':
+    main()
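A worked example of the grid heuristic in `get_grid_size`, as a sketch with a hypothetical 3000x2000 px input image (not a file from the repo): with `--nparticipants 45`, the expected turnout is 45 * 0.7 = 31.5 cells, so `h_cells = round(sqrt(31.5 / 1.5)) = 5` and `w_cells = round(1.5 * 5) = 8`, i.e. a 5x8 grid of 40 roughly square cells.

```python
# Sketch of the same computation as get_grid_size, with a hypothetical
# 3000x2000 px image instead of loading one from disk.
import numpy as np

nparticipants = 45
actual_participants_over_participants = 0.7
w, h = 3000, 2000                                                # hypothetical image size in pixels
n_cell = nparticipants * actual_participants_over_participants  # 31.5 expected cells
aspect_ratio = w / h                                             # 1.5

h_cells = int(np.round(np.sqrt(n_cell / aspect_ratio)))  # round(4.58) -> 5
w_cells = int(np.round(aspect_ratio * h_cells))          # round(7.5)  -> 8
print(h_cells, w_cells, h_cells * w_cells)               # 5 8 40
```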
copaint/copaint.py
ADDED
@@ -0,0 +1,574 @@
+# cc @2024 COPAINT
+# troubleshooting: groueix@copaint.com
+
+"""
+# usage
+python copaint.py --input data/input_design.png --back data/back_design.png --outputfolder output
+
+# install dependencies
+pip install torch torchvision reportlab PyPDF2 Pillow argparse
+
+# if you are using a mac, you might need to install cairosvg and cairo to load SVG files
+pip install cairosvg ; brew install cairo libffi
+export PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/opt/homebrew/lib/pkgconfig:$PKG_CONFIG_PATH"
+export DYLD_LIBRARY_PATH="/usr/local/lib:/opt/homebrew/lib:$DYLD_LIBRARY_PATH"
+"""
+
+
+import argparse
+import os
+import numpy as np
+import torchvision
+import torch
+import time  # Add this import for timing
+from reportlab.pdfgen import canvas
+from reportlab.lib.pagesizes import letter, A4
+from reportlab.lib.units import inch
+import PyPDF2
+
+from functools import lru_cache
+from matplotlib import font_manager
+
+from PIL import Image, ImageDraw, ImageFont
+Image.MAX_IMAGE_PIXELS = None  # Removes the limit entirely
+fromPIltoTensor = torchvision.transforms.ToTensor()
+fromTensortoPIL = torchvision.transforms.ToPILImage()
+
+
+@lru_cache(maxsize=1)
+def get_font(debug=False) -> str:
+    """
+    Get the path to a suitable system font, cached after the first call.
+    """
+    start_time = time.time()
+    good_font_options = ["Avenir Next", "HelveticaNeue", "AdobeClean-Regular", "Arial"]  # "Bradley Hand"
+    font_paths = ["/System/Library/Fonts/Avenir Next.ttc"]
+    for font_path in font_paths:
+        if os.path.exists(font_path):
+            print(f"Found '{font_path}' font")
+            return font_path
+
+    available_fonts = font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
+    font_path = None
+    for good_font in good_font_options:
+        font_path = next((font for font in available_fonts if good_font in font), None)
+        if font_path:
+            print(f"Found '{good_font}' font: {font_path}")
+            break
+
+    if font_path is None:
+        font_path = available_fonts[0]
+        print(f"No good fonts found. Using default: {font_path}")
+        print("Please install one of the recommended fonts.")
+
+    if debug:
+        print(f"Font loading took {time.time() - start_time:.4f} seconds")
+    return font_path
+
+font_path = get_font()
+
+
+def load_image(image_path, debug=False):
+    """ Load an image from a file path and return a tensor. """
+    start_time = time.time()
+    # check if the path exists
+    assert os.path.exists(image_path), f"File not found: {image_path}"
+    # check if the file is an SVG
+    if image_path.endswith(".svg"):
+        import cairosvg
+        import io
+        # Convert SVG to PNG
+        with open(image_path, "rb") as svg_file:
+            png_data = cairosvg.svg2png(file_obj=svg_file)
+        # Load the PNG data into a Pillow Image
+        image = Image.open(io.BytesIO(png_data))
+        # display
+        # image.show()
+    else:
+        image = Image.open(image_path)
+
+    image_open_time = time.time()
+    if debug:
+        print(f"Image opening took {image_open_time - start_time:.4f} seconds")
+
+    image = fromPIltoTensor(image).unsqueeze(0)
+    print(f"Loaded image of shape {image.shape}, from {image_path}")
+    # resize to low res for testing
+    # image = torch.nn.functional.interpolate(image, size=1000)
+
+    if debug:
+        print(f"Image to tensor conversion took {time.time() - image_open_time:.4f} seconds")
+        print(f"Total image loading took {time.time() - start_time:.4f} seconds")
+    return image
+
+
+def save_image(tensor, image_path, debug=False):
+    """ Save a tensor to an image file. """
+    start_time = time.time()
+    print(f"Saving image of shape {tensor.shape} to {image_path}")
+    image = fromTensortoPIL(tensor.squeeze(0))
+    conversion_time = time.time()
+    if debug:
+        print(f"Tensor to PIL conversion took {conversion_time - start_time:.4f} seconds")
+
+    image.save(image_path)
+    if debug:
+        print(f"Image saving took {time.time() - conversion_time:.4f} seconds")
+        print(f"Total save_image took {time.time() - start_time:.4f} seconds")
+
+
+def save_tensor_to_pdf(tensor, pdf_path, is_front=True, margin=0.25, img_small_side_in_cm=None, a4=False, high_res=False, scale=None, debug=False):
+    """
+    Save a tensor to a PDF, the tensor is assumed to be a single image, and is centered on the page.
+    """
+    start_time = time.time()
+    image = fromTensortoPIL(tensor.squeeze(0))
+    img_width, img_height = image.size
+    # 1 Inch = 72 Points : ad-hoc metric used in typography and the printing industry.
+    # The US Letter format is US Letter size: 8.5 by 11 inches
+    W, H = 8.5, 11  # the unit is inch
+    if a4:
+        print("Using A4 format")
+        W, H = 8.27, 11.69  # the unit is inch
+
+    page_width_in_pt = (W - 2*margin) * inch
+    page_height_in_pt = (H - 2*margin) * inch
+
+
+    img_small_side_in_pt = None
+    img_large_side_in_pt = None
+    if img_small_side_in_cm is not None:
+        img_small_side_in_pt = img_small_side_in_cm * inch * 0.393701  # 1 cm = 0.393701 inches
+        img_large_side_in_pt = img_small_side_in_pt * max(img_width, img_height) / min(img_width, img_height)
+        assert(img_small_side_in_pt < page_width_in_pt and img_large_side_in_pt < page_height_in_pt), f"Cell size in cm is too large for the page, max in pt unit is {page_width_in_pt}x{page_height_in_pt}, got {img_small_side_in_pt}x{img_large_side_in_pt}. It looks like you manually set the size of the cell in cm, but the image is too large for the page, try a smaller cell size."
+
+
+    print(f"Saving tensor of shape {tensor.shape} to {pdf_path}")
+
+    # Convert tensor to image
+    t1 = time.time()
+    if debug:
+        print(f"Tensor to PIL conversion took {time.time() - t1:.4f} seconds")
+
+    t2 = time.time()
+    # Check if image should be rotated
+    scale_1, rotated = None, True
+    if scale is not None:
+        scale_1, rotated = scale
+
+    if image.width > image.height and rotated:
+        print("Rotating image. Size in pixels: ", image.width, image.height)
+        image = image.rotate(90, expand=True)
+        print("Rotated image. Size in pixels: ", image.width, image.height)
+        img_width, img_height = image.width, image.height
+        rotated = True
+    else:
+        rotated = False
+    # check if it's better to maxout width or height
+
+
+    if scale_1 is None:
+        if img_small_side_in_pt is not None:
+            scale_1 = img_small_side_in_pt / min(img_width, img_height)  # this might go over the page
+        else:
+            # Calculate the scaling factor to fit the image within the page
+            scale_width = page_width_in_pt / img_width
+            scale_height = page_height_in_pt / img_height
+            scale_1 = min(scale_width, scale_height)  # Choose the smaller scale to preserve aspect ratio
+
+    # Calculate the resized image dimensions
+    new_width = img_width * scale_1
+    new_height = img_height * scale_1
+
+    # Calculate offsets to center the image on the page
+    x_offset = (page_width_in_pt - new_width) // 2
+    y_offset = (page_height_in_pt - new_height) // 2
+    if debug:
+        print(f"Image calculations took {time.time() - t2:.4f} seconds")
+
+    # Save image to PDF
+    t3 = time.time()
+    # Use PNG for high-res mode instead of JPG
+    image_path = "temp.png" if high_res else "temp.jpg"
+    image.save(image_path)
+    if debug:
+        print(f"Temporary image saving took {time.time() - t3:.4f} seconds")
+
+    # Create a PDF
+    t4 = time.time()
+    if a4:
+        c = canvas.Canvas(pdf_path, pagesize=A4)
+    else:
+        c = canvas.Canvas(pdf_path, pagesize=letter)
+    c.drawImage(image_path, x_offset+margin*inch, y_offset+margin*inch, width=new_width, height=new_height, preserveAspectRatio=True)
+    c.save()
+    if debug:
+        print(f"PDF creation took {time.time() - t4:.4f} seconds")
+
+    os.remove(image_path)
+    if debug:
+        print(f"Total PDF saving took {time.time() - start_time:.4f} seconds")
+    return pdf_path, (scale_1, rotated)
+
+
+def merge_pdf_list(pdfs, output_path, debug=False):
+    """ Merge a list of PDFs into a single PDF. """
+    start_time = time.time()
+    merger = PyPDF2.PdfMerger()
+    for pdf in pdfs:
+        merger.append(pdf)
+    merger.write(output_path)
+    merger.close()
+    if debug:
+        print(f"PDF merging took {time.time() - start_time:.4f} seconds")
+    return output_path
+
+
+def create_image_with_text(text: str = "1", size: int = 400, underline: bool = True, debug=False) -> torch.Tensor:
+    """ Create an image with text using PIL. Returns a torch tensor. """
+    start_time = time.time()
+    # Create a blank image (200x200 pixels, white background)
+    if isinstance(size, int):
+        size = (size, size)
+    image = Image.new("RGB", size, "white")
+
+    # Create a drawing object
+    draw = ImageDraw.Draw(image)
+
+    # Set the font (optional)
+    try:
+        font = ImageFont.truetype(font_path, size=int(size[1]/1.3))  # Ensure the font is available
+    except IOError:
+        font = ImageFont.load_default()
+        # turn size to 100
+
+    # Use textbbox to measure the text dimensions
+    visual_bbox = draw.textbbox((0, 0), text, font=font)
+    # (-4, 101, 340, 260)
+    text_width = visual_bbox[2] - visual_bbox[0]  # Width of the text
+    text_height = visual_bbox[3] - visual_bbox[1]  # Height of the text
+
+    center_point = (size[0] // 2, size[1] // 2)
+    top_left_of_BB = (center_point[0] - text_width // 2, center_point[1] - text_height // 2)
+    baseline = (top_left_of_BB[0] - visual_bbox[0], top_left_of_BB[1] - visual_bbox[1])
+    visual_bbox = draw.textbbox(baseline, text, font=font)
+    # draw.rectangle(visual_bbox, outline="red", width=2)
+    # print(f" text {text} Text width: {text_width}, Text height: {text_height}", f"Image width: {image.width}, Image height: {image.height}", f"Text position: {baseline}")
+
+    # # Draw the text
+    draw.text(baseline, text, fill="black", font=font)
+
+    if underline:
+        # # Add a line under the text
+        x = baseline[0]
+        y = visual_bbox[3] + 20
+        draw.line((x, y, x+text_width, y), fill="black", width=5)
+
+    tensor = fromPIltoTensor(image).unsqueeze(0)
+
+    if debug and len(text) <= 2:  # Only log for short texts (cell numbers) when debugging
+        print(f"Creating image with text '{text}' took {time.time() - start_time:.4f} seconds")
+    return tensor
+
+
+def create_back_image(h, w, h_cells, w_cells, logo_image, unique_identifier, list_of_cell_idx=None, debug=False):
+    """
+    Create back image tensor, of size hxw -
+    Black pixels at the separation of cells to draw the lines
+    The logo is in each cell, with the cell number underlined
+    logo_image : tensor of size 1x3xhxw
+    """
+    print(f"Creating back image of size {h}x{w} for {h_cells}x{w_cells} cells")
+    start_time = time.time()
+    num_channels = 3  # do not consider the alpha channel
+    back_image = torch.ones(1, num_channels, h, w)
+    # cell size in pixels
+    cell_h = h // h_cells
+    cell_w = w // w_cells
+    # hyperparameters controlling the thickness of the lines and the logo size
+    line_thickness = min(cell_h, cell_w) // 100
+    logo_size = min(cell_h, cell_w) // 4
+    logo_offset = min(cell_h, cell_w) // 50
+    number_size = min(cell_h, cell_w) // 2
+
+    if debug:
+        print(f"thickness of the lines: {line_thickness}")
+        print(f"Initialization took {time.time() - start_time:.4f} seconds")
+
+    # Create the grid lines
+    grid_start_time = time.time()
+    line_half_thickness = line_thickness // 2
+    for i in range(h_cells):
+        for j in range(w_cells):
+            h0 = i * cell_h  # height start
+            h1 = (i + 1) * cell_h  # height end
+            w0 = j * cell_w  # width start
+            w1 = (j + 1) * cell_w  # width end
+
+            if h0+line_half_thickness < h:
+                back_image[:, :num_channels, h0:(h0+line_half_thickness), :] = 0
+            if w0+line_half_thickness < w:
+                back_image[:, :num_channels, :, w0:(w0+line_half_thickness)] = 0
+            if h1 - line_half_thickness > 0:
+                back_image[:, :num_channels, (h1-line_half_thickness):h1, :] = 0
+            if w1 - line_half_thickness > 0:
+                back_image[:, :num_channels, :, (w1-line_half_thickness):w1] = 0
+    if debug:
+        print(f"Creating grid lines took {time.time() - grid_start_time:.4f} seconds")
+
+    # Resize logo for all cells
+    logo_resize_time = time.time()
+    _, _, h, w = logo_image.size()
+    scale_logo = min(logo_size / h, logo_size / w)
+    new_h, new_w = int(h * scale_logo), int(w * scale_logo)
+    logo_image_resized = torch.nn.functional.interpolate(logo_image, size=(new_h, new_w), mode='bilinear')
+
+    if debug:
+        print(f"Logo resizing took {time.time() - logo_resize_time:.4f} seconds")
+
+    # Add content to cells
+    cell_content_time = time.time()
+    letscopaint = create_image_with_text("@letscopaint", underline=False,
+                                         size=(int(0.8*number_size), number_size//8),
+                                         debug=debug)
+    for i in range(h_cells):
+        for j in range(w_cells):
+            h0 = i * cell_h  # height start
+            h1 = (i + 1) * cell_h  # height end
+            w0 = j * cell_w  # width start
+            w1 = (j + 1) * cell_w  # width end
+
+            # add logo at the bottom right of the cell
+            logo_size_h, logo_size_w = logo_image_resized.shape[2:]
+            back_image[:, :, h1-logo_size_h-logo_offset:h1-logo_offset, w1-logo_size_w-logo_offset:w1-logo_offset] = logo_image_resized[:, :num_channels, :, :]
+
+            # add cell number at the center of the cell
+            # invert cell number to match the order of the canvas. 1 is at the top right, and w_cells is at the top left
+            if list_of_cell_idx is not None:
+                print(f"list_of_cell_idx: {list_of_cell_idx}")
+            if list_of_cell_idx is not None:
+                cell_number = list_of_cell_idx[i*w_cells+j]
+            else:
+                cell_number = i*w_cells+(w_cells-j)
+            image_with_number = create_image_with_text(f"{cell_number}", size=number_size, debug=debug)
+            start_h_big = h0 + (h1 - h0) // 2 - number_size // 2
+            start_w_big = w0 + (w1 - w0) // 2 - number_size // 2
+            back_image[:, :, start_h_big:start_h_big+number_size, start_w_big:start_w_big+number_size] = image_with_number[:, :num_channels, :, :]
+
+            # add unique identifier
+            unique_identifier_size_w = number_size
+            unique_identifier_size_h = number_size // 4
+            image_with_unique_identifier = create_image_with_text(unique_identifier, underline=False,
+                                                                  size=(unique_identifier_size_w, unique_identifier_size_h),
+                                                                  debug=debug)
+            start_h = h0 + unique_identifier_size_h // 2  # Fix
+            start_w = w0 + unique_identifier_size_h // 2  # Fix
+            back_image[:, :, start_h:start_h+unique_identifier_size_h, start_w:start_w+unique_identifier_size_w] = image_with_unique_identifier[:, :num_channels, :, :]
+
+            start_letscopaint_h = h1-logo_offset  # Fix
+            start_letscopaint_w = w0 + unique_identifier_size_h // 16  # Fix
+            back_image[:, :, start_letscopaint_h-(number_size//8):start_letscopaint_h, start_letscopaint_w:start_letscopaint_w+(int(0.8*number_size))] = letscopaint[:, :num_channels, :, :]
+
+    if debug:
+        print(f"Adding content to cells took {time.time() - cell_content_time:.4f} seconds")
+        print(f"Created back image of shape {back_image.shape}")
+        print(f"Total back image creation took {time.time() - start_time:.4f} seconds")
+    return back_image
+
+
+def image_to_pdf_core(input_image, file_name, logo_image, outputfolder, h_cells, w_cells, unique_identifier="Mauricette", cell_size_in_cm=None, a4=False, high_res=False, list_of_cell_idx=None, scale=None, debug=False):
+    overall_start_time = time.time()
+    os.makedirs(outputfolder, exist_ok=True)
+    scale_1, scale_2, scale_3, scale_4 = None, None, None, None
+    if scale is not None:
+        scale_1, scale_2, scale_3, scale_4 = scale
+
+    # Load image
+    t1 = time.time()
+    if not isinstance(input_image, torch.Tensor):
+        image = load_image(input_image, debug=debug)
+    else:
+        image = input_image
+
+    _, c, h, w = image.shape
+    print(f"Image shape: {image.shape}")
+    logo_image = load_image(logo_image, debug=debug)
+    if debug:
+        print(f"Image loading took {time.time() - t1:.4f} seconds")
+
+    # # Quick check that the greatest dimension corresponds to the greatest number of cells
+    # if h > w and h_cells < w_cells:
+    #     print("Swapping h_cells and w_cells")
+    #     h_cells, w_cells = w_cells, h_cells
+    # elif w > h and w_cells < h_cells:
+    #     print("Swapping h_cells and w_cells")
+    #     h_cells, w_cells = w_cells, h_cells
+
+    # Create back image
+    t2 = time.time()
+    multiplier_w = max(1, 10000 // w)
+    multiplier_h = max(1, 10000 // h)
+    if scale_3 is None:
+        scale_3 = max(multiplier_w, multiplier_h)
+
+    print(f"Creating back image with {h*scale_3} x {w*scale_3} pixels for {h_cells} x {w_cells} cells")
+    back_image = create_back_image(h*scale_3, w*scale_3, h_cells, w_cells, logo_image,
+                                   unique_identifier=unique_identifier, list_of_cell_idx=list_of_cell_idx, debug=debug)
+    if debug:
+        save_image(back_image, os.path.join(outputfolder, "back_image.png"), debug=debug)
+        print(f"Back image creation and saving took {time.time() - t2:.4f} seconds")
+
+    # Save to PDF
+    t3 = time.time()
+    os.makedirs(outputfolder, exist_ok=True)
+    output_path_front = os.path.join(outputfolder, "output_front.pdf")
+    output_path_back = os.path.join(outputfolder, "output_back.pdf")
+
+    img_small_side_in_cm = None
+    if cell_size_in_cm is not None:
+        # Why Min? Cells are not necessarily square, depending on the aspect ratio of the image, and the number of H and W cells, so we assume cell_size_in_cm is the smallest side of the cell.
+        print(f"cell_size_in_cm: {cell_size_in_cm}")
+        min_cells = min(h_cells, w_cells)
+        img_small_side_in_cm = cell_size_in_cm * min_cells  # smallest side in cm.
+
+    # print image and back image shapes
+    if debug:
+        print(f"Image shape: {image.shape}")
+        print(f"Back image shape: {back_image.shape}")
+
+    # Only resize back image if not high-res
+    if not high_res:
+        back_image_h, back_image_w = back_image.shape[2:]
+        scale_h = 4096 / back_image_h
+        if scale_4 is None:
+            scale_4 = scale_h
+        back_image = torch.nn.functional.interpolate(back_image, scale_factor=scale_4, mode='bilinear')
+
+
+
+    _, scale_1 = save_tensor_to_pdf(image, output_path_front, is_front=True, img_small_side_in_cm=img_small_side_in_cm, a4=a4, high_res=high_res, scale=scale_1, debug=debug)
+    _, scale_2 = save_tensor_to_pdf(back_image, output_path_back, is_front=False, img_small_side_in_cm=img_small_side_in_cm, a4=a4, high_res=high_res, scale=scale_2, debug=debug)
+
+    scale = (scale_1, scale_2, scale_3, scale_4)
+    if debug:
+        print(f"PDF creation took {time.time() - t3:.4f} seconds")
+
+    # concatenate pdfs
+    t4 = time.time()
+    print("Concatenating PDFs")
+
+    output_path = os.path.join(outputfolder, f"{file_name}_{h_cells}x{w_cells}_copaint.pdf")
+    merge_pdf_list([output_path_front, output_path_back], output_path, debug=debug)
+    # clean unnecessary files
+    os.remove(output_path_front)
+    os.remove(output_path_back)
+    if debug:
+        print(f"PDF concatenation and cleanup took {time.time() - t4:.4f} seconds")
+
+    print(f"Total processing time: {time.time() - overall_start_time:.4f} seconds")
+    print(f"Done! Output saved to {output_path}")
+    return output_path, scale
+
+
+def image_to_pdf(input_image, logo_image, outputfolder, h_cells, w_cells, unique_identifier="Mauricette", cell_size_in_cm=None, a4=False, high_res=False, min_cell_size_in_cm=2, list_of_cell_idx=None, debug=False):
+    """
+    Create a copaint PDF from an image and a logo.
+    """
+    print(f"h_cells: {h_cells}, w_cells: {w_cells}, a4: {a4}")
+
+    image = load_image(input_image, debug=debug)
+    _, c, h, w = image.shape
+
+    file_name = os.path.basename(input_image)
+
+    # Check if the image needs to be split to fit in the page.
+
+    if cell_size_in_cm is not None:
+        min_cell_size_in_cm = cell_size_in_cm
+
+    # The US Letter format is US Letter size: 8.5 by 11 inches
+    W, H = 8.5, 11  # the unit is inch
+    if a4:
+        print("Using A4 format")
+        W, H = 8.27, 11.69  # the unit is inch
+
+    margin = 0.25  # hardcoded margin
+    page_width_in_pt = (W - 2 * margin) * inch
+    page_height_in_pt = (H - 2 * margin) * inch
+
+    max_cell_per_page_h = h_cells
+    max_cell_per_page_w = w_cells
+
+    established_cell_size = False
+    while not established_cell_size:
+        img_small_side_in_pt = min(max_cell_per_page_h, max_cell_per_page_w) * min_cell_size_in_cm * inch * 0.393701  # 1 cm = 0.393701 inches
+        minimum_is_width = min(w, h) == w
+        img_large_side_in_pt = img_small_side_in_pt * max(w, h) / min(w, h)
+        print(f"img_small_side_in_pt: {img_small_side_in_pt}, img_large_side_in_pt: {img_large_side_in_pt}")
+        print(f"page_width_in_pt: {page_width_in_pt}, page_height_in_pt: {page_height_in_pt}")
+        if img_large_side_in_pt < page_height_in_pt and img_small_side_in_pt < page_width_in_pt:
+            established_cell_size = True
+
+        else:
+            max_cell_per_page_h = max_cell_per_page_h // 2
+            max_cell_per_page_w = max_cell_per_page_w // 2
+
+            print(f"Decreasing max_cell_per_page to {max_cell_per_page_h}x{max_cell_per_page_w}")
+
+
+    divide_factor_h = int(np.ceil(h_cells / max_cell_per_page_h))
+    divide_factor_w = int(np.ceil(w_cells / max_cell_per_page_w))
+
+    print(f"divide_factor_h: {divide_factor_h}, divide_factor_w: {divide_factor_w}")
+    copaint_pdfs = []
+    scale = None
+    for i in range(divide_factor_h):
+        for j in range(divide_factor_w):
+            cell_h_start = i * max_cell_per_page_h
+            cell_h_end = min((i + 1) * max_cell_per_page_h, h_cells)
+            cell_w_start = j * max_cell_per_page_w
+            cell_w_end = min((j + 1) * max_cell_per_page_w, w_cells)
+            list_of_cell_idx = [cell_h_idx * w_cells + (w_cells-cell_w_idx) for cell_h_idx in range(cell_h_start, cell_h_end) for cell_w_idx in range(cell_w_start, cell_w_end)]
+
+            print(f"cell_h_start: {cell_h_start}, cell_h_end: {cell_h_end}, cell_w_start: {cell_w_start}, cell_w_end: {cell_w_end}")
+            h_cells_new = cell_h_end - cell_h_start
+            w_cells_new = cell_w_end - cell_w_start
+            file_name_new = f"{file_name}_{i}x{j}"
+
+            px_h_start = int(cell_h_start * h / h_cells)
+            px_h_end = int(cell_h_end * h / h_cells)
+            px_w_start = int(cell_w_start * w / w_cells)
+            px_w_end = int(cell_w_end * w / w_cells)
+
+            image_new = image[:, :, px_h_start:px_h_end, px_w_start:px_w_end]
+
+
+            pdf_path, new_scale = image_to_pdf_core(image_new, file_name_new, logo_image, outputfolder, h_cells_new, w_cells_new, unique_identifier, cell_size_in_cm, a4, high_res, list_of_cell_idx=list_of_cell_idx, scale=scale, debug=debug)
+            if scale is None:
+                scale = new_scale
+            copaint_pdfs.append(pdf_path)
+
+    # Merge the copaint PDFs
+    output_path = os.path.join(outputfolder, "copaint-design.pdf")
+    merge_pdf_list(copaint_pdfs, output_path, debug=debug)
+
+    # clean unnecessary files
+    for pdf in copaint_pdfs:
+        os.remove(pdf)
+
+    print(f"Done! Final output saved to {output_path}")
+    return output_path
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='CoPaint')
+    parser.add_argument('--input_image', type=str, default='./data/bear.png', help='input image')
+    parser.add_argument('--copaint_logo', type=str, default='./data/logo_copaint.png', help='copaint logo')
+    parser.add_argument('--outputfolder', type=str, default='output/', help='output image')
+    parser.add_argument('--h_cells', type=int, help='number of cells in height', default=9)
+    parser.add_argument('--w_cells', type=int, help='number of cells in width', default=6)
+    parser.add_argument('--debug', action='store_true', help='show timing information')
+
+    # done adding arguments
+    args = parser.parse_args()
+    image_to_pdf(args.input_image, args.copaint_logo, args.outputfolder, args.h_cells, args.w_cells, cell_size_in_cm=None, debug=args.debug)
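The programmatic entry point is `image_to_pdf`; a minimal sketch of calling it directly from Python rather than the CLI (the `data/` paths are assumptions taken from the README examples, and the chosen identifier is just one of the defaults from `cli.py`):

```python
# Sketch: generate a 3x4 Copaint PDF from Python.
from copaint.copaint import image_to_pdf

output_pdf = image_to_pdf(
    input_image="data/bear.jpg",
    logo_image="data/logo_copaint.jpg",
    outputfolder="output/",
    h_cells=3,
    w_cells=4,
    unique_identifier="sunshine",  # printed on the back of each square
)
print(output_pdf)  # output/copaint-design.pdf
```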
copaint/gradio_ui.py
ADDED
@@ -0,0 +1,262 @@
+"""Gradio-based UI for the Copaint PDF generator.
+"""
+import gradio as gr
+import shutil
+import tempfile
+
+from gradio_pdf import PDF
+from importlib.resources import files
+from pathlib import Path
+
+from copaint.copaint import image_to_pdf
+import torchvision
+import torch
+
+fromPIltoTensor = torchvision.transforms.ToTensor()
+fromTensortoPIL = torchvision.transforms.ToPILImage()
+
+
+def add_grid_to_image(image, h_cells, w_cells):
+    # image is a PIL image; it is converted to a torch tensor of shape (3, h, w) below
+    image = image.convert("RGB")
+    image = fromPIltoTensor(image)
+    grid_color = torch.tensor([16,15,46]).unsqueeze(1).unsqueeze(1) / 255.0
+    h,w = image.shape[1:]
+    thickness = max(min(1, int(min(h,w)/100)), 1)
+    print("thickness, h, w", thickness, h, w)
+    for i in range(h_cells+1):
+        idx_i = int(i*h/h_cells)
+        image[:, idx_i-thickness:idx_i+thickness, :] = grid_color
+
+    for j in range(w_cells+1):
+        idx_j = int(j*w/w_cells)
+        image[:, :, idx_j-thickness:idx_j+thickness] = grid_color
+    image = fromTensortoPIL(image)
+
+    return image
+
+
+def canvas_ratio(image):
+    w,h = image.size
+    aspect_ratio = w/h
+    if aspect_ratio > 1:
+        aspect_ratio = 1/aspect_ratio
+
+    # find nearest aspect ratio in the list of predefined ones
+    predefined_aspect_ratios = [2/3, 1/2, 1/1, 5/6, 4/5, 5/7]
+    predefined_aspect_ratios_str = ["2/3", "1/2", "1/1", "5/6", "4/5", "5/7"]
+    min_diff = float('inf')
+    closest_ratio_idx = None
+    for idx, ratio in enumerate(predefined_aspect_ratios):
+        diff = abs(aspect_ratio - ratio)
+        if diff < min_diff:
+            min_diff = diff
+            closest_ratio_idx = idx
+    closest_ratio_str = predefined_aspect_ratios_str[closest_ratio_idx]
+
+    if min_diff > 0.1:
+        return None
+    else:
+        return f"Best canvas ratio: {closest_ratio_str}"
+
+
+def add_grid_and_display_ratio(image, h_cells, w_cells):
+    if image is None:
+        return None, gr.update(visible=False)
+    return add_grid_to_image(image, h_cells, w_cells), gr.update(visible=True, value=canvas_ratio(image))
+
+
+def process_copaint(
+    input_image,
+    h_cells=None,
+    w_cells=None,
+    a4=False,
+    high_res=False,
+    cell_size_in_cm=None,
+    min_cell_size_in_cm=2,
+    copaint_name="",
+    copaint_logo=None,
+):
+    """Process the input and generate CoPaint PDF"""
+    # Create temporary directories for processing
+    temp_input_dir = tempfile.mkdtemp()
+    temp_output_dir = tempfile.mkdtemp()
+
+    try:
+        # Save uploaded images to temp directory
+        input_path = Path(temp_input_dir) / "input_image.png"
+        input_image.save(input_path)
+
+        logo_path = None
+        if copaint_logo is not None:
+            logo_path = Path(temp_input_dir) / "logo.png"
+            copaint_logo.save(logo_path)
+        else:
+            # Use default logo path from the package
+            logo_path = files("copaint.static") / "logo_copaint.png"
+
+        if copaint_name == "" or copaint_name is None:
+            from copaint.cli import default_identifier
+            copaint_name = default_identifier()
+
+        if a4 == "A4":
+            a4 = True
+        else:
+            a4 = False
+
+        # Generate the PDF
+        pdf_path = image_to_pdf(
+            input_image=str(input_path),
+            logo_image=str(logo_path),
+            outputfolder=temp_output_dir,
+            h_cells=h_cells,
+            w_cells=w_cells,
+            unique_identifier=copaint_name,
+            cell_size_in_cm=cell_size_in_cm,
+            a4=a4,
+            high_res=high_res,
+            min_cell_size_in_cm=min_cell_size_in_cm
+        )
+
+        return pdf_path, None  # Return path and no error
+    except Exception as e:
+        # Return error message
+        return None, f"Error generating PDF: {str(e)}"
+    finally:
+        # Clean up temporary input directory
+        shutil.rmtree(temp_input_dir)
+
+
+def build_gradio_ui():
+    # Create Gradio Interface
+    with gr.Blocks(title="CoPaint Generator", theme='NoCrypt/miku') as demo:
+
+        gr.Markdown("# 🤖 CoPaint Generator")
+        gr.Markdown("Upload an image with your painting design and set grid parameters to generate a CoPaint PDF template 🖨️📄✂️ for your next collaborative painting activities. 🎨🖌️")
+
+        # --- inputs ---
+        with gr.Row(equal_height=True):
+
+            # Upload Design Template
+            with gr.Column(scale=2):
+                input_image = gr.Image(type="pil", label="Upload Your Design")
+
+            with gr.Column(scale=1):
+                # Grid
+                with gr.Tab("Grid Layout"):
+                    gr.Markdown("<div style='text-align: center; font-weight: bold;'>Squares' Grid</div>")
+                    w_cells = gr.Number(label="↔ (width)", value=4, precision=0)
+                    h_cells = gr.Number(label=" by ↕ (height)", value=6, precision=0)
+
+                    gr.Examples(
+                        examples=[
+                            [6, 9],
+                            [4, 6],
+                            [3, 3],
+                            [3, 4],
+                            [2, 2]
+                        ],
+                        example_labels=[
+                            "Copaint Wedding 6x9 Grid (54 squares)",
+                            "Copaint Classic 4x6 Grid (24 squares)",
+                            "Copaint Mini 3x3 Grid (9 squares)",
+                            "Copaint Mini 3x4 Grid (12 squares)",
+                            "Copaint Mini 2x2 Grid (4 squares)"],
+                        inputs=[w_cells, h_cells],
+                    )
+
+                    # Grid + Design preview
+                    gr.Markdown("<div style='text-align: center; font-weight: bold;'>Preview</div>")
+                    output_image = gr.Image(label="Squares' Grid Preview", interactive=False)
+
+                    # canvas ratio message
+                    canvas_msg = gr.Markdown(label="Canvas Ratio", visible=False)
+
+                # PDF options
+                with gr.Tab("PDF Printing Options"):
+                    use_a4 = gr.Dropdown(choices=["US letter", "A4"], label="Paper Format", value="US letter")
+
+                    with gr.Accordion("Advanced settings (optional)", open=False):
+                        with gr.Row():
+                            with gr.Column(scale=1):
+                                high_res = gr.Checkbox(label="High Resolution Mode (>20sec long processing)")
+
+                                cell_size = gr.Number(label="Square Size, in cm (optional)",
+                                                      value="",
+                                                      info="If none is provided, the design size automatically adjusts to fit on a single page.")
+
+                                copaint_name = gr.Textbox(label="Add a Custom Design Name (optional)",
+                                                          value="",
+                                                          max_length=10,
+                                                          info="You can add a custom design name: it will appear on the back of each square, in the top left corner.")
+
+                                copaint_logo = gr.Image(type="pil",
+                                                        label="Add a Custom Logo (optional)")
+                                gr.Markdown(
+                                    "<div style='font-size: 0.85em;'>"
+                                    "You can add a custom logo: it will appear on the back of each square, in the bottom right corner."
+                                    "</div>")
+
+        # --- outputs ---
+        with gr.Row():
+            # PDF
+            with gr.Column(scale=1):
+                submit_btn = gr.Button("Generate Copaint PDF", variant="primary")
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                output_file = gr.File(label="Download PDF", visible=False, interactive=False)
+            with gr.Column(scale=1):
+                output_error_msg = gr.Textbox(label="Error Message", visible=False)
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                output_pdf = PDF(label="PDF Preview")
+
+        # Update output_image: trigger update when any input changes
+        input_image.change(
+            fn=add_grid_and_display_ratio,
+            inputs=[input_image, h_cells, w_cells],
+            outputs=[output_image, canvas_msg]
+        )
+        for component in [h_cells, w_cells]:
+            component.change(
+                fn=add_grid_to_image,
+                inputs=[input_image, h_cells, w_cells],
+                outputs=output_image
+            )
+
+        # Submit function: generate pdf
+        def on_submit(input_image, h_cells, w_cells, use_a4, high_res, cell_size, copaint_name, copaint_logo):
+            if input_image is None:
+                return None, None, gr.update(visible=True, value="Please upload an image first 👀")
+
+            if cell_size is None or cell_size == "" or cell_size == 0:
+                cell_size = None
+
+            pdf_path, error = process_copaint(
+                input_image=input_image,
+                h_cells=int(h_cells),
+                w_cells=int(w_cells),
+                a4=use_a4,
+                high_res=high_res,
+                cell_size_in_cm=cell_size if cell_size else None,
+                min_cell_size_in_cm=float(2),
+                copaint_name=copaint_name,
+                copaint_logo=copaint_logo
+            )
+
+            if error:
+                # Show error message
+                return None, None, gr.update(visible=True, value=error)
+            else:
+                # Show successful PDF
+                return pdf_path, gr.update(visible=True, value=pdf_path, interactive=False), gr.update(visible=False)
+
+        submit_btn.click(
+            on_submit,
+            inputs=[input_image, h_cells, w_cells, use_a4, high_res, cell_size, copaint_name, copaint_logo],
+            outputs=[output_pdf, output_file, output_error_msg]
+        )
+
+    return demo
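`process_copaint` wraps `image_to_pdf` with temp-directory handling and returns a `(pdf_path, error)` pair, so it can also be called outside the UI. A minimal sketch, assuming the default logo is bundled under `copaint/static/` and using a placeholder file name of my own (`my_design.png` is not part of the repo):

```python
# Sketch: call the UI's processing helper directly with a PIL image.
from PIL import Image
from copaint.gradio_ui import process_copaint

design = Image.open("my_design.png")  # placeholder input file
pdf_path, error = process_copaint(design, h_cells=6, w_cells=4, a4="US letter")
if error:
    print(error)
else:
    print("PDF written to", pdf_path)
```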
copaint/presets.yaml
ADDED
File without changes
pyproject.toml
ADDED
@@ -0,0 +1,34 @@
+[tool.poetry]
+name = "copaint"
+version = "1.0.0"
+description = "A backend to go from an image to a Copaint PDF, using torch and Pillow."
+authors = ["Thibault Groueix <thibault.groueix.2012@polytechnique.org>"]
+license = "All rights reserved"
+readme = "README.md"
+include = [
+    "copaint/static/*"
+]
+
+[tool.poetry.dependencies]
+# pip install torch torchvision reportlab PyPDF2 Pillow argparse
+python = ">=3.10,<4.0"
+Pillow = "^10.0.0"
+torch = "^2.0.0"          # If you're using PyTorch
+click = "^8.0.0"          # For the CLI
+torchvision = "^0.20.1"   # If you're using PyTorch
+reportlab = "^3.6.0"      # For PDF generation
+PyPDF2 = "^1.26.0"        # For PDF generation
+argparse = "^1.4.0"       # For the CLI
+matplotlib = "^3.9.2"
+
+[tool.poetry.scripts]
+copaint = "copaint.cli:main"
+copaint-app = "app:main"  # Launch Gradio UI
+
+[tool.poetry.group.ui.dependencies]
+gradio = "^5.23.3"
+gradio-pdf = "^0.0.22"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
requirements.txt
ADDED
@@ -0,0 +1,13 @@
+# Core dependencies
+Pillow==10.0.0
+torch==2.0.0
+torchvision==0.20.1
+click==8.0.0
+reportlab==3.6.0
+PyPDF2==1.26.0
+argparse==1.4.0
+matplotlib==3.9.2
+
+# UI dependencies (from [tool.poetry.group.ui.dependencies])
+gradio==5.23.3
+gradio-pdf==0.0.22