Spaces:
Sleeping
Sleeping
app.py
CHANGED
@@ -74,13 +74,19 @@ def summarize_text(input_text, model_label, char_limit):
|
|
 74      inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
 75      input_ids = inputs["input_ids"].to(device)
 76
 77      summary_ids = model.generate(
 78          input_ids,
 79 -         max_length=<value lost in extraction>,
 80 -         min_length=<value lost in extraction>,
 81          do_sample=False
 82      )
 83
 84      summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 85      return summary[:char_limit].strip()
 86
(NOTE: the removed max_length/min_length argument values were garbled away in this page capture and cannot be recovered here; per the replacement hunk's comment, min_length evidently exceeded max_length.)
|
 74      inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
 75      input_ids = inputs["input_ids"].to(device)
 76
 77 +     # Adjust the length constraints to make sure min_length < max_length
 78 +     max_len = 30  # Set your desired max length
 79 +     min_len = 5   # Ensure min_length is smaller than max_length
 80 +
 81 +     # Generate summary
 82      summary_ids = model.generate(
 83          input_ids,
 84 +         max_length=max_len,
 85 +         min_length=min_len,
 86          do_sample=False
 87      )
 88
 89 +     # Decode the summary
 90      summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 91      return summary[:char_limit].strip()
 92