|
'''Functions to summarize article content.''' |
|
|
|
import logging
import os
from typing import Optional

from openai import OpenAI
from upstash_redis import Redis
|
|
|
# Module-level Upstash Redis client, used as a cache for generated summaries.
# NOTE(review): the URL is hard-coded and the token is read eagerly from the
# environment — importing this module raises KeyError if UPSTASH_REDIS_KEY
# is unset. Confirm that is intended before reuse in other contexts.
REDIS = Redis(
    url='https://sensible-midge-19304.upstash.io',
    token=os.environ['UPSTASH_REDIS_KEY']
)
|
|
|
def summarize_content(title: str, content: str) -> Optional[str]:
    '''Generates summary of article content using Modal inference endpoint.

    Successful summaries are cached in Redis, keyed on a slugified version
    of the article title; a cache hit skips the model call entirely.

    Args:
        title: article title, used to build the Redis cache key
        content: string containing the text content to be summarized

    Returns:
        Summarized text as string, or None if the Modal API call failed
    '''

    logger = logging.getLogger(__name__ + '.summarize_content')
    logger.info('Summarizing extracted content')

    # Cache key: lowercased title with spaces replaced by underscores.
    cache_key = f"{title.lower().replace(' ', '_')}-summary"
    cached_summary = REDIS.get(cache_key)

    if cached_summary:
        logger.info('Got summary from Redis cache: "%s"', title)
        return cached_summary

    # Point the OpenAI client at the Modal-hosted, OpenAI-compatible endpoint.
    client = OpenAI(api_key=os.environ['MODAL_API_KEY'])
    client.base_url = (
        'https://gperdrizet--vllm-openai-compatible-summarization-serve.modal.run/v1'
    )

    # The endpoint serves a single model; use whichever one it reports.
    model_id = client.models.list().data[0].id

    messages = [
        {
            'role': 'system',
            'content': f'Summarize the following text in 50 words returning only the summary: {content}'
        }
    ]

    # Broad except is deliberate here: this is the boundary around an
    # external API call and the caller is expected to handle a None result.
    try:
        response = client.chat.completions.create(
            model=model_id,
            messages=messages,
        )

    except Exception as e:
        logger.error('Error during Modal API call: %s', e)
        return None

    summary = response.choices[0].message.content

    # Bug fix: only cache successful summaries. The original wrote None to
    # Redis on failure, poisoning the cache for subsequent calls.
    if summary is not None:
        REDIS.set(cache_key, summary)

    logger.info('Summarized: "%s"', title)
    return summary
|
|