'''Tool functions for MCP server'''

import json
import logging
import functions.feed_extraction as extraction_funcs
import functions.summarization as summarization_funcs


def get_feed(website: str) -> str:
    '''Gets RSS feed content from a given website. Accepts a website or RSS
    feed URL directly, or the name of a website. Attempts to find the RSS
    feed and returns the title, summary and link to the full article for the
    most recent items in the feed.

    Args:
        website: URL or name of website to extract RSS feed content from

    Returns:
        JSON string containing the feed content, or 'No feed found' if an RSS
        feed for the requested website could not be found
    '''

    logger = logging.getLogger(__name__ + '.get_feed')
    logger.info('Getting feed content for: %s', website)

    # Resolve the website name or URL to an RSS feed URI
    feed_uri = extraction_funcs.find_feed_uri(website)
    logger.info('find_feed_uri() returned %s', feed_uri)

    # Bail out early if no feed could be located for the requested website
    if 'No feed found' in feed_uri:
        return 'No feed found'

    # Parse the feed and collect the most recent entries
    content = extraction_funcs.parse_feed(feed_uri)
    logger.info('parse_feed() returned %s entries', len(content))

    # Summarize each entry that has article content, then drop the raw
    # content so only title, summary and link are returned
    for i, item in content.items():

        if item['content'] is not None:
            summary = summarization_funcs.summarize_content(item['content'])
            content[i]['summary'] = summary

        content[i].pop('content', None)

    return json.dumps(content)
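

# Minimal usage sketch (an assumption, not part of the original module): it
# exercises get_feed() directly from the command line. The MCP server wiring
# that registers get_feed as a tool lives elsewhere in the project, so the
# website argument and fallback value below are purely illustrative.
if __name__ == '__main__':

    import sys

    logging.basicConfig(level=logging.INFO)

    # Hypothetical default target used only for this demonstration
    target = sys.argv[1] if len(sys.argv) > 1 else 'arstechnica.com'

    result = get_feed(target)

    if result == 'No feed found':
        print(f'No RSS feed found for {target}')
    else:
        # get_feed() returns a JSON string; pretty-print it for inspection
        print(json.dumps(json.loads(result), indent=2))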