File size: 5,970 Bytes
abdffe5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
###############################################################################################################################################################
#                                         _____  _           ___  _         ___                                
#                                        |_   _|| |_   ___  | _ )(_) __ _  / __| __  _ _  __ _  _ __  ___  _ _ 
#                                          | |  | ' \ / -_) | _ \| |/ _` | \__ \/ _|| '_|/ _` || '_ \/ -_)| '_|
#                                          |_|  |_||_|\___| |___/|_|\__, | |___/\__||_|  \__,_|| .__/\___||_|  
#                                                                   |___/                      |_|   
#
##############################################################################################################################################################
#                          _                         ______              _         _                  _______               _         _ 
#                     _   | |                       (_____ \            | |       (_)                (_______)             (_)       (_)
#     _____  _   _  _| |_ | |__    ___    ____  _    _____) )  ___    __| |  ____  _   ____   ___     _  _  _  _____   ___  _  ____   _ 
#    (____ || | | |(_   _)|  _ \  / _ \  / ___)(_)  |  __  /  / _ \  / _  | / ___)| | / _  | / _ \   | ||_|| |(____ | /___)| ||  _ \ | |
#    / ___ || |_| |  | |_ | | | || |_| || |     _   | |  \ \ | |_| |( (_| || |    | |( (_| || |_| |  | |   | |/ ___ ||___ || || | | || |
#    \_____||____/    \__)|_| |_| \___/ |_|    (_)  |_|   |_| \___/  \____||_|    |_| \___ | \___/   |_|   |_|\_____|(___/ |_||_| |_||_|
#                                                                                    (_____|                                            
###############################################################################################################################################################
#
# Last updated on: 8/15/2024
#
###############################################################################################################################################################

# ------------------------------------------------------------------------------
# IMPORTS
# ------------------------------------------------------------------------------
import gradio as gr
from bs4 import BeautifulSoup as Soup
from langchain_community.document_loaders import (AsyncHtmlLoader,
                                               NewsURLLoader, PubMedLoader,
                                               PlaywrightURLLoader,
                                               RecursiveUrlLoader,
                                               SeleniumURLLoader,
                                               UnstructuredURLLoader,
                                               WebBaseLoader)


# ------------------------------------------------------------------------------
# THE BIG SCRAPER METHOD
# ------------------------------------------------------------------------------

def extractDataFromUrls(urls: str, loader_type: str):
    """Extracts data from provided URLs using the specified loader type.

    Args:
        urls (str): Comma-separated URLs to extract data from. Whitespace
            around each URL is tolerated; empty entries (e.g. from a
            trailing comma) are ignored.
        loader_type (str): Type of loader to use for data extraction; one of
            the implemented names below. Anything else returns a
            "Not Implemented" status pair.

    Returns:
        tuple: (jsonData, data) — the documents serialized via
               ``Document.to_json()`` and the raw list of Document objects.
               Returns a pair of error/status strings if the loader type is
               unimplemented or an exception occurs.
    """
    try:
        # Strip whitespace so "a, b" and "a,b" both work, and drop empty
        # entries produced by stray/trailing commas.
        url_list = [u.strip() for u in urls.split(',') if u.strip()]

        # Instantiate the selected loader based on loader_type.
        # NOTE: RecursiveURL and PubMed only consume the FIRST URL.
        if loader_type == 'AsyncHtmlLoader':
            loader = AsyncHtmlLoader(url_list)

        elif loader_type == 'UnstructuredURL':
            loader = UnstructuredURLLoader(urls=url_list)

        elif loader_type == 'RecursiveURL':
            loader = RecursiveUrlLoader(
                url=url_list[0], max_depth=2,
                extractor=lambda x: Soup(x, "html.parser").text
            )

        elif loader_type == 'SeleniumURL':
            loader = SeleniumURLLoader(urls=url_list)

        elif loader_type == 'SeleniumURLH':
            # Headed (non-headless) browser variant, useful for debugging.
            loader = SeleniumURLLoader(urls=url_list, headless=False)

        elif loader_type == 'PlaywrightURL':
            loader = PlaywrightURLLoader(urls=url_list)

        elif loader_type == 'PubMed':
            loader = PubMedLoader(url_list[0])

        elif loader_type == 'NewsURL':
            loader = NewsURLLoader(url_list)

        elif loader_type == 'WebBaseLoader':
            loader = WebBaseLoader(url_list)

        else:
            # The dropdown offers extra choices (Scrapy, PySpider, ...) that
            # are not wired up yet.
            return "Not Implemented. Development in Progress", "Work In Progress"

        # Load documents with the selected loader, then serialize each to JSON.
        data = loader.load()
        jsonData = [item.to_json() for item in data]

        return jsonData, data

    except Exception as err:
        # Surface the error to both Gradio outputs instead of crashing the UI.
        return f"An Error Occurred. Contact Developer: {err}", "Error Occurred. Boom"


# ------------------------------------------------------------------------------
# GRADIO
# ------------------------------------------------------------------------------

# Loader options offered in the dropdown. Some entries (Scrapy, PySpider,
# Beautiful Soup) are placeholders that the scraper does not implement yet.
choices = [
    'AsyncHtmlLoader',
    'UnstructuredURL',
    'RecursiveURL',
    'PubMed',
    'WebBaseLoader',
    'Scrapy',
    'PySpider',
    'Beautiful Soup',
    'SeleniumURL',
    'SeleniumURLH',
    'PlaywrightURL',
    'NewsURL',
]

# Assemble the Gradio UI: a URL textbox and a loader dropdown feed the
# scraper function; results are shown as JSON plus a plain-text view.
url_box = gr.Textbox(label="Enter your comma separated URLs here")
loader_picker = gr.Dropdown(choices=choices, label="Pick your Loader from here")

demo = gr.Interface(
    fn=extractDataFromUrls,
    inputs=[url_box, loader_picker],
    outputs=["json", "textbox"],
    allow_flagging='never',
    theme="sudeepshouche/minimalist",
)

# Start the web app.
demo.launch()