PetraAI committed
Commit b33ad61 · verified · 1 Parent(s): d2a3ae7

Upload 4 files

Microsoft_Learn_Scrap_with_Google_Colab.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Scrapping.md ADDED
@@ -0,0 +1,219 @@
"""# %% [markdown]
# # Web Scraping, Processing, and Embedding Project
#
# This notebook demonstrates a workflow for scraping text data from a website, splitting it into manageable chunks, creating numerical representations (embeddings) of those chunks with a sentence transformer model, and finally saving the embedded data to Google Drive.
#
# %% [markdown]
# # Install necessary libraries
# This cell installs all the required Python packages.
# %%
!pip install -q ipywidgets google-colab python-docx pypdf pandas nltk sentence-transformers torch tqdm pyarrow httpx beautifulsoup4 datasets requests

# %% [markdown]
# # Web scraping and data extraction script
# This script crawls a website starting from a seed URL and extracts the text content of each page it visits (a sketch of a more polite variant follows the cell).
#
# %%
# prompt: write a script that navigates to https://learn.microsoft.com/en-us/ and automatically scrapes and extracts the text content of every page it can reach

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def is_valid(url):
    '''Checks whether `url` is a valid URL.'''
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False

def get_all_website_links(url):
    '''Returns all URLs found on `url` that belong to the same website.'''
    urls = set()
    domain_name = urlparse(url).netloc
    try:
        soup = BeautifulSoup(requests.get(url, timeout=30).content, "html.parser")
        for a_tag in soup.find_all("a"):
            href = a_tag.attrs.get("href")
            if href == "" or href is None:
                continue
            # Resolve relative links and strip query strings / fragments
            href = urljoin(url, href)
            parsed_href = urlparse(href)
            href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
            if not is_valid(href):
                continue
            if parsed_href.netloc == domain_name:
                urls.add(href)
    except Exception as e:
        print(f"Error processing {url}: {e}")
    return urls

def scrape_page_data(url):
    '''Scrapes all text content from a given URL.'''
    try:
        response = requests.get(url, timeout=30)
        soup = BeautifulSoup(response.content, 'html.parser')
        # Extract all text from the page
        text = soup.get_text(separator='\n', strip=True)
        return text
    except Exception as e:
        print(f"Error scraping {url}: {e}")
        return None

def crawl_website(start_url, max_pages=100):
    '''Crawls a website and scrapes data from each page, up to `max_pages` pages.'''
    visited_urls = set()
    urls_to_visit = {start_url}
    scraped_data = {}

    while urls_to_visit and len(visited_urls) < max_pages:
        current_url = urls_to_visit.pop()
        if current_url in visited_urls:
            continue

        print(f"Visiting: {current_url}")
        visited_urls.add(current_url)

        # Scrape data
        data = scrape_page_data(current_url)
        if data:
            scraped_data[current_url] = data

        # Find new links
        new_links = get_all_website_links(current_url)
        for link in new_links:
            if link not in visited_urls:
                urls_to_visit.add(link)

    return scraped_data

# Start the crawling process
start_url = "https://learn.microsoft.com/en-us/"
all_scraped_data = crawl_website(start_url)

# You can now process the `all_scraped_data` dictionary.
# For example, print the number of pages scraped and the data from one page:
print(f"\nScraped data from {len(all_scraped_data)} pages.")
if all_scraped_data:
    first_url = list(all_scraped_data.keys())[0]
    print(f"\nData from the first scraped page ({first_url}):")
    # print(all_scraped_data[first_url][:500])  # Print first 500 characters
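# %% [markdown]
# The crawler above fetches pages as fast as it can and does not consult robots.txt.
# The cell below is a minimal sketch of a more polite variant, assuming the
# `scrape_page_data` and `get_all_website_links` functions defined above are in scope;
# the one-second delay and the "*" user agent are illustrative defaults, not values
# taken from the original notebook.
# %%
import time
from urllib import robotparser
from urllib.parse import urljoin

def polite_crawl_website(start_url, max_pages=100, delay_seconds=1.0):
    '''Like crawl_website above, but consults robots.txt and pauses between requests.'''
    rp = robotparser.RobotFileParser(urljoin(start_url, "/robots.txt"))
    try:
        rp.read()
    except Exception:
        rp = None  # If robots.txt cannot be fetched, skip the check

    visited_urls = set()
    urls_to_visit = {start_url}
    scraped_data = {}

    while urls_to_visit and len(visited_urls) < max_pages:
        current_url = urls_to_visit.pop()
        if current_url in visited_urls:
            continue
        visited_urls.add(current_url)

        if rp is not None and not rp.can_fetch("*", current_url):
            continue  # Page is disallowed by robots.txt

        data = scrape_page_data(current_url)
        if data:
            scraped_data[current_url] = data
        for link in get_all_website_links(current_url):
            if link not in visited_urls:
                urls_to_visit.add(link)

        time.sleep(delay_seconds)  # Rate-limit requests to the host

    return scraped_data

# Hypothetical usage: all_scraped_data = polite_crawl_website(start_url, max_pages=50)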
# %% [markdown]
# # Data processing and embedding script
# This script takes the scraped data, splits it into chunks, and creates embeddings using a sentence transformer model.
# %%
# prompt: write a script that converts and formats the scraped data into structured chunks and embeds them

import torch
from sentence_transformers import SentenceTransformer
from datasets import Dataset
from tqdm.auto import tqdm

# Check for GPU availability
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# Load a pre-trained sentence transformer model
model = SentenceTransformer('all-MiniLM-L6-v2').to(device)

def chunk_text(text, chunk_size=500, chunk_overlap=50):
    '''Splits text into word-based chunks of `chunk_size` words with `chunk_overlap` words of overlap.'''
    words = text.split()
    chunks = []
    step = max(1, chunk_size - chunk_overlap)  # Guard against a non-positive step
    i = 0
    while i < len(words):
        chunks.append(" ".join(words[i:i + chunk_size]))
        i += step
    return chunks

def process_scraped_data(scraped_data, chunk_size=500, chunk_overlap=50):
    '''
    Converts scraped data into formatted chunks.
    Returns a list of dictionaries, each containing chunk text and source URL.
    '''
    processed_chunks = []
    for url, text in tqdm(scraped_data.items(), desc="Processing scraped data"):
        if text:
            chunks = chunk_text(text, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
            for chunk in chunks:
                processed_chunks.append({
                    'text': chunk,
                    'source': url,
                })
    return processed_chunks

def embed_chunks(processed_chunks, model, batch_size=32):
    '''Embeds the text chunks using the sentence transformer model.'''
    # Extract texts for embedding
    texts_to_embed = [chunk['text'] for chunk in processed_chunks]

    # Create a Hugging Face Dataset
    dataset = Dataset.from_dict({'text': texts_to_embed})

    # Define a function to apply embeddings
    def get_embeddings(batch):
        return {'embedding': model.encode(batch['text'], convert_to_tensor=True).tolist()}

    # Apply the embedding function in batches
    dataset = dataset.map(get_embeddings, batched=True, batch_size=batch_size)

    # Update the original processed_chunks list with embeddings
    for i, item in enumerate(processed_chunks):
        item['embedding'] = dataset[i]['embedding']

    return processed_chunks

# --- Main script for processing and embedding ---

# Process the scraped data into chunks
formatted_chunks = process_scraped_data(all_scraped_data)

# Embed the chunks
embedded_data = embed_chunks(formatted_chunks, model)

# `embedded_data` is now a list of dictionaries, where each dictionary
# represents a chunk with its text, source URL, and embedding.
# You can now use this data for similarity search, indexing, etc.
# (a search sketch follows this cell).

print(f"\nCreated {len(embedded_data)} embedded chunks.")
if embedded_data:
    print("\nExample of an embedded chunk:")
    print(embedded_data[0]['text'][:200])
    print(f"Embedding length: {len(embedded_data[0]['embedding'])}")
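# %% [markdown]
# A minimal sketch of semantic search over the embedded chunks, assuming `model` and
# `embedded_data` from the cells above are in scope. `util.cos_sim` comes from the
# sentence-transformers package installed earlier; the query string is only an example.
# %%
import torch
from sentence_transformers import util

def search_chunks(query, embedded_data, model, top_k=3):
    '''Returns the top_k chunks most similar to `query` by cosine similarity.'''
    corpus_embeddings = torch.tensor([chunk['embedding'] for chunk in embedded_data])
    query_embedding = model.encode(query, convert_to_tensor=True)
    scores = util.cos_sim(query_embedding.cpu(), corpus_embeddings)[0]
    top_indices = torch.topk(scores, k=min(top_k, len(embedded_data))).indices.tolist()
    return [(float(scores[i]), embedded_data[i]['source'], embedded_data[i]['text'][:200])
            for i in top_indices]

# Hypothetical example query; any text works here
for score, source, preview in search_chunks("How do I create an Azure virtual machine?", embedded_data, model):
    print(f"{score:.3f}  {source}\n    {preview}\n")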
# %% [markdown]
# # Save the embedded dataset to Google Drive
# This script saves the processed and embedded data to a JSON file in the "Output" folder of your Google Drive (a more compact Parquet variant is sketched after this cell).
#
# %%
# prompt: write a script to save the converted, formatted, embedded dataset to the "Output" folder on My Drive

import json
import os
from google.colab import drive

# Mount Google Drive
drive.mount('/content/drive')

# Define the output file path
output_file_path = '/content/drive/My Drive/Output/embedded_dataset.json'

# Ensure the output directory exists
output_dir = os.path.dirname(output_file_path)
os.makedirs(output_dir, exist_ok=True)

# Save the embedded data to a JSON file
with open(output_file_path, 'w') as f:
    json.dump(embedded_data, f, indent=2)

print(f"\nSaved embedded dataset to: {output_file_path}")
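# %% [markdown]
# JSON stores every embedding value as text, so the file grows quickly. As a sketch of a
# more compact alternative, the same `embedded_data` list can be written to Parquet with
# pandas and pyarrow (both installed in the first cell); the path below simply mirrors the
# JSON path and is only an example.
# %%
import pandas as pd

parquet_path = '/content/drive/My Drive/Output/embedded_dataset.parquet'
df = pd.DataFrame(embedded_data)          # columns: text, source, embedding
df.to_parquet(parquet_path, index=False)  # pyarrow serializes the list-valued embedding column
print(f"Saved Parquet copy to: {parquet_path} ({len(df)} rows)")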
"""
embedded_dataset.json ADDED
The diff for this file is too large to render. See raw diff
 
microsoft_learn_scrap_with_google_colab.py ADDED
@@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-
"""Microsoft Learn Scrap with Google Colab.py

# Web Scraping, Processing, and Embedding

## Install necessary libraries
"""

## pip install -q ipywidgets google-colab python-docx pypdf pandas nltk sentence-transformers torch tqdm pyarrow httpx beautifulsoup4 datasets requests

"""## Web scraping and data extraction script
This script crawls a website starting from a seed URL and extracts the text content of each page it visits.
"""

# This script navigates to https://learn.microsoft.com/en-us/ and automatically scrapes and extracts the text content of every page it can reach

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def is_valid(url):
    """Checks whether `url` is a valid URL."""
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False

def get_all_website_links(url):
    """Returns all URLs found on `url` that belong to the same website."""
    urls = set()
    domain_name = urlparse(url).netloc
    try:
        soup = BeautifulSoup(requests.get(url, timeout=30).content, "html.parser")
        for a_tag in soup.find_all("a"):
            href = a_tag.attrs.get("href")
            if href == "" or href is None:
                continue
            # Resolve relative links and strip query strings / fragments
            href = urljoin(url, href)
            parsed_href = urlparse(href)
            href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
            if not is_valid(href):
                continue
            if parsed_href.netloc == domain_name:
                urls.add(href)
    except Exception as e:
        print(f"Error processing {url}: {e}")
    return urls

def scrape_page_data(url):
    """Scrapes all text content from a given URL."""
    try:
        response = requests.get(url, timeout=30)
        soup = BeautifulSoup(response.content, 'html.parser')
        # Extract all text from the page
        text = soup.get_text(separator='\n', strip=True)
        return text
    except Exception as e:
        print(f"Error scraping {url}: {e}")
        return None

def crawl_website(start_url, max_pages=100):
    """Crawls a website and scrapes data from each page, up to `max_pages` pages."""
    visited_urls = set()
    urls_to_visit = {start_url}
    scraped_data = {}

    while urls_to_visit and len(visited_urls) < max_pages:
        current_url = urls_to_visit.pop()
        if current_url in visited_urls:
            continue

        print(f"Visiting: {current_url}")
        visited_urls.add(current_url)

        # Scrape data
        data = scrape_page_data(current_url)
        if data:
            scraped_data[current_url] = data

        # Find new links
        new_links = get_all_website_links(current_url)
        for link in new_links:
            if link not in visited_urls:
                urls_to_visit.add(link)

    return scraped_data

# Start the crawling process
start_url = "https://learn.microsoft.com/en-us/"
all_scraped_data = crawl_website(start_url)

# You can now process the `all_scraped_data` dictionary.
# For example, print the number of pages scraped and the data from one page:
print(f"\nScraped data from {len(all_scraped_data)} pages.")
if all_scraped_data:
    first_url = list(all_scraped_data.keys())[0]
    print(f"\nData from the first scraped page ({first_url}):")
    # print(all_scraped_data[first_url][:500])  # Print first 500 characters

"""## Data processing and embedding script
This script takes the scraped data, splits it into chunks, and creates embeddings using a sentence transformer model.
"""

# This script converts and formats the scraped data into structured chunks and embeds them

import torch
from sentence_transformers import SentenceTransformer
from datasets import Dataset
from tqdm.auto import tqdm

# Check for GPU availability
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# Load a pre-trained sentence transformer model
model = SentenceTransformer('all-MiniLM-L6-v2').to(device)

def chunk_text(text, chunk_size=500, chunk_overlap=50):
    """Splits text into word-based chunks of `chunk_size` words with `chunk_overlap` words of overlap."""
    words = text.split()
    chunks = []
    step = max(1, chunk_size - chunk_overlap)  # Guard against a non-positive step
    i = 0
    while i < len(words):
        chunks.append(" ".join(words[i:i + chunk_size]))
        i += step
    return chunks

def process_scraped_data(scraped_data, chunk_size=500, chunk_overlap=50):
    """
    Converts scraped data into formatted chunks.
    Returns a list of dictionaries, each containing chunk text and source URL.
    """
    processed_chunks = []
    for url, text in tqdm(scraped_data.items(), desc="Processing scraped data"):
        if text:
            chunks = chunk_text(text, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
            for chunk in chunks:
                processed_chunks.append({
                    'text': chunk,
                    'source': url,
                })
    return processed_chunks

def embed_chunks(processed_chunks, model, batch_size=32):
    """Embeds the text chunks using the sentence transformer model."""
    # Extract texts for embedding
    texts_to_embed = [chunk['text'] for chunk in processed_chunks]

    # Create a Hugging Face Dataset
    dataset = Dataset.from_dict({'text': texts_to_embed})

    # Define a function to apply embeddings
    def get_embeddings(batch):
        return {'embedding': model.encode(batch['text'], convert_to_tensor=True).tolist()}

    # Apply the embedding function in batches
    dataset = dataset.map(get_embeddings, batched=True, batch_size=batch_size)

    # Update the original processed_chunks list with embeddings
    for i, item in enumerate(processed_chunks):
        item['embedding'] = dataset[i]['embedding']

    return processed_chunks

# --- Main script for processing and embedding ---

# Process the scraped data into chunks
formatted_chunks = process_scraped_data(all_scraped_data)

# Embed the chunks
embedded_data = embed_chunks(formatted_chunks, model)

# `embedded_data` is now a list of dictionaries, where each dictionary
# represents a chunk with its text, source URL, and embedding.
# You can now use this data for similarity search, indexing, etc.

print(f"\nCreated {len(embedded_data)} embedded chunks.")
if embedded_data:
    print("\nExample of an embedded chunk:")
    print(embedded_data[0]['text'][:200])
    print(f"Embedding length: {len(embedded_data[0]['embedding'])}")

"""## Save the embedded dataset to Google Drive
This script saves the processed and embedded data to a JSON file in the "Output" folder of your Google Drive.
"""

# This script saves the converted, formatted, embedded dataset to the "Output" folder on My Drive

import json
import os
from google.colab import drive

# Mount Google Drive
drive.mount('/content/drive')

# Define the output file path
output_file_path = '/content/drive/My Drive/Output/embedded_dataset.json'

# Ensure the output directory exists
output_dir = os.path.dirname(output_file_path)
os.makedirs(output_dir, exist_ok=True)

# Save the embedded data to a JSON file
with open(output_file_path, 'w') as f:
    json.dump(embedded_data, f, indent=2)

print(f"\nSaved embedded dataset to: {output_file_path}")
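"""## Reload the saved dataset in a later session
A minimal sketch, assuming Drive is mounted at the same path and the JSON file written
above exists; it reloads the chunks and stacks the embeddings into a NumPy matrix for reuse.
"""

import json
import numpy as np
from google.colab import drive

drive.mount('/content/drive')

with open('/content/drive/My Drive/Output/embedded_dataset.json') as f:
    reloaded_data = json.load(f)

# Stack the stored embedding lists into a single (num_chunks, dim) matrix
embedding_matrix = np.array([chunk['embedding'] for chunk in reloaded_data], dtype=np.float32)
print(f"Reloaded {len(reloaded_data)} chunks; embedding matrix shape: {embedding_matrix.shape}")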