Parvind committed
Commit · 19668e9
Parent(s): none (initial commit)
Initial commit

Browse files:
- README.md +37 -0
- pincone-upload.py +23 -0
- pinecone-trans.py +20 -0
- train.jsonl +0 -0
README.md
ADDED
@@ -0,0 +1,37 @@
# msxgpt

## Description

This dataset, "msxgpt," is designed for fine-tuning GPT-3.5-turbo or GPT-4 based language models on a downstream task. The data consists of JSON lines, each representing an individual training example.

The dataset was created with an emphasis on encoding, which is pivotal to the functionality of Memory Features, Security, and API Endpoints. It is meant to support the continuous processing and storage of documents from various data sources, using incoming webhooks to the upsert and delete endpoints.

Potential applications of this dataset range from natural language understanding tasks to more specialized uses. For instance, tools like Zapier or Make can configure the webhooks to fire on events or on a schedule, enabling sophisticated automation and workflow capabilities, as sketched below.
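To make the webhook-driven flow concrete, here is a minimal sketch of posting a document to an upsert endpoint. The endpoint URL, token, and payload shape are all assumptions for illustration; adapt them to the actual retrieval service:

```python
import requests

# Hypothetical endpoint and credentials -- replace with your deployment's values
UPSERT_URL = "https://your-retrieval-service.example.com/upsert"
API_TOKEN = "YOUR_BEARER_TOKEN"

# Hypothetical payload: one document to insert or update in the vector store
payload = {"documents": [{"id": "doc-1", "text": "Example document to index."}]}

resp = requests.post(
    UPSERT_URL,
    json=payload,
    headers={"Authorization": f"Bearer {API_TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
```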
## Data Structure

Each line in the `train.jsonl` file is a JSON object with the following structure:
```json
{
  "input": "string",
  "target": "string"
}
```
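An illustrative line (the values here are hypothetical, shown only to make the format concrete):

```json
{"input": "How are documents kept up to date?", "target": "Incoming webhooks call the upsert and delete endpoints whenever a source changes."}
```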
## Usage
```python
import json

# Iterate over the training examples, one JSON object per line
with open('train.jsonl', 'r') as f:
    for line in f:
        obj = json.loads(line)
        print(obj['input'], obj['target'])
```
## License
This dataset is made available under the Creative Commons CC0 1.0 Universal (CC0 1.0) Public Domain Dedication. You can copy, modify, distribute, and perform the work, even for commercial purposes, all without asking permission.

In practical terms, anyone can use the dataset for any purpose without needing to ask for permission, which maximizes the dataset's usability.
pincone-upload.py
ADDED
@@ -0,0 +1,23 @@
import os
import pickle

import pinecone

# Load embeddings: a dict mapping input text -> embedding array,
# produced by pinecone-trans.py
with open('embeddings.pkl', 'rb') as f:
    embeddings = pickle.load(f)

# Initialize Pinecone; read the API key from the environment instead of
# hardcoding it. The environment name is a placeholder -- use your project's.
pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment="us-west1-gcp")
index_name = "msxgpt"

# Delete the index if it already exists
if index_name in pinecone.list_indexes():
    pinecone.delete_index(index_name)

# Create the index; the dimension must match the embeddings
# (768 for BERT-base models)
pinecone.create_index(index_name, dimension=768, metric="cosine", shards=1)

# Upsert data to Pinecone as (id, vector) pairs, using the input text as the
# ID for simplicity and converting each numpy array to a flat list of floats
index = pinecone.Index(index_name)
index.upsert(vectors=[(text, vec.flatten().tolist()) for text, vec in embeddings.items()])
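As a quick sanity check after the upload, one can query the index; a minimal sketch, reusing `index` and `embeddings` from the script above and assuming the pinecone-client 2.x query API:

```python
# Query the index with one of the uploaded vectors; the top match
# should be the vector's own ID with a near-perfect score.
sample_text, sample_vec = next(iter(embeddings.items()))
result = index.query(vector=sample_vec.flatten().tolist(), top_k=3)
for match in result.matches:
    print(match.id, match.score)
```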
pinecone-trans.py
ADDED
@@ -0,0 +1,20 @@
import json
import pickle

import torch
from transformers import AutoTokenizer, AutoModel

# Initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model.eval()

# Load the training data, one JSON object per line
with open("train.jsonl", "r") as file:
    data = [json.loads(line) for line in file]

# Process the data
embeddings = {}
with torch.no_grad():
    for item in data:
        # Tokenize the text
        tokens = tokenizer(item['input'], truncation=True, padding=True, return_tensors="pt")
        # Generate the embedding (BERT pooler output, shape (1, 768))
        embeddings[item['input']] = model(**tokens).pooler_output.numpy()

# `embeddings` maps each original input text to its embedding; save it so
# pincone-upload.py can load it from embeddings.pkl
with open('embeddings.pkl', 'wb') as f:
    pickle.dump(embeddings, f)
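One caveat: "mean-tokens" sentence-transformers checkpoints were trained for mean pooling over token embeddings, so the BERT pooler output above may give weaker sentence vectors. A sketch of the mean-pooling alternative, reusing `tokenizer` and `model` from the script above:

```python
# Mean pooling: average the token embeddings, weighted by the attention mask,
# which matches how "mean-tokens" sentence-transformers models were trained.
def mean_pool(text: str):
    tokens = tokenizer(text, truncation=True, padding=True, return_tensors="pt")
    with torch.no_grad():
        token_embeddings = model(**tokens).last_hidden_state  # (1, seq_len, 768)
    mask = tokens["attention_mask"].unsqueeze(-1).float()     # (1, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return (summed / counts).numpy()                          # (1, 768)
```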
train.jsonl
ADDED
The diff for this file is too large to render.