Parvind committed
Commit 9e176d1 · 1 Parent(s): 19668e9
Files changed (2):
  1. dataset-card.yaml +19 -0
  2. pinecone-trans.py +7 -1
dataset-card.yaml ADDED
@@ -0,0 +1,19 @@
+ ---
+ title: MSxGPT Dataset
+ tags:
+ - language modeling
+ - question answering
+ license: CC-BY-4.0
+ size: 8.4MB
+ languages:
+ - en
+ multilinguality:
+ - monolingual
+ references:
+ - title: "ChatGPT Retrieval Plugin Repository"
+   link: "https://github.com/MapleSage/chatgpt-retrieval-plugin"
+ - title: "Pinecone Langchain Chunking Notebook"
+   link: "https://github.com/pinecone-io/examples/blob/master/generation/langchain/handbook/xx-langchain-chunking.ipynb"
+ - title: "Pinecone Prompt Engineering Notebook"
+   link: "https://github.com/pinecone-io/examples/blob/master/generation/prompt-engineering.ipynb"
+ ---
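
For context, a minimal sketch (separate from this commit) of loading the data this card describes with the Hugging Face datasets library, assuming the raw records live in the train.jsonl file that pinecone-trans.py reads:

from datasets import load_dataset

# Load the JSONL training file as a Hugging Face dataset split
# ("train.jsonl" is the file referenced by pinecone-trans.py).
dataset = load_dataset("json", data_files="train.jsonl", split="train")
print(dataset[0])  # inspect a single record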
pinecone-trans.py CHANGED
@@ -1,12 +1,18 @@
  from transformers import AutoTokenizer, AutoModel
  import torch
+ import json
+ from transformers import GPT2Tokenizer

+ tokenizer = GPT2Tokenizer.from_pretrained("MapleSage/gpt2-small")
+
+ with open('train.jsonl', 'r') as file:
+     data = [json.loads(line) for line in file]
  # Initialize the tokenizer and model
  tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
  model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")

  # Assuming your data is in a list called data
- with open("train.jsonl", "r") as file:
+ with open('train.jsonl', 'r') as file:
      data = [json.loads(line) for line in file]

  # Process the data
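
The script stops at "# Process the data". Below is a minimal sketch of what that step could look like, assuming each JSONL record carries a "text" field (a hypothetical key) and that the goal is mean-pooled sentence embeddings, which is what the bert-base-nli-mean-tokens model is intended to produce:

import json

import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")

with open("train.jsonl", "r") as file:
    data = [json.loads(line) for line in file]

texts = [record["text"] for record in data]  # "text" is an assumed field name

# Tokenize, encode, and mean-pool token embeddings over non-padding positions.
encoded = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    output = model(**encoded)

token_embeddings = output.last_hidden_state             # (batch, seq_len, hidden)
mask = encoded["attention_mask"].unsqueeze(-1).float()  # (batch, seq_len, 1)
embeddings = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1)

# The fixed-size vectors in `embeddings` would then presumably be upserted into a
# Pinecone index, which is what the file name pinecone-trans.py suggests.

Mean pooling weighted by the attention mask is the standard way to turn this model's token outputs into sentence vectors; the model name indicates it was trained with mean pooling rather than, say, the [CLS] token.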