# msxgpt-dataset / pinecone-trans.py
from transformers import AutoTokenizer, AutoModel
import torch
import json
# Initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
# Load the training data into a list called `data`
with open('train.jsonl', 'r') as file:
    data = [json.loads(line) for line in file]
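# Each line of train.jsonl is expected to be a self-contained JSON object with
# at least an "input" field, since that is the only key the loop below reads.
# An illustrative (assumed) record might look like:
#   {"input": "How do I reset my password?", "output": "Open Settings > Security and choose Reset."}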
# Process the data
embeddings = {}
for item in data:
    # Tokenize the text
    tokens = tokenizer(item['input'], truncation=True, padding=True, return_tensors="pt")
    # Generate the embeddings (no gradients needed at inference time)
    with torch.no_grad():
        embeddings[item['input']] = model(**tokens).pooler_output.detach().numpy()
# Now `embeddings` is a dictionary where keys are the original text and values are the corresponding embeddings
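
# --- Minimal sketch: pushing the embeddings to Pinecone ---
# The file name suggests these vectors are destined for a Pinecone index, but the
# script above stops at the `embeddings` dict. The sketch below assumes the
# pinecone-client v3+ package, a pre-created index named "msxgpt" (hypothetical)
# whose dimension matches the model's hidden size (768), and an API key in the
# PINECONE_API_KEY environment variable. Adjust names and keys to your setup.
import os
from pinecone import Pinecone

pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
index = pc.Index("msxgpt")  # hypothetical index name

# Pinecone expects string ids and flat lists of floats; keeping the original
# text as metadata lets it be recovered at query time.
vectors = [
    {"id": str(i), "values": vec.flatten().tolist(), "metadata": {"text": text}}
    for i, (text, vec) in enumerate(embeddings.items())
]
index.upsert(vectors=vectors)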