File size: 1,006 Bytes
19668e9
 
9e176d1
 
19668e9
9e176d1
 
 
 
19668e9
 
 
 
 
9e176d1
19668e9
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
"""Compute a BERT embedding for every record in train.jsonl.

Reads one JSON object per line from ``train.jsonl`` (each must have an
``"input"`` key), embeds the text with a sentence-transformers BERT
checkpoint, and collects the vectors in the ``embeddings`` dict keyed by
the original text.
"""
import json

import torch
from transformers import AutoModel, AutoTokenizer
from transformers import GPT2Tokenizer  # NOTE(review): unused here — kept in case another part of the file needs it

# Initialize the tokenizer and model once.
# (The previous version also downloaded "MapleSage/gpt2-small" and then
# immediately overwrote that tokenizer — that dead load is removed.)
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model.eval()  # inference only: disable dropout etc.

# Load the data once (it was previously parsed twice).
with open('train.jsonl', 'r') as file:
    data = [json.loads(line) for line in file]

# Process the data
embeddings = {}
with torch.no_grad():  # no autograd graph needed — we only extract embeddings
    for item in data:
        # Tokenize the text
        tokens = tokenizer(item['input'], truncation=True, padding=True, return_tensors="pt")
        # Generate the embedding for this text.
        # NOTE(review): pooler_output is the CLS-token pooled vector. For a
        # "mean-tokens" sentence-transformers checkpoint the intended sentence
        # embedding is the attention-masked mean of last_hidden_state — confirm
        # before relying on these vectors for similarity search.
        embeddings[item['input']] = model(**tokens).pooler_output.detach().numpy()

# Now `embeddings` is a dictionary where keys are the original text and values are the corresponding embeddings