import json

import torch
from transformers import AutoTokenizer, AutoModel, GPT2Tokenizer

# GPT-2 tokenizer, kept under its own name so the sentence-embedding
# tokenizer loaded below does not overwrite it
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("MapleSage/gpt2-small")

# Read the training examples (one JSON object per line)
with open('train.jsonl', 'r') as file:
    data = [json.loads(line) for line in file]

# Sentence-embedding model and its matching tokenizer
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")
model = AutoModel.from_pretrained("sentence-transformers/bert-base-nli-mean-tokens")

# Compute an embedding for each input text
embeddings = {}
for item in data:
    tokens = tokenizer(item['input'], truncation=True, padding=True, return_tensors="pt")
    with torch.no_grad():
        embeddings[item['input']] = model(**tokens).pooler_output.numpy()
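
# Note: sentence-transformers models such as bert-base-nli-mean-tokens are
# designed for mean pooling over the token embeddings (weighted by the
# attention mask) rather than BERT's pooler_output. Below is a minimal sketch
# of that pooling, not part of the original script; it reuses the `tokenizer`,
# `model`, and `data` defined above, and the helper name is hypothetical.

def mean_pooled_embedding(text):
    """Mean-pool the last hidden state over non-padding tokens (sketch)."""
    tokens = tokenizer(text, truncation=True, padding=True, return_tensors="pt")
    with torch.no_grad():
        last_hidden = model(**tokens).last_hidden_state      # (1, seq_len, hidden_size)
    mask = tokens["attention_mask"].unsqueeze(-1).float()    # (1, seq_len, 1)
    summed = (last_hidden * mask).sum(dim=1)                 # sum of real-token vectors
    counts = mask.sum(dim=1).clamp(min=1e-9)                 # number of real tokens
    return (summed / counts).numpy()

# Example: mean-pooled counterpart of the embeddings dict built above
# mean_embeddings = {item['input']: mean_pooled_embedding(item['input']) for item in data}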