from typing import Any, Dict, List

import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer



class EndpointHandler:
    def __init__(self, path="."):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForMaskedLM.from_pretrained(path).to(self.device)

    def __call__(self, data: Any) -> List[Dict[str, List]]:
        """
        Args:
            data (:obj:`dict`):
                Includes the input text(s) under the "inputs" key, plus any
                inference parameters.
        Return:
            A :obj:`list` with one sparse embedding per input, each a dict
            holding the non-zero vocabulary "indices" and their "values".
        """
        # accept either {"inputs": ...} or a bare string/list payload
        inputs = data.get("inputs", data)
        with torch.no_grad():
            tokens = self.tokenizer(
                inputs, padding=True, truncation=True, return_tensors='pt'
            ).to(self.device)
            outputs = self.model(**tokens)
        # SPLADE-style pooling: log-saturated ReLU of the MLM logits,
        # max-pooled over the sequence with padding positions masked out
        vecs = torch.max(
            torch.log1p(torch.relu(outputs.logits))
            * tokens.attention_mask.unsqueeze(-1),
            dim=1,
        ).values
        embeds = []
        for vec in vecs:
            # indices of the non-zero dimensions; squeeze only the last axis
            # so a single non-zero entry still yields a list, not a scalar
            cols = vec.nonzero().squeeze(-1).cpu().tolist()

            # the corresponding non-zero weights
            weights = vec[cols].cpu().tolist()
            sparse = {
                "indices": cols,
                "values": weights,
            }
            embeds.append(sparse)
        return embeds
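

# A minimal local smoke test (a sketch, not part of the Inference Endpoints
# contract). It assumes the model weights and tokenizer files sit in the
# current directory, mirroring how the endpoint runtime builds the handler.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    payload = {"inputs": ["sparse lexical retrieval", "hello world"]}
    for i, emb in enumerate(handler(payload)):
        # each embedding is a dict of non-zero vocab indices and weights
        print(f"input {i}: {len(emb['indices'])} non-zero dimensions")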