Lord-Raven committed on
Commit
ed1be57
·
1 Parent(s): 8e36dd1

Trying ONNX model on CPU.

Browse files
Files changed (2) hide show
  1. app.py +6 -2
  2. requirements.txt +1 -1
app.py CHANGED
@@ -2,12 +2,12 @@ import spaces
2
  import torch
3
  import gradio
4
  import json
5
- import onnxruntime
6
  import time
7
  from datetime import datetime
8
  from transformers import pipeline
9
  from fastapi import FastAPI
10
  from fastapi.middleware.cors import CORSMiddleware
 
11
 
12
  # CORS Config - This isn't actually working; instead, I am taking a gross approach to origin whitelisting within the service.
13
  app = FastAPI()
@@ -27,8 +27,12 @@ print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
27
 
28
  model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
29
  tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
 
30
 
31
- classifier_cpu = pipeline(task="zero-shot-classification", model=model_name, tokenizer=tokenizer_name)
 
 
 
32
  classifier_gpu = pipeline(task="zero-shot-classification", model=model_name, tokenizer=tokenizer_name, device="cuda:0")
33
 
34
  def classify(data_string, request: gradio.Request):
 
2
  import torch
3
  import gradio
4
  import json
 
5
  import time
6
  from datetime import datetime
7
  from transformers import pipeline
8
  from fastapi import FastAPI
9
  from fastapi.middleware.cors import CORSMiddleware
10
+ from optimum.onnxruntime import ORTModelForSequenceClassification
11
 
12
  # CORS Config - This isn't actually working; instead, I am taking a gross approach to origin whitelisting within the service.
13
  app = FastAPI()
 
27
 
28
  model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
29
  tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
30
+ file_name = "onnx/model.onnx"
31
 
32
+ model_cpu = ORTModelForSequenceClassification.from_pretrained(model_id=model_name, file_name=file_name)
33
+ tokenizer_cpu = AutoTokenizer.from_pretrained(model_name)
34
+
35
+ classifier_cpu = pipeline(task="zero-shot-classification", model=model_cpu, tokenizer=tokenizer_cpu)
36
  classifier_gpu = pipeline(task="zero-shot-classification", model=model_name, tokenizer=tokenizer_name, device="cuda:0")
37
 
38
  def classify(data_string, request: gradio.Request):
requirements.txt CHANGED
@@ -4,5 +4,5 @@ huggingface_hub==0.26.0
4
  json5==0.9.25
5
  numpy
6
  uvicorn
7
- optimum[onnxruntime-gpu]==1.24.0
8
  transformers==4.36
 
4
  json5==0.9.25
5
  numpy
6
  uvicorn
7
+ optimum[exporters,onnxruntime,onnxruntime-gpu]==1.24.0
8
  transformers==4.36