# TURNA-GPU / app.py
# (Hugging Face Spaces page header captured with the file; commented out so
#  the module remains valid Python.)
# merve's picture
# merve HF Staff
# Update app.py
# b58d104 verified
# raw
# history blame
# 5.56 kB
import gradio as gr
import spaces
from transformers import pipeline
import torch
# Markdown blurb rendered at the top of the demo page.
DESCRIPTION="""
This is the space for the Language Modeling Group at TABILAB in Computer Engineering of Bogazici University.
We released the first version of our Turkish language model TURNA.
This model is based on an encoder-decoder T5 architecture with 1.1B parameters.
For more details, please refer to our paper.
"""
# Canned Turkish example inputs for the demo widgets.
sentiment_example = [["Bu üründen çok memnun kaldım."]]  # "I was very pleased with this product."
# Wikipedia excerpt about the Eiffel Tower; used by the summarization/paraphrasing examples.
long_text = [["Eyfel Kulesi (Fransızca: La tour Eiffel [la tuʀ ɛˈfɛl]), Paris'teki demir kule. Kule, aynı zamanda tüm dünyada Fransa'nın sembolü halini almıştır. İsmini, inşa ettiren Fransız inşaat mühendisi Gustave Eiffel'den alır.[1] En büyük turizm cazibelerinden biri olan Eyfel Kulesi, yılda 6 milyon turist çeker. 2002 yılında toplam ziyaretçi sayısı 200 milyona ulaşmıştır."]]
ner_example = [["Benim adım Turna."]]  # "My name is Turna."
t2t_example = [["Paraphrase: Bu üründen çok memnun kaldım."]]  # instruction-prefixed text-to-text input
nli_example = [["Bunu çok beğendim. Bunu çok sevdim."]]  # two sentences to compare (NLI / similarity)
# ---------------------------------------------------------------------------
# Model pipelines. All models are loaded eagerly at import time onto GPU 0
# (device=0). The commented fragment after each pipeline is a leftover kwarg
# list from an earlier gr.Interface version of this app.
# ---------------------------------------------------------------------------
t2t_gen_model = pipeline(model="boun-tabi-LMG/TURNA", device=0)
# examples =t2t_example, title="Text-to-Text Generation", description="Please enter an instruction with a prefix to generate.")
summarization_model = pipeline(model="boun-tabi-LMG/turna_summarization_mlsum", device=0)
# examples =long_text, title="Summarization", description="TURNA fine-tuned on MLSUM. Enter a text to summarize below.")
news_sum = pipeline(model="boun-tabi-LMG/turna_summarization_tr_news", device=0)
# examples =long_text, title="News Summarization", description="TURNA fine-tuned on News summarization. Enter a news to summarize.")
paraphrasing = pipeline(model="boun-tabi-LMG/turna_paraphrasing_tatoeba", device=0)
# examples =long_text,title="Paraphrasing")
paraphrasing_sub = pipeline(model="boun-tabi-LMG/turna_paraphrasing_opensubtitles", device=0)
# examples =long_text, title="Paraphrasing on Subtitles")
ttc = pipeline(model="boun-tabi-LMG/turna_classification_ttc4900", device=0)
# examples =long_text, title="Text Categorization")
product_reviews = pipeline(model="boun-tabi-LMG/turna_classification_tr_product_reviews", device=0)
# examples=sentiment_example, title="Product Reviews Categorization")
title_gen = pipeline(model="boun-tabi-LMG/turna_title_generation_mlsum", device=0)
# examples =long_text, title="Title Generation", description="Enter a text to generate title to.")
sentiment_model = pipeline(model="boun-tabi-LMG/turna_classification_17bintweet_sentiment", device=0)
#examples=sentiment_example, title="Sentiment Analysis", description="Enter a text to generate title to.")
pos_imst = pipeline(model="boun-tabi-LMG/turna_pos_imst", device=0)
# title="Part of Speech Tagging", examples=ner_example,description="Enter a text to generate title to.")
nli_model = pipeline(model="boun-tabi-LMG/turna_nli_nli_tr", device=0)
# title="NLI",examples=nli_example, description="Enter two texts to infer entailment.")
pos_boun = pipeline(model="boun-tabi-LMG/turna_pos_boun", device=0)
# examples = ner_example, title="Part of Speech Tagging", description="Enter a text to tag parts of speech (POS).")
stsb_model = pipeline(model="boun-tabi-LMG/turna_semantic_similarity_stsb_tr", device=0)
# examples=nli_example, title="Semantic Similarity", description="Enter two texts in the input to assess semantic similarity.")
ner_model = pipeline(model="boun-tabi-LMG/turna_ner_milliyet", device=0)
# title="NER WikiANN", examples=ner_example, description="Enter a text for NER.")
ner_wikiann = pipeline(model="boun-tabi-LMG/turna_ner_wikiann", device=0)
#title="NER",examples=ner_example, description="Enter a text for NER.")
@spaces.GPU
def sentiment_analysis(input, sentiment=True):
    """Classify the sentiment of Turkish text.

    Args:
        input: Text (or list of texts) to classify. NOTE: the parameter
            shadows the builtin ``input``; kept for backward compatibility.
        sentiment: Truthy -> tweet sentiment model; falsy -> product-review
            classifier.

    Returns:
        The selected pipeline's prediction output.
    """
    # ``sentiment == True`` replaced with a plain truthiness test (PEP 8 E712).
    if sentiment:
        return sentiment_model(input)
    return product_reviews(input)
@spaces.GPU
def nli_stsb(input, nli=True):
    """Run natural-language inference or semantic similarity on two sentences.

    Args:
        input: Text containing the sentence pair to compare.
        nli: Truthy -> NLI (entailment) model; falsy -> STS-B semantic
            similarity model.

    Returns:
        The selected pipeline's prediction output.
    """
    # ``nli == True`` replaced with a plain truthiness test (PEP 8 E712).
    if nli:
        return nli_model(input)
    return stsb_model(input)
@spaces.GPU
def t2t(input):
    """Generate text with the base TURNA text-to-text model."""
    generated = t2t_gen_model(input)
    return generated
@spaces.GPU
def pos(input, boun=True):
    """Part-of-speech tag Turkish text.

    Args:
        input: Text to tag.
        boun: Truthy -> BOUN treebank POS model; falsy -> IMST treebank model.

    Returns:
        The selected pipeline's prediction output.
    """
    # ``boun == True`` replaced with a plain truthiness test (PEP 8 E712).
    if boun:
        return pos_boun(input)
    return pos_imst(input)
@spaces.GPU
def ner(input, wikiann=True):
    """Run named-entity recognition on Turkish text.

    Args:
        input: Text to analyze.
        wikiann: Truthy -> WikiANN-trained NER model; falsy -> Milliyet-trained
            NER model.

    Returns:
        The selected pipeline's prediction output.
    """
    # ``wikiann == True`` replaced with a plain truthiness test (PEP 8 E712).
    if wikiann:
        return ner_wikiann(input)
    return ner_model(input)
@spaces.GPU
def paraphrase(input, model_choice="turna_paraphrasing_tatoeba"):
    """Paraphrase Turkish text with one of two fine-tuned TURNA models.

    ``model_choice`` selects Tatoeba-tuned paraphrasing when it equals
    "turna_paraphrasing_tatoeba"; any other value selects the
    OpenSubtitles-tuned model.
    """
    selected = (
        paraphrasing
        if model_choice == "turna_paraphrasing_tatoeba"
        else paraphrasing_sub
    )
    return selected(input)
@spaces.GPU
def summarize(input, model_choice="turna_summarization_tr_news"):
    """Summarize Turkish text with one of two fine-tuned TURNA models.

    ``model_choice`` selects the TR-News model when it equals
    "turna_summarization_tr_news"; any other value selects the MLSUM model.
    """
    selected = (
        news_sum
        if model_choice == "turna_summarization_tr_news"
        else summarization_model
    )
    return selected(input)
# Build and launch the Gradio UI.
with gr.Blocks(theme="shivi/calm_seafoam") as demo:
    gr.Markdown("# TURNA 🐦")
    gr.Markdown(DESCRIPTION)
    with gr.Tab("Summarization"):
        # Typo fix: "ummarization" -> "summarization".
        gr.Markdown("TURNA fine-tuned on summarization. Enter a news to summarize and pick the model.")
        with gr.Column():
            with gr.Row():
                # Give the radio a default that matches summarize()'s default;
                # previously an untouched radio passed None, silently selecting
                # the MLSUM branch.
                sum_choice = gr.Radio(
                    choices=["turna_summarization_mlsum", "turna_summarization_tr_news"],
                    value="turna_summarization_tr_news",
                )
                sum_input = gr.Text()
            sum_output = gr.Text()
            sum_submit = gr.Button()
        sum_submit.click(summarize, inputs=[sum_input, sum_choice], outputs=sum_output)
        # Each example row must supply a value for every input component
        # (text AND model choice), otherwise cache_examples=True cannot call
        # summarize() to precompute outputs.
        examples = gr.Examples(
            examples=[[long_text[0][0], "turna_summarization_tr_news"]],
            inputs=[sum_input, sum_choice],
            outputs=sum_output,
            cache_examples=True,
            fn=summarize,
        )
demo.launch()