import os

import gradio as gr
import openai
import requests

title = "Mariam 💎"
description = """Banana Banana? 👀 OK, OK. In short, as you can see, it's simple! No explanation needed. It's a simple script based on néoX, Python, and Gradio.

My number: +24165362371"""
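
# This script builds a two-tab Gradio demo:
#   - "OCR" (app 1): sends an uploaded image to Hive's public text_recognition demo endpoint.
#   - "MARIAM-u" (app 2): forwards a text prompt to OpenAI's Completion API (text-davinci-003).
# Dependencies (assumed, not listed in this file): gradio, openai (pre-1.0 API), requests, Pillow.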
#app 1

def infer(im):
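    """Run OCR on a PIL image via Hive's text_recognition demo endpoint and return the extracted text."""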
    im.save("converted.png")
    url = "https://ajax.thehive.ai/api/demo/classify?endpoint=text_recognition"
    files = {
        "image": ("converted.png", open("converted.png", "rb"), "image/png"),
        "model_type": (None, "detection"),
        "media_type": (None, "photo"),
    }
    headers = {"referer": "https://thehive.ai/"}

    res = requests.post(url, headers=headers, files=files)
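    # The parsing below assumes the demo endpoint returns JSON of roughly this shape (inferred from this
    # script's own usage, not from official Hive documentation):
    #   {"response": {"output": [{"block_text": str,
    #                             "bounding_poly": [{"classes": [{"class": str}], "dimensions": ...}]}]}}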

    text = ""
    blocks = []
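    # blocks gathers per-polygon text and bounding boxes, but only the concatenated text is returned below.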
    for output in res.json()["response"]["output"]:
        text += output["block_text"]
        for poly in output["bounding_poly"]:
            blocks.append({
                "text": "".join([c["class"] for c in poly["classes"]]),
                "rect": poly["dimensions"]
            })

    return text


#app 2

# Read the OpenAI API key from the environment instead of hardcoding it in the source.
openai.api_key = os.environ.get("OPENAI_API_KEY")

def gpt(prompt):
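    """Send the prompt to OpenAI's Completion endpoint (text-davinci-003) and return the generated answer."""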
    if not prompt:
        return "Please enter a question."
    f_prompt = f"""
    {prompt}. """

    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=f_prompt,
        temperature=0.9,
        max_tokens=3500,
        top_p=1)

    answer = response.choices[0].text.strip()
    print(answer)
    return answer



#interface 1
app1 = gr.Interface(fn=infer, title="Mariam - OCR", inputs=[gr.Image(type="pil")], outputs=["text"])

#interface 2
app2 = gr.Interface(fn=gpt, description=description, inputs=gr.Textbox(label="Question:", lines=8), outputs=gr.Textbox())

# gr.TabbedInterface takes a title but no description, so the description is attached to app2 above.
demo = gr.TabbedInterface([app1, app2], ["OCR", "MARIAM-u"], title=title)

demo.launch()
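# launch() serves the app locally or inside the Space; pass share=True for a temporary public link.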