import gradio as gr
from llama_cpp import Llama

# Load the fine-tuned Gemma-2 9B marketing-email model (Q4_K_M GGUF) from the Hugging Face Hub.
llm = Llama.from_pretrained(
    repo_id="amir22010/fine_tuned_product_marketing_email_gemma_2_9b_q4_k_m",
    filename="unsloth.Q4_K_M.gguf",
    verbose=False
)

# Prompt template matching the fine-tuning format: product, description, and the email to generate.
marketing_email_prompt = """Below is a product and description, please write a marketing email for this product.

### Product:
{}

### Description:
{}

### Marketing Email:
{}"""

def generate_marketing_email(product, description):
    # Stream the chat completion so the Gradio UI can update as tokens arrive.
    output = llm.create_chat_completion(
        messages=[
            {
                "role": "system",
                "content": "Your go-to Email Marketing Guru - I'm here to help you craft compelling campaigns, boost conversions, and take your business to the next level.",
            },
            {
                "role": "user",
                "content": marketing_email_prompt.format(
                    product,      # product
                    description,  # description
                    "",           # marketing email - left blank for generation
                ),
            },
        ],
        max_tokens=8192,
        temperature=0.7,
        stream=True,
    )
    partial_message = ""
    for chunk in output:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            partial_message += delta["content"]
            yield partial_message

demo = gr.Interface(fn=generate_marketing_email, inputs=["text", "text"], outputs="text")
demo.launch()
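
# A minimal sketch for running this app locally, assuming the file is saved as app.py
# (the original does not list its dependencies). Llama.from_pretrained downloads the
# GGUF file from the Hugging Face Hub, which needs the huggingface_hub package in
# addition to gradio and llama-cpp-python:
#
#   pip install gradio llama-cpp-python huggingface_hub
#   python app.py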