# Gradio Space: streams a product marketing email generated by a
# fine-tuned Gemma-2-9B model served as a quantized GGUF via llama-cpp.
import gradio as gr
from llama_cpp import Llama

# Download (on first run) and load the quantized fine-tuned model from the
# Hugging Face Hub. verbose=False silences llama.cpp's load-time logging.
llm = Llama.from_pretrained(
    repo_id="amir22010/fine_tuned_product_marketing_email_gemma_2_9b_q4_k_m",
    filename="unsloth.Q4_K_M.gguf",
    verbose=False,
)
# Prompt template matching the model's fine-tuning format: the three
# placeholders are (product, description, generated email). The last slot is
# left empty at inference time so the model completes it.
marketing_email_prompt = """Below is a product and description, please write a marketing email for this product.

### Product:
{}

### Description:
{}

### Marketing Email:
{}"""
def greet(product, description):
    """Stream a generated marketing email for the given product.

    Args:
        product: Product name entered in the first textbox.
        description: Product description entered in the second textbox.

    Yields:
        The accumulated email text after each streamed token, so Gradio
        renders a progressively growing response.
    """
    output = llm.create_chat_completion(
        messages=[
            {
                "role": "system",
                "content": "Your go-to Email Marketing Guru - I'm here to help you craft compelling campaigns, boost conversions, and take your business to the next level.",
            },
            {
                "role": "user",
                "content": marketing_email_prompt.format(
                    product,
                    description,
                    "",  # generation slot — left blank for the model to fill
                ),
            },
        ],
        max_tokens=8192,
        temperature=0.7,
        stream=True,
    )
    partial_message = ""
    for chunk in output:
        delta = chunk["choices"][0]["delta"]
        # Role-only and final stop chunks carry no content; skip them so we
        # don't re-yield an unchanged message.
        content = delta.get("content")
        if content:
            partial_message += content
            yield partial_message
# Two text inputs (product, description) → one streaming text output.
# gr.Interface streams because greet is a generator function.
demo = gr.Interface(fn=greet, inputs=["text", "text"], outputs="text")
demo.launch()