darkc0de committed on
Commit c5288b3 · verified · 1 Parent(s): 0475481

Upload app (28).py

Files changed (1)
  1. app (28).py +157 -0
app (28).py ADDED
@@ -0,0 +1,157 @@
+import gradio as gr
+from openai import OpenAI
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+SYSTEM_PROMPT = os.getenv("XTRNPMT")
+
+API_BASE_URL = "https://api.featherless.ai/v1"
+
+FEATHERLESS_API_KEY = os.getenv("FEATHERLESS_API_KEY")
+
+FEATHERLESS_MODEL = "darkc0de/XortronCriminalComputingConfig"
+
+if not FEATHERLESS_API_KEY:
+    print("WARNING: FEATHERLESS_API_KEY environment variable is not set.")
+
+try:
+    if not FEATHERLESS_API_KEY:
+        raise ValueError("FEATHERLESS_API_KEY is not set. Please set it as an environment variable or a secret in your deployment environment.")
+
+    client = OpenAI(
+        base_url=API_BASE_URL,
+        api_key=FEATHERLESS_API_KEY
+    )
+    print(f"OpenAI client initialized with base_url: {API_BASE_URL} for Featherless AI, model: {FEATHERLESS_MODEL}")
+
+except Exception as e:
+    print(f"Error initializing OpenAI client with base_url '{API_BASE_URL}': {e}")
+    raise RuntimeError(
+        "Could not initialize OpenAI client. "
+        f"Please check the API base URL ('{API_BASE_URL}'), your Featherless AI API key, model ID, "
+        f"and ensure the server is accessible. Original error: {e}"
+    )
+
+
+def respond(message, history):
+    """
+    This function processes the user's message and the chat history to generate a response
+    from the language model using the Featherless AI API (compatible with OpenAI's API),
+    including a static system prompt.
+
+    Args:
+        message (str): The latest message from the user.
+        history (list of lists): A list where each inner list contains a pair of
+                                 [user_message, ai_message].
+
+    Yields:
+        str: The response generated so far, growing token by token (for streaming).
+    """
+    messages = [{"role": "system", "content": SYSTEM_PROMPT}] if SYSTEM_PROMPT else []  # omit the system turn if XTRNPMT is unset
+
+    for user_message, ai_message in history:
+        if user_message:
+            messages.append({"role": "user", "content": user_message})
+        if ai_message:
+            messages.append({"role": "assistant", "content": ai_message})
+
+    messages.append({"role": "user", "content": message})
+
+    response_text = ""
+
+    try:
+        stream = client.chat.completions.create(
+            messages=messages,
+            model=FEATHERLESS_MODEL,
+            stream=True,
+        )
+
+        for chunk in stream:
+            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
+                token = chunk.choices[0].delta.content
+                response_text += token
+                yield response_text
+            elif chunk.choices and getattr(chunk.choices[0], "message", None) and chunk.choices[0].message.content is not None:  # defensive fallback: streaming chunks may lack .message
+                token = chunk.choices[0].message.content
+                response_text += token
+                yield response_text
+
+    except Exception as e:
+        error_message = f"An error occurred during model inference with Featherless AI: {e}"
+        print(error_message)
+        yield error_message
+
+
+kofi_script = """
+<script src='https://storage.ko-fi.com/cdn/scripts/overlay-widget.js'></script>
+<script>
+  kofiWidgetOverlay.draw('xortron', {
+    'type': 'floating-chat',
+    'floating-chat.donateButton.text': 'Support me',
+    'floating-chat.donateButton.background-color': '#794bc4',
+    'floating-chat.donateButton.text-color': '#fff'
+  });
+</script>
+"""
+
+kofi_button_html = """
+<div style="text-align: center; padding: 20px;">
+    <a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'>
+        <img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi5.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' />
+    </a>
+</div>
+"""
+
+donation_solicitation_html = """
+<div style="text-align: center; font-size: x-small; margin-bottom: 5px;">
+    The Cybernetic Criminal Computing Corporation presents: XORTRON 4.2, free of charge, unlimited, no login, no signup, no bullshit. Finding yourself in a long queue? Consider donating so we can scale and remain 100% free for all. I'm sure even a low-life deadbeat freeloader like yourself can at least throw some spare change, right? - Support Xortron @ ko-fi.com/xortron<br>
+
+</div>
+"""
+
+custom_css = """
+@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
+body, .gradio-container {
+    font-family: 'Orbitron', sans-serif !important;
+}
+.gr-button { font-family: 'Orbitron', sans-serif !important; }
+.gr-input { font-family: 'Orbitron', sans-serif !important; }
+.gr-label { font-family: 'Orbitron', sans-serif !important; }
+.gr-chatbot .message { font-family: 'Orbitron', sans-serif !important; }
+"""
+
+with gr.Blocks(theme="Nymbo/Nymbo_Theme", head=kofi_script, css=custom_css) as demo:
+    # Added the header image using gr.HTML
+    gr.HTML('<img src="https://cdn-uploads.huggingface.co/production/uploads/6540a02d1389943fef4d2640/TVPNkZCjnaOwfzD4Ze9tj.png" alt="Header Image" style="display: block; margin-left: auto; margin-right: auto; max-width: 22%; height: auto;">')
+
+    gr.ChatInterface(
+        fn=respond,  # The function to call when a message is sent
+        chatbot=gr.Chatbot(  # Configure the chatbot display area
+            height=750,  # Set the height of the chat history display to 750px
+            label="XORTRON - Criminal Computing"  # Set the label
+        )
+    )
+
+    gr.HTML(donation_solicitation_html)
+    gr.HTML(kofi_button_html)
+
+
+if __name__ == "__main__":
+    if not FEATHERLESS_API_KEY:
+        print("\nCRITICAL ERROR: FEATHERLESS_API_KEY is not set.")
+        print("Please ensure it's set as a secret in your Hugging Face Space settings or as an environment variable.\n")
+
+    try:
+        demo.queue(default_concurrency_limit=4)
+
+        demo.launch(show_api=False, share=False)
+    except NameError as ne:
+        print(f"Gradio demo could not be launched. 'demo' may not have been defined if client initialization failed earlier: {ne}")
+    except RuntimeError as re:
+        print(f"Gradio demo could not be launched due to an error during client initialization: {re}")
+    except Exception as e:
+        print(f"An unexpected error occurred when trying to launch Gradio demo: {e}")
+
+
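
For context, here is a minimal sketch of how the streaming respond() generator could be exercised outside Gradio. It assumes the file above is saved as app.py (the usual Space entry point) and that FEATHERLESS_API_KEY, plus optionally the XTRNPMT system prompt, are available in the environment or a local .env file; the sample history turn is purely illustrative.

# sketch_local_check.py -- hypothetical local smoke test, not part of the uploaded app
from app import respond  # assumes the file above is saved as app.py

# One prior exchange in the [user_message, ai_message] format expected by respond()
history = [["Hello", "Hi there. What do you need?"]]

final_text = ""
for partial in respond("Which model are you running?", history):
    final_text = partial  # each yield is the accumulated reply so far, not a single token

print(final_text)

Because each yield carries the full accumulated reply rather than an individual token, gr.ChatInterface simply re-renders the growing message on every update, which is what produces the streaming effect in the UI.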