AC-Angelo93 committed
Commit 49bf197 · verified · 1 parent: b4b14b0

Update app.py

Files changed (1): app.py (+77 -71)
app.py CHANGED
@@ -10,16 +10,17 @@ HF_TOKEN = os.getenv("HF_TOKEN") # For Hugging Face Spaces, set this as a Secre
 
 # Global variable to store the pipeline
 text_generator_pipeline = None
-model_load_error = None
+model_load_error = None  # To store any error message during model loading
 
 # --- Hugging Face Login and Model Loading ---
 def load_model_and_pipeline():
     global text_generator_pipeline, model_load_error
     if text_generator_pipeline is not None:
+        print("Model already loaded.")
         return True  # Already loaded
 
     if not HF_TOKEN:
-        model_load_error = "Hugging Face token (HF_TOKEN) not found in Space secrets. Please add it."
+        model_load_error = "Hugging Face token (HF_TOKEN) not found in Space secrets. Please add it and restart the Space."
         print(f"ERROR: {model_load_error}")
         return False
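A minimal sketch of the token check and login pattern this function depends on, assuming `huggingface_hub` is installed; the app's own login call is outside the visible diff (only its "Login successful." print appears in the next hunk):

```python
import os
from huggingface_hub import login

HF_TOKEN = os.getenv("HF_TOKEN")  # set as a Secret in the Space settings
if HF_TOKEN:
    login(token=HF_TOKEN)  # authenticates downloads of gated/private model repos
else:
    print("ERROR: HF_TOKEN not set; gated model downloads will fail.")
```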
 
@@ -29,30 +30,31 @@ def load_model_and_pipeline():
         print("Login successful.")
 
         print(f"Loading tokenizer for {MODEL_ID}...")
-        # trust_remote_code is necessary for some models that define custom architectures/code
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
+        tokenizer = AutoTokenizer.from_pretrained(
+            MODEL_ID,
+            trust_remote_code=True,
+            use_fast=False  # As recommended by the model card
+        )
+        # Llama models often don't have a pad token set by default
+        if tokenizer.pad_token is None:
+            tokenizer.pad_token = tokenizer.eos_token
         print("Tokenizer loaded.")
 
         print(f"Loading model {MODEL_ID}...")
         # For large models, specify dtype and device_map
-        # device_map="auto" will try to use GPU if available, otherwise CPU
-        # torch_dtype="auto" or torch.bfloat16 (if supported by hardware) can save memory
-        # On CPU Spaces (free tier), this will be VERY slow or might OOM.
-        # You might need to use quantization (e.g., bitsandbytes) for CPU, but that's more complex.
         model = AutoModelForCausalLM.from_pretrained(
             MODEL_ID,
             trust_remote_code=True,
-            torch_dtype="auto",  # or torch.bfloat16 if on A10G or similar
-            device_map="auto"  # "auto" is good for single/multi GPU or CPU fallback
+            torch_dtype=torch.bfloat16,  # Use bfloat16 for better performance and memory if supported
+            device_map="auto"  # Automatically distribute model across available GPUs/CPU
         )
         print("Model loaded.")
 
-        # MIIA is an instruct/chat model, so text-generation is the appropriate task
         text_generator_pipeline = pipeline(
             "text-generation",
             model=model,
             tokenizer=tokenizer,
-            # device=0 if torch.cuda.is_available() else -1  # device_map handles this
+            # device_map="auto" handles device placement, so no need for device=0 here
         )
         print("Text generation pipeline created successfully.")
         model_load_error = None
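For readers lifting this loading pattern out of the Space, here is a minimal, self-contained sketch of what the new code does. The model ID is a placeholder (the Space's real `MODEL_ID` is defined earlier in app.py, outside this diff), and it assumes `torch`, `transformers`, and `accelerate` are installed, since `device_map="auto"` relies on accelerate:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

MODEL_ID = "some-org/some-7b-instruct-model"  # placeholder, not the Space's actual model

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, use_fast=False)
if tokenizer.pad_token is None:
    # Llama-style tokenizers often ship without a pad token; reuse EOS so padding works
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # roughly halves memory vs. float32 on bf16-capable hardware
    device_map="auto",           # spreads layers across available GPU(s), with CPU fallback
)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
```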
@@ -71,21 +73,17 @@ def analyze_text(text_input, file_upload, custom_instruction, max_new_tokens, te
         if model_load_error:
             return f"Model not loaded. Error: {model_load_error}"
         else:
-            return "Model is not loaded. Please ensure HF_TOKEN is set and the Space has enough resources."
+            return "Model is not loaded or still loading. Please check Space logs for errors (especially OOM) and ensure HF_TOKEN is set and you've accepted model terms. If on CPU, it may take a very long time or fail due to memory."
 
     content_to_analyze = ""
     if file_upload is not None:
         try:
-            # file_upload is a TemporaryFileWrapper object, .name gives the path
             with open(file_upload.name, 'r', encoding='utf-8') as f:
                 content_to_analyze = f.read()
-            if not content_to_analyze.strip() and not text_input.strip():  # if file is empty and no text input
+            if not content_to_analyze.strip() and not text_input.strip():
                 return "Uploaded file is empty and no direct text input provided. Please provide some text."
-            elif not content_to_analyze.strip() and text_input.strip():  # if file empty but text input has content
+            elif not content_to_analyze.strip() and text_input.strip():
                 content_to_analyze = text_input
-            # If file has content, it will be used. If user also typed, file content takes precedence.
-            # We could add logic to concatenate or choose, but this is simpler.
-
         except Exception as e:
             return f"Error reading uploaded file: {str(e)}"
     elif text_input:
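The file-versus-textbox precedence above can be read as a small standalone helper; a sketch, assuming the `gr.File` value exposes its temp-file path via `.name` as it does in this app:

```python
def read_uploaded_text(file_obj, fallback_text: str) -> str:
    """Return the uploaded file's text, falling back to the typed text when the file is empty."""
    if file_obj is None:
        return fallback_text
    with open(file_obj.name, "r", encoding="utf-8") as f:  # .name is the temp-file path
        content = f.read()
    return content if content.strip() else fallback_text
```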
@@ -96,61 +94,61 @@ def analyze_text(text_input, file_upload, custom_instruction, max_new_tokens, te
     if not content_to_analyze.strip():
         return "Input text is empty."
 
-    # FastwebMIIA is an instruct model. It expects prompts like Alpaca.
-    # Structure:
-    # Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-    # ### Instruction:
-    # {your instruction}
-    # ### Input:
-    # {your text}
-    # ### Response:
-    # {model generates this}
-
-    prompt = f"""Di seguito è riportata un'istruzione che descrive un task, abbinata a un input che fornisce un contesto più ampio. Scrivi una risposta che completi la richiesta in modo appropriato.
-
-### Istruzione:
-{custom_instruction}
+    # Using Llama 2 Chat Format
+    # <s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{user_prompt} [/INST]
+    # For text analysis, the "instruction" is the user_prompt, and the "text_input" is part of it.
 
-### Input:
-{content_to_analyze}
+    system_prompt = "You are a helpful AI assistant specialized in text analysis. Perform the requested task on the provided text."
+    user_message = f"{custom_instruction}\n\nHere is the text:\n```\n{content_to_analyze}\n```"
 
-### Risposta:"""
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_message}
+    ]
 
-    # For English, you might change the preamble:
-    # prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-    # ### Instruction:
-    # {custom_instruction}
-    # ### Input:
-    # {content_to_analyze}
-    # ### Response:"""
+    try:
+        # Use tokenizer.apply_chat_template if available (transformers >= 4.34.0)
+        prompt = text_generator_pipeline.tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+    except Exception as e:
+        print(f"Warning: Could not use apply_chat_template ({e}). Falling back to manual formatting.")
+        # Manual Llama 2 chat format
+        prompt = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{user_message} [/INST]"
 
 
     print(f"\n--- Sending to Model ---")
-    print(f"Prompt:\n{prompt}")
+    print(f"Full Prompt:\n{prompt}")
     print(f"Max New Tokens: {max_new_tokens}, Temperature: {temperature}, Top P: {top_p}")
     print("------------------------\n")
 
     try:
-        # Note: text-generation pipelines often return the prompt + completion.
-        # We might need to strip the prompt from the output if desired.
         generated_outputs = text_generator_pipeline(
             prompt,
             max_new_tokens=int(max_new_tokens),
             do_sample=True,
-            temperature=float(temperature) if float(temperature) > 0 else 0.7,  # temp 0 means greedy
+            temperature=float(temperature) if float(temperature) > 0.01 else 0.01,  # Temperature 0 can be problematic
             top_p=float(top_p),
-            num_return_sequences=1
+            num_return_sequences=1,
+            eos_token_id=text_generator_pipeline.tokenizer.eos_token_id,
+            pad_token_id=text_generator_pipeline.tokenizer.pad_token_id  # Use the set pad_token
         )
-        response = generated_outputs[0]['generated_text']
+        response_full = generated_outputs[0]['generated_text']
 
-        # Often, the response includes the prompt. Let's try to return only the new part.
-        # The model should generate text after "### Risposta:"
-        answer_marker = "### Risposta:"
-        if answer_marker in response:
-            return response.split(answer_marker, 1)[1].strip()
+        # Extract only the assistant's response part
+        # The model's actual answer starts after the [/INST] token.
+        answer_marker = "[/INST]"
+        if answer_marker in response_full:
+            response_text = response_full.split(answer_marker, 1)[1].strip()
         else:
-            # Fallback if the marker isn't found (shouldn't happen with good prompting)
-            return response  # Or you could try to remove the original prompt string
+            # Fallback if the full prompt wasn't returned, might happen with some pipeline configs
+            # or if the model didn't fully adhere to the template in its output.
+            # This is less ideal, but better than nothing.
+            response_text = response_full.replace(prompt, "").strip()  # Try to remove the input prompt
+
+        return response_text
 
     except Exception as e:
         return f"Error during text generation: {str(e)}"
@@ -162,31 +160,35 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     Test the capabilities of the `{MODEL_ID}` model for text analysis tasks on Italian or English texts.
     Provide an instruction and your text (directly or via upload).
     **Important:** Model loading can take a few minutes, especially on the first run or on CPU.
-    This app is best run on a Hugging Face Space with GPU resources for this model size.
+    This app is best run on a Hugging Face Space with GPU resources (e.g., T4-small or A10G-small) for this 7B model.
     """)
 
     with gr.Row():
-        status_textbox = gr.Textbox(label="Model Status", value="Attempting to load model...", interactive=False)
+        status_textbox = gr.Textbox(label="Model Status", value="Initializing...", interactive=False, scale=3)
+        current_hardware = os.getenv("SPACE_HARDWARE", "Unknown (likely local or unspecified)")
+        gr.Markdown(f"Running on: **{current_hardware}**")
+
 
     with gr.Tab("Text Input & Analysis"):
         with gr.Row():
             with gr.Column(scale=2):
                 instruction_prompt = gr.Textbox(
-                    label="Instruction for the Model (e.g., 'Riassumi questo testo', 'Identify main topics', 'Translate to English')",
+                    label="Instruction for the Model (Cosa vuoi fare con il testo?)",
                     value="Riassumi questo testo in 3 frasi concise.",
-                    lines=3
+                    lines=3,
+                    placeholder="Example: Riassumi questo testo. / Summarize this text. / Estrai le entità nominate. / Identify named entities."
                 )
-                text_area_input = gr.Textbox(label="Enter Text Directly", lines=10, placeholder="Paste your text here...")
-                file_input = gr.File(label="Or Upload a Document (.txt)", file_types=['.txt'])
+                text_area_input = gr.Textbox(label="Enter Text Directly / Inserisci il testo direttamente", lines=10, placeholder="Paste your text here or upload a file below...")
+                file_input = gr.File(label="Or Upload a Document (.txt) / O carica un documento (.txt)", file_types=['.txt'])
             with gr.Column(scale=3):
-                output_text = gr.Textbox(label="Model Output", lines=20, interactive=False)
+                output_text = gr.Textbox(label="Model Output / Risultato del Modello", lines=20, interactive=False)
 
         with gr.Accordion("Advanced Generation Parameters", open=False):
-            max_new_tokens_slider = gr.Slider(minimum=50, maximum=1024, value=256, step=10, label="Max New Tokens")
-            temperature_slider = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (higher is more creative)")
+            max_new_tokens_slider = gr.Slider(minimum=10, maximum=2048, value=256, step=10, label="Max New Tokens")
+            temperature_slider = gr.Slider(minimum=0.01, maximum=2.0, value=0.7, step=0.01, label="Temperature (higher is more creative, 0.01 for more deterministic)")
            top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top P (nucleus sampling)")
 
-        analyze_button = gr.Button("🧠 Analyze Text", variant="primary")
+        analyze_button = gr.Button("🧠 Analyze Text / Analizza Testo", variant="primary")
 
         analyze_button.click(
             fn=analyze_text,
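The `.click()` wiring is cut off by the hunk boundary. Based on the `analyze_text` signature shown earlier, it presumably looks roughly like the sketch below; the exact `inputs` list is an assumption, not visible in this diff:

```python
# Hypothetical wiring; the real inputs list lies outside the visible hunk.
analyze_button.click(
    fn=analyze_text,
    inputs=[text_area_input, file_input, instruction_prompt,
            max_new_tokens_slider, temperature_slider, top_p_slider],
    outputs=output_text,
)
```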
@@ -197,10 +199,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     # Load the model when the app starts.
     # This will update the status_textbox after attempting to load.
     def startup_load_model():
+        print("Gradio app starting, attempting to load model...")
         if load_model_and_pipeline():
             return "Model loaded successfully and ready."
         else:
-            return f"Failed to load model. Error: {model_load_error or 'Unknown error during startup.'}"
+            return f"Failed to load model. Error: {model_load_error or 'Unknown error during startup. Check Space logs.'}"
 
     demo.load(startup_load_model, outputs=status_textbox)
 
@@ -208,18 +211,21 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 if __name__ == "__main__":
     # For local testing (ensure HF_TOKEN is set as an environment variable or you're logged in via CLI)
     # You would run: HF_TOKEN="your_hf_token_here" python app.py
-    # If not set, it will fail unless you've done `huggingface-cli login`
     if not HF_TOKEN and "HF_TOKEN" not in os.environ:
         print("WARNING: HF_TOKEN environment variable not set.")
         print("For local execution, either set HF_TOKEN or ensure you are logged in via 'huggingface-cli login'.")
-        # Attempt to use CLI login if available
         try:
-            HF_TOKEN = huggingface_hub.HfApi().token
-            if HF_TOKEN:
+            from huggingface_hub import HfApi
+            hf_api = HfApi()
+            token = hf_api.token
+            if token:
+                os.environ['HF_TOKEN'] = token  # Set it for the current process
+                HF_TOKEN = token  # also update the global variable used by the script
                 print("Using token from huggingface-cli login.")
             else:
                 print("Could not retrieve token from CLI login. Model access might fail.")
         except Exception as e:
             print(f"Could not check CLI login status: {e}. Model access might fail.")
 
+    print("Launching Gradio interface...")
     demo.queue().launch(debug=True, share=False)  # share=True for public link if local
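One caveat on the new fallback: `HfApi().token` only reflects a token passed to the client explicitly, so it can still be `None` after `huggingface-cli login`. A sketch of an alternative, assuming a reasonably recent `huggingface_hub` that exposes `get_token()` (which also reads the token cached by the CLI):

```python
from huggingface_hub import get_token

token = get_token()  # checks the HF_TOKEN env var and the token cached by `huggingface-cli login`
if token:
    print("Using cached Hugging Face token.")
else:
    print("No token found; access to gated models may fail.")
```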
 