kouki321 committed on
Commit
ce0b8fa
·
verified ·
1 Parent(s): 39050ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -75,7 +75,7 @@ def calculate_cache_size(cache):
75
 
76
  @st.cache_resource
77
  def load_model_and_tokenizer(doc_text_count):
78
- model_name = "mistralai/Mistral-7B-Instruct-v0.1" # Configure quantization for 4-bit loading
79
  quantization_config = BitsAndBytesConfig(
80
  load_in_4bit=True, # Enable 4-bit quantization
81
  bnb_4bit_compute_dtype=torch.float16, # Set computation precision
 
75
 
76
  @st.cache_resource
77
  def load_model_and_tokenizer(doc_text_count):
78
+ model_name = "google/gemma-3-4b-it" # Configure quantization for 4-bit loading
79
  quantization_config = BitsAndBytesConfig(
80
  load_in_4bit=True, # Enable 4-bit quantization
81
  bnb_4bit_compute_dtype=torch.float16, # Set computation precision