Rahul-8799 committed on
Commit 762beb8 · verified · 1 Parent(s): beac814

Update src/streamlit_app.py

Files changed (1): src/streamlit_app.py (+39 -39)
src/streamlit_app.py CHANGED
@@ -1,40 +1,40 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
-
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))

+ from transformers import AutoTokenizer, AutoModelForCausalLM
  import streamlit as st
+ import torch
+
+ st.title("Tokenizer Test Space")
+
+ model_id = "google/gemma-2b-it"  # Test with the official model first
+ # model_id = "Rahul-8799/project_manager_gemma3"  # If the official model works, try yours
+
+ try:
+     st.write(f"Attempting to load tokenizer for {model_id}...")
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     st.success("Tokenizer loaded successfully!")
+     st.write("Tokenizer details:", tokenizer)
+ except Exception as e:
+     st.error(f"Error loading tokenizer: {e}")
+     st.exception(e)  # Show full traceback
+
+ try:
+     st.write(f"Attempting to load model for {model_id}...")
+     # Assuming you want 4-bit quantization for Gemma
+     from transformers import BitsAndBytesConfig
+     quantization_config = BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_quant_type="nf4",
+         bnb_4bit_compute_dtype=torch.bfloat16,
+         bnb_4bit_use_double_quant=False,
+     )
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         quantization_config=quantization_config,
+         low_cpu_mem_usage=True,
+         torch_dtype=torch.bfloat16,
+         trust_remote_code=True,
+     )
+     st.success("Model loaded successfully!")
+     st.write("Model details:", model)
+ except Exception as e:
+     st.error(f"Error loading model: {e}")
+     st.exception(e)
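
If both loads succeed, a short generation round-trip is the quickest way to confirm that the tokenizer and model actually work together. A minimal sketch, not part of this commit, reusing the tokenizer and model variables loaded in the app above; the prompt string is only an illustration:

# Hypothetical smoke test (not in this commit): run one short generation
# with the `tokenizer` and `model` objects loaded by the app above.
prompt = "Summarize this project in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
st.write(tokenizer.decode(outputs[0], skip_special_tokens=True))

Note that load_in_4bit depends on the bitsandbytes and accelerate packages being installed and generally needs a CUDA GPU; on a CPU-only Space the model load is expected to land in the except branch, which is exactly what the st.exception traceback is there to surface.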