Hugging Face Spaces (status: Sleeping)
Commit: "Moving the model to main()"
Browse files
app.py — CHANGED
@@ -8,14 +8,14 @@ from tester import test
|
|
8 |
import transformers
|
9 |
from transformers import TFAutoModelForCausalLM, AutoTokenizer
|
10 |
|
11 |
-
# Move the transformers related setup outside the Streamlit app's main function
|
12 |
-
model_name = "tiiuae/falcon-7b-instruct"
|
13 |
-
model = TFAutoModelForCausalLM.from_pretrained(model_name)
|
14 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
15 |
-
pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100, temperature=0.7)
|
16 |
-
|
17 |
|
18 |
def main():
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
|
20 |
|
21 |
st.sidebar.header("Make Your Environment Configuration")
|
|
|
8 |
import transformers
|
9 |
from transformers import TFAutoModelForCausalLM, AutoTokenizer
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
def main():
|
13 |
+
model_name = "tiiuae/falcon-7b-instruct"
|
14 |
+
model = TFAutoModelForCausalLM.from_pretrained(model_name)
|
15 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
16 |
+
pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
|
17 |
+
temperature=0.7)
|
18 |
+
|
19 |
st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
|
20 |
|
21 |
st.sidebar.header("Make Your Environment Configuration")
|