Spaces:
Sleeping
Sleeping
Handling Signal
Browse files
app.py
CHANGED
@@ -8,6 +8,12 @@ from tester import test
|
|
8 |
import transformers
|
9 |
from transformers import TFAutoModelForCausalLM, AutoTokenizer
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
def main():
|
13 |
st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
|
@@ -31,11 +37,6 @@ def main():
|
|
31 |
if start_button:
|
32 |
agent = perform_training(jammer_type, channel_switching_cost)
|
33 |
st.subheader("Generating Insights of the DRL-Training")
|
34 |
-
model_name = "tiiuae/falcon-7b-instruct"
|
35 |
-
model = TFAutoModelForCausalLM.from_pretrained(model_name)
|
36 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
37 |
-
pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
|
38 |
-
temperature=0.7)
|
39 |
text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
|
40 |
st.write(text)
|
41 |
test(agent, jammer_type, channel_switching_cost)
|
|
|
import transformers
from transformers import TFAutoModelForCausalLM, AutoTokenizer

# Module-level LLM setup, kept outside main() per the commit's intent.
# NOTE(review): Streamlit re-executes the entire script on every user
# interaction, so module scope alone does NOT cache this 7B model across
# reruns — wrapping the loading in an @st.cache_resource function is the
# usual fix. Confirm intended behavior before shipping.
model_name = "tiiuae/falcon-7b-instruct"
model = TFAutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# do_sample=True is required for `temperature` to have any effect: the
# default is greedy decoding, under which transformers warns that the
# temperature argument is ignored.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=100,
    temperature=0.7,
    do_sample=True,
)
17 |
|
18 |
def main():
|
19 |
st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
|
|
|
37 |
if start_button:
|
38 |
agent = perform_training(jammer_type, channel_switching_cost)
|
39 |
st.subheader("Generating Insights of the DRL-Training")
|
|
|
|
|
|
|
|
|
|
|
40 |
text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
|
41 |
st.write(text)
|
42 |
test(agent, jammer_type, channel_switching_cost)
|