KaizeShi committed on
Commit
be0715d
·
1 Parent(s): 91df19c

Add application file

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -9,6 +9,7 @@ assert (
9
  ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
10
  from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
11
  access_token = os.environ.get('HF_TOKEN')
 
12
 
13
  tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", token=access_token)
14
 
@@ -138,4 +139,4 @@ g = gr.Interface(
138
  description="LLaMA-E is a series of fine-tuned LLaMA model following the E-commerce instructions. It is developed by DSMI (http://dsmi.tech/) @ University of Technology Sydney, and trained on the 120k instruction set. This model is for academic research use only. For more details please contact: Kaize.Shi@uts.edu.au",
139
  )
140
  g.queue(concurrency_count=1)
141
- g.launch()
 
9
  ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
10
  from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
11
  access_token = os.environ.get('HF_TOKEN')
12
+ print(access_token)
13
 
14
  tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", token=access_token)
15
 
 
139
  description="LLaMA-E is a series of fine-tuned LLaMA model following the E-commerce instructions. It is developed by DSMI (http://dsmi.tech/) @ University of Technology Sydney, and trained on the 120k instruction set. This model is for academic research use only. For more details please contact: Kaize.Shi@uts.edu.au",
140
  )
141
  g.queue(concurrency_count=1)
142
+ g.launch(share=True)