leonardlin committed
Commit 53ef41d · verified · Parent: 6b94036

Update app.py

Files changed (1)
  1. app.py +5 -15
app.py CHANGED
```diff
@@ -12,21 +12,11 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Llama-2 7B Chat
+# shisa-v2-llama3.1-8b-preview
 
-This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
-
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-"""
+This Space demonstrates the [shisa-v2-llama3.1-8b-preview](https://huggingface.co/shisa-ai/shisa-v2-llama3.1-8b-preview) bilingual (JA/EN) chat model."""
 
 LICENSE = """
-<p/>
-
----
-As a derivate work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
 """
 
 if not torch.cuda.is_available():
@@ -34,7 +24,7 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "meta-llama/Llama-2-7b-chat-hf"
+    model_id = "shisa-ai/shisa-v2-llama3.1-8b-preview"
     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
@@ -129,8 +119,8 @@ chat_interface = gr.ChatInterface(
         ["Hello there! How are you doing?"],
         ["Can you explain briefly to me what is the Python programming language?"],
         ["Explain the plot of Cinderella in a sentence."],
-        ["How many hours does it take a man to eat a Helicopter?"],
-        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+        ["日本の桜の季節について教えてください。"],
+        ["新幹線で東京から京都まで行くのにどれくらい時間がかかりますか?"],
     ],
     cache_examples=False,
     type="messages",
```