zavavan committed on
Commit
dabb5c1
·
verified ·
1 Parent(s): 018ea62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -1
app.py CHANGED
@@ -29,9 +29,31 @@ model = PeftModel.from_pretrained(base_model, "unica/CLiMA")
29
  # Load tokenizer
30
  tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  # Format prompt
33
  def format_prompt(user_input, entity1, entity2):
34
- return f"Identify causal relations in the following clinical narrative:\n\n{user_input}\n\nEntity 1: {entity1}\nEntity 2: {entity2}\n\nCausal relations:"
 
 
 
 
 
 
 
35
 
36
  # Prediction function
37
  def generate_relations(text, entity1, entity2):
 
29
  # Load tokenizer
30
  tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
31
 
32
# Instruction block prepended to every prompt: defines the 9-label relation
# scheme (cause/enable/prevent/hinder in both directions, plus "other") that
# the fine-tuned model expects, and the required "LABEL: X" output format.
# NOTE: plain string, not an f-string — it contains no placeholders.
prompt_instruction_drug_reviews = """Given a drug review enclosed in triple quotes and a pair of entities E1 corresponding to the drug name and E2 corresponding to the treated condition, classify the relation holding between E1 and E2.
The relations are identified with 9 labels from 0 to 8. The meaning of the labels is the following:
0 means that E1 causes E2
1 means that E2 causes E1
2 means that E1 enables E2
3 means that E2 enables E1
4 means that E1 prevents E2
5 means that E2 prevents E1
6 means that E1 hinders E2
7 means that E2 hinders E1
8 means that E1 and E2 are in a relation different than any of the previous ones.
Given X the label that you predicted, for the output use the format LABEL: X
"""


# Format prompt
def format_prompt(user_input, entity1, entity2):
    """Build the full <USER> ... <ASSISTANT> prompt for relation classification.

    Args:
        user_input: The drug-review text to classify.
        entity1: E1, the drug name mentioned in the review.
        entity2: E2, the treated condition mentioned in the review.

    Returns:
        The complete prompt string: the instruction block, the review text in
        triple quotes, and both entities, wrapped in <USER>/<ASSISTANT>
        chat markers.
    """
    # Compose the three prompt segments directly from the parameters;
    # the previous intermediate aliases (text/e1/e2) added nothing.
    prompt_text = f"Text:'''{user_input}'''"
    prompt_entities = f"\nEntities: E1: '''{entity1}''', E2: '''{entity2}'''"
    return f"<USER> {prompt_instruction_drug_reviews} {prompt_text} {prompt_entities} <ASSISTANT>"
57
 
58
  # Prediction function
59
  def generate_relations(text, entity1, entity2):