albhu committed on
Commit
2532ae2
·
verified ·
1 Parent(s): a7587d7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -21
app.py CHANGED
@@ -104,31 +104,21 @@ class InterestCalculatorApp:
104
  except requests.RequestException as e:
105
  st.error(f"Failed to download rates: {e}")
106
 
107
- def ask_tapas(self, query, df):
108
- # Convert the DataFrame to a list of dictionaries
109
- table = df.to_dict('records')
 
110
 
111
- # Initialize tokenized table
112
- tokenized_table = []
113
 
114
- # Tokenize each row in the table
115
- for row in table:
116
- tokenized_row = []
117
- for column_name, cell in row.items():
118
- # Ensure that the cell is converted to a string before tokenizing
119
- tokenized_row.append(self.tokenizer.tokenize(str(cell)))
120
- tokenized_table.append(tokenized_row)
121
 
122
- # Create DataFrame from tokenized table
123
- df_tokenized = pd.DataFrame(tokenized_table)
124
-
125
- # Tokenize the query
126
- tokenized_query = self.tokenizer.tokenize(query)
127
-
128
- # Encode tokens and convert them to PyTorch tensors
129
- inputs = self.tokenizer(table=df_tokenized, queries=tokenized_query, return_tensors="pt", padding=True)
130
 
131
- return inputs
132
 
133
 
134
  def main():
 
104
  except requests.RequestException as e:
105
  st.error(f"Failed to download rates: {e}")
106
 
107
+ def ask_tapas(self, query, table):
108
+ if isinstance(table, pd.DataFrame):
109
+ # Convert the DataFrame to a list of dictionaries
110
+ table_dict = table.to_dict('records')
111
 
112
+ # Tokenize each cell in the table
113
+ tokenized_table = [[self.tokenizer.tokenize(str(cell)) for cell in row.values()] for row in table_dict]
114
 
115
+ # Encode tokens and convert them to PyTorch tensors
116
+ inputs = self.tokenizer(table=tokenized_table, queries=query, return_tensors="pt", padding=True)
 
 
 
 
 
117
 
118
+ return inputs
119
+ else:
120
+ raise TypeError("Table must be of type pd.DataFrame")
 
 
 
 
 
121
 
 
122
 
123
 
124
  def main():