Update insight_and_tasks/utils/pandasai_setup.py
insight_and_tasks/utils/pandasai_setup.py
CHANGED
@@ -25,28 +25,22 @@ def configure_pandasai(api_key: str, model_name: str = None):
         # raise ValueError("API key must be provided for PandasAI configuration")
         return
 
+    os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "False"
+    os.environ["GOOGLE_API_KEY"] = api_key
+
     selected_model = model_name if model_name else DEFAULT_PANDASAI_MODEL
 
     try:
         llm = LiteLLM(
             model=selected_model, # Use the selected model
             api_key=api_key
-            # You might need to add other parameters for LiteLLM depending on the provider
-            # e.g., if not using a Google model directly via gemini provider in LiteLLM
         )
 
         # PandasAI configuration
         pai.config.set({
             "llm": llm,
-            "
-            "
-            "enforce_privacy": False, # Be cautious with this in production
-            "save_charts": False, # Set to True if you want to save charts locally
-            # "save_charts_path": "charts_output", # Define path if saving charts
-            "custom_whitelisted_dependencies": [], # Add any custom dependencies if needed
-            "max_retries": 3, # Default retries for PandasAI operations
-            "temperature": 0.3, # Lower temperature for more deterministic/factual outputs
-            # "open_charts": False # Whether to automatically open charts
+            "temperature": 0.3, # Lower temperature for more consistent results
+            "max_retries": 3
         })
         logger.info(f"PandasAI configured successfully with model: {selected_model}")
         logger.info(f"PandasAI LLM object: {llm}")
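For context, a minimal usage sketch of the updated helper. The import path matches the file shown above and the signature comes from the hunk header; the model string and the GEMINI_API_KEY environment variable are illustrative assumptions, not something this commit defines. Exporting GOOGLE_GENAI_USE_VERTEXAI and GOOGLE_API_KEY before constructing the LiteLLM client appears intended to let the Gemini provider pick up credentials from the environment rather than routing through Vertex AI.

# Hypothetical usage sketch, not part of the commit; the model name and the
# GEMINI_API_KEY variable are assumptions for illustration only.
import os

from insight_and_tasks.utils.pandasai_setup import configure_pandasai

api_key = os.environ.get("GEMINI_API_KEY", "")
if api_key:
    # Sets GOOGLE_GENAI_USE_VERTEXAI / GOOGLE_API_KEY and registers a LiteLLM
    # instance via pai.config.set(), as in the diff above.
    configure_pandasai(api_key=api_key, model_name="gemini/gemini-1.5-flash")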