Commit 73783f9 by LPX
Parent(s): d7aaf8c

feat: integrate dotenv for environment variable management and update weight management logic for prediction handling

Files changed:
- app_mcp.py +5 -0
- models/weight_management.py +2 -2
- requirements.txt +1 -0
app_mcp.py CHANGED
@@ -26,12 +26,17 @@ from models.smart_agents import ContextualIntelligenceAgent, ForensicAnomalyDete
 
 from forensics.registry import register_model, MODEL_REGISTRY, ModelEntry
 from models.weight_management import ModelWeightManager
+from dotenv import load_dotenv
 
 # Configure logging
 logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger(__name__)
+os.environ['HF_HUB_CACHE'] = './models'
 
 
+load_dotenv()
+print(os.getenv("HF_HUB_CACHE"))
+
 # Ensure using GPU if available
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
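For reference, a minimal sketch of how the new environment handling behaves; the .env contents below are hypothetical and not part of this commit. Because load_dotenv() defaults to override=False, the programmatic os.environ['HF_HUB_CACHE'] = './models' assignment above takes precedence over any HF_HUB_CACHE entry in a .env file:

import os
from dotenv import load_dotenv

# Hypothetical .env file in the project root (example value, not from the commit):
#   HF_HUB_CACHE=/data/hf-cache

os.environ['HF_HUB_CACHE'] = './models'  # set in code first, as in app_mcp.py

load_dotenv()                            # default override=False: existing os.environ values are kept
print(os.getenv("HF_HUB_CACHE"))         # -> './models', not the .env value

load_dotenv(override=True)               # with override=True the .env value would win instead
print(os.getenv("HF_HUB_CACHE"))         # -> '/data/hf-cache' if the .env file defines it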
models/weight_management.py CHANGED
@@ -87,13 +87,13 @@ class ModelWeightManager:
     def _has_consensus(self, predictions):
         """Check if models agree on prediction"""
         # Ensure all predictions are not None before checking for consensus
-        non_none_predictions = [p for p in predictions.values() if p is not None and p != "Error"]
+        non_none_predictions = [p.get("Label") for p in predictions.values() if p is not None and isinstance(p, dict) and p.get("Label") is not None and p.get("Label") != "Error"]
         return len(non_none_predictions) > 0 and len(set(non_none_predictions)) == 1
 
     def _has_conflicts(self, predictions):
         """Check if models have conflicting predictions"""
         # Ensure all predictions are not None before checking for conflicts
-        non_none_predictions = [p for p in predictions.values() if p is not None and p != "Error"]
+        non_none_predictions = [p.get("Label") for p in predictions.values() if p is not None and isinstance(p, dict) and p.get("Label") is not None and p.get("Label") != "Error"]
         return len(non_none_predictions) > 1 and len(set(non_none_predictions)) > 1
 
     def _normalize_weights(self, weights):
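A minimal sketch of how the updated checks read the new prediction shape; the exact structure of the per-model entries (and the label strings) is an assumption here, only the "Label" key is taken from the diff:

# Hypothetical predictions dict; the updated code only relies on the "Label" key.
predictions = {
    "model_1": {"Label": "AI-Generated", "Confidence": 0.91},
    "model_2": {"Label": "AI-Generated", "Confidence": 0.72},
    "model_3": "Error",  # non-dict / failed entries are filtered out by the isinstance() check
}

labels = [
    p.get("Label")
    for p in predictions.values()
    if p is not None and isinstance(p, dict)
    and p.get("Label") is not None and p.get("Label") != "Error"
]

print(labels)                                      # ['AI-Generated', 'AI-Generated']
print(len(labels) > 0 and len(set(labels)) == 1)   # _has_consensus: True
print(len(labels) > 1 and len(set(labels)) > 1)    # _has_conflicts: False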
requirements.txt CHANGED
@@ -15,3 +15,4 @@ pyexiftool
 psutil
 datasets
 Pillow
+python-dotenv