Update app.py
app.py CHANGED
@@ -7,28 +7,30 @@ from reranker import rerank
 def clean_df(df):
     df = df.copy()
 
-    # …
-    …
-    …
-    df["…
-    # …
-    df["…
-    df["adaptive_support"] = df["Adaptive/IRT"].map(lambda x: "Yes" if x == "T" else "No")
-    df["test_type"] = df["Test Type"]
-    # …
-    df["…
-    …
-    df["duration"] = df["Assessment_Length"].str.extract(r'(\d+)').fillna("N/A")
-    …
-    # Select only the columns we need
-    return df[["assessment_name", "url", "remote_support", "adaptive_support",
-               "description", "duration", "test_type"]]
+    # Ensure clean URLs
+    # Check if the second column contains URLs or just IDs
+    second_col = df.iloc[:, 1].astype(str)
+    if second_col.str.contains('http').any() or second_col.str.contains('www').any():
+        df["url"] = second_col  # Already has full URLs
+    else:
+        # Create full URLs from IDs
+        df["url"] = "https://www.shl.com/" + second_col.str.replace(r'^[\s/]*', '', regex=True)
+
+    df["remote_support"] = df.iloc[:, 2].map(lambda x: "Yes" if x == "T" else "No")
+    df["adaptive_support"] = df.iloc[:, 3].map(lambda x: "Yes" if x == "T" else "No")
+
+    # Handle test_type with error checking
+    df["test_type"] = df.iloc[:, 4].astype(str).str.split("\\n")
+
+    df["description"] = df.iloc[:, 5]
+
+    # Extract duration with error handling
+    df["duration"] = pd.to_numeric(
+        df.iloc[:, 8].astype(str).str.extract(r'(\d+)')[0],
+        errors='coerce'
+    )
+
+    return df[["url", "adaptive_support", "remote_support", "description", "duration", "test_type"]]
 
 try:
     df = pd.read_csv("assesments.csv")
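For context, the rewritten clean_df selects columns by position (IDs/URLs in column 1, T/F flags in columns 2 and 3, description in column 5, assessment length in column 8) rather than by header name, and duration becomes a numeric value (NaN when missing) instead of the old "N/A" string. The sketch below shows that positional logic on a toy frame; the column names are hypothetical, since the real assesments.csv layout is only assumed to match by position. Note also that str.split("\\n"), as written, splits on a literal backslash-n sequence in the CSV text rather than on newline characters.

import pandas as pd

# Toy frame mimicking the assumed column order of assesments.csv
# (names are hypothetical; the new clean_df reads positions 1-5 and 8).
toy = pd.DataFrame({
    "name":      ["Java Test", "SQL Test"],
    "id_or_url": ["products/java-8/", "solutions/products/sql/"],
    "remote":    ["T", "F"],
    "adaptive":  ["F", "T"],
    "types":     ["K\\nS", "K"],
    "desc":      ["Java knowledge test", "SQL knowledge test"],
    "extra_a":   ["", ""],
    "extra_b":   ["", ""],
    "length":    ["approx. 30 minutes", "45"],
})

second_col = toy.iloc[:, 1].astype(str)
if second_col.str.contains("http").any() or second_col.str.contains("www").any():
    toy["url"] = second_col  # already full URLs
else:
    # strip leading slashes/whitespace, then prefix the SHL domain
    toy["url"] = "https://www.shl.com/" + second_col.str.replace(r"^[\s/]*", "", regex=True)

toy["duration"] = pd.to_numeric(
    toy.iloc[:, 8].astype(str).str.extract(r"(\d+)")[0],  # first run of digits
    errors="coerce",                                       # NaN instead of "N/A"
)

print(toy[["url", "duration"]])
# expected: shl.com URLs built from the IDs, durations 30.0 and 45.0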
@@ -67,19 +69,10 @@ def recommend(query):
     try:
         # Print some debug info
         print(f"Processing query: {query[:50]}...")
-        print(f"DataFrame shape: {df_clean.shape}")
-        print(f"DataFrame columns: {df_clean.columns.tolist()}")
-
-        if df_clean.empty:
-            return {"error": "No assessment data available"}
-
-        # Print a sample row for debugging
-        print("Sample row:")
-        print(df_clean.iloc[0].to_dict())
 
         top_k_df = get_relevant_passages(query, df_clean, top_k=20)
 
-        # Debug: Check retrieved data
+        # Debug: Check URLs in retrieved data
         print(f"Retrieved {len(top_k_df)} assessments")
         if not top_k_df.empty:
             print(f"Sample URLs from retrieval: {top_k_df['url'].iloc[:3].tolist()}")
@@ -105,7 +98,6 @@ def recommend(query):
         print(f"Error: {str(e)}\n{error_details}")
         return {"error": f"Error processing request: {str(e)}"}
 
-
 iface = gr.Interface(
     fn=recommend,
     inputs=gr.Textbox(label="Enter Job Description", lines=4),
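The gr.Interface construction continues past the context lines shown above; its outputs component and launch settings are not visible in this diff. A hypothetical, minimal completion of that wiring is sketched below, assuming recommend returns a plain dict; a stub stands in for the app's real recommend so the snippet is self-contained.

import gradio as gr

# Stand-in for the app's recommend(query); the real one retrieves and
# reranks assessments. This stub only makes the wiring sketch runnable.
def recommend(query: str) -> dict:
    return {"recommended_assessments": [], "query": query}

# Hypothetical completion of the gr.Interface call shown in the hunk above;
# gr.JSON as the output component is an assumption, not part of this diff.
iface = gr.Interface(
    fn=recommend,
    inputs=gr.Textbox(label="Enter Job Description", lines=4),
    outputs=gr.JSON(label="Recommended Assessments"),
)

if __name__ == "__main__":
    iface.launch()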