import gradio as gr
from keybert import KeyBERT
from sentence_transformers import SentenceTransformer
import re

# ✅ Load Hugging Face model (no API key needed)
model = SentenceTransformer('all-MiniLM-L6-v2')
kw_model = KeyBERT(model)
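
# KeyBERT embeds the document and candidate terms with the sentence-transformers model
# above, then ranks candidates by cosine similarity to the document embedding.
# A minimal sketch (not used below) of pulling multi-word phrases instead of single words:
#   kw_model.extract_keywords(text, keyphrase_ngram_range=(1, 2), stop_words='english', top_n=15)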

# 🔍 Helper: clean keywords from text (split on commas and newlines)
def clean_keywords(text):
    keywords = re.split(r"[,\n]", text.lower())
    return set(kw.strip() for kw in keywords if kw.strip())
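
# Example: clean_keywords("Python, SQL\nMachine Learning") -> {"python", "sql", "machine learning"}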

# 🔍 Main function: extract keywords and compare the job description against the resume
def extract_keywords(job_desc, resume_text, analyze_with_jd):
    if not resume_text.strip():
        return "⚠️ Please paste your resume text."

    # Step 1: Combine inputs for keyword extraction
    use_jd = analyze_with_jd and job_desc.strip()
    combined_text = job_desc + "\n\n" + resume_text if use_jd else resume_text
    extracted_keywords = kw_model.extract_keywords(combined_text, top_n=15, stop_words='english')
    # Deduplicate while preserving KeyBERT's relevance ordering
    extracted = list(dict.fromkeys(kw.lower() for kw, _ in extracted_keywords))

    # Step 2: Tokenize the job description and resume separately
    jd_tokens = clean_keywords(job_desc) if use_jd else set()
    resume_tokens = clean_keywords(resume_text)

    # Step 3: Match and miss
    matched = sorted(jd_tokens & resume_tokens)
    missing = sorted(jd_tokens - resume_tokens)

    # Step 4: Build the Markdown output
    result = f"🔍 **Extracted Keywords:** {', '.join(extracted)}\n\n"
    result += f"✅ **Matched (Job & Resume):** {', '.join(matched) or 'None'}\n"
    result += f"❌ **Missing in Resume:** {', '.join(missing) or 'None'}\n"

    return result
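
# Quick manual check with hypothetical inputs (the Gradio UI below calls this automatically):
#   print(extract_keywords("Python, SQL, data pipelines", "Built data pipelines in Python.", True))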

# 🎛️ Gradio UI
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            analyze_checkbox = gr.Checkbox(label="Analyze with Job Description", value=True)
            job_desc = gr.Textbox(label="Job Description", lines=6, placeholder="Paste job description here...")
            resume_text = gr.Textbox(label="Resume Text", lines=12, placeholder="Paste resume content here...")
        with gr.Column():
            output_keywords = gr.Markdown(label="Keyword Match Result")  # Markdown for styled output

    resume_text.change(fn=extract_keywords, inputs=[job_desc, resume_text, analyze_checkbox], outputs=output_keywords)
    job_desc.change(fn=extract_keywords, inputs=[job_desc, resume_text, analyze_checkbox], outputs=output_keywords)
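    # Optional: also refresh the result when the checkbox itself is toggled
    analyze_checkbox.change(fn=extract_keywords, inputs=[job_desc, resume_text, analyze_checkbox], outputs=output_keywords)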

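# launch() starts a local server; share=True would additionally create a temporary public link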
demo.launch()