from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from device_config import get_device
import torch
import json

# Automatically choose device (CUDA, MPS, CPU)
device = get_device()
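# Note: device_config is a local helper module (not shown in this file). As an
# assumption, a typical get_device() returns "cuda" when torch.cuda.is_available(),
# "mps" when torch.backends.mps.is_available(), and "cpu" otherwise.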

# ⚙️ Model config: Phi-4-mini instruct
MODEL_ID = "microsoft/Phi-4-mini-instruct"

# Load model and tokenizer, then build a text-generation pipeline
model = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
summarizer = pipeline("text-generation", model=model, tokenizer=tokenizer)

def summarize_flowchart(flowchart_json):
    """
    Generates a human-friendly explanation from flowchart JSON.

    Args:
        flowchart_json (dict): Contains "start" node and a list of "steps".

    Returns:
        str: Bullet-style explanation with proper nesting and flow.
    """
    # 📄 Prompt optimized for flow comprehension
    prompt = (
        "You are an expert in visual reasoning and instruction generation.\n"
        "Convert the following flowchart JSON into a clear, step-by-step summary using bullets.\n"
        "- Each bullet represents a process step.\n"
        "- Use indented sub-bullets to explain decision branches (Yes/No).\n"
        "- Maintain order based on dependencies and parent-child links.\n"
        "- Avoid repeating the same step more than once.\n"
        "- Do not include JSON in the output, only human-readable text.\n"
        "\nFlowchart:\n{flowchart}\n\nBullet Explanation:"
    ).format(flowchart=json.dumps(flowchart_json, indent=2))

    # 🧠 Run the model inference
    result = summarizer(prompt, max_new_tokens=400, do_sample=False)[0]["generated_text"]

    # Extract the portion after the final prompt marker
    if "Bullet Explanation:" in result:
        explanation = result.split("Bullet Explanation:")[-1].strip()
    else:
        explanation = result.strip()

    return explanation
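
# Example usage: a minimal sketch. The sample flowchart below is hypothetical and
# only assumes the "start"/"steps" schema described in the docstring; adapt the
# step fields ("id", "label", "yes"/"no" branch targets) to your real schema.
if __name__ == "__main__":
    sample_flowchart = {
        "start": "s1",
        "steps": [
            {"id": "s1", "label": "Receive order", "next": "s2"},
            {"id": "s2", "label": "Is payment confirmed?", "yes": "s3", "no": "s4"},
            {"id": "s3", "label": "Ship order"},
            {"id": "s4", "label": "Send payment reminder", "next": "s2"},
        ],
    }
    print(summarize_flowchart(sample_flowchart))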