# NOTE: removed non-code page artifacts ("Spaces:" / "Sleeping") that were
# captured from the hosting UI when this file was extracted.
# summarizer_module/__init__.py
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Small local causal LM used for generation. Must already be downloaded and
# cached locally; otherwise from_pretrained() will attempt a network fetch.
MODEL_ID = "microsoft/phi-2"

# Model and tokenizer are loaded eagerly at import time, so importing this
# module pays the full model-load cost (memory + disk read).
# NOTE(review): consider lazy initialization if import latency matters.
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Shared text-generation pipeline used by summarize_flowchart() below.
summarizer = pipeline("text-generation", model=model, tokenizer=tokenizer)
def summarize_flowchart(flowchart_json):
    """
    Given a flowchart JSON with 'start' and 'steps', returns a plain English
    explanation formatted as bullets and sub-bullets.

    Args:
        flowchart_json (dict): Structured representation of flowchart.

    Returns:
        str: Bullet-style natural language summary of the logic.
    """
    import json  # stdlib; local import keeps this function self-contained

    # Serialize to real JSON: interpolating the dict directly would embed its
    # Python repr (single quotes, True/None), which is not the "Flowchart
    # JSON" the prompt promises the model.
    flowchart_text = json.dumps(flowchart_json, indent=2, ensure_ascii=False)

    prompt = (
        "Turn the following flowchart into a bullet-point explanation in plain English.\n"
        "Use bullets for steps and sub-bullets for branches.\n"
        "\n"
        f"Flowchart JSON:\n{flowchart_text}\n"
        "\nExplanation:"
    )

    # Greedy decoding (do_sample=False) for reproducible output; cap new
    # tokens so a runaway generation is bounded.
    result = summarizer(prompt, max_new_tokens=300, do_sample=False)[0]["generated_text"]

    # The text-generation pipeline echoes the prompt, so keep only the text
    # after the final "Explanation:" marker.
    return result.split("Explanation:")[-1].strip()