import math
from typing import List

import ollama


def cosine_similarity(embedding_0: List[float], embedding_1: List[float]) -> float:
    """Return the cosine similarity between two embedding vectors."""
    dot = sum(a * b for a, b in zip(embedding_0, embedding_1))
    norm_0 = math.sqrt(sum(a * a for a in embedding_0))
    norm_1 = math.sqrt(sum(b * b for b in embedding_1))
    if norm_0 == 0.0 or norm_1 == 0.0:
        return 0.0
    return dot / (norm_0 * norm_1)


def generate_embedding(model, text: str, model_type: str) -> List[float]:
    """Generate an embedding for `text`.

    Assumption: a `model_type` of "ollama" means `model` is an Ollama model
    name; any other value means `model` is an object exposing `.encode()`
    (e.g. a sentence-transformers model).
    """
    if model_type == "ollama":
        response = ollama.embeddings(model=model, prompt=text)
        return list(response["embedding"])
    return list(model.encode(text))


def search_with_llm(query: str, model: str = "llama3.2") -> str:
    """Send `query` to a local Ollama chat model and return its reply text."""
    try:
        response = ollama.chat(
            model=model,
            messages=[{"role": "user", "content": query}],
        )
        return response["message"]["content"]
    except Exception as e:
        return f"❌ Error processing request: {str(e)}"
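

# Minimal usage sketch (not part of the original module): exercises the three
# helpers above. It assumes a local Ollama server is running and that the
# "nomic-embed-text" (embeddings) and "llama3.2" (chat) models have been
# pulled; swap in whichever models are available locally.
if __name__ == "__main__":
    docs = ["Ollama runs models locally.", "The weather is sunny today."]
    query = "How do I run a model on my own machine?"

    # Embed the query and each document, then rank documents by cosine similarity.
    query_emb = generate_embedding("nomic-embed-text", query, model_type="ollama")
    scored = []
    for doc in docs:
        doc_emb = generate_embedding("nomic-embed-text", doc, model_type="ollama")
        scored.append((cosine_similarity(query_emb, doc_emb), doc))
    scored.sort(reverse=True)
    print("Best match:", scored[0][1])

    # Ask the chat model directly.
    print(search_with_llm(query))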