From 65aa8d64591be0112d06b3cef330e3cf00178587 Mon Sep 17 00:00:00 2001
From: Vishwanath Martur <64204611+vishwamartur@users.noreply.github.com>
Date: Sat, 30 Aug 2025 07:46:57 +0530
Subject: [PATCH] Enhanced Ollama integration with native tool calling support

- Implemented OllamaWrapper class for ChatOpenAI interface compatibility
- Added native tool calling support using Ollama's built-in capabilities
- Maintained backward compatibility with existing agent code
- Updated pyproject.toml to include ollama dependency
- Enhanced documentation with Ollama setup and usage instructions
- Updated CLI help text to include Ollama model option

Addresses GitHub PR #12 comment about leveraging Ollama's built-in tool calling
---
 README.md            | 14 ++++---
 integuru/util/LLM.py | 89 +++++++++++++++++++++++++++++++++++++++++---
 pyproject.toml       |  1 +
 3 files changed, 93 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index bbcaf9e..82674a8 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,9 @@ Let's assume we want to download utility bills:
 
 ## Setup
 
-1. Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) and add the `OPENAI_API_KEY` environment variable. (We recommend using an account with access to models that are at least as capable as OpenAI o1-mini. Models on par with OpenAI o1-preview are ideal.)
+1. **For OpenAI models**: Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) and add the `OPENAI_API_KEY` environment variable. (We recommend using an account with access to models that are at least as capable as OpenAI o1-mini. Models on par with OpenAI o1-preview are ideal.)
+
+   **For Ollama models**: Install and run [Ollama](https://ollama.com/download), then pull a compatible model (e.g., `ollama pull llama3.1`).
 2. Install Python requirements via poetry:
    ```
    poetry install
@@ -60,11 +62,13 @@ Let's assume we want to download utility bills:
    Log into your platform and perform the desired action (such as downloading a utility bill).
 6. Run Integuru:
    ```
-   poetry run integuru --prompt "download utility bills" --model
+   poetry run integuru --prompt "download utility bills" --model
    ```
    You can also run it via Jupyter Notebook `main.ipynb`
 
-   **Recommended to use gpt-4o as the model for graph generation as it supports function calling. Integuru will automatically switch to o1-preview for code generation if available in the user's OpenAI account.**
+   **Recommended to use gpt-4o as the model for graph generation as it supports function calling. Integuru will automatically switch to o1-preview for code generation if available in the user's OpenAI account.** ⚠️ **Note: o1-preview does not support function calls.**
+
+   **Ollama support is now available! You can use the Ollama model by specifying `--model ollama` in the command.**
 
 ## Usage
@@ -75,7 +79,7 @@ poetry run integuru --help
 Usage: integuru [OPTIONS]
 
 Options:
-  --model TEXT            The LLM model to use (default is gpt-4o)
+  --model TEXT            The LLM model to use (default is gpt-4o, supports ollama)
   --prompt TEXT           The prompt for the model  [required]
   --har-path TEXT         The HAR file path (default is
                           ./network_requests.har)
@@ -132,7 +136,7 @@ We open-source unofficial APIs that we've built already. You can find them [here
 Collected data is stored locally in the `network_requests.har` and `cookies.json` files.
 
 ### LLM Usage
-The tool uses a cloud-based LLM (OpenAI's GPT-4o and o1-preview models).
+The tool uses either cloud-based LLMs (OpenAI's GPT-4o and o1-preview models) or local LLMs (via Ollama).
 
 ### LLM Training
 The LLM is not trained or improved by the usage of this tool.
diff --git a/integuru/util/LLM.py b/integuru/util/LLM.py
index 9e20293..a3d7de7 100644
--- a/integuru/util/LLM.py
+++ b/integuru/util/LLM.py
@@ -1,17 +1,83 @@
 from langchain_openai import ChatOpenAI
+from ollama import chat
+import json
+from typing import Dict, List, Optional
+
+
+class OllamaWrapper:
+    """Wrapper class to make Ollama compatible with the ChatOpenAI interface"""
+
+    def __init__(self, model: str = "llama3.1", temperature: float = 1.0):
+        # `ollama_model` is the model name actually sent to the Ollama server.
+        # `model` doubles as LLMSingleton's switching key and may be overwritten
+        # with the CLI alias ("ollama"), so it must not be used for chat calls.
+        self.ollama_model = model
+        self.model = model
+        self.temperature = temperature
+
+    def invoke(self, prompt: str, functions: Optional[List[Dict]] = None, function_call: Optional[Dict] = None, **kwargs):
+        """
+        Invoke Ollama with native tool calling support, mirroring the
+        ChatOpenAI interface. `function_call` is accepted for signature
+        compatibility only; Ollama decides on its own whether to call a tool.
+        """
+        messages = [{'role': 'user', 'content': prompt}]
+
+        # Convert ChatOpenAI-style function schemas to Ollama's tool format
+        tools = []
+        if functions:
+            for func in functions:
+                tools.append({
+                    'type': 'function',
+                    'function': {
+                        'name': func['name'],
+                        'description': func['description'],
+                        'parameters': func['parameters'],
+                    },
+                })
+
+        # Make the Ollama chat call, forwarding the configured temperature
+        response = chat(
+            model=self.ollama_model,
+            messages=messages,
+            tools=tools or None,
+            options={'temperature': self.temperature},
+        )
+
+        # Create a response object that mimics ChatOpenAI's response format
+        class OllamaResponse:
+            def __init__(self, ollama_response):
+                # ollama 0.3.x returns plain dicts, so use item access
+                message = ollama_response['message']
+                self.content = message.get('content') or ""
+                self.additional_kwargs = {}
+
+                # Convert Ollama tool calls to the OpenAI function_call format
+                tool_calls = message.get('tool_calls')
+                if tool_calls:
+                    # Take the first tool call (matching the current usage pattern)
+                    tool_call = tool_calls[0]
+                    self.additional_kwargs['function_call'] = {
+                        'name': tool_call['function']['name'],
+                        # Ollama returns already-parsed dict arguments; the
+                        # OpenAI format expects a JSON string
+                        'arguments': json.dumps(tool_call['function']['arguments']),
+                    }
+
+        return OllamaResponse(response)
+
 
 class LLMSingleton:
     _instance = None
-    _default_model = "gpt-4o" 
+    _default_model = "gpt-4o"
     _alternate_model = "o1-preview"
+    _ollama_model = "ollama"
 
     @classmethod
     def get_instance(cls, model: str = None):
         if model is None:
             model = cls._default_model
-
-        if cls._instance is None:
-            cls._instance = ChatOpenAI(model=model, temperature=1)
+
+        if cls._instance is None or (hasattr(cls._instance, 'model') and cls._instance.model != model):
+            if model == cls._ollama_model:
+                cls._instance = OllamaWrapper(model="llama3.1", temperature=1)
+                cls._instance.model = model  # record the CLI alias as the switching key
+            else:
+                cls._instance = ChatOpenAI(model=model, temperature=1)
         return cls._instance
 
     @classmethod
@@ -28,11 +94,22 @@ def revert_to_default_model(cls):
 
     @classmethod
     def switch_to_alternate_model(cls):
-        """Returns a ChatOpenAI instance configured for o1-miniss"""
-        # Create a new instance only if we don't have one yet
-        cls._instance = ChatOpenAI(model=cls._alternate_model, temperature=1)
+        """Returns a ChatOpenAI instance configured for o1-preview"""
+        # Always create a fresh instance configured for the alternate model
+        if cls._alternate_model == cls._ollama_model:
+            cls._instance = OllamaWrapper(model="llama3.1", temperature=1)
+            cls._instance.model = cls._alternate_model  # record the switching key
+        else:
+            cls._instance = ChatOpenAI(model=cls._alternate_model, temperature=1)
         return cls._instance
 
+    @classmethod
+    def get_ollama_instance(cls):
+        """Returns an OllamaWrapper instance"""
+        cls._instance = OllamaWrapper(model="llama3.1", temperature=1)
+        cls._instance.model = cls._ollama_model  # record the CLI alias as the switching key
+        return cls._instance
+
 
 llm = LLMSingleton()
diff --git a/pyproject.toml b/pyproject.toml
index 66b237e..aa1366f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,7 @@ playwright = "^1.47.0"
 networkx = "^3.3"
 matplotlib = "^3.9.2"
 ipykernel = "^6.29.5"
+ollama = "^0.3.3"
 
 [tool.poetry.scripts]
 integuru = "integuru.__main__:cli"
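
For reviewers who want to exercise the round trip, here is a minimal sketch of how the wrapper is meant to be driven through `LLMSingleton` once the patch is applied. It assumes a local Ollama server with `llama3.1` pulled; the `extract_request` function schema is a hypothetical example for illustration, not one of Integuru's real schemas.

```
from integuru.util.LLM import llm

# Hypothetical ChatOpenAI-style function schema, for illustration only
functions = [{
    "name": "extract_request",
    "description": "Identify the HTTP request that performs the user's action",
    "parameters": {
        "type": "object",
        "properties": {
            "url": {"type": "string", "description": "Request URL"},
            "method": {"type": "string", "description": "HTTP method"},
        },
        "required": ["url", "method"],
    },
}]

# `--model ollama` resolves to OllamaWrapper; any other name resolves to
# ChatOpenAI, so the calling agent code is identical for both backends.
model = llm.get_instance(model="ollama")
response = model.invoke(
    "Which request downloads the utility bill?", functions=functions
)

# Both backends surface tool calls under additional_kwargs['function_call'],
# with `arguments` carried as a JSON string.
call = response.additional_kwargs.get("function_call")
if call:
    print(call["name"], call["arguments"])
else:
    print(response.content)
```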
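The key format difference the wrapper papers over: Ollama's native tool calls return `arguments` as an already-parsed dict, while the OpenAI `function_call` convention carries a JSON string, hence the `json.dumps` in `OllamaResponse`. A standalone sketch against the pinned `ollama` 0.3.x client, assuming `llama3.1` is pulled (the weather tool is a hypothetical example):

```
import json
from ollama import chat

response = chat(
    model="llama3.1",
    messages=[{"role": "user", "content": "What's the weather in Toronto?"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "get_current_weather",  # hypothetical tool
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }],
)

# ollama 0.3.x responses are plain dicts
for call in response["message"].get("tool_calls") or []:
    # call["function"]["arguments"] is a dict such as {"city": "Toronto"};
    # json.dumps restores the JSON-string form that OpenAI clients expect
    print(call["function"]["name"], json.dumps(call["function"]["arguments"]))
```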