14 changes: 9 additions & 5 deletions README.md
@@ -40,7 +40,9 @@ Let's assume we want to download utility bills:

## Setup

1. Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) and add the `OPENAI_API_KEY` environment variable. (We recommend using an account with access to models that are at least as capable as OpenAI o1-mini. Models on par with OpenAI o1-preview are ideal.)
1. **For OpenAI models**: Set up your OpenAI [API Keys](https://platform.openai.com/account/api-keys) and add the `OPENAI_API_KEY` environment variable. (We recommend using an account with access to models that are at least as capable as OpenAI o1-mini. Models on par with OpenAI o1-preview are ideal.)

**For Ollama models**: Install and run [Ollama](https://ollama.com/download), then pull a compatible model (e.g., `ollama pull llama3.1`).
2. Install Python requirements via poetry:
```
poetry install
@@ -60,11 +62,13 @@ Let's assume we want to download utility bills:
Log into your platform and perform the desired action (such as downloading a utility bill).
6. Run Integuru:
```
poetry run integuru --prompt "download utility bills" --model <gpt-4o|o3-mini|o1|o1-mini>
poetry run integuru --prompt "download utility bills" --model <gpt-4o|o3-mini|o1|o1-mini|ollama>
```
You can also run it via Jupyter Notebook `main.ipynb`

**Recommended to use gpt-4o as the model for graph generation as it supports function calling. Integuru will automatically switch to o1-preview for code generation if available in the user's OpenAI account.**
**Recommended to use gpt-4o as the model for graph generation as it supports function calling. Integuru will automatically switch to o1-preview for code generation if available in the user's OpenAI account.** ⚠️ **Note: o1-preview does not support function calls.**

**Ollama support is now available! You can use the Ollama model by specifying `--model ollama` in the command.**
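
To sanity-check a local model before running Integuru, here is a minimal sketch using the `ollama` Python client (added as a dependency in this PR); it assumes the Ollama server is already running on its default port:

```
# Minimal sketch: verify the local model responds before running Integuru.
import ollama

ollama.pull("llama3.1")  # no-op if the model is already downloaded
reply = ollama.chat(
    model="llama3.1",
    messages=[{"role": "user", "content": "Reply with the single word: ready"}],
)
print(reply["message"]["content"])
```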

## Usage

@@ -75,7 +79,7 @@ poetry run integuru --help
Usage: integuru [OPTIONS]

Options:
  --model TEXT     The LLM model to use (default is gpt-4o)
  --model TEXT     The LLM model to use (default is gpt-4o, supports ollama)
  --prompt TEXT    The prompt for the model  [required]
  --har-path TEXT  The HAR file path (default is
                   ./network_requests.har)
@@ -132,7 +136,7 @@ We open-source unofficial APIs that we've built already. You can find them [here
Collected data is stored locally in the `network_requests.har` and `cookies.json` files.

### LLM Usage
The tool uses a cloud-based LLM (OpenAI's GPT-4o and o1-preview models).
The tool uses either cloud-based LLMs (OpenAI's GPT-4o and o1-preview models) or local LLMs (via Ollama).

### LLM Training
The LLM is not trained or improved by the usage of this tool.
89 changes: 83 additions & 6 deletions integuru/util/LLM.py
@@ -1,17 +1,83 @@
from langchain_openai import ChatOpenAI
from ollama import chat
import json
from typing import Dict, List, Any, Optional

class OllamaWrapper:
    """Wrapper class to make Ollama compatible with ChatOpenAI interface"""

    def __init__(self, model: str = "llama3.1", temperature: float = 1.0):
        self.model = model
        self.temperature = temperature

    def invoke(self, prompt: str, functions: Optional[List[Dict]] = None, function_call: Optional[Dict] = None, **kwargs):
        """
        Invoke Ollama with function calling support, maintaining ChatOpenAI interface compatibility
        """
        messages = [{'role': 'user', 'content': prompt}]

        # Convert functions to Ollama tools format if provided
        tools = []
        if functions:
            for func in functions:
                # Convert ChatOpenAI function format to Ollama tool format
                tool = {
                    'type': 'function',
                    'function': {
                        'name': func['name'],
                        'description': func['description'],
                        'parameters': func['parameters']
                    }
                }
                tools.append(tool)

        # Make the Ollama chat call
        if tools:
            response = chat(
                model=self.model,
                messages=messages,
                tools=tools
            )
        else:
            response = chat(
                model=self.model,
                messages=messages
            )

        # Create a response object that mimics ChatOpenAI's response format
        class OllamaResponse:
            def __init__(self, ollama_response):
                self.content = ollama_response.message.content or ""
                self.additional_kwargs = {}

                # Convert Ollama tool calls to ChatOpenAI format
                if hasattr(ollama_response.message, 'tool_calls') and ollama_response.message.tool_calls:
                    # Take the first tool call (matching current usage pattern)
                    tool_call = ollama_response.message.tool_calls[0]
                    self.additional_kwargs['function_call'] = {
                        'name': tool_call.function.name,
                        'arguments': json.dumps(tool_call.function.arguments)
                    }

        return OllamaResponse(response)

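# Note: OllamaWrapper mirrors the two response fields consumed downstream:
# `.content` (the assistant's text) and `.additional_kwargs['function_call']`,
# whose 'arguments' value is a JSON string to match ChatOpenAI's encoding.
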
class LLMSingleton:
    _instance = None
    _default_model = "gpt-4o"
    _alternate_model = "o1-preview"
    _ollama_model = "ollama"

    @classmethod
    def get_instance(cls, model: str = None):
        if model is None:
            model = cls._default_model

        if cls._instance is None:
            cls._instance = ChatOpenAI(model=model, temperature=1)

        if cls._instance is None or (hasattr(cls._instance, 'model') and cls._instance.model != model):
            if model == cls._ollama_model:
                cls._instance = OllamaWrapper(model="llama3.1", temperature=1)
                cls._instance.model = model  # Add model attribute for consistency
            else:
                cls._instance = ChatOpenAI(model=model, temperature=1)
        return cls._instance

    @classmethod
@@ -28,11 +94,22 @@ def revert_to_default_model(cls):

    @classmethod
    def switch_to_alternate_model(cls):
        """Returns a ChatOpenAI instance configured for o1-miniss"""
        """Returns a ChatOpenAI instance configured for o1-preview"""
        # Create a new instance only if we don't have one yet
        cls._instance = ChatOpenAI(model=cls._alternate_model, temperature=1)
        if cls._alternate_model == cls._ollama_model:
            cls._instance = OllamaWrapper(model="llama3.1", temperature=1)
            cls._instance.model = cls._alternate_model  # Add model attribute for consistency
        else:
            cls._instance = ChatOpenAI(model=cls._alternate_model, temperature=1)

        return cls._instance

    @classmethod
    def get_ollama_instance(cls):
        """Returns an Ollama instance"""
        cls._instance = OllamaWrapper(model="llama3.1", temperature=1)
        cls._instance.model = cls._ollama_model  # Add model attribute for consistency
        return cls._instance

llm = LLMSingleton()

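A short usage sketch of the wrapper and singleton above; the `get_bill` schema is hypothetical and only illustrates the ChatOpenAI-style `functions` format that `OllamaWrapper.invoke` converts into Ollama's tool format:

```
from integuru.util.LLM import LLMSingleton, OllamaWrapper

# Hypothetical function schema in ChatOpenAI's `functions` format (illustration only).
get_bill = {
    "name": "get_bill",
    "description": "Fetch a utility bill for a given month",
    "parameters": {
        "type": "object",
        "properties": {"month": {"type": "string"}},
        "required": ["month"],
    },
}

wrapper = OllamaWrapper(model="llama3.1", temperature=1)
response = wrapper.invoke("Download my January utility bill", functions=[get_bill])
print(response.content)                                 # plain-text reply, if any
print(response.additional_kwargs.get("function_call"))  # {'name': ..., 'arguments': '<JSON string>'} if a tool was called

# Passing "ollama" as the model name routes the singleton to the wrapper.
llm = LLMSingleton.get_instance("ollama")
```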
1 change: 1 addition & 0 deletions pyproject.toml
@@ -17,6 +17,7 @@ playwright = "^1.47.0"
networkx = "^3.3"
matplotlib = "^3.9.2"
ipykernel = "^6.29.5"
ollama = "^0.3.3"

[tool.poetry.scripts]
integuru = "integuru.__main__:cli"