2 changes: 2 additions & 0 deletions .github/workflows/publish.yml
@@ -23,6 +23,8 @@ jobs:
      - name: Check package version and detect an update
        id: check-package-version
        uses: PostHog/check-package-version@v2
        with:
          path: typescript/

  release:
    name: Publish release if new version
4 changes: 3 additions & 1 deletion .gitignore
@@ -17,4 +17,6 @@ __pycache__/

.env

dist/
dist/

.venv/
6 changes: 6 additions & 0 deletions examples/langchain/.env.example
@@ -0,0 +1,6 @@
# PostHog Configuration
POSTHOG_PERSONAL_API_KEY=your_posthog_api_key_here
POSTHOG_MCP_URL=https://mcp.posthog.com/mcp # Optional

# OpenAI Configuration (for LangChain agent)
OPENAI_API_KEY=your_openai_api_key_here
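
For context, the example script added in this PR reads these values with python-dotenv; a minimal sketch of that lookup (the fallback URL mirrors the optional default above):

```python
import os

from dotenv import load_dotenv

# Pull POSTHOG_PERSONAL_API_KEY, POSTHOG_MCP_URL and OPENAI_API_KEY from .env
load_dotenv()

personal_api_key = os.getenv("POSTHOG_PERSONAL_API_KEY")
# The MCP URL is optional and falls back to the hosted endpoint
mcp_url = os.getenv("POSTHOG_MCP_URL", "https://mcp.posthog.com/mcp")
```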
36 changes: 36 additions & 0 deletions examples/langchain/README.md
@@ -0,0 +1,36 @@
# PostHog LangChain Python Integration Example

This example demonstrates how to use PostHog tools with LangChain using the `posthog_agent_toolkit` package.

## Setup

1. Install dependencies:
   ```bash
   # Reinstall the local PostHog Agent Toolkit to ensure latest changes
   uv sync --reinstall-package posthog-agent-toolkit
   ```

2. Copy the environment file and fill in your credentials:
   ```bash
   cp .env.example .env
   ```

3. Update your `.env` file with:
   - `POSTHOG_PERSONAL_API_KEY`: Your PostHog personal API key
   - `OPENAI_API_KEY`: Your OpenAI API key

## Usage

Run the example:
```bash
uv run python posthog_agent_example.py
```

The example will:
1. Connect to the PostHog MCP server using your personal API key
2. Load all available PostHog tools from the MCP server
3. Create a LangChain agent
4. Analyze product usage by:
   - Getting available insights
   - Querying data for the most relevant ones
   - Summarizing key findings
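
Condensed, the wiring in `posthog_agent_example.py` below amounts to a few calls; a minimal sketch (the sample question is made up for illustration, everything else mirrors the example file):

```python
import asyncio
import os

from dotenv import load_dotenv
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from posthog_agent_toolkit.integrations.langchain.toolkit import PostHogAgentToolkit


async def run(question: str) -> str:
    load_dotenv()

    # Connect to the PostHog MCP server and expose its tools to LangChain
    toolkit = PostHogAgentToolkit(
        personal_api_key=os.getenv("POSTHOG_PERSONAL_API_KEY"),
        url=os.getenv("POSTHOG_MCP_URL", "https://mcp.posthog.com/mcp"),
    )
    tools = await toolkit.get_tools()

    # Build a tool-calling agent around the PostHog tools
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a data analyst."),
        ("human", "{input}"),
        MessagesPlaceholder("agent_scratchpad"),
    ])
    agent = create_tool_calling_agent(llm=llm, tools=tools, prompt=prompt)
    executor = AgentExecutor(agent=agent, tools=tools)

    result = await executor.ainvoke({"input": question})
    return result["output"]


if __name__ == "__main__":
    # Hypothetical question, used only to show the invocation pattern
    print(asyncio.run(run("Which insights saw the biggest change this week?")))
```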
103 changes: 103 additions & 0 deletions examples/langchain/posthog_agent_example.py
@@ -0,0 +1,103 @@
"""
PostHog LangChain Integration Example

This example demonstrates how to use PostHog tools with LangChain using
the local posthog_agent_toolkit package. It shows how to analyze product
usage data similar to the TypeScript example.
"""

import asyncio
import os
import sys

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from posthog_agent_toolkit.integrations.langchain.toolkit import PostHogAgentToolkit


async def analyze_product_usage():
"""Analyze product usage using PostHog data."""

print("🚀 PostHog LangChain Agent - Product Usage Analysis\n")

# Initialize the PostHog toolkit with credentials
toolkit = PostHogAgentToolkit(
personal_api_key=os.getenv("POSTHOG_PERSONAL_API_KEY"),
url=os.getenv("POSTHOG_MCP_URL", "https://mcp.posthog.com/mcp")
)

# Get the tools
tools = await toolkit.get_tools()

# Initialize the LLM
llm = ChatOpenAI(
model="gpt-4o-mini",
temperature=0,
api_key=os.getenv("OPENAI_API_KEY")
)

# Create a system prompt for the agent
prompt = ChatPromptTemplate.from_messages([
(
"system",
"You are a data analyst. Your task is to do a deep dive into what's happening in our product. "
"Be concise and data-driven in your responses."
),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
])

agent = create_tool_calling_agent(
llm=llm,
tools=tools,
prompt=prompt,
)

agent_executor = AgentExecutor(
agent=agent,
tools=tools,
verbose=False,
max_iterations=5,
)

# Invoke the agent with an analysis request
result = await agent_executor.ainvoke({
"input": """Please analyze our product usage:

1. Get all available insights (limit 100)
2. Pick the 5 MOST INTERESTING and VALUABLE insights - prioritize:
- User behavior and engagement metrics
- Conversion funnels
- Retention and growth metrics
- Product adoption insights
- Revenue or business KPIs
AVOID picking feature flag insights unless they show significant business impact
3. For each selected insight, query its data and explain why it's important
4. Summarize the key findings in a brief report with actionable recommendations

Focus on insights that tell a story about user behavior and business performance."""
})

print("\n📊 Analysis Complete!\n")
print("=" * 50)
print(result["output"])
print("=" * 50)


async def main():
"""Main function to run the product usage analysis."""
try:
# Load environment variables
load_dotenv()

# Run the analysis
await analyze_product_usage()
except Exception as error:
print(f"Error: {error}")
sys.exit(1)


if __name__ == "__main__":
asyncio.run(main())
24 changes: 24 additions & 0 deletions examples/langchain/pyproject.toml
@@ -0,0 +1,24 @@
[project]
name = "posthog-langchain-example"
version = "0.1.0"
description = "PostHog LangChain integration example"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "posthog-agent-toolkit",
    "langchain>=0.3.0",
    "langchain-openai>=0.2.0",
    "langchain-core>=0.3.0",
    "python-dotenv>=1.0.0",
]

[tool.uv.sources]
posthog-agent-toolkit = { path = "../../python", editable = true }

[tool.ruff]
line-length = 120
target-version = "py311"

[tool.black]
line-length = 120
target-version = ['py311']