Skip to content

Commit 23542b3

Browse files
Merge pull request #225 from oracle-samples/224-langchain-mcp
fix for import new optimizer_settings.json
2 parents b3fa2c0 + ab94bad commit 23542b3

File tree

5 files changed

+91
-73
lines changed

5 files changed

+91
-73
lines changed

src/client/mcp/rag/README.md

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ In the **AI Optimizer & Toolkit** web interface, after testing a configuration, i
3939
There is a client that you can run without MCP via the command line to test it:
4040

4141
```bash
42-
uv run rag_base_optimizer_config.py
42+
uv run rag_base_optimizer_config.py "[YOUR_QUESTION]"
4343
```
4444

4545
## Quick test via MCP "inspector"
@@ -50,7 +50,14 @@ uv run rag_base_optimizer_config.py
5050
npx @modelcontextprotocol/inspector uv run rag_base_optimizer_config_mcp.py
5151
```
5252

53-
* connect to the port `http://localhost:6274/` with your browser
53+
* open in your browser the URL printed by the inspector (by default `http://localhost:6274/`), as in the following example:
54+
```bash
55+
..
56+
Open inspector with token pre-filled:
57+
http://localhost:6274/?MCP_PROXY_AUTH_TOKEN=cb2ef7521aaf2050ad9620bfb5e5df42dc958889e6e99ce4e9b18003eb93fffd
58+
..
59+
```
60+
5461
* set up the `Inspector Proxy Address` with `http://127.0.0.1:6277`
5562
* test the developed tool.
5663

Lines changed: 27 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,8 @@
1+
"""
2+
Copyright (c) 2024, 2025, Oracle and/or its affiliates.
3+
Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
4+
"""
5+
16
from langchain_openai import ChatOpenAI
27
from langchain_openai import OpenAIEmbeddings
38
from langchain_huggingface import HuggingFaceEmbeddings
@@ -10,15 +15,19 @@
1015
from langchain_community.vectorstores.oraclevs import OracleVS
1116
import oracledb
1217

18+
import logging
19+
logging.basicConfig(level=logging.INFO)
1320

1421
def get_llm(data):
22+
logging.info("llm data:")
23+
logging.info(data["user_settings"]["ll_model"]["model"])
1524
llm={}
16-
llm_config = data["ll_model_config"][data["client_settings"]["ll_model"]["model"]]
25+
llm_config = data["ll_model_config"][data["user_settings"]["ll_model"]["model"]]
1726
api=llm_config["api"]
1827
url=llm_config["url"]
1928
api_key=llm_config["api_key"]
20-
model=data["client_settings"]["ll_model"]["model"]
21-
print(f"CHAT_MODEL: {model} {api} {url} {api_key}")
29+
model=data["user_settings"]["ll_model"]["model"]
30+
logging.info(f"CHAT_MODEL: {model} {api} {url} {api_key}")
2231
if api == "ChatOllama":
2332
# Initialize the LLM
2433
llm = OllamaLLM(
@@ -35,45 +44,45 @@ def get_llm(data):
3544

3645
def get_embeddings(data):
3746
embeddings={}
38-
model=data["client_settings"]["rag"]["model"]
47+
model=data["user_settings"]["vector_search"]["model"]
3948
api=data["embed_model_config"][model]["api"]
4049
url=data["embed_model_config"][model]["url"]
4150
api_key=data["embed_model_config"][model]["api_key"]
42-
print(f"EMBEDDINGS: {model} {api} {url} {api_key}")
51+
logging.info(f"EMBEDDINGS: {model} {api} {url} {api_key}")
4352
embeddings = {}
4453
if api=="OllamaEmbeddings":
4554
embeddings=OllamaEmbeddings(
4655
model=model,
4756
base_url=url)
4857
elif api == "OpenAIEmbeddings":
49-
print("BEFORE create embbedding")
58+
logging.info("BEFORE create embbedding")
5059
embeddings = OpenAIEmbeddings(
5160
model=model,
5261
api_key=api_key
5362
)
54-
print("AFTER create emebdding")
63+
logging.info("AFTER create emebdding")
5564
return embeddings
5665

5766
def get_vectorstore(data,embeddings):
5867

59-
config=data["database_config"][data["client_settings"]["rag"]["database"]]
60-
68+
config=data["database_config"][data["user_settings"]["database"]["alias"]]
69+
logging.info(config)
70+
6171
conn23c = oracledb.connect(user=config["user"],
6272
password=config["password"], dsn=config["dsn"])
6373

64-
print("DB Connection successful!")
65-
metric=data["client_settings"]["rag"]["distance_metric"]
74+
logging.info("DB Connection successful!")
75+
metric=data["user_settings"]["vector_search"]["distance_metric"]
6676

6777
dist_strategy=DistanceStrategy.COSINE
6878
if metric=="COSINE":
6979
dist_strategy=DistanceStrategy.COSINE
7080
elif metric == "EUCLIDEAN":
7181
dist_strategy=DistanceStrategy.EUCLIDEAN
7282

73-
print("1")
74-
a=data["client_settings"]["rag"]["vector_store"]
75-
print(f"{a}")
76-
print(f"BEFORE KNOWLEDGE BASE")
77-
print(embeddings)
78-
knowledge_base = OracleVS(conn23c, embeddings, data["client_settings"]["rag"]["vector_store"], dist_strategy)
79-
return knowledge_base
83+
a=data["user_settings"]["vector_search"]["vector_store"]
84+
logging.info(f"{a}")
85+
logging.info(f"BEFORE KNOWLEDGE BASE")
86+
logging.info(embeddings)
87+
knowledge_base = OracleVS(conn23c, embeddings, data["user_settings"]["vector_search"]["vector_store"], dist_strategy)
88+
return knowledge_base

src/client/mcp/rag/optimizer_utils/rag.py

Lines changed: 40 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -2,80 +2,81 @@
22
Copyright (c) 2024, 2025, Oracle and/or its affiliates.
33
Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
44
"""
5-
65
from typing import List
76
from mcp.server.fastmcp import FastMCP
87
import os
98
from dotenv import load_dotenv
10-
11-
# from sentence_transformers import CrossEncoder
12-
# from langchain_community.embeddings import HuggingFaceEmbeddings
139
from langchain_core.prompts import PromptTemplate
1410
from langchain_core.runnables import RunnablePassthrough
1511
from langchain_core.output_parsers import StrOutputParser
1612
import json
1713
import logging
18-
1914
logging.basicConfig(level=logging.DEBUG)
2015

2116
from optimizer_utils import config
2217

23-
_optimizer_settings_path = ""
24-
18+
_optimizer_settings_path= ""
2519

2620
def set_optimizer_settings_path(path: str):
2721
global _optimizer_settings_path
2822
_optimizer_settings_path = path
2923

30-
3124
def rag_tool_base(question: str) -> str:
3225
"""
3326
Use this tool to answer any question that may benefit from up-to-date or domain-specific information.
34-
27+
3528
Args:
3629
question: the question for which are you looking for an answer
37-
30+
3831
Returns:
3932
JSON string with answer
4033
"""
4134
with open(_optimizer_settings_path, "r") as file:
4235
data = json.load(file)
43-
try:
36+
logging.info("Json loaded!")
37+
try:
38+
4439
embeddings = config.get_embeddings(data)
45-
46-
print("Embedding successful!")
47-
knowledge_base = config.get_vectorstore(data, embeddings)
48-
print("DB Connection successful!")
49-
50-
print("knowledge_base successful!")
40+
41+
logging.info("Embedding successful!")
42+
knowledge_base = config.get_vectorstore(data,embeddings)
43+
logging.info("DB Connection successful!")
44+
45+
logging.info("knowledge_base successful!")
5146
user_question = question
52-
# result_chunks=knowledge_base.similarity_search(user_question, 5)
53-
54-
for d in data["prompt_configs"]:
55-
if d["name"] == data["client_settings"]["prompts"]["sys"]:
56-
rag_prompt = d["prompt"]
57-
58-
template = """DOCUMENTS: {context} \n""" + rag_prompt + """\nQuestion: {question} """
59-
# template = """Answer the question based only on the following context:{context} Question: {question} """
60-
print(template)
47+
logging.info("start looking for prompts")
48+
for d in data["prompts_config"]:
49+
if d["name"]==data["user_settings"]["prompts"]["sys"]:
50+
51+
rag_prompt=d["prompt"]
52+
53+
logging.info("rag_prompt:")
54+
logging.info(rag_prompt)
55+
template = """DOCUMENTS: {context} \n"""+rag_prompt+"""\nQuestion: {question} """
56+
logging.info(template)
6157
prompt = PromptTemplate.from_template(template)
62-
print("before retriever")
63-
print(data["client_settings"]["rag"]["top_k"])
64-
retriever = knowledge_base.as_retriever(search_kwargs={"k": data["client_settings"]["rag"]["top_k"]})
65-
print("after retriever")
58+
logging.info("before retriever")
59+
logging.info(data["user_settings"]["vector_search"]["top_k"])
60+
retriever = knowledge_base.as_retriever(search_kwargs={"k": data["user_settings"]["vector_search"]["top_k"]})
61+
logging.info("after retriever")
62+
6663

6764
# Initialize the LLM
68-
llm = config.get_llm(data)
69-
70-
chain = {"context": retriever, "question": RunnablePassthrough()} | prompt | llm | StrOutputParser()
71-
print("pre-chain successful!")
65+
llm = config.get_llm(data)
66+
67+
chain = (
68+
{"context": retriever, "question": RunnablePassthrough()}
69+
| prompt
70+
| llm
71+
| StrOutputParser()
72+
)
73+
logging.info("pre-chain successful!")
7274
answer = chain.invoke(user_question)
7375

74-
# print(f"Results provided for question: {question}")
75-
# print(f"{answer}")
76+
7677
except Exception as e:
77-
print(e)
78-
print("Connection failed!")
79-
answer = ""
78+
logging.info(e)
79+
logging.info("Connection failed!")
80+
answer=""
8081

8182
return f"{answer}"

src/client/mcp/rag/rag_base_optimizer_config.py

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2,18 +2,16 @@
22
Copyright (c) 2024, 2025, Oracle and/or its affiliates.
33
Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
44
"""
5+
import sys
56
from typing import List
6-
#from mcp.server.fastmcp import FastMCP
77
import os
88
from dotenv import load_dotenv
9-
#from sentence_transformers import CrossEncoder
10-
#from langchain_community.embeddings import HuggingFaceEmbeddings
119
import logging
1210
logging.basicConfig(level=logging.INFO)
1311

1412

1513

16-
print("Successfully imported libraries and modules")
14+
logging.info("Successfully imported libraries and modules")
1715

1816
from optimizer_utils import config
1917

@@ -34,7 +32,7 @@ def similarity_search(question: str, max_results: int = 5) -> List[str]:
3432
List of information related to the question
3533
"""
3634

37-
print(f"Results provided for question: {question} with top {max_results}")
35+
logging.info(f"Results provided for question: {question} with top {max_results}")
3836
chunks=["first chunk", "second chunk"]
3937

4038
return chunks
@@ -43,10 +41,13 @@ def similarity_search(question: str, max_results: int = 5) -> List[str]:
4341
# Initialize and run the server
4442
# Load JSON file
4543
file_path = os.path.join(os.getcwd(), "optimizer_settings.json")
46-
print(file_path)
44+
logging.info(file_path)
4745
rag.set_optimizer_settings_path(file_path)
48-
49-
#Set your question to check if configuration is working
50-
question="Which kind of IDE should be used in this demo?"
51-
print(f"Question: {question}")
52-
print(f"Answer: {rag.rag_tool_base(question)}")
46+
47+
if len(sys.argv) > 1:
48+
question = sys.argv[1]
49+
print(question)
50+
logging.info(f"Question: {sys.argv[1]}")
51+
logging.info(f"\n\nAnswer: {rag.rag_tool_base(question)}")
52+
else:
53+
logging.info("No question provided.")

src/client/mcp/rag/rag_base_optimizer_config_mcp.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
from optimizer_utils import rag
1919

2020

21-
print("Successfully imported libraries and modules")
21+
logging.info("Successfully imported libraries and modules")
2222

2323
CHUNKS_DIR = "chunks_temp"
2424
data = {}
@@ -62,8 +62,8 @@ def rag_tool(question: str) -> str:
6262

6363
# Initialize and run the server
6464

65-
# Set optimizer_settings.json file absolute path
66-
rag.set_optimizer_settings_path("/Users/cdebari/Documents/GitHub/ai-optimizer-mcp-export/src/client/mcp/rag/optimizer_settings.json")
65+
# Set optimizer_settings.json file ABSOLUTE path
66+
rag.set_optimizer_settings_path("optimizer_settings.json")
6767

6868
mcp.run(transport='stdio')
6969
#mcp.run(transport='sse')

0 commit comments

Comments
 (0)