From 8bb10fb94a2081c5c2bfb83954cba7ea56b35efa Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Wed, 2 Aug 2023 13:14:08 -0700
Subject: [PATCH 1/5] v0

---
 Chatbot.py       | 15 +++++++++++----
 requirements.txt |  1 +
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/Chatbot.py b/Chatbot.py
index ecc1fac9e..4ece400a9 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,9 +1,17 @@
 import openai
 import streamlit as st
+from litellm import completion
+import os
 
 with st.sidebar:
-    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
+
+    model = st.selectbox(
+        'Model',
+        ('gpt-3.5-turbo', 'gpt-4', 'command-nightly', 'text-davinci-003', 'claude-2', 'claude-instant-v1'))
     "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+    os.environ["OPENAI_API_KEY"] = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
+    os.environ['COHERE_API_KEY'] = st.text_input("Cohere API Key", key="cohere_api_key", type="password")
+    os.environ['ANTHROPIC_API_KEY'] = st.text_input("Anthropic API Key", key="anthropic_api_key", type="password")
     "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
     "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
 
@@ -15,14 +23,13 @@
     st.chat_message(msg["role"]).write(msg["content"])
 
 if prompt := st.chat_input():
-    if not openai_api_key:
+    if not os.environ["OPENAI_API_KEY"]:
         st.info("Please add your OpenAI API key to continue.")
         st.stop()
 
-    openai.api_key = openai_api_key
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
-    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
+    response = completion(model=model, messages=st.session_state.messages)
     msg = response.choices[0].message
     st.session_state.messages.append(msg)
     st.chat_message("assistant").write(msg.content)
diff --git a/requirements.txt b/requirements.txt
index 24465cad9..8990b3b1b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,3 +4,4 @@ openai
 duckduckgo-search
 anthropic>=0.3.0
 trubrics>=1.4.3
+litellm

From 216827c4db5cce23d83b8d4a9b1bd8f383bbf798 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Wed, 2 Aug 2023 13:15:42 -0700
Subject: [PATCH 2/5] add badge

---
 README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/README.md b/README.md
index a0cc3820f..13da01a21 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,9 @@
 
 [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)
 
+[![litellm](https://img.shields.io/badge/%20%F0%9F%9A%85%20liteLLM-chatGPT%7CAzure%7CAnthropic-blue?color=green)](https://github.com/BerriAI/litellm)
+
+
 Starter examples for building LLM apps with Streamlit.
 
 ## Overview of the App

From dee5300c71c77e1234024c334835219035813f70 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Wed, 2 Aug 2023 13:16:09 -0700
Subject: [PATCH 3/5] stable v

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 8990b3b1b..7ca7b3eed 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,4 +4,4 @@ openai
 duckduckgo-search
 anthropic>=0.3.0
 trubrics>=1.4.3
-litellm
+litellm==0.1.213

From 2807dbed4974ae2c97691655ba24c618c629bfd4 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Fri, 11 Aug 2023 08:28:05 -0700
Subject: [PATCH 4/5] new liteLLM page

---
 Chatbot.py                                    | 17 ++---
 README.md                                     |  1 +
 pages/2_lite_LLM_Quickstart.py                | 74 +++++++++++++++++++
 ...t_with_search.py => 3_Chat_with_search.py} |  0
 ...uickstart.py => 4_Langchain_Quickstart.py} |  0
 ...plate.py => 5_Langchain_PromptTemplate.py} |  0
 ...edback.py => 6_Chat_with_user_feedback.py} |  0
 requirements.txt                              |  2 +-
 8 files changed, 81 insertions(+), 13 deletions(-)
 create mode 100644 pages/2_lite_LLM_Quickstart.py
 rename pages/{2_Chat_with_search.py => 3_Chat_with_search.py} (100%)
 rename pages/{3_Langchain_Quickstart.py => 4_Langchain_Quickstart.py} (100%)
 rename pages/{4_Langchain_PromptTemplate.py => 5_Langchain_PromptTemplate.py} (100%)
 rename pages/{5_Chat_with_user_feedback.py => 6_Chat_with_user_feedback.py} (100%)

diff --git a/Chatbot.py b/Chatbot.py
index 4ece400a9..c8ccb0a64 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,17 +1,9 @@
 import openai
 import streamlit as st
-from litellm import completion
-import os
 
 with st.sidebar:
-
-    model = st.selectbox(
-        'Model',
-        ('gpt-3.5-turbo', 'gpt-4', 'command-nightly', 'text-davinci-003', 'claude-2', 'claude-instant-v1'))
+    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
     "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-    os.environ["OPENAI_API_KEY"] = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
-    os.environ['COHERE_API_KEY'] = st.text_input("Cohere API Key", key="cohere_api_key", type="password")
-    os.environ['ANTHROPIC_API_KEY'] = st.text_input("Anthropic API Key", key="anthropic_api_key", type="password")
     "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
     "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
 
@@ -23,13 +15,14 @@
     st.chat_message(msg["role"]).write(msg["content"])
 
 if prompt := st.chat_input():
-    if not os.environ["OPENAI_API_KEY"]:
+    if not openai_api_key:
         st.info("Please add your OpenAI API key to continue.")
         st.stop()
 
+    openai.api_key = openai_api_key
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
-    response = completion(model=model, messages=st.session_state.messages)
+    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
     msg = response.choices[0].message
     st.session_state.messages.append(msg)
-    st.chat_message("assistant").write(msg.content)
+    st.chat_message("assistant").write(msg.content)
\ No newline at end of file
diff --git a/README.md b/README.md
index 13da01a21..df388e92c 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,7 @@ Current examples include:
 - LangChain Quickstart
 - LangChain PromptTemplate
 - LangChain Search
+- LiteLLM Playground - Run 1 prompt on Claude2, Claude 1.2, GPT-3.5, GPT-4
 
 ## Demo App
 
diff --git a/pages/2_lite_LLM_Quickstart.py b/pages/2_lite_LLM_Quickstart.py
new file mode 100644
index 000000000..7b6ccfc18
--- /dev/null
+++ b/pages/2_lite_LLM_Quickstart.py
@@ -0,0 +1,74 @@
+import streamlit as st
+import threading
+import os
+from litellm import completion
+from dotenv import load_dotenv
+
+# load .env, so litellm reads from .env
+load_dotenv()
+
+# Function to get model outputs
+def get_model_output(prompt, model_name):
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": prompt},
+    ]
+    response = completion(messages=messages, model=model_name)
+
+    return response['choices'][0]['message']['content']
+
+# Function to get model outputs
+def get_model_output_thread(prompt, model_name, outputs, idx):
+    output = get_model_output(prompt, model_name)
+    outputs[idx] = output
+
+# Streamlit app
+
+st.title("liteLLM API Playground - use 50+ LLM Models")
+st.markdown("[Powered by liteLLM - one package for Anthropic, Cohere, OpenAI, Replicate](https://github.com/BerriAI/litellm/)")
+
+# Sidebar for user input
+with st.sidebar:
+    st.header("User Settings")
+    anthropic_api_key = st.text_input("Enter your Anthropic API key:", type="password")
+    openai_api_key = st.text_input("Enter your OpenAI API key:", type="password")
+    set_keys_button = st.button("Set API Keys")
+
+if set_keys_button:
+    if anthropic_api_key:
+        os.environ["ANTHROPIC_API_KEY"] = anthropic_api_key
+    if openai_api_key:
+        os.environ["OPENAI_API_KEY"] = openai_api_key
+    st.success("API keys have been set.")
+
+# User Input section
+with st.sidebar:
+    st.header("User Input")
+    prompt = st.text_area("Enter your prompt here:")
+    submit_button = st.button("Submit")
+
+# Main content area to display model outputs
+st.header("Model Outputs")
+
+# List of models to test
+model_names = ["claude-instant-1.2", "claude-2", "gpt-3.5-turbo", "gpt-4", ]  # Add your model names here
+
+cols = st.columns(len(model_names))  # Create columns
+outputs = [""] * len(model_names)  # Initialize outputs list with empty strings
+
+threads = []
+
+if submit_button and prompt:
+    for idx, model_name in enumerate(model_names):
+        thread = threading.Thread(target=get_model_output_thread, args=(prompt, model_name, outputs, idx))
+        threads.append(thread)
+        thread.start()
+
+    for thread in threads:
+        thread.join()
+
+# Display text areas and fill with outputs if available
+for idx, model_name in enumerate(model_names):
+    with cols[idx]:
+        st.text_area(label=f"{model_name}", value=outputs[idx], height=300, key=f"output_{model_name}_{idx}")  # Use a unique key
+
diff --git a/pages/2_Chat_with_search.py b/pages/3_Chat_with_search.py
similarity index 100%
rename from pages/2_Chat_with_search.py
rename to pages/3_Chat_with_search.py
diff --git a/pages/3_Langchain_Quickstart.py b/pages/4_Langchain_Quickstart.py
similarity index 100%
rename from pages/3_Langchain_Quickstart.py
rename to pages/4_Langchain_Quickstart.py
diff --git a/pages/4_Langchain_PromptTemplate.py b/pages/5_Langchain_PromptTemplate.py
similarity index 100%
rename from pages/4_Langchain_PromptTemplate.py
rename to pages/5_Langchain_PromptTemplate.py
diff --git a/pages/5_Chat_with_user_feedback.py b/pages/6_Chat_with_user_feedback.py
similarity index 100%
rename from pages/5_Chat_with_user_feedback.py
rename to pages/6_Chat_with_user_feedback.py
diff --git a/requirements.txt b/requirements.txt
index 7ca7b3eed..a78ef0c78 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,4 +4,4 @@ openai
 duckduckgo-search
 anthropic>=0.3.0
 trubrics>=1.4.3
-litellm==0.1.213
+litellm>=0.1.380
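
A note on the technique this series introduces: both the Chatbot.py change in PATCH 1 and the new pages/2_lite_LLM_Quickstart.py route every request through litellm's completion(), which infers the provider from the model name and reads API keys from environment variables. A minimal standalone sketch of that call, with model names and response indexing taken from the code above (illustrative only; the exact response shape can differ across litellm versions):

    # Keys are assumed to be set in the environment, as the patches do via
    # os.environ; the values below are placeholders.
    import os
    from litellm import completion

    os.environ.setdefault("OPENAI_API_KEY", "sk-...")
    os.environ.setdefault("ANTHROPIC_API_KEY", "sk-ant-...")

    messages = [{"role": "user", "content": "Say hello in one sentence."}]

    # The same call covers OpenAI and Anthropic models; litellm picks the
    # backend from the model name.
    for model in ["gpt-3.5-turbo", "claude-instant-1.2"]:
        response = completion(model=model, messages=messages)
        print(model, "->", response["choices"][0]["message"]["content"])
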
From 6525e8c1518dc937adb086aecae7887163d312d5 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Fri, 11 Aug 2023 08:28:59 -0700
Subject: [PATCH 5/5] fix formatting

---
 Chatbot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Chatbot.py b/Chatbot.py
index c8ccb0a64..ecc1fac9e 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -25,4 +25,4 @@
     response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
     msg = response.choices[0].message
     st.session_state.messages.append(msg)
-    st.chat_message("assistant").write(msg.content)
\ No newline at end of file
+    st.chat_message("assistant").write(msg.content)
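
A design note on the playground page added in PATCH 4: it fans one prompt out to several models by starting threading.Thread objects by hand and writing results into a shared outputs list, which keeps the Streamlit columns in a fixed order. A functionally equivalent sketch using concurrent.futures (an alternative shown for comparison, not what the patch uses):

    # Same fan-out with ThreadPoolExecutor; API keys are assumed to be
    # exported already, as in the playground page above.
    from concurrent.futures import ThreadPoolExecutor
    from litellm import completion

    def get_model_output(prompt: str, model_name: str) -> str:
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ]
        response = completion(messages=messages, model=model_name)
        return response["choices"][0]["message"]["content"]

    model_names = ["claude-instant-1.2", "claude-2", "gpt-3.5-turbo", "gpt-4"]
    prompt = "Summarize what liteLLM does in one sentence."

    # executor.map preserves input order, so outputs line up with model_names.
    with ThreadPoolExecutor(max_workers=len(model_names)) as pool:
        outputs = list(pool.map(lambda m: get_model_output(prompt, m), model_names))

    for model_name, output in zip(model_names, outputs):
        print(f"--- {model_name} ---\n{output}\n")
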