diff --git a/README.md b/README.md
index 072ac7e38..01215213a 100644
--- a/README.md
+++ b/README.md
@@ -80,6 +80,7 @@ cp .streamlit/secrets.toml.example .streamlit/secrets.toml
 supabase_url = "SUPABASE_URL"
 supabase_service_key = "SUPABASE_SERVICE_KEY"
 openai_api_key = "OPENAI_API_KEY"
+anthropic_api_key = "ANTHROPIC_API_KEY" # Optional
 ```
 
 - Run the migration script on the Supabase database via the web interface
diff --git a/question.py b/question.py
index 6f8e9d340..c3e13f96d 100644
--- a/question.py
+++ b/question.py
@@ -1,3 +1,4 @@
+import anthropic
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.chains import ConversationalRetrievalChain
@@ -13,9 +14,17 @@ anthropic_api_key = st.secrets.anthropic_api_key
 
 logger = get_logger(__name__)
 
+def count_tokens(question, model):
+    count = f'Words: {len(question.split())}'
+    if model.startswith("claude"):
+        count += f' | Tokens: {anthropic.count_tokens(question)}'
+    return count
+
+
 def chat_with_doc(model, vector_store: SupabaseVectorStore):
     question = st.text_area("## Ask a question")
     button = st.button("Ask")
+    count_button = st.button("Count Tokens", type='secondary')
     if button:
         if model.startswith("gpt"):
             logger.info('Using OpenAI model %s', model)
@@ -33,3 +42,6 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore):
     result = qa({"question": question})
     logger.info('Result: %s', result)
     st.write(result["answer"])
+
+    if count_button:
+        st.write(count_tokens(question, model))