Simple Chatbot Example

This example demonstrates how to build a simple chatbot using Glean's Chat API with the official Python client. The chatbot can answer questions using your company's knowledge base.

Prerequisites

  • Glean API token with CHAT scope
  • Python 3.8+
  • Your Glean instance name

Installation

pip install glean-api-client

Basic Chat Example

import os
from glean.api_client import Glean, models

def send_chat_message(message_text):
    """Send a message to Glean Chat and get the response."""
    api_token = os.getenv("GLEAN_API_TOKEN")
    instance = os.getenv("GLEAN_INSTANCE")

    if not api_token:
        raise ValueError("GLEAN_API_TOKEN environment variable is required")
    if not instance:
        raise ValueError("GLEAN_INSTANCE environment variable is required")

    with Glean(api_token=api_token, instance=instance) as glean:
        try:
            response = glean.client.chat.create(
                messages=[
                    {
                        "fragments": [
                            models.ChatMessageFragment(
                                text=message_text,
                            ),
                        ],
                    },
                ],
            )

            if response.messages:
                last_message = response.messages[-1]
                if hasattr(last_message, 'fragments') and last_message.fragments:
                    for fragment in last_message.fragments:
                        if hasattr(fragment, 'text') and fragment.text:
                            return fragment.text

            return "No response received"

        except Exception as e:
            return f"Error: {str(e)}"

def main():
    """Simple interactive chatbot."""
    print("Glean Chatbot - Type 'quit' to exit")
    print("-" * 40)

    while True:
        user_input = input("\nYou: ").strip()

        if user_input.lower() in ['quit', 'exit', 'q']:
            print("Goodbye!")
            break

        if not user_input:
            continue

        print("Bot: ", end="", flush=True)
        try:
            response = send_chat_message(user_input)
            print(response)
        except ValueError as e:
            print(f"Setup error: {e}")
            break

if __name__ == "__main__":
    main()

Environment Setup

Create a .env file in your project directory:

GLEAN_API_TOKEN=your_chat_token_here
GLEAN_INSTANCE=your_company_name
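
The examples in this guide read configuration with os.getenv, which does not load a .env file by itself. One way to pick up the file is with the python-dotenv package (an assumption of this sketch, not a dependency of the Glean client); install it with pip install python-dotenv and load it before creating the client:

import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

# Copy GLEAN_API_TOKEN and GLEAN_INSTANCE from .env into the process environment
load_dotenv()

api_token = os.getenv("GLEAN_API_TOKEN")
instance = os.getenv("GLEAN_INSTANCE")

Alternatively, export the variables in your shell as shown in the next section.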

Running the Example

  1. Set your environment variables:

    export GLEAN_API_TOKEN="your_chat_token_here"
    export GLEAN_INSTANCE="your_company_name"
  2. Run the chatbot:

    python chatbot.py
  3. Start asking questions:

    You: What is our vacation policy?
    Bot: Based on your company's HR documentation, here are the key points about vacation policy...

    You: Who should I contact about benefits?
    Bot: For benefits questions, you should reach out to...

Streaming Chat Example

For real-time streaming responses:

import os
from glean.api_client import Glean, models

def stream_chat_response(message_text):
    """Stream chat response in real-time."""
    api_token = os.getenv("GLEAN_API_TOKEN")
    instance = os.getenv("GLEAN_INSTANCE")

    if not api_token:
        print("Error: GLEAN_API_TOKEN environment variable required")
        return
    if not instance:
        print("Error: GLEAN_INSTANCE environment variable required")
        return

    with Glean(api_token=api_token, instance=instance) as glean:
        try:
            response_stream = glean.client.chat.create_stream(
                messages=[
                    {
                        "fragments": [
                            models.ChatMessageFragment(
                                text=message_text,
                            ),
                        ],
                    },
                ],
            )

            print("Bot: ", end="", flush=True)

            for chunk in response_stream:
                if chunk:
                    print(chunk, end="", flush=True)
            print()

        except Exception as e:
            print(f"Error during streaming: {e}")

def main():
    """Interactive streaming chatbot."""
    print("Glean Streaming Chatbot - Type 'quit' to exit")
    print("-" * 45)

    while True:
        user_input = input("\nYou: ").strip()

        if user_input.lower() in ['quit', 'exit', 'q']:
            print("Goodbye!")
            break

        if user_input:
            stream_chat_response(user_input)

if __name__ == "__main__":
    main()

Conversation History Example

Maintain context across multiple messages:

import os
from glean.api_client import Glean, models

class ConversationalChatbot:
    def __init__(self):
        self.chat_id = None
        self.api_token = os.getenv("GLEAN_API_TOKEN")
        self.instance = os.getenv("GLEAN_INSTANCE")

        if not self.api_token:
            raise ValueError("GLEAN_API_TOKEN environment variable is required")
        if not self.instance:
            raise ValueError("GLEAN_INSTANCE environment variable is required")

    def send_message(self, user_message):
        """Send a message with conversation history."""
        with Glean(api_token=self.api_token, instance=self.instance) as glean:
            try:
                response = glean.client.chat.create(
                    messages=[
                        {
                            "fragments": [
                                models.ChatMessageFragment(
                                    text=user_message,
                                ),
                            ],
                        },
                    ],
                    chat_id=self.chat_id,
                    save_chat=True,
                )

                if hasattr(response, 'chat_id') and response.chat_id:
                    self.chat_id = response.chat_id

                if response.messages:
                    last_message = response.messages[-1]
                    if hasattr(last_message, 'fragments') and last_message.fragments:
                        for fragment in last_message.fragments:
                            if hasattr(fragment, 'text') and fragment.text:
                                return fragment.text

                return "No response received"

            except Exception as e:
                return f"Error: {str(e)}"

    def reset_conversation(self):
        """Start a new conversation."""
        self.chat_id = None

def main():
    """Conversational chatbot with memory."""
    print("Glean Conversational Chatbot")
    print("Type 'quit' to exit, 'reset' to start new conversation")
    print("-" * 55)

    try:
        chatbot = ConversationalChatbot()
    except ValueError as e:
        print(f"Setup error: {e}")
        return

    while True:
        user_input = input("\nYou: ").strip()

        if user_input.lower() in ['quit', 'exit', 'q']:
            print("Goodbye!")
            break
        elif user_input.lower() == 'reset':
            chatbot.reset_conversation()
            print("Started new conversation!")
            continue
        elif not user_input:
            continue

        print("Bot: ", end="", flush=True)
        response = chatbot.send_message(user_input)
        print(response)

if __name__ == "__main__":
    main()

Error Handling

Add robust error handling for production use:

import logging
import os

from glean.api_client import Glean, models
from glean.api_client.errors import GleanError

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def send_chat_message_with_retry(message_text, max_retries=3):
    """Send chat message with retry logic."""
    api_token = os.getenv("GLEAN_API_TOKEN")
    instance = os.getenv("GLEAN_INSTANCE")

    if not api_token:
        return "Missing GLEAN_API_TOKEN"
    if not instance:
        return "Missing GLEAN_INSTANCE"

    for attempt in range(max_retries):
        try:
            with Glean(api_token=api_token, instance=instance) as glean:
                response = glean.client.chat.create(
                    messages=[
                        {
                            "fragments": [
                                models.ChatMessageFragment(
                                    text=message_text,
                                ),
                            ],
                        },
                    ],
                )

                if response.messages:
                    last_message = response.messages[-1]
                    if hasattr(last_message, 'fragments') and last_message.fragments:
                        for fragment in last_message.fragments:
                            if hasattr(fragment, 'text') and fragment.text:
                                return fragment.text

                return "No response received"

        except GleanError as e:
            logger.warning(f"Glean API error on attempt {attempt + 1}: {e}")
            if attempt == max_retries - 1:
                return f"Failed after {max_retries} attempts: {e}"

        except Exception as e:
            logger.error(f"Unexpected error: {e}")
            return f"Unexpected error: {e}"

    return "Failed to get response"
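
The loop above retries immediately after a failure. If you want to space attempts out, one common pattern (a sketch, not part of the Glean client) is exponential backoff between retries:

import time

def wait_before_retry(attempt):
    """Sleep 1s, 2s, 4s, ... before retrying attempt 0, 1, 2, ..."""
    time.sleep(2 ** attempt)

# In send_chat_message_with_retry, call wait_before_retry(attempt) inside the
# GleanError handler, just before the loop moves on to the next attempt.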

Advanced Features

Using Custom Agents

# Use a specific agent for specialized responses
with Glean(api_token=api_token, instance=instance) as glean:
    response = glean.client.chat.create(
        messages=[
            {
                "fragments": [
                    models.ChatMessageFragment(
                        text="Help me write a technical document",
                    ),
                ],
            },
        ],
        agent_id="your-custom-agent-id",
    )

Next Steps

  • Add conversation persistence: Use the chat_id to maintain long-running conversations
  • Implement user authentication: Handle different user permissions
  • Add file upload support: Use upload_files() for document analysis
  • Deploy as a web service: Use Flask/FastAPI to create a web interface (see the sketch after this list)
  • Integrate with Slack/Teams: Build a bot for your team chat platform
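
As a starting point for the web-service idea, here is a minimal sketch using FastAPI (assumes fastapi and uvicorn are installed; the /chat route and ChatRequest model are illustrative names, not part of the Glean client). It reuses send_chat_message from the basic example, saved here as chatbot.py:

from fastapi import FastAPI
from pydantic import BaseModel

from chatbot import send_chat_message  # the basic example above, saved as chatbot.py

app = FastAPI()

class ChatRequest(BaseModel):
    message: str

@app.post("/chat")
def chat(request: ChatRequest):
    # Forward the user's message to Glean Chat and return the answer as JSON
    answer = send_chat_message(request.message)
    return {"response": answer}

# Run with: uvicorn app:app --reload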