Enhance UX for your Streamlit chatbot: avatars, timestamps, a "typing" spinner, and controls to clear or reset the system instruction.
A st.spinner "typing" indicator keeps responses feeling alive while the model works. Save the code below as app.py in your project. It uses Streamlit's chat input (st.chat_input), which requires a recent Streamlit release (1.24 or newer); if your Streamlit version is older, update Streamlit first.
# app.py
# Streamlit Chatbot UI polishing: avatars, timestamps, typing indicator, and reset.
# -------------------------------------------------------------------------------
import os
import time
from datetime import datetime
import streamlit as st
from dotenv import load_dotenv
# Vertex AI imports (Gemini)
import vertexai
from vertexai.generative_models import GenerativeModel
# ---- config ----
load_dotenv()
PROJECT_ID = os.getenv("GCP_PROJECT_ID", "YOUR_PROJECT_ID")
LOCATION = os.getenv("GCP_LOCATION", "us-central1")
# init Vertex AI (uses ADC or service account key if env var set)
vertexai.init(project=PROJECT_ID, location=LOCATION)
# ---- Streamlit page settings ----
st.set_page_config(page_title="Polished Vertex AI Chatbot", page_icon="🤖", layout="wide")
st.title("🤖 Polished Vertex AI Chatbot (Step 5)")
# ---- Sidebar: controls ----
with st.sidebar:
    st.header("Chat Settings")
    system_instruction = st.text_area(
        "System instruction",
        value=st.session_state.get("system_instruction", "You are a helpful assistant. Answer concisely."),
        height=100,
    )
    st.markdown("---")
    st.write("Avatars")
    user_avatar = st.text_input("User avatar (emoji or URL)", value=st.session_state.get("user_avatar", "🧑"))
    bot_avatar = st.text_input("Bot avatar (emoji or URL)", value=st.session_state.get("bot_avatar", "🤖"))
    # persist sidebar choices into session_state so the rest of the UI uses them;
    # doing this before the buttons lets the Reset handler below take effect on the same run
    st.session_state['user_avatar'] = user_avatar
    st.session_state['bot_avatar'] = bot_avatar
    st.session_state['system_instruction'] = system_instruction
    st.markdown("---")
    if st.button("Clear chat"):
        # Clear messages but keep the system instruction unless the user resets it
        st.session_state['messages'] = []
        st.success("Chat cleared.")
    if st.button("Reset system instruction"):
        st.session_state['system_instruction'] = "You are a helpful assistant. Answer concisely."
        st.success("System instruction reset to default.")
    st.markdown("---")
    st.info("Tip: Use emojis as avatars (fast) or paste image URLs (hosted PNG/JPG).")
# ---- Initialize conversation store ----
if "messages" not in st.session_state:
# messages is a list of dicts: {'role':'user'|'assistant','text':..., 'ts':timestamp, 'avatar':...}
st.session_state['messages'] = []
# ---- Helper: render message with avatar + timestamp ----
def render_message(role, text, avatar, ts):
    """
    Renders a message row with avatar, message body, and timestamp.
    We use columns to put the avatar on the left for the assistant and on the right
    for the user, which creates a chat feel.
    """
    time_str = ts.strftime("%Y-%m-%d %H:%M:%S")
    # Assistant: avatar left, message right. User: message left, avatar right.
    if role == "assistant":
        col1, col2 = st.columns([1, 11])
        with col1:
            # avatar can be an emoji or an image URL
            if avatar.startswith("http"):
                st.image(avatar, width=40)
            else:
                st.markdown(avatar, unsafe_allow_html=True)
        with col2:
            st.markdown(f"**Bot • {time_str}**")
            st.markdown(text)
    else:  # user
        col1, col2 = st.columns([11, 1])
        with col1:
            st.markdown(f"**You • {time_str}**")
            st.markdown(text)
        with col2:
            if avatar.startswith("http"):
                st.image(avatar, width=40)
            else:
                st.markdown(avatar, unsafe_allow_html=True)
# ---- Show existing messages ----
for msg in st.session_state['messages']:
    render_message(msg['role'], msg['text'], msg.get('avatar', ''), msg['ts'])
# ---- Input area ----
prompt = st.chat_input("Type your message here...")
if prompt:
    # save & render the user message immediately, with a timestamp
    user_msg = {"role": "user", "text": prompt, "ts": datetime.now(), "avatar": st.session_state.get('user_avatar', "🧑")}
    st.session_state['messages'].append(user_msg)
    render_message("user", user_msg['text'], user_msg['avatar'], user_msg['ts'])
    # Build the prompt for the LLM (you may choose to include limited history + the system instruction).
    # Simple strategy: include the system instruction + the last N messages (user + assistant) as plain text.
    history = st.session_state['messages'][-10:]  # limit to the last 10 messages
    parts = []
    if st.session_state.get('system_instruction'):
        parts.append(f"System: {st.session_state['system_instruction']}")
    for h in history:
        role = "User" if h['role'] == "user" else "Assistant"
        parts.append(f"{role}: {h['text']}")
    parts.append("Assistant:")
    prompt_text = "\n".join(parts)
    # Show a typing spinner while calling the model (makes the UX nicer)
    with st.spinner("Thinking..."):
        # small pause so the spinner is visible (adjust or remove in production)
        time.sleep(0.4)
        # Call the Vertex AI model
        try:
            model = GenerativeModel("gemini-1.5-pro")  # choose the desired model
            response = model.generate_content(
                prompt_text,
                generation_config={"max_output_tokens": 256, "temperature": 0.3},
            )
            answer = response.text
        except Exception as e:
            answer = f"(Model error: {e})"
    # Append the assistant message with timestamp and avatar, then render it
    bot_msg = {"role": "assistant", "text": answer, "ts": datetime.now(), "avatar": st.session_state.get('bot_avatar', "🤖")}
    st.session_state['messages'].append(bot_msg)
    render_message("assistant", bot_msg['text'], bot_msg['avatar'], bot_msg['ts'])
# ---- Small export / download button (always visible if messages exist) ----
if st.session_state['messages']:
    conv_lines = []
    for m in st.session_state['messages']:
        t = m['ts'].strftime("%Y-%m-%d %H:%M:%S")
        conv_lines.append(f"{t} [{m['role'].upper()}] {m['text']}")
    conv_text = "\n".join(conv_lines)
    st.download_button("Download conversation (.txt)", data=conv_text, file_name="conversation.txt", mime="text/plain")
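Note that app.py folds the system instruction into the prompt text itself ("System: ..."). Recent versions of the Vertex AI SDK also let you attach it to the model directly; a minimal sketch, assuming your google-cloud-aiplatform version supports the system_instruction parameter:

# Alternative (assumes a recent SDK): attach the system instruction to the model
# instead of prepending "System: ..." to the prompt text.
model = GenerativeModel(
    "gemini-1.5-pro",
    system_instruction=st.session_state["system_instruction"],
)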
| Feature | Reason |
|---|---|
| Avatars | Visual cue to distinguish who said what; emojis are quick and universally supported. |
| Timestamps | Helpful during demos to show when responses were generated and for auditability. |
| Typing spinner | Makes the assistant feel responsive & realistic; reduces perceived latency. |
| Clear / Reset controls | Useful during demos to quickly restart conversation or change system behavior. |
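If you prefer Streamlit's built-in chat bubbles over the column-based render_message helper, st.chat_message also accepts an avatar (emoji or image URL). A minimal sketch that renders the same session_state messages:

for msg in st.session_state["messages"]:
    with st.chat_message(msg["role"], avatar=msg.get("avatar") or None):
        st.caption(msg["ts"].strftime("%Y-%m-%d %H:%M:%S"))
        st.markdown(msg["text"])

To run the app, first install the dependencies: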
pip install streamlit google-cloud-aiplatform python-dotenv
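The app reads its project settings from environment variables via python-dotenv, so you can keep them in a .env file next to app.py (the values below are placeholders):

GCP_PROJECT_ID=your-project-id
GCP_LOCATION=us-central1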
Authenticate to Google Cloud: either set GOOGLE_APPLICATION_CREDENTIALS to your service-account key.json, or use Application Default Credentials:
gcloud auth application-default login
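If you go the service-account route instead of the gcloud login above, point GOOGLE_APPLICATION_CREDENTIALS at the key file before launching (the path is a placeholder):

export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json

Then start the app: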
streamlit run app.py
Open http://localhost:8501 in your browser, type a message, and demo the avatars, timestamps, and typing spinner.