b/app/frontend/streamlit_app_talk2cells.py
|
#!/usr/bin/env python3

'''
Talk2Cells: A Streamlit app for the Talk2Cells graph.
'''

import os
import sys
import random
import streamlit as st
from streamlit_feedback import streamlit_feedback
from langchain_core.messages import (
    SystemMessage,
    HumanMessage,
    AIMessage,
    ChatMessage,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tracers.context import collect_runs
from langchain.callbacks.tracers import LangChainTracer
from langsmith import Client

# Make the local aiagents4pharma package importable when the app is run
# from the repository root
sys.path.append('./')
from aiagents4pharma.talk2cells.agents.scp_agent import get_app
|
|
st.set_page_config(page_title="Talk2Cells", page_icon="🤖", layout="wide")

# Stop the app early if the OPENAI_API_KEY environment variable is not set
if "OPENAI_API_KEY" not in os.environ:
    st.error("Please set the OPENAI_API_KEY environment "
             "variable in the terminal where you run the app.")
    st.stop()
|
|
# Create a chat prompt template (named prompt_template so it is not shadowed
# by the chat-input variable `prompt` defined below)
prompt_template = ChatPromptTemplate.from_messages([
    ("system", "Welcome to Talk2Cells!"),
    MessagesPlaceholder(variable_name='chat_history', optional=True),
    ("human", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])
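
# Note: the optional MessagesPlaceholder injects prior turns when a
# `chat_history` value is supplied, and the "placeholder" entry reserves a
# slot for intermediate agent steps ({agent_scratchpad}). This template is
# currently defined for reference only and is not wired into the agent below.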
|
|
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Initialize project_name for Langsmith
if "project_name" not in st.session_state:
    # st.session_state.project_name = str(st.session_state.user_name) + '@' + str(uuid.uuid4())
    st.session_state.project_name = 'Talk2Cells-' + str(random.randint(1000, 9999))

# Initialize run_id for Langsmith
if "run_id" not in st.session_state:
    st.session_state.run_id = None

# Initialize the graph, keyed by a random unique ID
if "unique_id" not in st.session_state:
    st.session_state.unique_id = random.randint(1, 1000)
if "app" not in st.session_state:
    st.session_state.app = get_app(st.session_state.unique_id)

# Get the app
app = st.session_state.app
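
# Note: st.session_state persists across Streamlit reruns within a browser
# session, so the graph app (and its unique thread ID) is created once per
# session rather than on every interaction.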
|
|
def _submit_feedback(user_response):
    '''
    Submit the user's thumbs feedback on the latest run to the developers.
    '''
    client = Client()
    client.create_feedback(
        st.session_state.run_id,
        key="feedback",
        score=1 if user_response['score'] == "👍" else 0,
        comment=user_response['text']
    )
    st.info("Your feedback is on its way to the developers. Thank you!", icon="🚀")
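
# Note: create_feedback attaches the thumbs score (1/0) and the optional
# comment to the LangSmith run stored in st.session_state.run_id, which is
# set after each agent invocation below.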
|
|
# Main layout of the app split into two columns
main_col1, main_col2 = st.columns([3, 7])
# First column
with main_col1:
    with st.container(border=True):
        # Title
        st.write("""
            <h3 style='margin: 0px; padding-bottom: 10px; font-weight: bold;'>
            🤖 Talk2Cells
            </h3>
            """,
            unsafe_allow_html=True)
|
|
        # LLM panel (the selected model only takes effect at the front-end for now)
        llms = ["gpt-4o-mini", "gpt-4-turbo", "gpt-3.5-turbo"]
        llm_option = st.selectbox(
            "Pick an LLM to power the agent",
            llms,
            index=0,
            key="st_selectbox_llm"
        )

        # Upload files (placeholder)
        # uploaded_file = st.file_uploader(
        #     "Upload sequencing data",
        #     accept_multiple_files=False,
        #     type=["h5ad"],
        #     help='''Upload a single h5ad file containing the sequencing data.
        #         The file should be in the AnnData format.'''
        # )

    with st.container(border=False, height=500):
        prompt = st.chat_input("Say something ...", key="st_chat_input")
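
    # Note: st.chat_input returns None until the user submits a message, so
    # the `if prompt:` branch below runs only on turns with new user input.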
|
|
# Second column
with main_col2:
    # Chat history panel
    with st.container(border=True, height=575):
        st.write("#### 💬 Chat History")

        # Display chat messages
        for message in st.session_state.messages:
            with st.chat_message(message["content"].role,
                                 avatar="🤖"
                                 if message["content"].role != 'user'
                                 else "👩🏻‍💻"):
                st.markdown(message["content"].content)
                st.empty()
|
|
        # When the user asks a question
        if prompt:
            # Create a key 'uploaded_file' to read the uploaded file
            # if uploaded_file:
            #     st.session_state.article_pdf = uploaded_file.read().decode("utf-8")

            # Display user prompt
            prompt_msg = ChatMessage(prompt, role="user")
            st.session_state.messages.append(
                {
                    "type": "message",
                    "content": prompt_msg
                }
            )
            with st.chat_message("user", avatar="👩🏻‍💻"):
                st.markdown(prompt)
                st.empty()

            with st.chat_message("assistant", avatar="🤖"):
                # with st.spinner("Fetching response ..."):
                with st.spinner():
                    # Get chat history
                    history = [(m["content"].role, m["content"].content)
                               for m in st.session_state.messages
                               if m["type"] == "message"]
                    # Convert chat history to LangChain message objects.
                    # The roles stored above are "user"/"assistant", so match
                    # on "user" (not "human") when building HumanMessage.
                    chat_history = [
                        SystemMessage(content=m[1]) if m[0] == "system" else
                        HumanMessage(content=m[1]) if m[0] == "user" else
                        AIMessage(content=m[1])
                        for m in history
                    ]
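
                    # Note: chat_history is not currently passed to the agent;
                    # prior turns are presumably replayed by the graph's own
                    # memory via the thread_id in `config` below.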
|
|
                    # Create config for the agent
                    config = {"configurable": {"thread_id": st.session_state.unique_id}}

                    # Update the agent state with the selected LLM model
                    current_state = app.get_state(config)
                    # app.update_state(config, {"llm_model": llm_option})
                    # st.markdown(current_state.values["llm_model"])

                    # Set the environment variable AIAGENTS4PHARMA_LLM_MODEL
                    # so the back-end agent can pick up the selected model
                    os.environ["AIAGENTS4PHARMA_LLM_MODEL"] = llm_option
|
|
|
                    # # Get response from the agent
                    # response = app.invoke(
                    #     {"messages": [HumanMessage(content=prompt)]},
                    #     config=config
                    # )
                    ERROR_FLAG = False  # reserved for error handling (currently unused)
                    with collect_runs() as cb:
                        # Add LangSmith tracer
                        tracer = LangChainTracer(
                            project_name=st.session_state.project_name
                        )
                        # Get response from the agent
                        response = app.invoke(
                            {"messages": [HumanMessage(content=prompt)]},
                            config=config | {"callbacks": [tracer]}
                        )
                        # Keep the run ID so feedback can be attached to this trace
                        st.session_state.run_id = cb.traced_runs[-1].id
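
                        # Note: `config | {"callbacks": [tracer]}` merges the
                        # two dicts (Python 3.9+), so the thread_id and the
                        # LangSmith tracer are passed together; collect_runs
                        # exposes the traced runs, and the last run's ID is
                        # what the feedback widget below references.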
|
|
                    # Print the response
                    # print (response)

                    # Add assistant response to chat history
                    assistant_msg = ChatMessage(response["messages"][-1].content,
                                                role="assistant")
                    st.session_state.messages.append({
                        "type": "message",
                        "content": assistant_msg
                    })
                    # Display the response in the chat
                    st.markdown(response["messages"][-1].content)
                    st.empty()
|
|
        # Collect feedback and display the thumbs feedback
        if st.session_state.get("run_id"):
            feedback = streamlit_feedback(
                feedback_type="thumbs",
                optional_text_label="[Optional] Please provide an explanation",
                on_submit=_submit_feedback,
                key=f"feedback_{st.session_state.run_id}"
            )
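            # Keying the widget by run_id renders a fresh feedback form for
            # each new agent response instead of reusing the previous one.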