mirror of https://github.com/lfnovo/open-notebook.git
synced 2026-04-29 03:50:04 +00:00
Version 1 (#160)
- New front-end
- Launch Chat API
- Manage Sources
- Enable re-embedding of all contents
- Sources can be added without a notebook now
- Improved settings
- Enable model selector on all chats
- Background processing for better experience
- Dark mode
- Improved Notes
- Improved Docs:
  - Remove all Streamlit references from documentation
  - Update deployment guides with React frontend setup
  - Fix Docker environment variables format (SURREAL_URL, SURREAL_PASSWORD)
  - Update docker image tag from :latest to :v1-latest
  - Change navigation references (Settings → Models to just Models)
  - Update development setup to include frontend npm commands
  - Add MIGRATION.md guide for users upgrading from Streamlit
  - Update quick-start guide with correct environment variables
  - Add port 5055 documentation for API access (example request sketched below)
  - Update project structure to reflect frontend/ directory
  - Remove outdated source-chat documentation files
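The notes above mention that the API is now reachable on port 5055. Below is a minimal sketch of calling it from Python, assuming the server runs locally and httpx is installed; the /api/models path and response handling are illustrative assumptions rather than confirmed routes, so check the updated API docs for the real endpoints.

```python
# Hypothetical example: query the Open Notebook API on port 5055.
# The endpoint path below is an assumption for illustration only;
# the real routes are described in the updated API documentation.
import httpx

API_BASE = "http://localhost:5055"  # port 5055 per the release notes


def list_models() -> list[dict]:
    """Fetch available models from the (assumed) /api/models route."""
    response = httpx.get(f"{API_BASE}/api/models", timeout=30.0)
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    for model in list_models():
        print(model)
```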
This commit is contained in:
parent 124d7d110c
commit b7e656a319

319 changed files with 46747 additions and 7408 deletions
```diff
@@ -20,19 +20,54 @@ class ThreadState(TypedDict):
     notebook: Optional[Notebook]
     context: Optional[str]
     context_config: Optional[dict]
+    model_override: Optional[str]


 def call_model_with_messages(state: ThreadState, config: RunnableConfig) -> dict:
-    system_prompt = Prompter(prompt_template="chat").render(data=state)
+    system_prompt = Prompter(prompt_template="chat").render(data=state)  # type: ignore[arg-type]
     payload = [SystemMessage(content=system_prompt)] + state.get("messages", [])
-    model = asyncio.run(
-        provision_langchain_model(
-            str(payload),
-            config.get("configurable", {}).get("model_id"),
-            "chat",
-            max_tokens=10000,
-        )
-    )
+    model_id = (
+        config.get("configurable", {}).get("model_id")
+        or state.get("model_override")
+    )
+
+    # Handle async model provisioning from sync context
+    def run_in_new_loop():
+        """Run the async function in a new event loop"""
+        new_loop = asyncio.new_event_loop()
+        try:
+            asyncio.set_event_loop(new_loop)
+            return new_loop.run_until_complete(
+                provision_langchain_model(
+                    str(payload),
+                    model_id,
+                    "chat",
+                    max_tokens=10000,
+                )
+            )
+        finally:
+            new_loop.close()
+            asyncio.set_event_loop(None)
+
+    try:
+        # Try to get the current event loop
+        asyncio.get_running_loop()
+        # If we're in an event loop, run in a thread with a new loop
+        import concurrent.futures
+
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future = executor.submit(run_in_new_loop)
+            model = future.result()
+    except RuntimeError:
+        # No event loop running, safe to use asyncio.run()
+        model = asyncio.run(
+            provision_langchain_model(
+                str(payload),
+                model_id,
+                "chat",
+                max_tokens=10000,
+            )
+        )

     ai_message = model.invoke(payload)
     return {"messages": ai_message}
```
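The new event-loop handling in call_model_with_messages follows a general pattern: if the calling thread already has a running loop, the coroutine is executed on a fresh loop in a worker thread; otherwise it falls back to asyncio.run(). A minimal, self-contained sketch of the same pattern follows; run_coroutine_sync and _demo are illustrative names, not part of the repository.

```python
import asyncio
import concurrent.futures
from typing import Any, Coroutine


def run_coroutine_sync(coro: Coroutine[Any, Any, Any]) -> Any:
    """Run a coroutine from sync code, whether or not a loop is already running."""

    def run_in_new_loop() -> Any:
        # Fresh event loop in this worker thread, closed when done.
        new_loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(new_loop)
            return new_loop.run_until_complete(coro)
        finally:
            new_loop.close()
            asyncio.set_event_loop(None)

    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread: asyncio.run() is safe.
        return asyncio.run(coro)

    # A loop is already running (e.g. we were called from async framework code),
    # so hand the coroutine to a new loop on a worker thread instead.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        return executor.submit(run_in_new_loop).result()


# Example usage with a stand-in coroutine:
async def _demo() -> str:
    await asyncio.sleep(0)
    return "ok"


if __name__ == "__main__":
    print(run_coroutine_sync(_demo()))  # prints "ok"
```

The trade-off is one short-lived worker thread per call, which keeps a synchronous LangGraph node usable even when the surrounding process is already running an event loop.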