improve search functions

LUIS NOVO 2024-11-13 15:52:44 -03:00
parent b04761affc
commit 066c7a06e2
7 changed files with 287 additions and 38 deletions


@@ -22,6 +22,7 @@ class MigrationManager:
Migration.from_file("migrations/1.surrealql"),
Migration.from_file("migrations/2.surrealql"),
Migration.from_file("migrations/3.surrealql"),
Migration.from_file("migrations/4.surrealql"),
]
self.down_migrations = [
Migration.from_file(
@@ -29,6 +30,7 @@ class MigrationManager:
),
Migration.from_file("migrations/2_down.surrealql"),
Migration.from_file("migrations/3_down.surrealql"),
Migration.from_file("migrations/4_down.surrealql"),
]
self.runner = MigrationRunner(
up_migrations=self.up_migrations,


@@ -7,7 +7,6 @@ from langchain_core.runnables import (
)
from langgraph.graph import END, START, StateGraph
from langgraph.types import Send
from loguru import logger
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
@@ -63,7 +62,6 @@ async def call_model_with_messages(state: ThreadState, config: RunnableConfig) -
)
# model = model.bind_tools(tools)
ai_message = (model | parser).invoke(system_prompt)
logger.debug(ai_message)
return {"strategy": ai_message}


@@ -280,12 +280,6 @@ class OpenAILanguageModel(LanguageModel):
Convert the language model to a LangChain chat model.
"""
data = {
"model": self.model_name,
"top_p": self.top_p,
"temperature": self.temperature,
}
kwargs = self.kwargs.copy() # Make a copy to avoid modifying the original
if self.json:
kwargs["response_format"] = {"type": "json_object"}
@@ -293,19 +287,19 @@ class OpenAILanguageModel(LanguageModel):
# Set the token limit in kwargs with the appropriate key
if self.model_name in ["o1-mini", "o1-preview"]:
kwargs["max_completion_tokens"] = self.max_tokens
data["top_p"] = 1
data["streaming"] = False
data["max_tokens"] = None
top_p = 1
streaming = False
max_tokens = None
else:
data["max_tokens"] = self.max_tokens
data["top_p"] = self.top_p
data["streaming"] = self.streaming
max_tokens = self.max_tokens
top_p = self.top_p
streaming = self.streaming
return ChatOpenAI(
model_name=data.get("model_name"),
temperature=data.get("temperature"),
streaming=data.get("streaming"),
max_tokens=data.get("max_tokens"),
top_p=data.get("top_p"),
model=self.model_name,
temperature=self.temperature,
streaming=streaming,
max_tokens=max_tokens,
top_p=top_p,
model_kwargs=kwargs,
)
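
For context, the refactor in this hunk drops the intermediate data dict — whose data.get("model_name") lookup never matched the "model" key the dict actually stored — and passes the attributes straight to ChatOpenAI. A sketch of the resulting method, assembled from the added lines above; the surrounding class stub and its default values are assumptions made so the example stands on its own:

from dataclasses import dataclass, field

from langchain_openai import ChatOpenAI


@dataclass
class OpenAILanguageModel:
    model_name: str = "gpt-4o-mini"
    temperature: float = 0.0
    top_p: float = 1.0
    streaming: bool = False
    max_tokens: int | None = None
    json: bool = False
    kwargs: dict = field(default_factory=dict)

    def to_chat_model(self) -> ChatOpenAI:
        """Convert the language model to a LangChain chat model."""
        kwargs = self.kwargs.copy()  # copy so the stored kwargs are not mutated
        if self.json:
            kwargs["response_format"] = {"type": "json_object"}
        if self.model_name in ["o1-mini", "o1-preview"]:
            # these models take max_completion_tokens; the hunk pins the
            # sampling and streaming knobs for them
            kwargs["max_completion_tokens"] = self.max_tokens
            top_p, streaming, max_tokens = 1, False, None
        else:
            max_tokens = self.max_tokens
            top_p = self.top_p
            streaming = self.streaming
        return ChatOpenAI(
            model=self.model_name,
            temperature=self.temperature,
            streaming=streaming,
            max_tokens=max_tokens,
            top_p=top_p,
            model_kwargs=kwargs,
        )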