Mirror of https://github.com/TheBlewish/Automated-AI-Web-Researcher-Ollama.git, synced 2025-04-23 18:19:10 +00:00
added openai support
This commit is contained in:
parent e3cb357c3b
commit b5117f37b5
4 changed files with 88 additions and 23 deletions
README.md | 20
@@ -119,6 +119,26 @@ python Web-LLM.py
The LLM settings can be modified in `llm_config.py`. You must specify your model name in the configuration for the researcher to function. The default configuration is optimized for research tasks with the specified Phi-3 model.
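For example, with the default Ollama backend the model is named in `LLM_CONFIG_OLLAMA`. A minimal excerpt of `llm_config.py` (the model tag shown is just the repository default; use whatever model you have pulled locally):

```python
# llm_config.py (excerpt) -- point "model_name" at a model available to your Ollama server
LLM_TYPE = "ollama"

LLM_CONFIG_OLLAMA = {
    "llm_type": "ollama",
    "base_url": "http://localhost:11434",
    "model_name": "custom-phi3-32k-Q4_K_M",  # replace with your Ollama model name
    "n_ctx": 55000,  # large context window, as recommended for research sessions
}
```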
## OpenAI Configuration
To use OpenAI models (a quick smoke-test sketch follows these steps):
1. Set your OpenAI API key in one of two ways:
- In `llm_config.py`: Add your API key to `LLM_CONFIG_OPENAI["api_key"]`
- Or as an environment variable: `export OPENAI_API_KEY='your-api-key'`
2. Change the `LLM_TYPE` in `llm_config.py`:
```python
LLM_TYPE = "openai"  # Change this line
```
3. Optionally, modify the model name in `LLM_CONFIG_OPENAI`:
```python
"model_name": "gpt-4o-mini"  # Or another OpenAI model
```
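Once the key and `LLM_TYPE` are in place, a quick smoke test can confirm the OpenAI backend is wired up. This is a minimal sketch, assuming the wrapper class from this commit is importable as `llm_wrapper.LLMWrapper` (the wrapper module's file name is not shown in this diff):

```python
# Smoke test for the OpenAI backend (sketch; assumes the wrapper module is
# named llm_wrapper.py and that LLM_TYPE = "openai" in llm_config.py).
from llm_wrapper import LLMWrapper

wrapper = LLMWrapper()  # reads llm_config.py and initializes the OpenAI client
print(wrapper.generate("Reply with the single word: ready"))
```

If the API key is missing from both `llm_config.py` and the `OPENAI_API_KEY` environment variable, the wrapper raises a `ValueError` at construction time rather than failing later mid-research.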
## Current Status
This is a prototype that demonstrates functional automated research capabilities. While still in development, it successfully performs structured research tasks. It is currently tested and works well with the phi3:3.8b-mini-128k-instruct model when the context size is set as advised earlier.
@@ -1,29 +1,29 @@
# llm_config.py
LLM_TYPE = "ollama" # Options: 'llama_cpp', 'ollama'
|
||||
LLM_TYPE = "openai" # Options: 'llama_cpp', 'ollama', 'openai'
|
||||
|
||||
# LLM settings for llama_cpp
|
||||
MODEL_PATH = "/home/james/llama.cpp/models/gemma-2-9b-it-Q6_K.gguf" # Replace with your llama.cpp models filepath
|
||||
MODEL_PATH = "/home/james/llama.cpp/models/gemma-2-9b-it-Q6_K.gguf"
|
||||
|
||||
LLM_CONFIG_LLAMA_CPP = {
    "llm_type": "llama_cpp",
    "model_path": MODEL_PATH,
-    "n_ctx": 20000,  # context size
-    "n_gpu_layers": 0,  # number of layers to offload to GPU (-1 for all, 0 for none)
-    "n_threads": 8,  # number of threads to use
-    "temperature": 0.7,  # temperature for sampling
-    "top_p": 0.9,  # top p for sampling
-    "top_k": 40,  # top k for sampling
-    "repeat_penalty": 1.1,  # repeat penalty
-    "max_tokens": 1024,  # max tokens to generate
-    "stop": ["User:", "\n\n"]  # stop sequences
+    "n_ctx": 20000,
+    "n_gpu_layers": 0,
+    "n_threads": 8,
+    "temperature": 0.7,
+    "top_p": 0.9,
+    "top_k": 40,
+    "repeat_penalty": 1.1,
+    "max_tokens": 1024,
+    "stop": ["User:", "\n\n"]
}

# LLM settings for Ollama
LLM_CONFIG_OLLAMA = {
    "llm_type": "ollama",
-    "base_url": "http://localhost:11434",  # default Ollama server URL
-    "model_name": "custom-phi3-32k-Q4_K_M",  # Replace with your Ollama model name
+    "base_url": "http://localhost:11434",
+    "model_name": "custom-phi3-32k-Q4_K_M",
    "temperature": 0.7,
    "top_p": 0.9,
    "n_ctx": 55000,

@@ -31,10 +31,24 @@ LLM_CONFIG_OLLAMA = {
"stop": ["User:", "\n\n"]
|
||||
}
|
||||
|
||||
+# New: LLM settings for OpenAI
+LLM_CONFIG_OPENAI = {
+    "llm_type": "openai",
+    "model_name": "gpt-4o-mini",
+    "api_key": "",
+    "temperature": 0.7,
+    "top_p": 0.9,
+    "max_tokens": 4096,
+    "stop": ["User:", "\n\n"],
+    "context_length": 128000  # 128k-token context window (gpt-4o-mini)
+}

def get_llm_config():
    if LLM_TYPE == "llama_cpp":
        return LLM_CONFIG_LLAMA_CPP
    elif LLM_TYPE == "ollama":
        return LLM_CONFIG_OLLAMA
+    elif LLM_TYPE == "openai":
+        return LLM_CONFIG_OPENAI
    else:
        raise ValueError(f"Invalid LLM_TYPE: {LLM_TYPE}")

@@ -1,20 +1,60 @@
from llama_cpp import Llama
import requests
import json
+import os
from llm_config import get_llm_config
+from openai import OpenAI

class LLMWrapper:
    def __init__(self):
        self.llm_config = get_llm_config()
        self.llm_type = self.llm_config.get('llm_type', 'llama_cpp')

        if self.llm_type == 'llama_cpp':
            self.llm = self._initialize_llama_cpp()
        elif self.llm_type == 'ollama':
            self.base_url = self.llm_config.get('base_url', 'http://localhost:11434')
            self.model_name = self.llm_config.get('model_name', 'your_model_name')
+        elif self.llm_type == 'openai':
+            self._initialize_openai()
        else:
            raise ValueError(f"Unsupported LLM type: {self.llm_type}")

+    def _initialize_openai(self):
+        """Initialize OpenAI client"""
+        api_key = self.llm_config.get('api_key') or os.getenv('OPENAI_API_KEY')
+        if not api_key:
+            raise ValueError("OpenAI API key not found. Set it in config or OPENAI_API_KEY environment variable")
+        self.client = OpenAI(api_key=api_key)
+        self.model_name = self.llm_config.get('model_name', 'gpt-4o-mini')

+    def generate(self, prompt, **kwargs):
+        if self.llm_type == 'llama_cpp':
+            llama_kwargs = self._prepare_llama_kwargs(kwargs)
+            response = self.llm(prompt, **llama_kwargs)
+            return response['choices'][0]['text'].strip()
+        elif self.llm_type == 'ollama':
+            return self._ollama_generate(prompt, **kwargs)
+        elif self.llm_type == 'openai':
+            return self._openai_generate(prompt, **kwargs)
+        else:
+            raise ValueError(f"Unsupported LLM type: {self.llm_type}")

+    def _openai_generate(self, prompt, **kwargs):
+        """Generate text using OpenAI API"""
+        try:
+            response = self.client.chat.completions.create(
+                model=self.model_name,
+                messages=[{"role": "user", "content": prompt}],
+                temperature=kwargs.get('temperature', self.llm_config.get('temperature', 0.7)),
+                max_tokens=kwargs.get('max_tokens', self.llm_config.get('max_tokens', 4096)),
+                top_p=kwargs.get('top_p', self.llm_config.get('top_p', 0.9)),
+                stop=kwargs.get('stop', self.llm_config.get('stop', None))
+            )
+            return response.choices[0].message.content.strip()
+        except Exception as e:
+            raise Exception(f"OpenAI API error: {str(e)}")

    def _initialize_llama_cpp(self):
        return Llama(
            model_path=self.llm_config.get('model_path'),

@@ -24,16 +64,6 @@ class LLMWrapper:
            verbose=False
        )

-    def generate(self, prompt, **kwargs):
-        if self.llm_type == 'llama_cpp':
-            llama_kwargs = self._prepare_llama_kwargs(kwargs)
-            response = self.llm(prompt, **llama_kwargs)
-            return response['choices'][0]['text'].strip()
-        elif self.llm_type == 'ollama':
-            return self._ollama_generate(prompt, **kwargs)
-        else:
-            raise ValueError(f"Unsupported LLM type: {self.llm_type}")

    def _ollama_generate(self, prompt, **kwargs):
        url = f"{self.base_url}/api/generate"
        data = {

@@ -9,3 +9,4 @@ keyboard
curses-windows; sys_platform == 'win32'
tqdm
urllib3
+openai