Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 09:34:37 +00:00)
add simple api example
This commit is contained in:
parent 771261f5be
commit b925bbfc6d

2 changed files with 40 additions and 1 deletion
examples/api_example.py (new file, 30 lines added)
@@ -0,0 +1,30 @@
import requests

ENDPOINT = "http://localhost:5001/api"

# This is a very basic example of how to use the KoboldCpp API in python.
# For full documentation, you can launch KoboldCpp and read it at http://localhost:5001/api or view the web docs at https://lite.koboldai.net/koboldcpp_api
# Note: KoboldCpp also provides a fully compatible /v1/completions and /v1/chat/completions API. You can use it as a direct replacement for any OpenAI API usecases.
# Refer to https://platform.openai.com/docs/api-reference/completions and https://platform.openai.com/docs/api-reference/chat - please visit those links and read the OpenAI documentation, it has a LOT more than what is shown here.

payload = {
    "prompt": "Niko the kobold stalked carefully down the alley, his small scaly figure obscured by a dusky cloak that fluttered lightly in the cold winter breeze.",
    "max_context_length": 4096, # The maximum number of tokens of history that the AI can see. Increase for longer inputs.
    "max_length": 128, # How many tokens to generate at most. Generation might stop before this if EOS is allowed.
    "rep_pen": 1.1, # Makes outputs less repetitive by penalizing repetition
    "rep_pen_range": 512, # The range of recent tokens to which the repetition penalty is applied
    "rep_pen_slope": 0.7, # Determines the strength of the repetition penalty over that range
    "temperature": 0.8, # How random should the AI outputs be? Lower values make output more predictable.
    "top_k": 100, # Keep only the K most probable tokens
    "top_p": 0.9, # Top P sampling / Nucleus Sampling, https://arxiv.org/pdf/1904.09751.pdf
    #"sampler_seed": 1337, # Use a specific seed for text generation? This helps with consistency across tests.
}

try:
    response = requests.post(f"{ENDPOINT}/v1/generate", json=payload) # Send prompt to API
    if response.status_code == 200:
        results = response.json()['results'] # Extract the 'results' list from the JSON response
        text = results[0]['text'] # Inside results, look in the first entry for the field labeled 'text'
        print(text)
except Exception as e:
    print(f"An error occurred: {e}")
@@ -2872,7 +2872,11 @@ bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
 
 //note that the memory here must be subsequently freed!
 uint8_t* make_new_letterbox_img(uint8_t* input_image, int nx, int ny, int nc, int target_width, int target_height) {
-    int new_image_size = target_width * target_height * nc;
+    int new_image_size = (target_width * target_height * nc) + 512; //add some padding
+    if (target_width < nx || target_height < ny) {
+        printf("\nERROR: Target size smaller than input image\n");
+        return nullptr;
+    }
     uint8_t* letterboxed_image = (uint8_t*)malloc(new_image_size);
     if(letterboxed_image==nullptr)
     {
@@ -2960,6 +2964,11 @@ bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length
             free(letterboxed_image);
             letterboxed_image = nullptr;
         }
+        else
+        {
+            //letterboxing failed. just use original image
+            clip_build_img_from_pixels(data, nx, ny, img);
+        }
     }
     else
     {