diff --git a/.github/workflows/python-check-requirements.yml b/.github/workflows/python-check-requirements.yml
deleted file mode 100644
index 92e1108b3..000000000
--- a/.github/workflows/python-check-requirements.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: Python check requirements.txt
-
-on:
-  push:
-    paths:
-      - 'scripts/check-requirements.sh'
-      - 'convert*.py'
-      - 'requirements.txt'
-      - 'requirements/*.txt'
-  pull_request:
-    paths:
-      - 'scripts/check-requirements.sh'
-      - 'convert*.py'
-      - 'requirements.txt'
-      - 'requirements/*.txt'
-
-jobs:
-  python-check-requirements:
-    runs-on: ubuntu-latest
-    name: check-requirements
-    steps:
-    - name: Check out source repository
-      uses: actions/checkout@v3
-    - name: Set up Python environment
-      uses: actions/setup-python@v4
-      with:
-        python-version: "3.11"
-    - name: Run check-requirements.sh script
-      run: bash scripts/check-requirements.sh nocleanup
diff --git a/expose.cpp b/expose.cpp
index e78637198..ab1111ee2 100644
--- a/expose.cpp
+++ b/expose.cpp
@@ -217,6 +217,10 @@ extern "C"
     int get_last_token_count() {
         return last_token_count;
     }
+    int get_last_seed()
+    {
+        return last_seed;
+    }
     int get_total_gens() {
         return total_gens;
     }
diff --git a/expose.h b/expose.h
index 0c9df8bb6..e97d47e08 100644
--- a/expose.h
+++ b/expose.h
@@ -102,5 +102,6 @@ extern bool generation_finished;
 extern float last_eval_time;
 extern float last_process_time;
 extern int last_token_count;
+extern int last_seed;
 extern int total_gens;
 extern stop_reason last_stop_reason;
diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp
index 81ecfea39..7b2203cc1 100644
--- a/gpttype_adapter.cpp
+++ b/gpttype_adapter.cpp
@@ -39,6 +39,7 @@ bool generation_finished;
 float last_process_time = 0;
 float last_eval_time = 0;
 int last_token_count = 0;
+int last_seed = -1;
 int total_gens = 0;
 stop_reason last_stop_reason = stop_reason::INVALID;
 std::vector<gpt_vocab::id> generated_tokens;
@@ -1531,7 +1532,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     }
     if (kcpp_params->seed <= 0 || kcpp_params->seed==0xFFFFFFFF)
     {
-        kcpp_params->seed = time(NULL);
+        kcpp_params->seed = (((uint32_t)time(NULL)) % 1000000);
     }
 
     // tokenize the prompt
@@ -2051,6 +2052,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     last_eval_time = pt2;
     last_process_time = pt1;
     last_token_count = realnpredict;
+    last_seed = kcpp_params->seed;
     total_gens += 1;
     snprintf(output.text, sizeof(output.text), "%s", concat_output.c_str());
 
diff --git a/koboldcpp.py b/koboldcpp.py
index e584ff5aa..75c48c2c4 100755
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -226,6 +226,7 @@ def init_library():
     handle.get_last_eval_time.restype = ctypes.c_float
     handle.get_last_process_time.restype = ctypes.c_float
     handle.get_last_token_count.restype = ctypes.c_int
+    handle.get_last_seed.restype = ctypes.c_int
     handle.get_total_gens.restype = ctypes.c_int
     handle.get_last_stop_reason.restype = ctypes.c_int
     handle.abort_generate.restype = ctypes.c_bool
@@ -793,7 +794,8 @@ Enter Prompt:
             lastc = handle.get_last_token_count()
             totalgens = handle.get_total_gens()
             stopreason = handle.get_last_stop_reason()
-            response_body = (json.dumps({"last_process":lastp,"last_eval":laste,"last_token_count":lastc, "total_gens":totalgens, "stop_reason":stopreason, "queue":requestsinqueue, "idle":(0 if modelbusy.locked() else 1), "hordeexitcounter":exitcounter}).encode())
+            lastseed = handle.get_last_seed()
+            response_body = (json.dumps({"last_process":lastp,"last_eval":laste,"last_token_count":lastc, "last_seed":lastseed, "total_gens":totalgens, "stop_reason":stopreason, "queue":requestsinqueue, "idle":(0 if modelbusy.locked() else 1), "hordeexitcounter":exitcounter}).encode())
 
         elif self.path.endswith('/api/extra/generate/check'):
             pendtxtStr = ""
@@ -2214,6 +2216,7 @@ def unload_libs():
    del handle.get_last_eval_time
    del handle.get_last_process_time
    del handle.get_last_token_count
+   del handle.get_last_seed
    del handle.get_total_gens
    del handle.get_last_stop_reason
    del handle.abort_generate
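
For reference, a minimal sketch of how a client could read the new last_seed field, assuming the JSON body above is served from KoboldCpp's /api/extra/perf endpoint (the path itself sits just outside this hunk) and that a server is listening on the default port 5001:

# perf_seed_check.py - minimal sketch, not part of the patch.
# Assumes the perf JSON shown above is served at /api/extra/perf
# on a local KoboldCpp server at the default port 5001.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:5001/api/extra/perf") as resp:
    perf = json.load(resp)

# last_seed is -1 until the first generation completes (see the
# initializer added in gpttype_adapter.cpp); afterwards it holds the
# seed actually used, including one auto-picked from time(NULL).
print("seed used by the last generation:", perf.get("last_seed", -1))

Exposing the seed this way lets a client reproduce a generation that used an auto-picked seed, and the % 1000000 change keeps those auto-picked seeds short enough to re-enter by hand.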