diff --git a/Makefile b/Makefile
index 442b6dea4..4f2c6ddf0 100644
--- a/Makefile
+++ b/Makefile
@@ -266,8 +266,8 @@ endif # LLAMA_METAL
 ifneq ($(filter aarch64%,$(UNAME_M)),)
 	# Apple M1, M2, etc.
 	# Raspberry Pi 3, 4, Zero 2 (64-bit)
-	CFLAGS += -mcpu=native
-	CXXFLAGS += -mcpu=native
+	CFLAGS +=
+	CXXFLAGS +=
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
 	# Raspberry Pi 1, Zero
diff --git a/README.md b/README.md
index cc2400503..ccf9469f9 100644
--- a/README.md
+++ b/README.md
@@ -109,6 +109,7 @@ You can then run koboldcpp anywhere from the terminal by running `koboldcpp` to
 - Grab a small GGUF model, such as `wget https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q2_K.gguf`
 - Start the python server `python koboldcpp.py --model phi-2.Q2_K.gguf`
 - Connect to `http://localhost:5001` on your mobile browser
+- If you encounter any errors, make sure your packages are up-to-date with `pkg up`
 
 ## AMD
 - Please check out https://github.com/YellowRoseCx/koboldcpp-rocm