Mirror of https://github.com/Lizonghang/prima.cpp.git, synced 2025-09-05 23:19:18 +00:00
fix: replace localhost with 127.0.0.1
parent fbbc30c950
commit 104e3b2356
3 changed files with 17 additions and 17 deletions
@@ -3438,8 +3438,8 @@ struct llama_context {
     struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]

     // sockets
-    std::string master_ip = "localhost";
-    std::string next_node_ip = "localhost";
+    std::string master_ip = "127.0.0.1";
+    std::string next_node_ip = "127.0.0.1";
     uint32_t data_port = 9000;
     uint32_t signal_port = 10000;
     zmq::context_t * sock_context = nullptr;
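The fields above hold a node's networking state, and the two defaults changed in this hunk are what a node falls back to when no peer address is configured (a single-host run). As a rough illustration of how such fields end up in a ZeroMQ endpoint, here is a minimal cppzmq sketch; the PUSH socket type, the main() wrapper, and the endpoint string are assumptions for illustration, not code from this commit:

#include <cstdint>
#include <string>
#include <zmq.hpp>  // cppzmq

int main() {
    // Illustrative values only, mirroring the defaults in llama_context above.
    std::string next_node_ip = "127.0.0.1";
    uint32_t    data_port    = 9000;

    zmq::context_t ctx(1);                              // analogous to sock_context
    zmq::socket_t  sock(ctx, zmq::socket_type::push);   // socket type is a guess

    // A numeric address keeps the endpoint independent of how "localhost"
    // happens to resolve on the host.
    sock.connect("tcp://" + next_node_ip + ":" + std::to_string(data_port));
    return 0;
}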
@@ -20452,12 +20452,12 @@ static uint32_t map_rank_to_port(uint32_t rank, uint32_t data_port) {
     return data_port + rank;
 }

-static std::string try_connect(llama_context *ctx, uint32_t rank, TopoRebuildHelperInfo* infos, uint32_t n_world, zmq::socket_t** socket){
-    auto prv_rank = (rank - 1 + n_world) % n_world;
-    std::string ip = infos[prv_rank].dev_info.next_ip;
+static std::string try_connect(llama_context * ctx, uint32_t rank, TopoRebuildHelperInfo * infos, uint32_t n_world, zmq::socket_t ** socket){
+    auto prev_rank = (rank - 1 + n_world) % n_world;
+    std::string ip = infos[prev_rank].dev_info.next_ip;
     auto port = map_rank_to_port(rank, ctx->data_port);

-    if(!isPortOpen(ip, port)){
+    if (!is_port_open(ip, port)) {
         *socket = nullptr;
         return "";
     }
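For context on the probe above: map_rank_to_port is just a rank offset on the base data port, so try_connect checks the previous node's advertised IP at the target rank's port before opening a socket to it. A tiny standalone sketch (main() and the literal rank are illustrative only, not part of the commit):

#include <cstdint>
#include <cstdio>

// Same one-liner as in the hunk above: each rank's data channel
// sits at the base data port plus its rank.
static uint32_t map_rank_to_port(uint32_t rank, uint32_t data_port) {
    return data_port + rank;
}

int main() {
    // With the default data_port of 9000: rank 0 -> 9000, rank 1 -> 9001, rank 2 -> 9002, ...
    std::printf("rank 2 data port: %u\n", (unsigned) map_rank_to_port(2, 9000));
    return 0;
}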
@@ -20679,7 +20679,7 @@ int llama_rebuild_topo(llama_context * ctx,
     auto next_connect_rank = (my_rank + 1) % n_world;
     zmq::socket_t* socket_to_close = nullptr;
     bool is_not_exit = n_layer_window[my_rank] > 0 || topo_helper[my_rank].is_forwarder == 1;
-    if (is_not_exit){
+    if (is_not_exit) {
         // reconstruct socket to the next valid rank
         auto current_rank = my_rank;
         std::vector<uint32_t> nodes;
@@ -20738,7 +20738,7 @@ int llama_rebuild_topo(llama_context * ctx,
     }

     // notify next connect node
-    if(!ctx->next_node_ip.empty() && is_not_exit){
+    if (!ctx->next_node_ip.empty() && is_not_exit) {
         GGML_ASSERT(ctx->send_socket != nullptr);
         try {
             auto msgs = topohelper_to_messages(topo_helper, n_world);
@@ -20749,15 +20749,15 @@ int llama_rebuild_topo(llama_context * ctx,
         }
     }

-    if(n_layer_window[my_rank] > 0){
+    if (n_layer_window[my_rank] > 0) {
         *node_type = NodeType::NODE_TYPE_WORKER;
-    }else if (topo_helper[my_rank].is_forwarder == 1){
+    } else if (topo_helper[my_rank].is_forwarder == 1) {
         *node_type = NodeType::NODE_TYPE_FORWARDER;
-    }else{
+    } else {
         *node_type = NodeType::NODE_TYPE_EXIT;
     }

-    if(ctx->send_socket != nullptr && *node_type!=NodeType::NODE_TYPE_EXIT){
+    if (ctx->send_socket != nullptr && *node_type != NodeType::NODE_TYPE_EXIT) {
         // recv the whole view of all nodes
         std::vector<zmq::message_t> msgs;
         if (!zmq::recv_multipart(*ctx->recv_socket, std::back_inserter(msgs))) {
@@ -20768,7 +20768,7 @@ int llama_rebuild_topo(llama_context * ctx,
             topo_helper[i].deserialize((char *)msgs[i].data());
         }
         // broadcast the whole view
-        if(next_connect_rank!=0){
+        if (next_connect_rank!=0) {
             try {
                 zmq::send_multipart(*ctx->send_socket, msgs);
             } catch (const zmq::error_t& e) {
@@ -20777,7 +20777,7 @@ int llama_rebuild_topo(llama_context * ctx,
             }
         }
     }
-    for(size_t i = 0; i < n_world; i++) {
+    for (size_t i = 0; i < n_world; i++) {
         is_forwarder[i] = topo_helper[i].is_forwarder;
     }
     ctx->cparams.node_type = *node_type;
@@ -5,7 +5,7 @@
 #include <arpa/inet.h>
 #include <unistd.h>

-bool isPortOpen(const std::string& ip, uint32_t port, int timeout_sec) {
+bool is_port_open(const std::string& ip, uint32_t port, int timeout_sec) {
     int sock = socket(AF_INET, SOCK_STREAM, 0);
     if (sock < 0) return false;

@@ -4,4 +4,4 @@

 typedef unsigned int uint32_t;

-bool isPortOpen(const std::string& ip, uint32_t port, int timeout_sec = 2);
+bool is_port_open(const std::string& ip, uint32_t port, int timeout_sec = 2);
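The diff shows only the first two lines of is_port_open's body, so the following is a minimal sketch of how such a TCP reachability probe is commonly written, assuming a non-blocking connect() with a select() timeout; everything beyond the socket() call and the timeout_sec = 2 default is an assumption, not code from this commit. Note that inet_pton() accepts numeric addresses only, so a default of "localhost" would fail such a check, which is one plausible motivation for switching the defaults to 127.0.0.1.

#include <arpa/inet.h>
#include <cerrno>
#include <cstdint>
#include <fcntl.h>
#include <netinet/in.h>
#include <string>
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

bool is_port_open(const std::string& ip, uint32_t port, int timeout_sec = 2) {
    int sock = socket(AF_INET, SOCK_STREAM, 0);
    if (sock < 0) return false;

    // non-blocking so connect() returns immediately and the wait can be bounded
    int flags = fcntl(sock, F_GETFL, 0);
    fcntl(sock, F_SETFL, flags | O_NONBLOCK);

    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_port   = htons((uint16_t) port);
    if (inet_pton(AF_INET, ip.c_str(), &addr.sin_addr) <= 0) {
        close(sock);
        return false;  // numeric IPs only; a hostname would be rejected here
    }

    bool open = false;
    if (connect(sock, (sockaddr *) &addr, sizeof(addr)) == 0) {
        open = true;                       // connected immediately
    } else if (errno == EINPROGRESS) {
        // wait up to timeout_sec for the socket to become writable
        fd_set wfds;
        FD_ZERO(&wfds);
        FD_SET(sock, &wfds);
        timeval tv{timeout_sec, 0};
        if (select(sock + 1, nullptr, &wfds, nullptr, &tv) > 0) {
            int err = 0;
            socklen_t len = sizeof(err);
            getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &len);
            open = (err == 0);
        }
    }
    close(sock);
    return open;
}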