
Android Network Management Source Code Analysis: netd


http://www.voidcn.com/blog/a34140974/article/p-5033426.html

1 Netd Overview

Netd is Android's network daemon. It acts as a network housekeeper: it wraps the various kinds of low-level networking (NAT, PPP, SoftAP, tethering, Ethernet, mDNS, and so on), hides the differences between the underlying network interfaces, and exposes a unified calling interface to the Framework, simplifying network use. Netd has two main jobs: first, receiving network requests from the Framework, handling them, and reporting the results back to the Framework; second, monitoring network events (disconnect/connect/error, etc.) and reporting them up to the Framework layer.

2 Netd Startup Process

Netd runs as a background service process and is started early in the init stage of Android system boot. Its entry in init.rc looks like this:

service netd /system/bin/netd
    class main
    socket netd stream 0660 root system
    socket dnsproxyd stream 0660 root inet
    socket mdns stream 0660 root system
    socket fwmarkd stream 0660 root inet
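
A note on what these `socket` lines mean (general init background, not netd code): for each entry, init itself creates and binds the socket under /dev/socket/ and hands the open descriptor down to the service through an ANDROID_SOCKET_<name> environment variable; the daemon then fetches it by name. A minimal sketch of the retrieval side, using the libcutils helper android_get_control_socket():

#include <cutils/sockets.h>  // android_get_control_socket()

// Sketch: fetch the fd that init created for the "netd" socket entry.
// Returns the inherited descriptor, or -1 if we were not started by init
// (or init.rc has no such socket line).
int get_control_socket_example(void) {
    int fd = android_get_control_socket("netd");
    if (fd < 0) {
        return -1;
    }
    return fd; // already created and bound by init; ready for listen()/recv()
}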

As you can see, four sockets are configured for netd here (one more than in older versions: the socket named "fwmarkd"). From this configuration we can locate netd's entry function, main():

int main() {
    CommandListener *cl;
    NetlinkManager *nm;
    DnsProxyListener *dpl;
    MDnsSdListener *mdnsl;
    FwmarkServer* fwmarkServer;

    ALOGI("Netd 1.0 starting");

    remove_pid_file(); // presumably deletes a stale pid file left by a previous run
    blockSigpipe();    // block SIGPIPE

    // Create the NetlinkManager instance
    if (!(nm = NetlinkManager::Instance())) {
        ALOGE("Unable to create NetlinkManager");
        exit(1);
    };

    // Create the CommandListener instance, set it as the NetlinkManager's
    // broadcaster, then start nm
    cl = new CommandListener();
    nm->setBroadcaster((SocketListener *) cl);
    if (nm->start()) {
        ALOGE("Unable to start NetlinkManager (%s)", strerror(errno));
        exit(1);
    }

    // Set local DNS mode, to prevent bionic from proxying
    // back to this service, recursively.
    setenv("ANDROID_DNS_MODE", "local", 1);

    // Create and start listening on the "dnsproxyd" socket
    dpl = new DnsProxyListener(CommandListener::sNetCtrl);
    if (dpl->startListener()) {
        ALOGE("Unable to start DnsProxyListener (%s)", strerror(errno));
        exit(1);
    }

    // Create and start listening on the "mdns" socket
    mdnsl = new MDnsSdListener();
    if (mdnsl->startListener()) {
        ALOGE("Unable to start MDnsSdListener (%s)", strerror(errno));
        exit(1);
    }

    // Create and start listening on the "fwmarkd" socket
    fwmarkServer = new FwmarkServer(CommandListener::sNetCtrl);
    if (fwmarkServer->startListener()) {
        ALOGE("Unable to start FwmarkServer (%s)", strerror(errno));
        exit(1);
    }

    /*
     * Now that we're up, we can respond to commands
     */
    // Start listening on the "netd" socket
    if (cl->startListener()) {
        ALOGE("Unable to start CommandListener (%s)", strerror(errno));
        exit(1);
    }

    bool wrote_pid = write_pid_file();

    while(1) {
        sleep(30); // 30 sec
        if (!wrote_pid) {
            wrote_pid = write_pid_file();
        }
    }

    ALOGI("Netd exiting");
    remove_pid_file();
    exit(0);
}

As the code above shows, netd's startup is not complicated: it mainly starts four listening sockets, and the analysis below will show that each socket gets its own listener thread. Let's start with NetlinkManager (NM for short), which is responsible for receiving and parsing UEvent messages from the kernel. If you know Linux sockets well, the name "NetlinkManager" alone suggests the basic implementation and purpose of this class: it must use PF_NETLINK sockets, the kind generally used in user space (as opposed to the kernel) to listen for kernel events such as USB plug/unplug. From the code in main() we know its entry point is the start() function.

int NetlinkManager::start() {
    // Create the socket that receives NETLINK_KOBJECT_UEVENT messages; the fd is kept in mUeventSock.
    // NETLINK_FORMAT_ASCII means the UEvent payload is an ASCII string.
    if ((mUeventHandler = setupSocket(&mUeventSock, NETLINK_KOBJECT_UEVENT,
            0xffffffff, NetlinkListener::NETLINK_FORMAT_ASCII, false)) == NULL) {
        return -1;
    }

    // Create the socket that receives RTMGRP_LINK (routing/link) messages; the fd is kept in mRouteSock.
    // NETLINK_FORMAT_BINARY means the message is a struct and must be parsed as binary.
    if ((mRouteHandler = setupSocket(&mRouteSock, NETLINK_ROUTE,
            RTMGRP_LINK |
            RTMGRP_IPV4_IFADDR |
            RTMGRP_IPV6_IFADDR |
            RTMGRP_IPV6_ROUTE |
            (1 << (RTNLGRP_ND_USEROPT - 1)),
            NetlinkListener::NETLINK_FORMAT_BINARY, false)) == NULL) {
        return -1;
    }

    // Create the socket that receives NETLINK_NFLOG messages; the fd is kept in mQuotaSock.
    if ((mQuotaHandler = setupSocket(&mQuotaSock, NETLINK_NFLOG,
            NFLOG_QUOTA_GROUP, NetlinkListener::NETLINK_FORMAT_BINARY, false)) == NULL) {
        ALOGE("Unable to open quota socket");
    }

    // Create the socket that receives NETLINK_NETFILTER messages; the fd is kept in mStrictSock.
    if ((mStrictHandler = setupSocket(&mStrictSock, NETLINK_NETFILTER,
            0, NetlinkListener::NETLINK_FORMAT_BINARY_UNICAST, true)) == NULL) {
        ALOGE("Unable to open strict socket");
    }

    return 0;
}
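
In passing: the NETLINK_FORMAT_ASCII mentioned in the comments above refers to the raw kernel uevent format. For reference (an illustrative example, not output from this code), a single NETLINK_KOBJECT_UEVENT datagram is a block of NUL-separated ASCII strings, roughly:

// Illustrative layout of a single kernel uevent datagram:
//   "add@/devices/platform/xyz/usb1\0"   <- summary header: ACTION@DEVPATH
//   "ACTION=add\0"
//   "DEVPATH=/devices/platform/xyz/usb1\0"
//   "SUBSYSTEM=usb\0"
//   "SEQNUM=1234\0"
// NETLINK_FORMAT_BINARY messages, by contrast, are nlmsghdr-framed structures
// (e.g. RTM_NEWADDR) that NetlinkEvent::decode() parses field by field.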

start() calls setupSocket() four times, creating four PF_NETLINK sockets that listen for different kernel events. Let's look at setupSocket():

NetlinkHandler *NetlinkManager::setupSocket(int *sock, int netlinkFamily,
                                            int groups, int format, bool configNflog) {
    struct sockaddr_nl nladdr;
    int sz = 64 * 1024;
    int on = 1;

    memset(&nladdr, 0, sizeof(nladdr));
    nladdr.nl_family = AF_NETLINK;
    nladdr.nl_pid = getpid();
    nladdr.nl_groups = groups;

    // Create the socket. Note carefully that its type is SOCK_DGRAM -- this line
    // is the key to the whole NM. netlinkFamily selects which kernel events the
    // socket subscribes to.
    if ((*sock = socket(PF_NETLINK, SOCK_DGRAM | SOCK_CLOEXEC, netlinkFamily)) < 0) {
        ALOGE("Unable to create netlink socket: %s", strerror(errno));
        return NULL;
    }

    // Set socket options
    if (setsockopt(*sock, SOL_SOCKET, SO_RCVBUFFORCE, &sz, sizeof(sz)) < 0) {
        ALOGE("Unable to set uevent socket SO_RCVBUFFORCE option: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    if (setsockopt(*sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on)) < 0) {
        SLOGE("Unable to set uevent socket SO_PASSCRED option: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    // Bind
    if (bind(*sock, (struct sockaddr *) &nladdr, sizeof(nladdr)) < 0) {
        ALOGE("Unable to bind netlink socket: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    if (configNflog) { // true only for mStrictSock
        if (android_nflog_send_config_cmd(*sock, 0, NFULNL_CFG_CMD_PF_UNBIND, AF_INET) < 0) {
            ALOGE("Failed NFULNL_CFG_CMD_PF_UNBIND: %s", strerror(errno));
            return NULL;
        }
        if (android_nflog_send_config_cmd(*sock, 0, NFULNL_CFG_CMD_PF_BIND, AF_INET) < 0) {
            ALOGE("Failed NFULNL_CFG_CMD_PF_BIND: %s", strerror(errno));
            return NULL;
        }
        if (android_nflog_send_config_cmd(*sock, 0, NFULNL_CFG_CMD_BIND, AF_UNSPEC) < 0) {
            ALOGE("Failed NFULNL_CFG_CMD_BIND: %s", strerror(errno));
            return NULL;
        }
    }

    // Wrap the socket in a NetlinkHandler so activity on it can be handled
    NetlinkHandler *handler = new NetlinkHandler(this, *sock, format);
    if (handler->start()) { // start the NetlinkHandler, i.e. start listening
        ALOGE("Unable to start NetlinkHandler: %s", strerror(errno));
        close(*sock);
        return NULL;
    }
    return handler;
}

NetlinkHandler's start() simply forwards to this->startListener(), a method actually inherited from the SocketListener class. SocketListener is a fairly general-purpose class; many modules that do socket I/O multiplexing go through it.
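
For orientation, here is a simplified sketch of SocketListener's two construction modes (based on AOSP's libsysutils, abbreviated; details vary by version). The `listen` flag a subclass passes up is what later decides whether the listener thread accept()s new connections or just reads an existing fd:

// Simplified sketch of libsysutils' SocketListener (not the full class):
class SocketListener {
public:
    SocketListener(const char *socketName, bool listen); // look up an init-created socket by name
    SocketListener(int socketFd, bool listen);           // wrap a descriptor we already own
    int startListener(int backlog);
protected:
    virtual bool onDataAvailable(SocketClient *c) = 0;   // invoked for each readable client
};
// NetlinkHandler reaches the second constructor with listen = false (nothing to accept);
// CommandListener, via FrameworkListener, uses the first with listen = true for "netd".

With that in mind, here is startListener():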

int SocketListener::startListener(int backlog) {
    // Note these are member variables; the point of this block is simply to
    // obtain the socket fd one way or another
    if (!mSocketName && mSock == -1) {
        SLOGE("Failed to start unbound listener");
        errno = EINVAL;
        return -1;
    } else if (mSocketName) {
        if ((mSock = android_get_control_socket(mSocketName)) < 0) {
            SLOGE("Obtaining file descriptor socket '%s' failed: %s",
                  mSocketName, strerror(errno));
            return -1;
        }
        SLOGV("got mSock = %d for %s", mSock, mSocketName);
        fcntl(mSock, F_SETFD, FD_CLOEXEC);
    }

    // If mListen is set, listen on the socket; otherwise wrap it in a new
    // SocketClient and add it to the client collection.
    // Note the short-circuit: for NetlinkHandler, its constructor shows mListen is false.
    if (mListen && listen(mSock, backlog) < 0) {
        SLOGE("Unable to listen on socket (%s)", strerror(errno));
        return -1;
    } else if (!mListen)
        mClients->push_back(new SocketClient(mSock, false, mUseCmdNum)); // here

    if (pipe(mCtrlPipe)) {
        SLOGE("pipe failed (%s)", strerror(errno));
        return -1;
    }

    // Create the thread that services the socket. Strictly speaking there is no
    // "listening socket" here at all for the NETLINK case.
    if (pthread_create(&mThread, NULL, SocketListener::threadStart, this)) {
        SLOGE("pthread_create (%s)", strerror(errno));
        return -1;
    }
    return 0;
}

The thread entry function is SocketListener::threadStart():

void *SocketListener::threadStart(void *obj) {
    // obj is the argument passed in by the creating thread: the SocketListener itself
    SocketListener *me = reinterpret_cast<SocketListener *>(obj);
    me->runListener();
    pthread_exit(NULL);
    return NULL;
}

Then into runListener():

void SocketListener::runListener() {
    // The core logic of this function is select()
    SocketClientCollection pendingList; // holds the fds that are active this round
    while(1) {
        SocketClientCollection::iterator it;
        fd_set read_fds;
        int rc = 0;
        int max = -1;

        FD_ZERO(&read_fds);
        if (mListen) { // watch the listening socket for read events; we already know mListen is false here
            max = mSock;
            FD_SET(mSock, &read_fds);
        }
        FD_SET(mCtrlPipe[0], &read_fds); // what is this pipe for? a flag to break the loop?
        if (mCtrlPipe[0] > max)
            max = mCtrlPipe[0];

        pthread_mutex_lock(&mClientsLock);
        // Walk the mClients collection
        for (it = mClients->begin(); it != mClients->end(); ++it) {
            // NB: calling out to an other object with mClientsLock held (safe)
            int fd = (*it)->getSocket(); // the socket used to talk to this client
            FD_SET(fd, &read_fds);       // watch it
            if (fd > max) {
                max = fd;
            }
        }
        pthread_mutex_unlock(&mClientsLock);
        SLOGV("mListen=%d, max=%d, mSocketName=%s", mListen, max, mSocketName);

        if ((rc = select(max + 1, &read_fds, NULL, NULL, NULL)) < 0) { // select
            if (errno == EINTR)
                continue;
            SLOGE("select failed (%s) mListen=%d, max=%d", strerror(errno), mListen, max);
            sleep(1);
            continue;
        } else if (!rc)
            continue;

        if (FD_ISSET(mCtrlPipe[0], &read_fds)) { // the control pipe is active
            char c = CtrlPipe_Shutdown;
            TEMP_FAILURE_RETRY(read(mCtrlPipe[0], &c, 1)); // read the pipe
            if (c == CtrlPipe_Shutdown) {
                break; // so this is why the pipe is watched: it signals shutdown
            }
            continue;
        }

        // If the listening socket is readable, accept the connection request;
        // the NETLINK sockets of course never take this path.
        if (mListen && FD_ISSET(mSock, &read_fds)) {
            struct sockaddr addr;
            socklen_t alen;
            int c;
            do {
                alen = sizeof(addr);
                c = accept(mSock, &addr, &alen);
                SLOGV("%s got %d from accept", mSocketName, c);
            } while (c < 0 && errno == EINTR);
            if (c < 0) {
                SLOGE("accept failed (%s)", strerror(errno));
                sleep(1);
                continue;
            }
            fcntl(c, F_SETFD, FD_CLOEXEC);
            pthread_mutex_lock(&mClientsLock);
            // Add it to the client collection
            mClients->push_back(new SocketClient(c, true, mUseCmdNum));
            pthread_mutex_unlock(&mClientsLock);
        }

        // Put every active fd into pendingList (often there is only one)
        pendingList.clear();
        pthread_mutex_lock(&mClientsLock);
        for (it = mClients->begin(); it != mClients->end(); ++it) {
            SocketClient* c = *it;
            // NB: calling out to an other object with mClientsLock held (safe)
            int fd = c->getSocket();
            if (FD_ISSET(fd, &read_fds)) { // this fd is active
                pendingList.push_back(c);  // queue it
                c->incRef();
            }
        }
        pthread_mutex_unlock(&mClientsLock);

        // Drain pendingList: concretely, the kernel has produced an event that
        // the upper layers need to handle
        while (!pendingList.empty()) {
            /* Pop the first item from the list */
            it = pendingList.begin();
            SocketClient* c = *it;
            pendingList.erase(it);
            /* Process it, if false is returned, remove from list */
            if (!onDataAvailable(c)) {
                release(c, false);
            }
            c->decRef();
        }
    }
}

As the function above shows, three kinds of fds are watched here: the listening socket; the client sockets, each wrapped in a SocketClient and kept in a collection; and the control pipe. From NetlinkManager::start() we already know that four instances of this structure were started, with sockets mUeventSock, mRouteSock, mQuotaSock, and mStrictSock. These are all PF_NETLINK sockets, not listening sockets; concretely, their mListen is false in every case, so each of the four is added to mClients as a SocketClient (note that there are four separate instances). But wait -- where is the listening socket? There simply is none: these are SOCK_DGRAM sockets!
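
To make that concrete, here is a minimal standalone sketch (illustrative only, independent of netd's classes) showing that a datagram netlink socket needs only socket() + bind() + recv(), with no listen()/accept() step:

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
    struct sockaddr_nl addr;
    char buf[4096];

    int fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
    if (fd < 0) { perror("socket"); return 1; }

    memset(&addr, 0, sizeof(addr));
    addr.nl_family = AF_NETLINK;
    addr.nl_pid = getpid();
    addr.nl_groups = 0xffffffff; // subscribe to all uevent multicast groups

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind"); close(fd); return 1;
    }

    // No listen()/accept(): each uevent arrives as one datagram.
    for (;;) {
        ssize_t n = recv(fd, buf, sizeof(buf) - 1, 0);
        if (n <= 0) break;
        buf[n] = '\0';
        printf("uevent: %s\n", buf); // prints only the first NUL-separated field
    }
    close(fd);
    return 0;
}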

When a read event is detected on one of these sockets -- that is, when something the upper layers care about has happened in the kernel -- the corresponding onDataAvailable() is invoked. It is a virtual function, declared in SocketListener and filled in further down the hierarchy, as the sketch below shows.
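
// Simplified view of the dispatch chain (illustrative, members abbreviated):
class SocketListener {
protected:
    virtual bool onDataAvailable(SocketClient *c) = 0; // called from runListener()
};
class NetlinkListener : public SocketListener {
    bool onDataAvailable(SocketClient *c);             // reads the datagram, decodes a NetlinkEvent
protected:
    virtual void onEvent(NetlinkEvent *evt) = 0;       // hook for subclasses
};
class NetlinkHandler : public NetlinkListener {
    void onEvent(NetlinkEvent *evt);                   // netd's actual event handling
};

Since the concrete type of `this` here is NetlinkHandler, the implementation that runs is NetlinkListener::onDataAvailable(), which NetlinkHandler inherits: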

bool NetlinkListener::onDataAvailable(SocketClient *cli)
{
    int socket = cli->getSocket();
    ssize_t count;
    uid_t uid = -1;

    bool require_group = true;
    if (mFormat == NETLINK_FORMAT_BINARY_UNICAST) {
        require_group = false;
    }

    // Read the data
    count = TEMP_FAILURE_RETRY(uevent_kernel_recv(socket,
            mBuffer, sizeof(mBuffer), require_group, &uid));
    if (count < 0) {
        if (uid > 0)
            LOG_EVENT_INT(65537, uid);
        SLOGE("recvmsg failed (%s)", strerror(errno));
        return false;
    }

    NetlinkEvent *evt = new NetlinkEvent(); // create a NetlinkEvent
    if (evt->decode(mBuffer, count, mFormat)) { // decode it
        onEvent(evt); // hand the event over for processing
    } else if (mFormat != NETLINK_FORMAT_BINARY) {
        // Don't complain if parseBinaryNetlinkMessage returns false. That can
        // just mean that the buffer contained no messages we're interested in.
        SLOGE("Error decoding NetlinkEvent");
    }

    delete evt;
    return true;
}

這里調用了onEvent()才是NetlinkHandler的入口。

void NetlinkHandler::onEvent(NetlinkEvent *evt) {
    const char *subsys = evt->getSubsystem();
    if (!subsys) {
        ALOGW("No subsystem found in netlink event");
        return;
    }

    if (!strcmp(subsys, "net")) {
        NetlinkEvent::Action action = evt->getAction();
        const char *iface = evt->findParam("INTERFACE");

        if (action == NetlinkEvent::Action::kAdd) {
            notifyInterfaceAdded(iface);
        } else if (action == NetlinkEvent::Action::kRemove) {
            notifyInterfaceRemoved(iface);
        } else if (action == NetlinkEvent::Action::kChange) {
            evt->dump();
            notifyInterfaceChanged("nana", true);
        } else if (action == NetlinkEvent::Action::kLinkUp) {
            notifyInterfaceLinkChanged(iface, true);
        } else if (action == NetlinkEvent::Action::kLinkDown) {
            notifyInterfaceLinkChanged(iface, false);
        } else if (action == NetlinkEvent::Action::kAddressUpdated ||
                   action == NetlinkEvent::Action::kAddressRemoved) {
            const char *address = evt->findParam("ADDRESS");
            const char *flags = evt->findParam("FLAGS");
            const char *scope = evt->findParam("SCOPE");
            if (action == NetlinkEvent::Action::kAddressRemoved && iface && address) {
                int resetMask = strchr(address, ':') ? RESET_IPV6_ADDRESSES : RESET_IPV4_ADDRESSES;
                resetMask |= RESET_IGNORE_INTERFACE_ADDRESS;
                if (int ret = ifc_reset_connections(iface, resetMask)) {
                    ALOGE("ifc_reset_connections failed on iface %s for address %s (%s)", iface,
                          address, strerror(ret));
                }
            }
            if (iface && flags && scope) {
                notifyAddressChanged(action, address, iface, flags, scope);
            }
        } else if (action == NetlinkEvent::Action::kRdnss) {
            const char *lifetime = evt->findParam("LIFETIME");
            const char *servers = evt->findParam("SERVERS");
            if (lifetime && servers) {
                notifyInterfaceDnsServers(iface, lifetime, servers);
            }
        } else if (action == NetlinkEvent::Action::kRouteUpdated ||
                   action == NetlinkEvent::Action::kRouteRemoved) {
            const char *route = evt->findParam("ROUTE");
            const char *gateway = evt->findParam("GATEWAY");
            const char *iface = evt->findParam("INTERFACE");
            if (route && (gateway || iface)) {
                notifyRouteChange(action, route, gateway, iface);
            }
        }
    } else if (!strcmp(subsys, "qlog")) {
        const char *alertName = evt->findParam("ALERT_NAME");
        const char *iface = evt->findParam("INTERFACE");
        notifyQuotaLimitReached(alertName, iface);
    } else if (!strcmp(subsys, "strict")) {
        const char *uid = evt->findParam("UID");
        const char *hex = evt->findParam("HEX");
        notifyStrictCleartext(uid, hex);
    } else if (!strcmp(subsys, "xt_idletimer")) {
        const char *label = evt->findParam("INTERFACE");
        const char *state = evt->findParam("STATE");
        const char *timestamp = evt->findParam("TIME_NS");
        const char *uid = evt->findParam("UID");
        if (state)
            notifyInterfaceClassActivity(label, !strcmp("active", state),
                                         timestamp, uid);
#if !LOG_NDEBUG
    } else if (strcmp(subsys, "platform") && strcmp(subsys, "backlight")) {
        /* It is not a VSYNC or a backlight event */
        ALOGV("unexpected event from subsystem %s", subsys);
#endif
    }
}

As you can see, each kind of event is dispatched to a corresponding notifyXxx() handler, and all of these notifyXxx() functions end up calling notify():

void NetlinkHandler::notify(int code, const char *format, ...) {
    char *msg;
    va_list args;
    va_start(args, format);
    if (vasprintf(&msg, format, args) >= 0) {
        // Note carefully: the socket used here is cl's socket, the one named
        // "netd" -- not one of the four netlink sockets above
        mNm->getBroadcaster()->sendBroadcast(code, msg, false);
        free(msg);
    } else {
        SLOGE("Failed to send notification: vasprintf: %s", strerror(errno));
    }
    va_end(args);
}

mNm is the NetlinkManager created earlier in main(), whose broadcaster was set to cl (the CommandListener instance). CommandListener sends messages over the "netd" socket up to NetworkManagementService. These messages come in two kinds: unsolicited reports pushed up from below, and responses to requests from above. (This is quite similar to RILD.)
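
The two kinds of messages are distinguished by the numeric code prefixed to each line. As a rough illustration (in the spirit of netd's ResponseCode.h in AOSP; treat the exact values and list as version-dependent):

// Illustrative excerpt of netd's response/broadcast codes:
class ResponseCode {
public:
    static const int CommandOkay        = 200; // request succeeded
    static const int OperationFailed    = 400; // request failed
    static const int CommandSyntaxError = 500; // malformed request
    static const int InterfaceChange    = 600; // unsolicited: interface added/removed/changed
    static const int BandwidthControl   = 601; // unsolicited: quota/alert events
};
// So a kAdd uevent ultimately reaches the Java layer as a line like:
//   "600 Iface added wlan0"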

Let's recap the steps so far: NetlinkManager creates four PF_NETLINK sockets that listen for kernel uEvents. When a relevant uEvent occurs in the kernel and is detected by the corresponding handler under NetlinkManager, NetlinkManager converts that uEvent into a NetlinkEvent and broadcasts it to the upper layer through CommandListener -- and "upper layer" here means the Java layer. In other words, the native C/C++ layer and the Java layer are tied together by sockets.

這里一定要清楚出兩點:之前的4個socket并不是這里的BroadCaster的socket;而且,個人覺得這個BroadCaster名字也容易讓人產生誤解,以為是廣播,廣播對應的socket就應該是UDP。而實際上這個socket是init.rc配置的名字為“netd”的socke所accept出來的clientSocket,是一個TCPsocket。而TCPsocket是無法廣播的。這里直接將sendBroadCast理解為sendMsg后面的就很好理解了。

Next, CommandListener. This class also inherits from SocketListener, but unlike the four netlink sockets, its mListen is set to true -- "netd" is a genuine listening socket. CommandListener called startListener() back in main() to begin accepting connections from the Java layer. When an upper-layer connection arrives, select() returns and accept() yields a client socket, which is wrapped in a SocketClient, added to the list, and added to select()'s watch set. When the Java layer sends a command down, a read event fires on that SocketClient, the command is processed, and the result is sent back up as a response; unsolicited reports from below also travel up through this same client socket. Anyone familiar with network programming will recognize this as a textbook select()-based I/O-multiplexing server.
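
As an aside, the same command channel can be exercised from native code. Here is a minimal, hypothetical client sketch: socket_local_client() is the real libcutils helper, but the command string, the leading sequence number (present only when mUseCmdNum is enabled), and the privileges needed to connect are assumptions -- in practice only system components talk to this socket:

#include <cutils/sockets.h>  // socket_local_client()
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void) {
    // Connect to /dev/socket/netd (reserved namespace), as the framework does.
    int fd = socket_local_client("netd", ANDROID_SOCKET_NAMESPACE_RESERVED, SOCK_STREAM);
    if (fd < 0) { perror("connect"); return 1; }

    // Hypothetical command: "<seq> interface list"
    const char cmd[] = "0 interface list";
    write(fd, cmd, sizeof(cmd)); // the trailing '\0' is part of the protocol framing

    char buf[512];
    ssize_t n = read(fd, buf, sizeof(buf) - 1); // reply lines start with a 3-digit code
    if (n > 0) {
        buf[n] = '\0';
        printf("%s\n", buf);
    }
    close(fd);
    return 0;
}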

Besides "netd", the other three sockets configured in init.rc -- dnsproxyd, mdns, and fwmarkd -- are given almost identical server structures, so they are not walked through again here.

[Figure: rough block diagram of netd]
