# NOTE(review): removed six lines of non-Python webshell defacement text
# ("SHELL BYPASS 403 ... Faizzz-Chin Shell" plus a directory-listing line)
# that had been prepended to this module. It broke the file's syntax and
# indicates a possible server compromise -- audit this host.
# coding=utf-8
#
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT
import logging
import os
import pwd
import re
import time
from lvestats.lib.commons.fileutil import open_nofollow
from lvestats.lib.commons.func import get_all_user_domains, normalize_domain
class Proctitle(object):
"""
Class for working with mod_procurl files
"""
def __init__(self, shm_dir="/dev/shm/"):
self.now = 0
self.log = logging.getLogger('Proctitle')
self.SHM_DIR = shm_dir
self.FILE_PATTERN = re.compile(r"apache_title_shm_[0-9]+_[0-9]+_[0-9]+$", re.IGNORECASE)
self._web_server_uids = self._get_web_server_uids()
self.parsed_data = self._get_all_data()
def _get_web_server_uids(self):
"""
Collect UIDs of all known web server usernames.
When hostinglimits_module is enabled, SHM files may be owned by
'nobody' even though 'apache' also exists on the system. Returning
only the first match would reject every file owned by 'nobody',
silently disabling proctitle data collection.
"""
uids = set()
for username in ('apache', 'httpd', 'nobody'):
try:
uids.add(pwd.getpwnam(username).pw_uid)
except KeyError:
continue
if not uids:
self.log.warning("Cannot determine web server UID; no shm files will be processed")
return frozenset(uids)
def _open_apache_owned(self, file_name):
"""
Open *file_name* rejecting symlinks at every path component,
verify ownership via fstat on the same fd, and return a file
object ready for reading. The caller receives the same
underlying descriptor — no close-then-reopen — so there is
no TOCTOU window.
:return: open file object if owned by a web server user, None otherwise
:raises OSError: if any path component is a symlink or does not exist
"""
fd = open_nofollow(file_name)
try:
if os.fstat(fd).st_uid not in self._web_server_uids:
return None
f = os.fdopen(fd, 'r', encoding='utf-8')
fd = -1 # os.fdopen owns the fd now
return f
finally:
if fd != -1:
os.close(fd)
def _get_all_data(self):
"""
1) Get all files in the self.SHM_DIR that mathes to the regexp self.FILE_PATTERN
2) Read every file to the first \x00 simbol
3) If apache process is iddle file content equals to httpd, else it should contain 5 values separated by space
4) Split every interesting line and return
:return:
list of the lists
[[Timestamp, Domain, Http type, Path, Http version],...]
"""
result = []
if os.path.exists(self.SHM_DIR):
files = filter(self.FILE_PATTERN.search, os.listdir(self.SHM_DIR))
self.now = time.time()
for file in files:
try:
file_name = os.path.join(self.SHM_DIR, file)
http_stats = self._open_apache_owned(file_name)
if http_stats is None:
continue
with http_stats:
http_stats_line = http_stats.readline()
http_stats_line = http_stats_line.split('\x00')[0]
if not http_stats_line == "httpd":
http_stats_line_split = http_stats_line.split(" ")
if len(http_stats_line_split) == 5:
result.append(http_stats_line_split + [file.split("_")[-3]])
else:
self.log.debug("Number of values in file %s is not equal to 5", file)
except (IOError, OSError) as e:
self.log.debug(str(e))
return result
def get_user_data(self, username):
"""
Returns information about processed by user pages.
:param username:
:return:
list of the lists
[[Pid, Domain, Path, Http type, Http version, Time],...]
"""
all_domains = get_all_user_domains(username)
normalized_domains = set(map(normalize_domain, all_domains))
result = []
for data in self.parsed_data:
if normalize_domain(data[1]) in normalized_domains:
result.append([
data[5],
data[1],
data[2],
data[3],
data[4],
f"{self.now - float(data[0]):.1f}",
])
return result