add log2mem functionality and refactor

dadav 2020-01-18 09:29:28 +01:00
parent 5606ad7281
commit 6a4d7a895e
8 changed files with 305 additions and 110 deletions

bin/pwnagotchi View File

@@ -8,14 +8,15 @@ import sys
import toml
import pwnagotchi
import pwnagotchi.grid as grid
import pwnagotchi.utils as utils
import pwnagotchi.plugins as plugins
from pwnagotchi import grid
from pwnagotchi import utils
from pwnagotchi import plugins
from pwnagotchi import log
from pwnagotchi.identity import KeyPair
from pwnagotchi.agent import Agent
from pwnagotchi.ui.display import Display
from pwnagotchi import restart
from pwnagotchi import fs
def do_clear(display):
@@ -124,7 +125,8 @@ if __name__ == '__main__':
print(toml.dumps(config))
sys.exit(0)
utils.setup_logging(args, config)
fs.setup_mounts(config)
log.setup_logging(args, config)
pwnagotchi.set_name(config['main']['name'])
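The ordering matters here: fs.setup_mounts() must run before log.setup_logging(), so that the log directory is already a RAM-backed mount when the file handler opens it. A minimal sketch of that startup sequence, assuming the usual /etc/pwnagotchi/config.toml location and a bare --debug flag in place of the real CLI:

import argparse
import toml
from pwnagotchi import fs, log

parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()

config = toml.load('/etc/pwnagotchi/config.toml')  # assumed config path
fs.setup_mounts(config)          # mount the tmpfs/zram targets first
log.setup_logging(args, config)  # then open handlers on the (now in-RAM) log path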

View File

@@ -53,6 +53,7 @@
- wpa_supplicant
- nfs-common
install:
- rsync
- vim
- screen
- golang

pwnagotchi/__init__.py View File

@@ -6,6 +6,7 @@ import re
import pwnagotchi.ui.view as view
import pwnagotchi
from pwnagotchi import fs
from pwnagotchi._version import __version__
_name = None
@@ -99,6 +100,11 @@ def temperature(celsius=True):
def shutdown():
logging.warning("syncing...")
for m in fs.mounts:
m.sync()
logging.warning("shutting down ...")
if view.ROOT:
view.ROOT.on_shutdown()

pwnagotchi/defaults.toml View File

@@ -204,3 +204,18 @@ bettercap.silence = [
"wifi.ap.lost",
"mod.started"
]
fs.memory.enabled = false
fs.memory.mounts.log.enabled = false
fs.memory.mounts.log.mount = "/var/log"
fs.memory.mounts.log.size = "50M"
fs.memory.mounts.log.sync = 60
fs.memory.mounts.log.zram = true
fs.memory.mounts.log.rsync = true
fs.memory.mounts.data.enabled = false
fs.memory.mounts.data.mount = "/var/tmp/pwnagotchi"
fs.memory.mounts.data.size = "10M"
fs.memory.mounts.data.sync = 3600
fs.memory.mounts.data.zram = false
fs.memory.mounts.data.rsync = true
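Both switches above ship disabled; logs only move to RAM when fs.memory.enabled and fs.memory.mounts.log.enabled are both true. A small sketch of reading these keys (key layout as in the defaults above; the config path is an assumption):

import toml

config = toml.load('/etc/pwnagotchi/config.toml')  # assumed path
mem = config['fs']['memory']
if mem['enabled'] and mem['mounts']['log']['enabled']:
    log_opts = mem['mounts']['log']
    # e.g. /var/log -> RAM, capped at 50M, flushed back to disk every 60s via rsync
    print(log_opts['mount'], log_opts['size'], log_opts['sync'], log_opts['rsync'])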

pwnagotchi/fs/__init__.py (new file, 176 lines) View File

@@ -0,0 +1,176 @@
import os
import re
import tempfile
import contextlib
import shutil
import _thread
import logging
from time import sleep
from distutils.dir_util import copy_tree
mounts = list()
@contextlib.contextmanager
def ensure_write(filename, mode='w'):
path = os.path.dirname(filename)
fd, tmp = tempfile.mkstemp(dir=path)
with os.fdopen(fd, mode) as f:
yield f
f.flush()
os.fsync(f.fileno())
os.replace(tmp, filename)
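# Usage sketch (illustration, not part of this commit): write-then-rename keeps
# the target file intact if power is cut mid-write, e.g.:
#
#   with ensure_write('/var/tmp/pwnagotchi/state.json') as f:
#       f.write(data)
#
# the data lands in a temp file in the same directory, is fsync'd, and only
# then atomically replaces the original via os.replace()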
def size_of(path):
total = 0
for root, dirs, files in os.walk(path):
for f in files:
total += os.path.getsize(os.path.join(root, f))
return total
def is_mountpoint(path):
return os.system(f"mountpoint -q {path}") == 0
def setup_mounts(config):
global mounts
fs_cfg = config['fs']['memory']
if not fs_cfg['enabled']:
return
for name, options in fs_cfg['mounts'].items():
if not options['enabled']:
continue
logging.debug("[FS] Trying to setup mount %s (%s)", name, options['mount'])
size, unit = re.match(r"(\d+)([a-zA-Z]+)", options['size']).groups()
target = os.path.join('/run/pwnagotchi/disk/', os.path.basename(options['mount']))
is_mounted = is_mountpoint(target)
logging.debug("[FS] %s is %s mounted", options['mount'],
"already" if is_mounted else "not yet")
m = MemoryFS(
options['mount'],
target,
size=options['size'],
zram=options['zram'],
zram_disk_size=f"{int(size)*2}{unit}",
rsync=options['rsync'])
if not is_mounted:
if not m.mount():
logging.debug(f"Error while mounting {m.mountpoint}")
continue
if not m.sync(to_ram=True):
logging.debug(f"Error while syncing to {m.mountpoint}")
m.umount()
continue
interval = int(options['sync'])
if interval:
logging.debug("[FS] Starting thread to sync %s (interval: %d)",
options['mount'], interval)
_thread.start_new_thread(m.daemonize, (interval,))
else:
logging.debug("[FS] Not syncing %s, because interval is 0",
options['mount'])
mounts.append(m)
class MemoryFS:
@staticmethod
def zram_install():
if not os.path.exists("/sys/class/zram-control"):
logging.debug("[FS] Installing zram")
return os.system("modprobe zram") == 0
return True
@staticmethod
def zram_dev():
logging.debug("[FS] Adding zram device")
return open("/sys/class/zram-control/hot_add", "rt").read().strip("\n")
def __init__(self, mount, disk, size="40M",
zram=True, zram_alg="lz4", zram_disk_size="100M",
zram_fs_type="ext4", rsync=True):
self.mountpoint = mount
self.disk = disk
self.size = size
self.zram = zram
self.zram_alg = zram_alg
self.zram_disk_size = zram_disk_size
self.zram_fs_type = zram_fs_type
self.zdev = None
self.rsync = rsync
self._setup()
def _setup(self):
if self.zram and MemoryFS.zram_install():
# setup zram
self.zdev = MemoryFS.zram_dev()
open(f"/sys/block/zram{self.zdev}/comp_algorithm", "wt").write(self.zram_alg)
open(f"/sys/block/zram{self.zdev}/disksize", "wt").write(self.zram_disk_size)
open(f"/sys/block/zram{self.zdev}/mem_limit", "wt").write(self.size)
logging.debug("[FS] Creating fs (type: %s)", self.zram_fs_type)
os.system(f"mke2fs -t {self.zram_fs_type} /dev/zram{self.zdev} >/dev/null 2>&1")
# ensure mountpoints exist
if not os.path.exists(self.disk):
logging.debug("[FS] Creating %s", self.disk)
os.makedirs(self.disk)
if not os.path.exists(self.mountpoint):
logging.debug("[FS] Creating %s", self.mountpoint)
os.makedirs(self.mountpoint)
def daemonize(self, interval=60):
logging.debug("[FS] Daemonized...")
while True:
self.sync()
sleep(interval)
def sync(self, to_ram=False):
source, dest = (self.disk, self.mountpoint) if to_ram else (self.mountpoint, self.disk)
needed, actually_free = size_of(source), shutil.disk_usage(dest)[2]
if actually_free >= needed:
logging.debug("[FS] Syning %s -> %s", source,dest)
if self.rsync:
os.system(f"rsync -aXv --inplace --no-whole-file --delete-after {source}/ {dest}/ >/dev/null 2>&1")
else:
copy_tree(source, dest, preserve_symlinks=True)
os.system("sync")
return True
return False
def mount(self):
if os.system(f"mount --bind {self.mountpoint} {self.disk}"):
return False
if os.system(f"mount --make-private {self.disk}"):
return False
if self.zram and self.zdev is not None:
if os.system(f"mount -t {self.zram_fs_type} -o nosuid,noexec,nodev,user=pwnagotchi /dev/zram{self.zdev} {self.mountpoint}/"):
return False
else:
if os.system(f"mount -t tmpfs -o nosuid,noexec,nodev,mode=0755,size={self.size} pwnagotchi {self.mountpoint}/"):
return False
return True
def umount(self):
if os.system(f"umount -l {self.mountpoint}"):
return False
if os.system(f"umount -l {self.disk}"):
return False
return True
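For illustration, a hedged sketch of driving MemoryFS by hand; normally setup_mounts() does this, and the paths and sizes here just mirror the defaults.toml values (needs root plus the zram module, per _setup above):

from pwnagotchi.fs import MemoryFS

m = MemoryFS('/var/log', '/run/pwnagotchi/disk/log',
             size='50M', zram=True, zram_disk_size='100M', rsync=True)
if m.mount():             # bind the on-disk copy aside, mount zram over /var/log
    m.sync(to_ram=True)   # seed the RAM mount from the on-disk copy
    # ... normal operation, writes land in RAM ...
    m.sync()              # flush RAM back to disk
    m.umount()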

pwnagotchi/log.py View File

@@ -3,6 +3,8 @@ import time
import re
import os
import logging
import shutil
import gzip
from datetime import datetime
from pwnagotchi.voice import Voice
@@ -209,3 +211,96 @@ class LastSession(object):
def is_new(self):
return self.last_session_id != self.last_saved_session_id
def setup_logging(args, config):
cfg = config['main']['log']
memory_cfg = config['fs']['memory']
log_to_memory = memory_cfg['enabled'] and \
                memory_cfg['mounts']['log']['enabled']
memory_log_levels = memory_cfg['mounts']['log'].get('levels') \
    if log_to_memory else None  # 'levels' is optional; not in defaults.toml
filename = cfg['path']
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
root = logging.getLogger()
root.setLevel(logging.DEBUG if args.debug else logging.INFO)
if filename:
# python's default log rotation might split session data across files,
# so we do the rotation ourselves
log_rotation(filename, cfg)
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(formatter)
root.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
root.addHandler(console_handler)
# https://stackoverflow.com/questions/24344045/how-can-i-completely-remove-any-logging-from-requests-module-in-python?noredirect=1&lq=1
logging.getLogger("urllib3").propagate = False
requests_log = logging.getLogger("requests")
requests_log.addHandler(logging.NullHandler())
requests_log.propagate = False
def log_rotation(filename, cfg):
rotation = cfg['rotation']
if not rotation['enabled']:
return
elif not os.path.isfile(filename):
return
stats = os.stat(filename)
# maximum size that triggers rotation (format: 10 or 10B, 10K, 10M, 10G)
if rotation['size']:
max_size = parse_max_size(rotation['size'])
if stats.st_size >= max_size:
do_rotate(filename, stats, cfg)
else:
raise Exception("log rotation is enabled but log.rotation.size was not specified")
def parse_max_size(s):
parts = re.findall(r'(^\d+)([bBkKmMgG]?)', s)
if len(parts) != 1 or len(parts[0]) != 2:
raise Exception("can't parse %s as a max size" % s)
num, unit = parts[0]
num = int(num)
unit = unit.lower()
if unit == 'k':
return num * 1024
elif unit == 'm':
return num * 1024 * 1024
elif unit == 'g':
return num * 1024 * 1024 * 1024
else:
return num
def do_rotate(filename, stats, cfg):
base_path = os.path.dirname(filename)
name = os.path.splitext(os.path.basename(filename))[0]
archive_filename = os.path.join(base_path, "%s.gz" % name)
counter = 2
while os.path.exists(archive_filename):
archive_filename = os.path.join(base_path, "%s-%d.gz" % (name, counter))
counter += 1
log_filename = archive_filename.replace('gz', 'log')
print("%s is %d bytes big, rotating to %s ..." % (filename, stats.st_size, log_filename))
shutil.move(filename, log_filename)
print("compressing to %s ..." % archive_filename)
with open(log_filename, 'rb') as src:
with gzip.open(archive_filename, 'wb') as dst:
dst.writelines(src)
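As a quick check on the unit handling above, a few worked values (bare numbers and a 'B' suffix fall through to plain bytes):

from pwnagotchi.log import parse_max_size  # module path per this commit

assert parse_max_size('10') == 10                 # no unit -> bytes
assert parse_max_size('10K') == 10 * 1024         # 10240
assert parse_max_size('10M') == 10 * 1024 * 1024  # 10485760
assert parse_max_size('1G') == 1024 ** 3          # 1073741824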

View File

@@ -1,6 +1,7 @@
import os
from threading import Lock
frame_path = '/root/pwnagotchi.png'
frame_path = '/var/tmp/pwnagotchi/pwnagotchi.png'
frame_format = 'PNG'
frame_ctype = 'image/png'
frame_lock = Lock()
@@ -8,5 +9,7 @@ frame_lock = Lock()
def update_frame(img):
global frame_lock, frame_path, frame_format
if not os.path.exists(os.path.dirname(frame_path)):
    os.makedirs(os.path.dirname(frame_path))
with frame_lock:
img.save(frame_path, format=frame_format)

pwnagotchi/utils.py View File

@@ -3,19 +3,16 @@ from enum import Enum
import logging
import glob
import os
import re
import time
import subprocess
import yaml
import json
import shutil
import gzip
import contextlib
import tempfile
import toml
import sys
import pwnagotchi
from pwnagotchi.fs import ensure_write
# https://stackoverflow.com/questions/823196/yaml-merge-in-python
@@ -165,94 +162,6 @@ def load_config(args):
return config
def parse_max_size(s):
parts = re.findall(r'(^\d+)([bBkKmMgG]?)', s)
if len(parts) != 1 or len(parts[0]) != 2:
raise Exception("can't parse %s as a max size" % s)
num, unit = parts[0]
num = int(num)
unit = unit.lower()
if unit == 'k':
return num * 1024
elif unit == 'm':
return num * 1024 * 1024
elif unit == 'g':
return num * 1024 * 1024 * 1024
else:
return num
def do_rotate(filename, stats, cfg):
base_path = os.path.dirname(filename)
name = os.path.splitext(os.path.basename(filename))[0]
archive_filename = os.path.join(base_path, "%s.gz" % name)
counter = 2
while os.path.exists(archive_filename):
archive_filename = os.path.join(base_path, "%s-%d.gz" % (name, counter))
counter += 1
log_filename = archive_filename.replace('gz', 'log')
print("%s is %d bytes big, rotating to %s ..." % (filename, stats.st_size, log_filename))
shutil.move(filename, log_filename)
print("compressing to %s ..." % archive_filename)
with open(log_filename, 'rb') as src:
with gzip.open(archive_filename, 'wb') as dst:
dst.writelines(src)
def log_rotation(filename, cfg):
rotation = cfg['rotation']
if not rotation['enabled']:
return
elif not os.path.isfile(filename):
return
stats = os.stat(filename)
# specify a maximum size to rotate ( format is 10/10B, 10K, 10M 10G )
if rotation['size']:
max_size = parse_max_size(rotation['size'])
if stats.st_size >= max_size:
do_rotate(filename, stats, cfg)
else:
raise Exception("log rotation is enabled but log.rotation.size was not specified")
def setup_logging(args, config):
cfg = config['main']['log']
filename = cfg['path']
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
root = logging.getLogger()
root.setLevel(logging.DEBUG if args.debug else logging.INFO)
if filename:
# since python default log rotation might break session data in different files,
# we need to do log rotation ourselves
log_rotation(filename, cfg)
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(formatter)
root.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
root.addHandler(console_handler)
# https://stackoverflow.com/questions/24344045/how-can-i-completely-remove-any-logging-from-requests-module-in-python?noredirect=1&lq=1
logging.getLogger("urllib3").propagate = False
requests_log = logging.getLogger("requests")
requests_log.addHandler(logging.NullHandler())
requests_log.propagate = False
def secs_to_hhmmss(secs):
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
@@ -385,18 +294,6 @@ def extract_from_pcap(path, fields):
return results
@contextlib.contextmanager
def ensure_write(filename, mode='w'):
path = os.path.dirname(filename)
fd, tmp = tempfile.mkstemp(dir=path)
with os.fdopen(fd, mode) as f:
yield f
f.flush()
os.fsync(f.fileno())
os.replace(tmp, filename)
class StatusFile(object):
def __init__(self, path, data_format='raw'):
self._path = path