added logger resource API

This commit is contained in:
mh4x0f 2021-05-13 15:06:22 -03:00
parent 7aef4d2a67
commit 6368a0091e
3 changed files with 83 additions and 9 deletions

View File

@ -9,7 +9,8 @@ All notable changes to this project will be documented in this file.
### Added
- added route to get information about plugins and proxies on the REST API
- added new attribute on plugins and proxies mode class
- added new attribute on plugins and proxies mode class
- added logger resource API
### Changed

View File

@ -33,6 +33,7 @@ def init_app(app):
api.add_resource(res_auth.LoginResource, "/authenticate/")
api.add_resource(res_logger.getFileLogResource, "/logger/<string:filename>")
api.add_resource(res_logger.getAllFileLogResource, "/loggers")
api.add_resource(
res_ap.SettingsAccesspointResource,

View File

@ -6,6 +6,8 @@ import os, json
from itertools import islice
from wifipumpkin3.core.servers.rest.ext.auth import token_required
from wifipumpkin3.core.servers.rest.ext.exceptions import exception
from datetime import datetime
import urllib.parse
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
@ -75,14 +77,84 @@ class getFileLogResource(Resource):
}
)
class getAllFileLogResource(Resource):
    # REST endpoint that merges every known wifipumpkin3 log file into one
    # paginated, session-filtered JSON listing.
    config = SettingsINI.getInstance()  # shared settings singleton
    # query-string parameters accepted by the GET handler
    args = ("page", "session", "excludes")
    # log names to skip; populated from the "excludes" query parameter
    # NOTE(review): attribute name misspells "excludes"; kept as-is because
    # the get() handler references it by this exact name.
    exludes_logs = []
    # log files (basename, without the .log extension) scanned under C.LOG_BASE
    filenames = ("pumpkin_proxy", "pydns_server", "pydhcp_server", "sniffkin3", "captiveportal", "responder3")
    limit_view = 10  # records returned per page
    session = None  # session filter; set from the "session" query parameter
def chunk(self, it, size):
    """Split *it* into consecutive tuples of at most *size* items.

    Returns a lazy iterator over tuples; the final tuple may be shorter
    when the input length is not a multiple of *size*.
    """
    source = iter(it)

    def take_piece():
        # Pull the next ``size`` items (or whatever remains) as a tuple.
        return tuple(islice(source, size))

    # iter() with a sentinel stops as soon as a piece comes back empty.
    return iter(take_piece, ())
def parser_data(self, list_data: list, session):
    """Flatten loguru-style JSON records for *session*, sorted by time.

    Each entry in *list_data* is a dict with a ``record`` payload and the
    raw ``text`` line; records whose ``extra.session`` differs from
    *session* are dropped. Returns a list of flat dicts ordered by their
    second-resolution timestamp.
    """
    entries = []
    for item in list_data:
        record = item["record"]
        extra = record["extra"]
        if extra["session"] != session:
            continue  # keep only records belonging to the requested session
        ts = record["time"]["timestamp"]
        entries.append(
            {
                # human-readable local time, truncated to whole seconds
                "time": str(datetime.fromtimestamp(ts).replace(microsecond=0)),
                "timestamp": ts,
                "message": record["message"],
                "text": item["text"],
                "name": extra["name"],
                "session": extra["session"],
            }
        )
    # chronological order, parsed back from the formatted "time" field
    entries.sort(key=lambda e: datetime.strptime(e["time"], "%Y-%m-%d %H:%M:%S"))
    return entries
@token_required
def get(self):
    """Return a paginated, session-filtered view of every known log file.

    Query parameters (validated against ``self.args``):
      page     -- zero-based page index; defaults to 0 when absent
      session  -- only records tagged with this session are returned
      excludes -- comma-separated, URL-encoded list of log names to skip

    NOTE(review): this hunk of the diff interleaved the removed ``post``
    handler with the added ``get`` handler; the body below is the added
    version reconstructed from the hunk — confirm against the repository.
    """
    # Reject any query parameter we do not know about.
    for args in request.args:
        if not args in self.args:
            return exception(
                "Cannot found parameters {} on request ".format(args), code=400
            )

    # BUGFIX: int(request.args.get("page")) raised TypeError (HTTP 500)
    # when "page" was omitted; default to the first page, and answer 400
    # instead of 500 for a non-numeric value.
    try:
        page = int(request.args.get("page", 0))
    except ValueError:
        return exception("page must be an integer", code=400)

    if request.args.get("excludes"):
        resq_exclude = urllib.parse.unquote_plus(request.args.get("excludes"))
        self.exludes_logs = tuple(x.strip() for x in resq_exclude.split(","))
    if request.args.get("session"):
        self.session = request.args.get("session")

    # Merge every non-excluded log file (one JSON record per line).
    table = []
    for filename in self.filenames:
        filepath = "{}/{}.log".format(C.LOG_BASE, filename)
        if os.path.isfile(filepath) and filename not in self.exludes_logs:
            with open(filepath, "r") as f:
                for line in f:
                    table.append(json.loads(line))

    table = self.parser_data(table, self.session)
    data_splited = list(self.chunk(table, self.limit_view))

    payload = {
        "items": [],
        # BUGFIX: the empty-page branch misspelled this key as "limt_view",
        # inconsistent with the populated branch.
        "limit_view": self.limit_view,
        "total_pages": (len(table) // self.limit_view) + 1,
        "current_page": page,
        "total_count": len(table),
    }
    # BUGFIX: a negative page previously indexed from the end of the list
    # via Python negative indexing; out-of-range pages now return no items.
    if 0 <= page <= (len(data_splited) - 1):
        payload["items"] = data_splited[page]
    return jsonify({"data": payload})