add more debug output

This commit is contained in:
2026-03-03 21:32:21 +01:00
parent caa2bd4897
commit c721fdf9f2

67
main.py
View File

@@ -139,10 +139,13 @@ class LogMonitor:
# Initialize LogBull logger
logbull_config = config.get_logbull_config()
logger.info(f"LogBull config: host={logbull_config.get('host', 'http://localhost:4005')}, project_id={logbull_config.get('project_id')}")
self.logbull_logger = LogBullLogger(
host=logbull_config.get('host', 'http://localhost:4005'),
project_id=logbull_config.get('project_id')
)
logger.info("LogBull logger initialized successfully")
self.flush_interval = logbull_config.get('flush_interval', 5)
self.last_flush = time.time()
@@ -152,17 +155,30 @@ class LogMonitor:
self.poll_interval = service_config.get('poll_interval', 1)
self.max_lines_per_batch = service_config.get('max_lines_per_batch', 100)
logger.info(f"Service config: poll_interval={self.poll_interval}s, max_lines_per_batch={self.max_lines_per_batch}")
logger.info(f"Monitoring {len(self.log_files)} log files: {self.log_files}")
# Initialize file positions
for log_file in self.log_files:
self.file_positions[log_file] = 0
if os.path.exists(log_file):
logger.info(f"Found log file: {log_file}")
else:
logger.warning(f"Log file does not exist yet: {log_file}")
def _read_new_lines(self, file_path: str) -> List[str]:
"""Read new lines from file since last position"""
try:
with open(file_path, 'r') as f:
f.seek(self.file_positions[file_path])
current_position = self.file_positions[file_path]
f.seek(current_position)
lines = f.readlines()
self.file_positions[file_path] = f.tell()
new_position = f.tell()
self.file_positions[file_path] = new_position
if lines:
logger.debug(f"Read {len(lines)} new lines from {file_path} (position {current_position} -> {new_position})")
return lines
except (FileNotFoundError, IOError) as e:
logger.warning(f"Error reading {file_path}: {e}")
@@ -170,9 +186,12 @@ class LogMonitor:
def process_logs(self):
"""Process all log files and send to LogBull"""
total_processed = 0
total_sent = 0
for log_file in self.log_files:
if not os.path.exists(log_file):
logger.warning(f"Log file not found: {log_file}")
logger.debug(f"Log file not found: {log_file}")
continue
lines = self._read_new_lines(log_file)
@@ -181,15 +200,30 @@ class LogMonitor:
logger.info(f"Processing {len(lines)} new lines from {log_file}")
parsed_count = 0
unparsed_count = 0
for line in lines[:self.max_lines_per_batch]: # Limit batch size
total_processed += 1
parsed_log = LogParser.parse_log_line(line)
if parsed_log:
parsed_count += 1
logger.debug(f"Parsed log line: type={parsed_log.get('type')}, subtype={parsed_log.get('subtype', 'N/A')}")
self._send_to_logbull(parsed_log, log_file)
total_sent += 1
else:
unparsed_count += 1
logger.debug(f"Failed to parse line: {line.strip()[:100]}")
if parsed_count > 0:
logger.info(f"From {log_file}: parsed {parsed_count}, unparsed {unparsed_count}")
# Periodically flush
if time.time() - self.last_flush > self.flush_interval:
logger.info(f"Flushing logs to LogBull (total sent since last flush: {total_sent})")
self.logbull_logger.flush()
self.last_flush = time.time()
logger.info("Flush completed")
def _send_to_logbull(self, log_data: Dict, source_file: str):
"""Send parsed log data to LogBull"""
@@ -199,27 +233,41 @@ class LogMonitor:
# Send based on log type
if log_data['type'] == 'access':
logger.debug(f"Sending ACCESS log to LogBull: {log_data.get('method')} {log_data.get('path')} - {log_data.get('status')}")
self.logbull_logger.info("NGINX Access Log", fields=log_data)
elif log_data['type'] == 'error':
if log_data.get('subtype') == 'naxsi':
logger.debug(f"Sending NAXSI log to LogBull: {log_data.get('request')}")
self.logbull_logger.warning("NAXSI Block", fields=log_data)
else:
logger.debug(f"Sending ERROR log to LogBull: {log_data.get('request')}")
self.logbull_logger.error("NGINX Error Log", fields=log_data)
logger.debug(f"LogBull message queued successfully (type={log_data['type']})")
except Exception as e:
logger.error(f"Error sending to LogBull: {e}")
logger.error(f"Error sending to LogBull: {e}", exc_info=True)
def run(self):
    """Main monitoring loop.

    Announces the configured LogBull endpoint and project, then polls
    the monitored log files forever, sleeping ``poll_interval`` seconds
    between passes. A KeyboardInterrupt (Ctrl-C) performs a final flush
    and shuts down cleanly; any other exception is logged with its full
    traceback and re-raised to the caller.
    """
    logger.info("Starting log monitoring service")
    logger.info(f"LogBull endpoint: {self.config.get_logbull_config().get('host', 'http://localhost:4005')}")
    logger.info(f"Project ID: {self.config.get_logbull_config().get('project_id')}")

    pass_number = 0
    try:
        while True:
            pass_number += 1
            logger.debug(f"Processing iteration {pass_number}")
            self.process_logs()
            time.sleep(self.poll_interval)
    except KeyboardInterrupt:
        # Operator-initiated shutdown: drain any buffered log entries
        # before exiting so nothing queued for LogBull is lost.
        logger.info("Shutting down gracefully...")
        logger.info("Performing final flush...")
        self.logbull_logger.flush()
        logger.info("Service stopped")
    except Exception as e:
        # Unexpected failure: record the traceback, then propagate so
        # the process exits non-zero instead of looping in a bad state.
        logger.error(f"Unexpected error in monitoring loop: {e}", exc_info=True)
        raise
class LogFileHandler(FileSystemEventHandler):
"""Handle file system events for log files"""
@@ -243,6 +291,8 @@ def main():
elif sys.argv[1].endswith('.yaml') or sys.argv[1].endswith('.yml'):
config_path = sys.argv[1]
logger.info(f"Loading configuration from: {config_path}")
# Load configuration
config = Config(config_path)
@@ -250,13 +300,20 @@ def main():
service_config = config.get_service_config()
log_level = service_config.get('log_level', 'INFO').upper()
logger.setLevel(getattr(logging, log_level, logging.INFO))
logger.info(f"Log level set to: {log_level}")
logger.info("Configuration loaded successfully")
logger.info(f"LogBull host: {config.get_logbull_config().get('host')}")
logger.info(f"LogBull project_id: {config.get_logbull_config().get('project_id')}")
logger.info(f"Log files to monitor: {config.get_log_files()}")
# Create and run monitor
logger.info("Initializing log monitor...")
monitor = LogMonitor(config)
monitor.run()
except Exception as e:
logger.error(f"Fatal error: {e}")
logger.error(f"Fatal error: {e}", exc_info=True)
raise
if __name__ == "__main__":