Add the user daemon

Branch: main
Author: Rebel Zhang
Date: 2025-09-20 16:40:21 +08:00
parent 5934a4021e
commit 34e73a020b
3 changed files with 108 additions and 27 deletions

View File

@@ -1,5 +1,5 @@
#!/usr/bin/python3
-import sys, pathlib, re
+import sys, pathlib, re, subprocess
import ressenger_initialisation, ressenger_client, ressenger_common, ressenger_cryptography, ressenger_exceptions, ressenger_server
from PySide6.QtUiTools import QUiLoader
from PySide6.QtWidgets import QApplication, QListWidget, QTextBrowser, QTextEdit, QPushButton, QFileDialog, QMessageBox, QDialog, QVBoxLayout, QLineEdit, QLabel

View File

@@ -1,4 +1,17 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
"""
TCP server that receives binary blobs and persists them to a cache file.
This variant ensures the cache file is removed on shutdown (where possible).
Behaviour:
- Always start with a fresh, empty CACHE (do NOT load existing file).
- Use a dedicated CacheWriter thread to debounce and atomically write CACHE.
- On exit (signals, KeyboardInterrupt, normal interpreter exit) stop writer and remove cache file.
Note: SIGKILL (kill -9) and sudden power loss cannot be trapped; cleanup cannot run then.
"""
# English comments throughout (British English spelling).
import socket
import threading
import struct
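The new module docstring above describes the daemon as a TCP server that receives binary blobs and persists them to a cache file. The framing used by handle_client is not visible in the hunks of this commit, so the client below is only a sketch of how a caller might feed the daemon; the 4-byte big-endian length prefix, the send_blob name and the example port are assumptions, not part of the commit.

# Hypothetical client sketch; the wire framing is an assumption, since the
# receive side of handle_client falls outside the hunks shown in this diff.
import socket
import struct

def send_blob(host: str, port: int, data: bytes) -> None:
    # Prefix the blob with an assumed 4-byte network-order length, then send it.
    with socket.create_connection((host, port)) as conn:
        conn.sendall(struct.pack("!I", len(data)) + data)

# Example (illustrative values only): send_blob("127.0.0.1", 5000, b"hello")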
@@ -9,12 +22,20 @@ import pathlib
import tempfile
import os
import traceback
import signal
import atexit
HOST = "0.0.0.0"
CACHE_PATH = pathlib.Path(f"~/.ressenger/cache_{sys.argv[1]}").expanduser()
CACHE = {} # in-memory cache: {timestamp_ns: bytes}
cache_lock = threading.Lock() # protect access to CACHE
# Globals used by cleanup / signal handlers
_writer = None # will hold CacheWriter instance
_server_sock = None # will hold listening socket
_shutdown_event = threading.Event()
_cleaned = False # ensure cleanup runs only once
def atomic_dump(obj, path: pathlib.Path):
"""Atomically write `obj` (pickled) to `path`.
@@ -27,15 +48,12 @@ def atomic_dump(obj, path: pathlib.Path):
path.parent.mkdir(parents=True, exist_ok=True)
fd, tmp_path = tempfile.mkstemp(dir=str(path.parent), prefix=path.name + ".", suffix=".tmp")
try:
# Open file descriptor as a file object and write pickled data
with os.fdopen(fd, "wb") as f:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
f.flush()
os.fsync(f.fileno())
# Atomic replace (POSIX and Windows)
os.replace(tmp_path, str(path))
except Exception:
# Clean up temporary file on error
try:
os.remove(tmp_path)
except Exception:
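atomic_dump() writes the pickled snapshot to a temporary file in the same directory, fsyncs it and then os.replace()s it over the cache path, so a reader can only ever see a complete snapshot. Nothing in this commit shows a consumer of the cache file; the loader below is a minimal sketch of what reading it back might look like, and load_cache is a hypothetical helper rather than part of the daemon.

# Hypothetical reader sketch, not part of this commit. It relies only on what
# the diff states: the cache is a pickled dict of {timestamp_ns: bytes} and the
# file is replaced atomically, so we read either the old or the new snapshot.
import pathlib
import pickle

def load_cache(path: pathlib.Path) -> dict:
    try:
        with path.open("rb") as f:
            return pickle.load(f)  # {timestamp_ns: bytes}
    except FileNotFoundError:
        return {}  # the daemon removes the file on shutdown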
@@ -71,11 +89,11 @@ class CacheWriter(threading.Thread):
self._stop = threading.Event()
def notify(self):
"""Notify writer that there is new data to persist."""
"""Notify the writer that there is new data to persist."""
self._event.set()
def stop(self):
"""Signal the writer to stop and wait for it."""
"""Signal the writer to stop and wait for it to finish."""
self._stop.set()
self._event.set()
self.join()
@@ -83,7 +101,7 @@
def run(self):
try:
while not self._stop.is_set():
-# Wait until someone notifies or stop is requested
+# Wait until notified or stop is requested
self._event.wait()
if self._stop.is_set():
break
@@ -92,15 +110,14 @@
# Take a snapshot of CACHE under lock
with cache_lock:
snapshot = dict(CACHE)
-# Clear the event before writing so new notifications will set it again
+# Clear the event before writing so new notifications set it again
self._event.clear()
# Atomically write snapshot
try:
atomic_dump(snapshot, self.path)
except Exception:
# Log exception but continue; do not crash the writer thread
traceback.print_exc()
-# Before exit, attempt a final write of the latest cache
+# Final flush just before exit
with cache_lock:
final_snapshot = dict(CACHE)
try:
@@ -135,7 +152,7 @@ def handle_client(conn: socket.socket, addr, writer: CacheWriter):
# Using time.time_ns() as key is usually fine; for extreme throughput
# consider uuid.uuid4() or a sequence number to avoid collisions.
CACHE[ts] = data
-# Notify writer to persist latest CACHE (writer will snapshot under lock)
+# Notify writer to persist latest CACHE (writer snapshots under lock)
writer.notify()
print(f"Received {len(data)} bytes from {addr} and queued for persist")
except Exception:
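The comment above notes that time.time_ns() keys can collide under extreme throughput and suggests uuid.uuid4() or a sequence number instead. Below is a minimal sketch of the sequence-number alternative it mentions; next_key is a hypothetical helper and is not part of this commit.

# Sketch of the sequence-number alternative mentioned in the comment above.
import itertools
import threading

_counter = itertools.count()
_counter_lock = threading.Lock()

def next_key() -> int:
    # Returns a process-unique, monotonically increasing key for CACHE entries.
    with _counter_lock:
        return next(_counter)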
@@ -148,10 +165,63 @@ def handle_client(conn: socket.socket, addr, writer: CacheWriter):
print(f"Closed connection {addr}")
def _cleanup():
"""Stop writer, close server socket and remove cache file. Safe to call multiple times."""
global _cleaned, _writer, _server_sock
if _cleaned:
return
_cleaned = True
print("Cleanup: stopping writer and removing cache file (if present)...")
# Stop writer thread first (this will perform a final flush)
try:
if _writer is not None:
_writer.stop()
print("Writer thread stopped.")
except Exception:
traceback.print_exc()
# Close server socket if it exists
try:
if _server_sock is not None:
_server_sock.close()
print("Server socket closed.")
except Exception:
traceback.print_exc()
# Remove cache file (best-effort)
try:
if CACHE_PATH.exists():
os.remove(CACHE_PATH)
print(f"Removed cache file {CACHE_PATH}")
except Exception:
traceback.print_exc()
print("Cleanup complete.")
def _signal_handler(signum, frame):
"""Signal handler: set shutdown event and attempt to interrupt accept by closing socket."""
print(f"Signal {signum} received: initiating shutdown...")
_shutdown_event.set()
# attempt to close server socket to break accept() quickly
try:
if _server_sock is not None:
_server_sock.close()
except Exception:
pass
# Register atexit cleanup (runs on normal interpreter exit)
atexit.register(_cleanup)
# Register signal handlers for common termination signals
for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP, getattr(signal, "SIGQUIT", None)):
if sig is not None:
signal.signal(sig, _signal_handler)
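Because SIGINT, SIGTERM, SIGHUP and (where available) SIGQUIT are all routed through _signal_handler, another process can ask the daemon to shut down cleanly instead of killing it outright. A minimal sketch follows, assuming the daemon's PID is known; request_shutdown is illustrative and not part of this commit, and, as the module docstring notes, SIGKILL cannot be trapped.

# Illustrative only: ask a running daemon to shut down gracefully. SIGTERM is
# trapped above, so _cleanup() stops the writer, closes the socket and removes
# the cache file; SIGKILL would bypass all of that.
import os
import signal

def request_shutdown(pid: int) -> None:
    os.kill(pid, signal.SIGTERM)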
def run_server(port: int):
"""Main server routine: always start with a fresh CACHE, start writer, accept connections."""
# DO NOT load any existing cache file on startup — always start fresh.
# Ensure directory exists and initialise an empty cache file on disk.
"""Main server routine: start writer, accept connections, and honour shutdown requests."""
global _writer, _server_sock
# DO NOT load existing cache file on startup — always start fresh.
try:
CACHE_PATH.parent.mkdir(parents=True, exist_ok=True)
with cache_lock:
@@ -162,30 +232,40 @@ def run_server(port: int):
traceback.print_exc()
# Start writer thread
-writer = CacheWriter(CACHE_PATH, debounce=0.05)
-writer.start()
+_writer = CacheWriter(CACHE_PATH, debounce=0.05)
+_writer.start()
+# Create listening socket with short timeout so we can check shutdown_event regularly
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_server_sock = sock
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, port))
sock.listen()
sock.settimeout(1.0) # 1 second timeout to check for shutdown_event
print(f"Server listening on {HOST}:{port}...")
try:
-while True:
-conn, addr = sock.accept()
-t = threading.Thread(target=handle_client, args=(conn, addr, writer), daemon=True)
+while not _shutdown_event.is_set():
+try:
+conn, addr = sock.accept()
+except socket.timeout:
+continue
+except OSError:
+# socket may be closed from signal handler; check shutdown flag
+if _shutdown_event.is_set():
+break
+else:
+raise
+# start handler thread for the accepted connection
+t = threading.Thread(target=handle_client, args=(conn, addr, _writer), daemon=True)
t.start()
except KeyboardInterrupt:
print("Server shutting down (KeyboardInterrupt)...")
except Exception:
traceback.print_exc()
finally:
try:
sock.close()
except Exception:
pass
-# Stop the writer thread (this will perform a final flush)
-writer.stop()
-print("Writer thread stopped. Exiting.")
+# Ensure cleanup runs here as well (stop writer and remove cache file)
+_cleanup()
if __name__ == "__main__":

View File

@@ -0,0 +1 @@
#!/usr/bin/python3