diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index dd84ea7..4d823bc 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -8,31 +8,27 @@ assignees: '' --- **Describe the bug** + A clear and concise description of what the bug is. **To Reproduce** + Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error **Expected behavior** + A clear and concise description of what you expected to happen. **Screenshots** + If applicable, add screenshots to help explain your problem. -**Desktop (please complete the following information):** +**(please complete the following information):** + - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - - Version [e.g. 22] - -**Smartphone (please complete the following information):** - - Device: [e.g. iPhone6] - - OS: [e.g. iOS8.1] - - Browser [e.g. stock browser, safari] - - Version [e.g. 22] + - Systemguard version [e.g. 22] **Additional context** + Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index bbcbbe7..f0291e0 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -8,13 +8,17 @@ assignees: '' --- **Is your feature request related to a problem? Please describe.** + A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** + A clear and concise description of what you want to happen. **Describe alternatives you've considered** + A clear and concise description of any alternative solutions or features you've considered. **Additional context** + Add any other context or screenshots about the feature request here. 
diff --git a/.gitignore b/.gitignore index 48e90cb..069c006 100644 --- a/.gitignore +++ b/.gitignore @@ -164,3 +164,8 @@ cython_debug/ predefine_user.json src/assets/predefine_user.json + +influxdb_data +prometheus_config +*.yml +prometheus_config/prometheus.yml diff --git a/README.md b/README.md index 79b0b91..3760df4 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ System Guard is a Flask app designed to monitor server stats such as CPU, Memory, Disk, and Network. It also provides real-time monitoring capabilities which can be useful for system administrators, developers, and DevOps engineers to keep track of their server's performance and troubleshoot issues. The app uses the `psutil` library to retrieve system stats and the `speedtest-cli` library to perform a network speed test. + ## Features 🚀 - Lightweight, open-source, and free to use with a straightforward installation process, out-of-the-box monitoring solution. @@ -18,22 +19,15 @@ System Guard is a Flask app designed to monitor server stats such as CPU, Memory - Role-based dashboards tailored for Developer, Admin, IT Manager, and Manager roles (upcoming feature). - Update security updates with a single click or automatically update to the latest version to simplify maintenance. -## Old Features 🚀 - -- Administrators can manage user accounts by creating, updating, or deleting users. -- Admin-level access is required for configuring settings, managing users, and adjusting security and notification - preferences. -- Historical performance data can be viewed as charts, aiding in trend analysis. -- Supports network speed testing directly from the server. -- Provides the ability to terminate resource-heavy processes with a single command. -- Real-time server metric monitoring keeps data consistently updated. -- The interface is responsive and optimized for various devices including mobile, tablets, and desktops. -- The system can automatically update to the latest version to simplify maintenance. 
-- Installation can be done quickly via a bash script for easy setup. -- Notifications are sent to users and admins when a process is manually terminated. -- Offers website monitoring tasks that trigger email alerts when a website becomes unavailable. -- Configurable email alerts for various actions across the server. -- Option to download historical data in CSV format for detailed analysis (upcoming feature). -- Server status monitoring with alerts for server downtime or recovery (upcoming feature). +## Architecture 🏗️ + +![SystemGuard-Architecture](/src/docs/images/SystemGuard-Architecture.jpg) + +## Tech Stack 🛠️ + +- **Frontend**: JavaScript, Bootstrap, Chart.js, Grafana +- **Backend**: Python, Flask, SQLAlchemy, SQLite, Prometheus, InfluxDB +- **Monitoring**: psutil, speedtest-cli, nmap, netstat ## Get started 🛠️ diff --git a/app.py b/app.py index 3f5f623..bb29e1c 100644 --- a/app.py +++ b/app.py @@ -1,13 +1,10 @@ from src.config import app from src import routes -from src.background_task import start_website_monitoring, monitor_settings -import os +from src.background_task import start_background_tasks + +# Start the background tasks +start_background_tasks() -# background thread to monitor system settings changes -print("FLASK_ENV: ", os.getenv('FLASK_ENV')) -if os.getenv('FLASK_ENV') == 'production': - monitor_settings() # Starts monitoring for system logging changes - start_website_monitoring() # Starts pinging active websites if __name__ == "__main__": app.run(host="0.0.0.0", port=5000, debug=True) diff --git a/requirements.txt b/requirements.txt index e708956..9c55f5f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,4 +23,13 @@ watchdog==5.0.2 requests==2.32.3 # Speedtest CLI for testing internet speed -speedtest-cli==2.1.3 \ No newline at end of file +speedtest-cli==2.1.3 + +# prometheus client for monitoring +prometheus_client==0.20.0 + +# influxdb-client, optional for writing metrics to InfluxDB +# influxdb-client==1.46.0 + +# pyyaml for 
parsing YAML configuration files +pyyaml==6.0.2 \ No newline at end of file diff --git a/setup.sh b/setup.sh index e709310..6332cfd 100644 --- a/setup.sh +++ b/setup.sh @@ -853,10 +853,12 @@ start_server() { # Install function install() { + create_dir "$EXTRACT_DIR" + # PROMETHEUS_INSTALL_SCRIPT message_box "$APP_NAME Installer $INSATLLER_VERSION" 0 - message_box "Welcome on board: $(echo "$USER_NAME" | sed 's/.*/\u&/')" 3 + message_box "Welcome on board: $(echo "$USER_NAME" | sed 's/.*/\u&/')" 0 check_dependencies - create_dir "$EXTRACT_DIR" + message_box "Choose the installation method\nNote: Release is recommended for production use." 0 message_box "1. Release (More Stable Version)\n2. Git Repository (Pre-Release Version)\n3. Source Code (Current Directory)" 0 @@ -878,6 +880,11 @@ install() { exit 1 ;; esac + PROMETHEUS_INSTALL_SCRIPT=$(find "$EXTRACT_DIR" -name prometheus.sh) || { + log "ERROR" "Prometheus installation script not found." + exit 1 + } + sudo -u "$USER_NAME" bash "$PROMETHEUS_INSTALL_SCRIPT" start_server message_box "The $APP_NAME server is running at $HOST_URL" 0 # open_browser diff --git a/src/background_task/__init__.py b/src/background_task/__init__.py index 27d4659..9007474 100644 --- a/src/background_task/__init__.py +++ b/src/background_task/__init__.py @@ -1,4 +1,19 @@ +import os from src.background_task.monitor_website import start_website_monitoring from src.background_task.log_system_info import monitor_settings +from src.background_task.external_monitoring import fetch_file_metrics_task +from src.logger import logger -__all__ = ["start_website_monitoring", "monitor_settings"] \ No newline at end of file + + +def start_background_tasks(): + """ + Starts the background tasks for the application. 
+ """ + if os.getenv('FLASK_ENV') == 'production': + logger.info("Starting background tasks for production environment.") + start_website_monitoring() + fetch_file_metrics_task() + monitor_settings() + else: + logger.info("Background tasks are not started in development environment.") \ No newline at end of file diff --git a/src/background_task/external_monitoring.py b/src/background_task/external_monitoring.py new file mode 100644 index 0000000..898ef82 --- /dev/null +++ b/src/background_task/external_monitoring.py @@ -0,0 +1,52 @@ +from flask import Flask, Response +from prometheus_client import Gauge, generate_latest +import os +import time +from src.models import ExternalMonitornig +from src.config import app +import threading +from src.logger import logger + +# Define a Gauge metric with a label 'key' to store multiple values from the file +file_metric = Gauge('external_metrics', 'Value read from file for each key', ['key']) + +def read_file_and_update_metric(file_path: str) -> None: + """Reads a file and updates metrics based on its content.""" + if os.path.exists(file_path): + with open(file_path, 'r') as file: + for line in file: + try: + key, value = line.strip().split(':') + file_metric.labels(key.strip()).set(float(value.strip())) + except ValueError as ve: + logger.error(f"Value error processing line '{line}': {ve}") + except Exception as e: + logger.error(f"Error processing line '{line}': {e}") + else: + logger.warning(f"File {file_path} does not exist") + +def fetch_file_metrics(sleep_duration: int = 5) -> None: + """Background task to read file paths from the database and update metrics.""" + while True: + with app.app_context(): + file_paths = ExternalMonitornig.query.all() + current_keys = {sample.labels['key'] for sample in file_metric.collect()[0].samples} + new_keys = set() + + for file_path in file_paths: + logger.info(f"Reading file: {file_path.file_path}") + read_file_and_update_metric(file_path.file_path) + new_keys.update({key.strip() for 
line in open(file_path.file_path) for key in line.split(':')[0].strip()}) + + # Remove metrics for keys that are no longer in the database + for key in current_keys: + if key not in new_keys: + file_metric.remove(key) + + time.sleep(sleep_duration) + +def fetch_file_metrics_task() -> None: + """Starts the background task in a separate thread.""" + thread = threading.Thread(target=fetch_file_metrics) + thread.daemon = True + thread.start() diff --git a/src/background_task/log_system_info.py b/src/background_task/log_system_info.py index a9c47fa..6733073 100644 --- a/src/background_task/log_system_info.py +++ b/src/background_task/log_system_info.py @@ -1,4 +1,3 @@ -import os import datetime from threading import Timer from src.config import app, db @@ -6,102 +5,177 @@ from src.logger import logger from src.models import GeneralSettings, SystemInformation from sqlalchemy.exc import SQLAlchemyError -from src.logger import logger +from prometheus_client import Counter, Gauge -# Flag to check if logging is already scheduled +from src.logger import logger +# Flag to track if logging is already scheduled is_logging_scheduled = False - +fetch_system_info_interval = 10 + +# Initialize Prometheus metrics +metrics = { + 'cpu_usage_metric': Gauge('cpu_usage_percentage', 'Current CPU usage percentage'), + 'memory_usage_metric': Gauge('memory_usage_percentage', 'Current memory usage percentage'), + 'disk_usage_metric': Gauge('disk_usage_percentage', 'Disk usage percentage'), + 'network_sent_metric': Gauge('network_bytes_sent', 'Total network bytes sent'), + 'network_recv_metric': Gauge('network_bytes_received', 'Total network bytes received'), + 'cpu_temp_metric': Gauge('cpu_temperature', 'Current CPU temperature'), + 'cpu_frequency_metric': Gauge('cpu_frequency', 'Current CPU frequency'), + 'battery_percentage_metric': Gauge('battery_percentage', 'Current battery percentage'), + 'dashboard_memory_usage_metric': Gauge('dashboard_memory_usage_percentage', 'Current memory usage 
percentage'), + 'request_count': Counter('http_requests_total', 'Total HTTP requests made') +} def log_system_info(): """ Logs system information at regular intervals based on the general settings. - This function checks if logging is still active before each logging event. + This function checks if logging is enabled and schedules the next log if active. """ global is_logging_scheduled with app.app_context(): try: - # Fetch the general settings to check if logging is enabled - general_settings = GeneralSettings.query.first() - is_logging_system_info = ( - general_settings.is_logging_system_info if general_settings else False - ) - - if not is_logging_system_info: + if not is_logging_enabled(): logger.info("System info logging has been stopped.") - is_logging_scheduled = False # Reset the flag if logging stops + is_logging_scheduled = False return log_system_info_to_db() logger.debug("System information logged successfully.") - - # Schedule the next log after 60 seconds - Timer(60, log_system_info).start() + schedule_next_log(interval=fetch_system_info_interval) except Exception as e: logger.error(f"Error during system info logging: {e}", exc_info=True) - is_logging_scheduled = False # Reset the flag in case of an error + is_logging_scheduled = False + + +def is_logging_enabled(): + """ + Checks if system info logging is enabled in the general settings. + """ + try: + general_settings = GeneralSettings.query.first() + return general_settings.is_logging_system_info if general_settings else False + except SQLAlchemyError as e: + logger.error(f"Error fetching general settings: {e}", exc_info=True) + return False + + +def schedule_next_log(interval=10): + """ + Schedules the next logging event after the specified interval (in seconds). + """ + Timer(interval, log_system_info).start() def log_system_info_to_db(): """ - Fetches system information and logs it to the database. + Fetches system information and logs it to the database and updates Prometheus metrics. 
""" with app.app_context(): try: system_info = _get_system_info() - system_log = SystemInformation( - cpu_percent=system_info["cpu_percent"], - memory_percent=system_info["memory_percent"], - battery_percent=system_info["battery_percent"], - network_sent=system_info["network_sent"], - network_received=system_info["network_received"], - dashboard_memory_usage=system_info["dashboard_memory_usage"], - cpu_frequency=system_info["cpu_frequency"], - current_temp=system_info["current_temp"], - timestamp=datetime.datetime.now(), - ) - db.session.add(system_log) - db.session.commit() + + # Update Prometheus metrics + update_prometheus_metrics(system_info) + + # Store system information in InfluxDB + # store_system_info_in_influxdb(system_info) + + # Store system information in the database + # store_system_info_in_db(system_info) logger.info("System information logged to database.") except SQLAlchemyError as db_err: - logger.error( - f"Database error while logging system info: {db_err}", exc_info=True - ) + logger.error(f"Database error while logging system info: {db_err}", exc_info=True) db.session.rollback() except Exception as e: logger.error(f"Failed to log system information: {e}", exc_info=True) + +def update_prometheus_metrics(system_info): + """ + Updates Prometheus metrics with the latest system information. 
+ """ + metrics['cpu_usage_metric'].set(system_info['cpu_percent']) + metrics['memory_usage_metric'].set(system_info['memory_percent']) + metrics['disk_usage_metric'].set(system_info['disk_percent']) + metrics['network_sent_metric'].set(system_info['network_sent']) + metrics['network_recv_metric'].set(system_info['network_received']) + metrics['cpu_temp_metric'].set(system_info['current_temp']) + metrics['cpu_frequency_metric'].set(system_info['cpu_frequency']) + metrics['battery_percentage_metric'].set(system_info['battery_percent']) + metrics['dashboard_memory_usage_metric'].set(system_info['dashboard_memory_usage']) + metrics['request_count'].inc() + + +def store_system_info_in_db(system_info): + """ + Stores the collected system information into the database. + """ + system_log = SystemInformation( + cpu_percent=system_info["cpu_percent"], + memory_percent=system_info["memory_percent"], + battery_percent=system_info["battery_percent"], + network_sent=system_info["network_sent"], + network_received=system_info["network_received"], + dashboard_memory_usage=system_info["dashboard_memory_usage"], + cpu_frequency=system_info["cpu_frequency"], + current_temp=system_info["current_temp"], + timestamp=datetime.datetime.now(), + ) + db.session.add(system_log) + db.session.commit() + +# def store_system_info_in_influxdb(system_info): +# """ +# Stores the collected system information into the InfluxDB with proper error handling. 
+# """ +# try: +# # Create a data point for system information +# point = ( +# Point("system_info") +# .tag("host", get_system_username()) +# .field("cpu_percent", system_info["cpu_percent"]) +# .field("memory_percent", system_info["memory_percent"]) +# .field("battery_percent", system_info["battery_percent"]) +# .field("network_sent", system_info["network_sent"]) +# .field("network_received", system_info["network_received"]) +# .field("dashboard_memory_usage", system_info["dashboard_memory_usage"]) +# .field("cpu_frequency", system_info["cpu_frequency"]) +# .field("current_temp", system_info["current_temp"]) +# .time(int(time.time() * 1_000_000_000)) +# ) + +# # Write the data point to InfluxDB +# write_api.write(bucket=bucket, record=point) +# logger.info("Successfully wrote system information to InfluxDB") + +# except ValueError as ve: +# logger.error(f"Value error while storing system info: {ve}") +# except Exception as e: +# logger.error(f"An unexpected error occurred: {e}", exc_info=True) + + + def monitor_settings(): """ Monitors application general settings for changes and controls system logging dynamically. - This function runs periodically to check for updates to logging settings. """ global is_logging_scheduled with app.app_context(): try: - # Fetch the general settings - general_settings = GeneralSettings.query.first() - - # Check if logging should be active or not - is_logging_system_info = ( - general_settings.is_logging_system_info if general_settings else False - ) - if is_logging_system_info: + if is_logging_enabled(): logger.info("System logging enabled. Starting system info logging.") - - # Schedule logging only if not already scheduled if not is_logging_scheduled: logger.debug("Scheduling system info logging.") Timer(0, log_system_info).start() is_logging_scheduled = True else: logger.info("System logging disabled. 
Stopping system info logging.") - is_logging_scheduled = False # Reset the flag if logging is disabled + is_logging_scheduled = False - # Check settings periodically (every 10 seconds) + # Recheck settings every 10 seconds Timer(10, monitor_settings).start() except SQLAlchemyError as db_err: logger.error(f"Error fetching settings: {db_err}", exc_info=True) - diff --git a/src/background_task/thread_process.py b/src/background_task/thread_process.py deleted file mode 100644 index b5108a0..0000000 --- a/src/background_task/thread_process.py +++ /dev/null @@ -1,248 +0,0 @@ -# deprecated file - -import os -import datetime -from threading import Timer -from flask import url_for -from src.config import app, db -from src.utils import _get_system_info -from src.logger import logger -from src.models import MonitoredWebsite, GeneralSettings, SystemInformation -from sqlalchemy.exc import SQLAlchemyError -import requests -from src.scripts.email_me import send_smtp_email -from src.logger import logger -from src.utils import render_template_from_file, ROOT_DIR -from src.config import get_app_info - -# Dictionary to track the last known status of each website -website_status = {} -# Flag to check if logging is already scheduled -is_logging_scheduled = False - - -def send_mail(website_name, status, email_adress, email_alerts_enabled): - """ - Dummy function to simulate sending an email. - - Args: - website_name (str): The name or URL of the website. - status (str): The status of the website, either 'DOWN' or 'UP'. - """ - # This is a dummy function, so no real email is sent. 
- if email_alerts_enabled: - context = { - "website_status": status, # UP/DOWN - "website_name": website_name, - "checked_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - "message": f"{website_name} is now {status}", - "title": get_app_info()["title"], - } - website_status_template = os.path.join( - ROOT_DIR, "src/templates/email_templates/website_monitor_status.html" - ) - email_subject = f"{website_name} is now {status}" - email_body = render_template_from_file(website_status_template, **context) - send_smtp_email(email_adress, email_subject, email_body, is_html=True) - - -def update_website_status(website, status): - """ - Updates the status of the website and sends an email notification if the status has changed. - - Args: - website (MonitoredWebsite): The website object to update. - status (str): The new status of the website. - """ - global website_status - - if website.id not in website_status: - website_status[website.id] = "UP" # Initialize with UP status if not present - - if website_status[website.id] != status: - send_mail( - website.name, status, website.email_address, website.email_alerts_enabled - ) - website_status[website.id] = status - - -def ping_website(website): - """ - Pings a single website and updates its status in the database. - - Args: - website (MonitoredWebsite): The website object to ping. - """ - with app.app_context(): - try: - # Check if the website is still active - updated_website = MonitoredWebsite.query.get(website.id) - if not updated_website or not updated_website.is_ping_active: - logger.info( - f"Website {website.name} is no longer active. Stopping monitoring." - ) - return - - logger.info( - f"Pinging {website.name} (Interval: {website.ping_interval}s)..." 
- ) - response = requests.get(website.name, timeout=10) - updated_website.last_ping_time = datetime.datetime.now() - updated_website.ping_status_code = response.status_code - - new_status = "UP" if response.status_code == 200 else "DOWN" - updated_website.ping_status = new_status - - # Update the website status - db.session.commit() - logger.info(f"Website {website.name} updated successfully.") - - # Determine if an email should be sent - update_website_status(website, new_status) - - except requests.RequestException as req_err: - updated_website.ping_status = "DOWN" - logger.error(f"Failed to ping {website.name}: {req_err}", exc_info=True) - db.session.rollback() - - except SQLAlchemyError as db_err: - logger.error( - f"Database commit error for {website.name}: {db_err}", exc_info=True - ) - db.session.rollback() - - finally: - # Add more detailed logging for debugging - if db.session.new or db.session.dirty: - logger.warning( - f"Database transaction not committed properly for {website.name}." - ) - - # Schedule the next ping for this website - Timer( - updated_website.ping_interval, ping_website, args=[updated_website] - ).start() - -def start_website_monitoring(): - """ - Periodically pings monitored websites based on individual ping intervals. 
- """ - with app.app_context(): - try: - while True: - active_websites = MonitoredWebsite.query.filter_by( - is_ping_active=True - ).all() - if not active_websites: - logger.info("No active websites to monitor.") - else: - for website in active_websites: - # Start pinging each website individually based on its ping interval - Timer(0, ping_website, args=[website]).start() - - # Check for active websites periodically (every 30 seconds) - Timer(30, start_website_monitoring).start() - break # Break out of the loop to avoid creating a new thread infinitely - - except SQLAlchemyError as db_err: - logger.error( - f"Database error during website monitoring: {db_err}", exc_info=True - ) - except Exception as e: - logger.error(f"Error during website monitoring: {e}", exc_info=True) - -def monitor_settings(): - """ - Monitors application general settings for changes and controls system logging dynamically. - This function runs periodically to check for updates to logging settings. - """ - global is_logging_scheduled - with app.app_context(): - try: - # Fetch the general settings - general_settings = GeneralSettings.query.first() - - # Check if logging should be active or not - is_logging_system_info = ( - general_settings.is_logging_system_info if general_settings else False - ) - if is_logging_system_info: - logger.info("System logging enabled. Starting system info logging.") - - # Schedule logging only if not already scheduled - if not is_logging_scheduled: - logger.debug("Scheduling system info logging.") - Timer(0, log_system_info).start() - is_logging_scheduled = True - else: - logger.info("System logging disabled. 
Stopping system info logging.") - is_logging_scheduled = False # Reset the flag if logging is disabled - - # Check settings periodically (every 10 seconds) - Timer(10, monitor_settings).start() - - except SQLAlchemyError as db_err: - logger.error(f"Error fetching settings: {db_err}", exc_info=True) - - -def log_system_info(): - """ - Logs system information at regular intervals based on the general settings. - This function checks if logging is still active before each logging event. - """ - global is_logging_scheduled - with app.app_context(): - try: - # Fetch the general settings to check if logging is enabled - general_settings = GeneralSettings.query.first() - is_logging_system_info = ( - general_settings.is_logging_system_info if general_settings else False - ) - - if not is_logging_system_info: - logger.info("System info logging has been stopped.") - is_logging_scheduled = False # Reset the flag if logging stops - return - - log_system_info_to_db() - logger.debug("System information logged successfully.") - - # Schedule the next log after 60 seconds - Timer(60, log_system_info).start() - - except Exception as e: - logger.error(f"Error during system info logging: {e}", exc_info=True) - is_logging_scheduled = False # Reset the flag in case of an error - - -def log_system_info_to_db(): - """ - Fetches system information and logs it to the database. 
- """ - with app.app_context(): - try: - system_info = _get_system_info() - system_log = SystemInformation( - cpu_percent=system_info["cpu_percent"], - memory_percent=system_info["memory_percent"], - battery_percent=system_info["battery_percent"], - network_sent=system_info["network_sent"], - network_received=system_info["network_received"], - dashboard_memory_usage=system_info["dashboard_memory_usage"], - cpu_frequency=system_info["cpu_frequency"], - current_temp=system_info["current_temp"], - timestamp=datetime.datetime.now(), - ) - db.session.add(system_log) - db.session.commit() - logger.info("System information logged to database.") - - except SQLAlchemyError as db_err: - logger.error( - f"Database error while logging system info: {db_err}", exc_info=True - ) - db.session.rollback() - except Exception as e: - logger.error(f"Failed to log system information: {e}", exc_info=True) - - diff --git a/src/config.py b/src/config.py index 6e7c5f5..4bfe3f4 100644 --- a/src/config.py +++ b/src/config.py @@ -2,6 +2,7 @@ from flask import Flask, render_template from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate +import os from src.logger import logger from src.helper import get_system_node_name, get_ip_address @@ -27,6 +28,7 @@ # Configure the SQLite database app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{DB_DIR}/systemguard.db" +# app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///systemguard.db" app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SECRET_KEY'] = 'secret' diff --git a/src/docs/README.md b/src/docs/README.md index a864371..66b76ec 100644 --- a/src/docs/README.md +++ b/src/docs/README.md @@ -23,3 +23,7 @@ ## Security Analysis ![screenshot_007](/src/static/images/screenshot_007.png) + +## Central Dashboard + +![screenshot_008](/src/static/images/tracking.png) \ No newline at end of file diff --git a/src/docs/images/SystemGuard-Architecture.jpg b/src/docs/images/SystemGuard-Architecture.jpg new file mode 100644 index 
0000000..06556c0 Binary files /dev/null and b/src/docs/images/SystemGuard-Architecture.jpg differ diff --git a/src/helper.py b/src/helper.py index 0d36b44..33e9e33 100644 --- a/src/helper.py +++ b/src/helper.py @@ -33,12 +33,7 @@ def get_ip_address(): return ip_address except (IndexError, subprocess.CalledProcessError) as e: - print(f"Error occurred: {e}") return None - - -import os -import subprocess def check_installation_information(): # Output dictionary to store results @@ -82,7 +77,6 @@ def check_installation_information(): # Check for updates try: result = subprocess.run(["git", "status", "-uno"], capture_output=True, text=True, check=True) - print(result.stdout) if "Your branch is up to date" in result.stdout: output["update_available"] = False else: diff --git a/src/influxdb_config.py b/src/influxdb_config.py new file mode 100644 index 0000000..5ba68aa --- /dev/null +++ b/src/influxdb_config.py @@ -0,0 +1,122 @@ +# from influxdb_client import InfluxDBClient, Point, WritePrecision +# from influxdb_client.client.write_api import SYNCHRONOUS +# import os +# from src.logger import logger + + +# # influx db configuration +# org = "systemguard" +# url = "http://localhost:8086" +# bucket="system_metrics" +# INFLUXDB_TOKEN=os.getenv('INFLUXDB_TOKEN') +# print("INFLUXDB_TOKEN: ", INFLUXDB_TOKEN) + +# try: +# influx_client = InfluxDBClient(url=url, token=INFLUXDB_TOKEN, org=org) +# bucket = "system_metrics" +# write_api = influx_client.write_api(write_options=SYNCHRONOUS) +# query_api = influx_client.query_api() +# logger.info("Connected to InfluxDB successfully") +# except Exception as e: +# logger.error(f"Failed to connect to InfluxDB: {e}") +# raiseclient = InfluxDBClient(url=url, token=INFLUXDB_TOKEN, org=org) + + + +# @app.route('/api/v2/influxdb/graphs_data', methods=['GET']) +# @login_required +# def graph_data_api_v2(): +# try: +# current_time = datetime.now() +# # Get the time filter from query parameters +# time_filter = request.args.get('filter', 
default='1 day') + +# # Determine the start time based on the filter +# time_deltas = { +# '5 minutes': '-5m', +# '15 minutes': '-15m', +# '30 minutes': '-30m', +# '1 hour': '-1h', +# '3 hours': '-3h', +# '6 hours': '-6h', +# '12 hours': '-12h', +# '1 day': '-1d', +# '2 days': '-2d', +# '3 days': '-3d', +# '1 week': '-1w', +# '1 month': '-30d', +# '3 months': '-90d', +# } + +# # Get the start time for the query +# time_range = time_deltas.get(time_filter, '-1d') + +# # Build the InfluxDB query +# flux_query = f""" +# from(bucket: "{bucket}") +# |> range(start: {time_range}) +# |> filter(fn: (r) => r._measurement == "system_info") +# |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") +# """ + +# # Execute the query +# tables = query_api.query(flux_query) + +# # Initialize lists for the data +# time_data = [] +# cpu_data = [] +# memory_data = [] +# battery_data = [] +# network_sent_data = [] +# network_received_data = [] +# dashboard_memory_usage = [] +# cpu_frequency = [] +# current_temp = [] + +# # Parse the results +# for table in tables: +# for record in table.records: +# time_data.append(record.values.get("_time", None)) +# # Extract each field by key (handle missing fields gracefully) +# cpu_data.append(record.values.get("cpu_percent", None)) +# memory_data.append(record.values.get("memory_percent", None)) +# battery_data.append(record.values.get("battery_percent", None)) +# network_sent_data.append(record.values.get("network_sent", None)) +# network_received_data.append(record.values.get("network_received", None)) +# dashboard_memory_usage.append(record.values.get("dashboard_memory_usage", None)) +# cpu_frequency.append(record.values.get("cpu_frequency", None)) +# current_temp.append(record.values.get("current_temp", None)) + +# # Return the data as JSON +# response = jsonify({ +# "time": time_data, +# "cpu": cpu_data, +# "memory": memory_data, +# "battery": battery_data, +# "network_sent": network_sent_data, +# "network_received": 
network_received_data, +# "dashboard_memory_usage": dashboard_memory_usage, +# "cpu_frequency": cpu_frequency, +# "current_temp": current_temp, +# "current_time": current_time +# }) + +# # Clean up large data structures +# del tables +# del time_data +# del cpu_data +# del memory_data +# del battery_data +# del network_sent_data +# del network_received_data +# del dashboard_memory_usage +# del cpu_frequency +# del current_temp + +# gc.collect() + +# return response, 200 + +# except Exception as e: +# # Handle and log the error for debugging purposes +# return jsonify({'error': 'An error occurred while fetching the graph data', 'details': str(e)}), 500 diff --git a/src/models/__init__.py b/src/models/__init__.py index da25b54..e607d3f 100644 --- a/src/models/__init__.py +++ b/src/models/__init__.py @@ -9,6 +9,7 @@ from src.models.system_information import SystemInformation from src.models.user_profile import UserProfile from src.models.monitored_website import MonitoredWebsite +from src.models.prometheus_model import ExternalMonitornig from flask_login import current_user from src.logger import logger from werkzeug.security import generate_password_hash @@ -17,9 +18,39 @@ ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +# Context processor for injecting settings into templates +@app.context_processor +def inject_settings(): + if current_user.is_anonymous: + user_dashboard_settings = UserDashboardSettings(user_id=0) + card_settings = None + page_toggles_settings = None + general_settings = None + return dict( + user_dashboard_settings=user_dashboard_settings, + card_settings=card_settings, + page_toggles_settings=page_toggles_settings, + general_settings=general_settings, + ) + general_settings = GeneralSettings.query.first() + card_settings = UserCardSettings.query.filter_by(user_id=current_user.id).first() + user_dashboard_settings = UserDashboardSettings.query.filter_by( + user_id=current_user.id + ).first() # Retrieve 
user-specific user_dashboard_settings from DB + page_toggles_settings = PageToggleSettings.query.filter_by( + user_id=current_user.id + ).first() + all_settings = dict( + user_dashboard_settings=user_dashboard_settings, + general_settings=general_settings, + card_settings=card_settings, + page_toggles_settings=page_toggles_settings, + ) + return all_settings + with app.app_context(): # Check if tables already exist - if not db.inspect(db.engine).has_table('user_profile'): # Use an important table to check existence + if not db.inspect(db.engine).has_table('users'): # Use an important table to check existence logger.info("Creating tables") db.create_all() @@ -69,32 +100,3 @@ else: logger.info("Tables already exist. Skipping creation.") -# Context processor for injecting settings into templates -@app.context_processor -def inject_settings(): - if current_user.is_anonymous: - user_dashboard_settings = UserDashboardSettings(user_id=0) - card_settings = None - page_toggles_settings = None - general_settings = None - return dict( - user_dashboard_settings=user_dashboard_settings, - card_settings=card_settings, - page_toggles_settings=page_toggles_settings, - general_settings=general_settings, - ) - general_settings = GeneralSettings.query.first() - card_settings = UserCardSettings.query.filter_by(user_id=current_user.id).first() - user_dashboard_settings = UserDashboardSettings.query.filter_by( - user_id=current_user.id - ).first() # Retrieve user-specific user_dashboard_settings from DB - page_toggles_settings = PageToggleSettings.query.filter_by( - user_id=current_user.id - ).first() - all_settings = dict( - user_dashboard_settings=user_dashboard_settings, - general_settings=general_settings, - card_settings=card_settings, - page_toggles_settings=page_toggles_settings, - ) - return all_settings diff --git a/src/models/page_toggle_settings.py b/src/models/page_toggle_settings.py index 987d718..2731f85 100644 --- a/src/models/page_toggle_settings.py +++ 
b/src/models/page_toggle_settings.py @@ -1,4 +1,3 @@ -import datetime from src.config import db class PageToggleSettings(db.Model): @@ -26,4 +25,4 @@ class PageToggleSettings(db.Model): is_disk_info_enabled = db.Column(db.Boolean, default=True) is_network_info_enabled = db.Column(db.Boolean, default=True) is_process_info_enabled = db.Column(db.Boolean, default=True) - is_dashboard_network_enabled = db.Column(db.Boolean, default=False) + is_dashboard_network_enabled = db.Column(db.Boolean, default=True) diff --git a/src/models/prometheus_model.py b/src/models/prometheus_model.py new file mode 100644 index 0000000..087312a --- /dev/null +++ b/src/models/prometheus_model.py @@ -0,0 +1,7 @@ +from src.config import db + + +class ExternalMonitornig(db.Model): + id = db.Column(db.Integer, primary_key=True) + file_path = db.Column(db.String(100), nullable=False) + is_active = db.Column(db.Boolean, default=True) diff --git a/src/routes/__init__.py b/src/routes/__init__.py index 5c7c545..dd77fcb 100644 --- a/src/routes/__init__.py +++ b/src/routes/__init__.py @@ -19,3 +19,4 @@ from src.routes.experimental import experimental_bp from src.routes.error_handlers import error_handlers_bp from src.routes.profile import profile_bp +from src.routes.prometheus import prometheus_bp diff --git a/src/routes/api.py b/src/routes/api.py index fd9a256..b6c13ab 100644 --- a/src/routes/api.py +++ b/src/routes/api.py @@ -1,14 +1,34 @@ from flask import jsonify, blueprints, request +import requests from flask_login import login_required, current_user from src.config import app, db from src.models import SystemInformation, UserDashboardSettings from src.utils import _get_system_info, get_os_release_info, get_os_info, get_cached_value from datetime import datetime, timedelta +from flask import request, jsonify import gc +from datetime import datetime, timezone + +from src.routes.helper.common_helper import admin_required api_bp = blueprints.Blueprint("api", __name__) 
-@app.route("/api/system-info", methods=["GET"]) +PROMETHEUS_URL = 'http://localhost:9090' # Change if using a different URL or port +QUERY_API_URL = f'{PROMETHEUS_URL}/api/v1/query_range' +TARGETS_API_URL = f'{PROMETHEUS_URL}/api/v1/targets' + +PROMETHEUS_METRICS = { + 'cpu': 'cpu_usage_percentage', # Adjusting to match the defined gauge + 'memory': 'memory_usage_percentage', + 'battery': 'battery_percentage', + 'network_sent': 'network_bytes_sent', + 'network_received': 'network_bytes_received', + 'dashboard_memory_usage': 'dashboard_memory_usage_percentage', + 'cpu_frequency': 'cpu_frequency', + 'current_temp': 'cpu_temperature', +} + +@app.route("/api/v1/system-info", methods=["GET"]) @login_required def system_api(): try: @@ -17,7 +37,7 @@ def system_api(): except Exception as e: return jsonify({"error": "An error occurred while fetching the system information", "details": str(e)}), 500 -@app.route('/api/v1/graphs_data', methods=['GET']) +@app.route('/api/v1/sqlite/graphs_data', methods=['GET']) @login_required def graph_data_api(): try: @@ -39,9 +59,7 @@ def graph_data_api(): '2 days': timedelta(days=2), '3 days': timedelta(days=3), '1 week': timedelta(weeks=1), - '1 month': timedelta(weeks=4), - '3 months': timedelta(weeks=12), - + '15 days': timedelta(days=15), } if time_filter == 'all': start_time = datetime.min @@ -90,6 +108,19 @@ def graph_data_api(): "current_time": current_time }) + print("response", { + "time": time_data, + "cpu": cpu_data, + "memory": memory_data, + "battery": battery_data, + "network_sent": network_sent_data, + "network_received": network_received_data, + "dashboard_memory_usage": dashboard_memory_usage, + "cpu_frequency": cpu_frequency, + "current_temp": current_temp, + "current_time": current_time + }) + # Clean up large data structures del recent_system_info_entries del time_data @@ -109,6 +140,228 @@ def graph_data_api(): # Handle and log the error for debugging purposes return jsonify({'error': 'An error occurred while fetching 
the graph data', 'details': str(e)}), 500 +@app.route('/api/v1/prometheus/graphs_data', methods=['GET']) +@login_required +def graph_data_api_v3(): + try: + current_time = datetime.now() + + # Get the time filter from query parameters + time_filter = request.args.get('filter', default='1 day') + + # Determine the start time based on the filter + time_deltas = { + '5 minutes': 5 * 60, + '15 minutes': 15 * 60, + '30 minutes': 30 * 60, + '1 hour': 60 * 60, + '3 hours': 3 * 60 * 60, + '6 hours': 6 * 60 * 60, + '12 hours': 12 * 60 * 60, + '1 day': 24 * 60 * 60, + '2 days': 2 * 24 * 60 * 60, + '3 days': 3 * 24 * 60 * 60, + '1 week': 7 * 24 * 60 * 60, + '15 days': 15 * 24 * 60 * 60, + } + + # Get the time range in seconds + time_range_seconds = time_deltas.get(time_filter, 24 * 60 * 60) + + # Prepare time parameters for the Prometheus query + end_time = int(current_time.timestamp()) + start_time = end_time - time_range_seconds + step = '10s' + + # Initialize lists for the data + time_data = [] + metric_data = {key: [] for key in PROMETHEUS_METRICS} + + # Fetch data for each metric from Prometheus + for metric, prometheus_query in PROMETHEUS_METRICS.items(): + # Prepare Prometheus API query parameters + params = { + 'query': prometheus_query, + 'start': start_time, + 'end': end_time, + 'step': step + } + + # Send the query to Prometheus + response = requests.get(QUERY_API_URL, params=params) + + # Check if the request was successful + if response.status_code == 200: + result = response.json().get('data', {}).get('result', []) + + # Extract the time and values for the metric + if result: + for value in result[0]['values']: + timestamp = datetime.fromtimestamp(float(value[0]), tz=timezone.utc).isoformat() + if timestamp not in time_data: + time_data.append(timestamp) + metric_data[metric].append(value[1]) + else: + print(f"No data for metric: {metric}") + else: + raise Exception(f"Failed to fetch data for {metric} from Prometheus: {response.text}") + + # Ensure all metric 
data has the same length as time_data + for metric in metric_data: + while len(metric_data[metric]) < len(time_data): + metric_data[metric].append(None) + + # Return the data as JSON + response_data = { + "time": time_data, + **metric_data, + "current_time": current_time + } + + return jsonify(response_data), 200 + + except Exception as e: + # Handle and log the error for debugging purposes + return jsonify({'error': 'An error occurred while fetching the graph data', 'details': str(e)}), 500 + +@app.route('/api/v1/prometheus/graphs_data/targets', methods=['GET']) +@login_required +def graph_data_api_v3_(): + try: + current_time = datetime.now() + + # Get the time filter from query parameters + time_filter = request.args.get('filter', default='1 day') + + # Determine the start time based on the filter + time_deltas = { + '5 minutes': 5 * 60, + '15 minutes': 15 * 60, + '30 minutes': 30 * 60, + '1 hour': 60 * 60, + '3 hours': 3 * 60 * 60, + '6 hours': 6 * 60 * 60, + '12 hours': 12 * 60 * 60, + '1 day': 24 * 60 * 60, + '2 days': 2 * 24 * 60 * 60, + '3 days': 3 * 24 * 60 * 60, + '1 week': 7 * 24 * 60 * 60, + '1 month': 30 * 24 * 60 * 60, + '3 months': 90 * 24 * 60 * 60, + } + + # Get the time range in seconds + time_range_seconds = time_deltas.get(time_filter, 24 * 60 * 60) + + # Prepare time parameters for the Prometheus query + end_time = int(current_time.timestamp()) + start_time = end_time - time_range_seconds + step = '10s' + + # Initialize lists for the data + time_data = [] + metric_data = {} + + # Fetch data for each metric from Prometheus + for metric, prometheus_query in PROMETHEUS_METRICS.items(): + # Prepare Prometheus API query parameters + params = { + 'query': prometheus_query, + 'start': start_time, + 'end': end_time, + 'step': step + } + + # Send the query to Prometheus + response = requests.get(QUERY_API_URL, params=params) + + # Check if the request was successful + if response.status_code == 200: + result = response.json().get('data', 
{}).get('result', []) + + if result: + # Initialize a dictionary to hold time series data for this metric + metric_data[metric] = [] + + for series in result: + # Create a new list for the time series data of this particular series + series_data = { + "metric": series.get("metric"), + "values": {} + } + + # Iterate over the values for this series + for value in series.get("values", []): + timestamp = datetime.fromtimestamp(float(value[0]), tz=timezone.utc).isoformat() + if timestamp not in time_data: + time_data.append(timestamp) + series_data["values"][timestamp] = value[1] + + # Append the series data to the metric + metric_data[metric].append(series_data) + else: + print(f"No data for metric: {metric}") + else: + raise Exception(f"Failed to fetch data for {metric} from Prometheus: {response.text}") + + # Sort the time data for proper alignment + time_data.sort() + + # Ensure all metric data aligns with time_data + for metric, series_list in metric_data.items(): + for series in series_list: + aligned_values = [] + for timestamp in time_data: + aligned_values.append(series["values"].get(timestamp, None)) + series["values"] = aligned_values + + # Return the data as JSON + response_data = { + "time": time_data, + **{metric: [{"metric": s["metric"], "values": s["values"]} for s in series_list] for metric, series_list in metric_data.items()}, + "current_time": current_time + } + + return jsonify(response_data), 200 + + except Exception as e: + # Handle and log the error for debugging purposes + return jsonify({'error': 'An error occurred while fetching the graph data', 'details': str(e)}), 500 + + +@app.route('/api/v1/targets', methods=['GET']) +@admin_required +def get_prometheus_targets(): + try: + # Query Prometheus API to get the targets + response = requests.get(TARGETS_API_URL) + + # Check if the request was successful + if response.status_code == 200: + targets_data = response.json().get('data', {}) + active_targets = targets_data.get('activeTargets', []) + 
dropped_targets = targets_data.get('droppedTargets', []) + + # Return the active and dropped targets as JSON + return jsonify({ + 'active_targets': active_targets, + 'dropped_targets': dropped_targets + }), 200 + else: + # Handle non-200 responses from Prometheus + return jsonify({ + 'error': 'Failed to fetch targets from Prometheus', + 'details': response.text + }), response.status_code + + except Exception as e: + # Handle exceptions + return jsonify({ + 'error': 'An error occurred while fetching Prometheus targets', + 'details': str(e) + }), 500 + + @app.route('/api/v1/refresh-interval', methods=['GET', 'POST']) @login_required def manage_refresh_interval(): @@ -155,4 +408,3 @@ def get_os_info_api(): return jsonify(os_info), 200 except Exception as e: return jsonify({"error": "An error occurred while fetching the OS information", "details": str(e)}), 500 - \ No newline at end of file diff --git a/src/routes/dashboard_network.py b/src/routes/dashboard_network.py index 5f38f1b..284979e 100644 --- a/src/routes/dashboard_network.py +++ b/src/routes/dashboard_network.py @@ -1,63 +1,11 @@ -from flask import render_template, blueprints, flash, redirect, url_for, request +from flask import render_template, blueprints -from src.config import app, db -from src.models import DashboardNetworkSettings +from src.config import app from src.routes.helper.common_helper import admin_required network_bp = blueprints.Blueprint('network', __name__) -@app.route('/network', methods=['GET']) +@app.route('/dashboard_network', methods=['GET']) @admin_required def dashboard_network(): - groups = DashboardNetworkSettings.query.all() # Fetch all dashboard groups - return render_template('network/dashboard_network.html', groups=groups) - -@app.route('/add_server', methods=['GET', 'POST']) -@admin_required -def add_server(): - if request.method == 'POST': - name = request.form.get('name') - description = request.form.get('description') - ip_address = request.form.get('ip_address') - port = 
request.form.get('port') - link = request.form.get('link') - - # Check if the server name already exists - existing_server = DashboardNetworkSettings.query.filter_by(name=name).first() - if existing_server: - flash('Server name already exists. Please choose a different name.', 'danger') - return redirect(url_for('add_server')) - - # Create a new server entry - new_server = DashboardNetworkSettings(name=name, description=description, ip_address=ip_address, port=port, link=link) - db.session.add(new_server) - db.session.commit() - - flash('Server added successfully!', 'success') - return redirect(url_for('dashboard_network')) - - return render_template('network/add_server.html') - -@app.route('/edit_server/', methods=['GET', 'POST']) -@admin_required -def edit_server(server_id): - server = DashboardNetworkSettings.query.get_or_404(server_id) - if request.method == 'POST': - server.name = request.form['name'] - server.description = request.form['description'] - server.ip_address = request.form['ip_address'] - server.port = request.form['port'] - server.link = request.form['link'] - db.session.commit() - flash('Server updated successfully!', 'success') - return redirect(url_for('dashboard_network')) - return render_template('network/edit_server.html', server=server) - -@app.route('/delete_server/', methods=['POST']) -@admin_required -def delete_server(server_id): - server = DashboardNetworkSettings.query.get_or_404(server_id) - db.session.delete(server) - db.session.commit() - flash('Server deleted successfully!', 'success') - return redirect(url_for('dashboard_network')) + return render_template('network/dashboard_network.html') diff --git a/src/routes/graphs.py b/src/routes/graphs.py index db82123..7c8cba9 100644 --- a/src/routes/graphs.py +++ b/src/routes/graphs.py @@ -1,3 +1,4 @@ +import subprocess from flask import render_template, blueprints from src.config import app from src.routes.helper.common_helper import admin_required @@ -7,4 +8,11 @@ 
@app.route('/graphs') @admin_required def graphs(): - return render_template('graphs/graphs.html') + + return render_template('experimental/graphs.html') + + +@app.route('/experimental/graphs') +@admin_required +def experimental_graphs(): + return render_template('experimental/graphs.html') diff --git a/src/routes/helper/__init__.py b/src/routes/helper/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/routes/helper/common_helper.py b/src/routes/helper/common_helper.py index 4b9dcf7..5714931 100644 --- a/src/routes/helper/common_helper.py +++ b/src/routes/helper/common_helper.py @@ -4,6 +4,10 @@ from functools import wraps from flask import flash, redirect, url_for, render_template, request, session import subprocess +import os +from src.utils import ROOT_DIR + + def get_email_addresses(user_level=None, receive_email_alerts=True, fetch_all_users=False): """ Retrieve email addresses of users based on filters.""" @@ -113,3 +117,4 @@ def decorated_function(*args, **kwargs): return f(*args, **kwargs) return decorated_function return decorator + diff --git a/src/routes/helper/prometheus_helper.py b/src/routes/helper/prometheus_helper.py new file mode 100644 index 0000000..f398d9a --- /dev/null +++ b/src/routes/helper/prometheus_helper.py @@ -0,0 +1,120 @@ +import os +import yaml +import subprocess +from collections import OrderedDict +from src.utils import ROOT_DIR + +prometheus_yml_path = os.path.join(ROOT_DIR, 'prometheus_config/prometheus.yml') +update_prometheus_path = os.path.join(ROOT_DIR, 'src/scripts/update_prometheus.sh') + +def is_valid_file(file_path: str) -> bool: + """Checks if a file is valid and has key-value pairs separated by a colon.""" + with open(file_path, 'r') as file: + for line in file: + if not line.strip(): + continue + if ':' not in line: + return False + return True + + +class OrderedDumper(yaml.SafeDumper): + """Custom YAML dumper that preserves order of keys.""" + pass + +def dict_representer(dumper, data): + return 
dumper.represent_dict(data.items()) + +OrderedDumper.add_representer(OrderedDict, dict_representer) + +def load_yaml(file_path): + """Load the YAML file and keep the order of dictionaries.""" + with open(file_path, 'r') as file: + return yaml.load(file, Loader=yaml.SafeLoader) + +def save_yaml(data, file_path): + """Save the updated YAML data back to the file, preserving order.""" + with open(file_path, 'w') as file: + yaml.dump(data, file, Dumper=OrderedDumper, default_flow_style=False) + +def update_prometheus_config(): + """Update the first target with the machine's IP address.""" + print("Updating Prometheus config...") + + # Get the machine's IP address + try: + ipv4_address = subprocess.run( + ['hostname', '-I'], capture_output=True, text=True, check=True + ).stdout.split()[0] + except subprocess.CalledProcessError as e: + print(f"Error getting IP address: {e}") + return False + + # Load the existing config + try: + config = load_yaml(prometheus_yml_path) + except Exception as e: + print(f"Error loading YAML config: {e}") + return False + + for job in config.get('scrape_configs', []): + if job['job_name'] == 'localhost': + # Update the target for 'localhost' job + job['static_configs'][0]['targets'][0] = f'{ipv4_address}:5050' + + # Create a new OrderedDict to ensure the correct order + updated_job = OrderedDict() + updated_job['job_name'] = job['job_name'] + updated_job['scrape_interval'] = job.get('scrape_interval', '10s') + updated_job['static_configs'] = job['static_configs'] + + # Add basic_auth at the end if it exists + if 'basic_auth' in job: + updated_job['basic_auth'] = job['basic_auth'] + + # Replace the old job with the updated one + for index, j in enumerate(config['scrape_configs']): + if j['job_name'] == 'localhost': + config['scrape_configs'][index] = updated_job + else: + config['scrape_configs'][index] = OrderedDict(j) + + # Save the updated config + try: + save_yaml(config, prometheus_yml_path) + print("Prometheus config updated 
successfully.") + return True + except Exception as e: + print(f"Error saving YAML config: {e}") + return False + + print("No 'localhost' job found in Prometheus config.") + return False + +def show_targets(): + """Show all targets for each job.""" + config = load_yaml(prometheus_yml_path) + targets_info = [] + for scrape_config in config.get('scrape_configs', []): + job_name = scrape_config['job_name'] + targets = scrape_config.get('static_configs', [{}])[0].get('targets', []) + scrape_interval = scrape_config.get('scrape_interval', '15s') + targets_info.append({ + 'job_name': job_name, + 'targets': targets, + 'scrape_interval': scrape_interval + }) + return targets_info + +def update_prometheus_container(): + """Update the Prometheus container.""" + try: + result = subprocess.run(['bash', update_prometheus_path], check=True, text=True, capture_output=True) + print("Output:") + print(result.stdout) + + if result.stderr: + print("Errors:") + print(result.stderr) + except subprocess.CalledProcessError as e: + print(f"An error occurred while updating Prometheus container: {e}") \ No newline at end of file diff --git a/src/routes/memory_info.py b/src/routes/memory_info.py index db6fb16..f6348cc 100644 --- a/src/routes/memory_info.py +++ b/src/routes/memory_info.py @@ -2,7 +2,7 @@ from flask_login import login_required from src.config import app -from src.utils import get_cached_value, get_memory_percent, get_memory_available, get_memory_used, get_swap_memory_info +from src.utils import get_cached_value, get_memory_percent, get_memory_available, get_memory_used, get_swap_memory_info, get_flask_memory_usage from src.routes.helper.common_helper import check_page_toggle memory_info_bp = blueprints.Blueprint("memory_usage", __name__) @@ -17,6 +17,7 @@ def memory_usage(): "memory_percent": get_memory_percent(), "memory_available": memory_available, "memory_used": get_memory_used(), + 'dashboard_memory_usage': get_flask_memory_usage(), } swap_info = get_swap_memory_info() 
diff --git a/src/routes/other.py b/src/routes/other.py index ae1542f..7d2aeff 100644 --- a/src/routes/other.py +++ b/src/routes/other.py @@ -1,6 +1,6 @@ import os import subprocess -from flask import render_template, request, jsonify, flash, blueprints, redirect, url_for +from flask import render_template, request, jsonify, flash, blueprints, redirect, url_for, session from flask_login import login_required from src.models import GeneralSettings diff --git a/src/routes/prometheus.py b/src/routes/prometheus.py new file mode 100644 index 0000000..8c29a94 --- /dev/null +++ b/src/routes/prometheus.py @@ -0,0 +1,202 @@ +from flask import Blueprint, Response, request, render_template, flash, redirect, url_for +from prometheus_client import generate_latest +import os +import yaml +from collections import OrderedDict + +from src.config import app, db +from src.models import ExternalMonitornig +from src.utils import ROOT_DIR +from src.routes.helper.common_helper import admin_required +from src.routes.helper.prometheus_helper import ( + load_yaml, + save_yaml, + is_valid_file, + show_targets, + prometheus_yml_path, + update_prometheus_container, + update_prometheus_config) + + +# Define the Prometheus Blueprint +prometheus_bp = Blueprint('prometheus', __name__) + +# todo, find a better way to store the username and password +username = 'prometheus_admin' +password = 'prometheus_password' + +# Define a route to serve Prometheus metrics +@app.route('/metrics') +def metrics(): + auth = request.authorization + if not auth or not (auth.username == username and auth.password == password): + return Response('Could not verify', 401, {'WWW-Authenticate': 'Basic realm="Login required"'}) + output = generate_latest() + output = '\n'.join([line for line in output.decode().split('\n') if not line.startswith('#') and line]) + return Response(output, mimetype='text/plain') + +# POST request to manage file paths +@app.route('/external_monitoring', methods=['GET', 'POST']) +@admin_required 
+def external_monitoring():
+    if request.method == 'POST':
+        file_path = request.form.get('file_path')
+
+        if not os.path.exists(file_path):
+            flash('File path does not exist', 'danger')
+            return redirect(url_for('external_monitoring'))
+
+        # Check file path and validity
+        if not is_valid_file(file_path):
+            flash('Invalid file format. File should have key-value pairs separated by a colon.', 'danger')
+            return redirect(url_for('external_monitoring'))
+
+        # Save into the ExternalMonitoring table
+        new_task = ExternalMonitornig(file_path=file_path)
+        db.session.add(new_task)
+        db.session.commit()
+
+        return redirect(url_for('external_monitoring'))
+
+    data = ExternalMonitornig.query.all()
+    return render_template('prometheus/external_monitoring.html', data=data)
+
+# POST request to delete file path
+@app.route('/external_monitoring/delete_file_path/', methods=['POST'])
+@admin_required
+def delete_file_path(id):
+    file_path = ExternalMonitornig.query.get_or_404(id)
+    db.session.delete(file_path)
+    db.session.commit()
+    flash('File path deleted successfully!', 'success')
+    return redirect(url_for('external_monitoring'))
+
+@app.route('/configure_targets')
+@admin_required
+def configure_targets():
+    update_prometheus_config()
+    targets_info = show_targets()
+    return render_template('other/targets.html', targets_info=targets_info)
+
+@app.route('/targets/restart_prometheus')
+@admin_required
+def restart_prometheus():
+    update_prometheus_container()
+    flash('Prometheus container restarted successfully!', 'success')
+    return redirect(url_for('configure_targets'))
+
+@app.route('/targets/add_target', methods=['POST'])
+def add_target():
+    job_name = request.form.get('job_name')
+    new_target = request.form.get('new_target')
+    username = request.form.get('username')
+    password = request.form.get('password')
+    scrape_interval = request.form.get('scrape_interval', '15') + 's' # New scrape interval
+    config = load_yaml(prometheus_yml_path)
+
+    # Validate target format
+    if 
':' not in new_target:
+        flash('Invalid target format. It should be in the format ip:port.', 'danger')
+        return redirect(url_for('configure_targets'))
+
+    job_found = False
+
+    # if job name already exists, add new target to the job
+    for scrape_config in config['scrape_configs']:
+        if scrape_config['job_name'] == job_name:
+            # Append new target
+            scrape_config['static_configs'][0]['targets'].append(new_target)
+            job_found = True
+
+            # Update scrape interval
+            scrape_config['scrape_interval'] = scrape_interval
+
+            # Prepare the updated job dictionary to maintain order
+            updated_job = OrderedDict()
+            updated_job['job_name'] = scrape_config['job_name']
+            updated_job['static_configs'] = scrape_config['static_configs']
+            updated_job['scrape_interval'] = scrape_config['scrape_interval']
+            updated_job['basic_auth'] = scrape_config.get('basic_auth', None)
+
+            # Replace the existing job with the updated one
+            index = config['scrape_configs'].index(scrape_config)
+            config['scrape_configs'][index] = updated_job
+
+            break
+
+    if not job_found:
+        # Create new job entry
+        new_job = OrderedDict()
+        new_job['job_name'] = job_name
+        new_job['static_configs'] = [{'targets': [new_target]}]
+        new_job['scrape_interval'] = scrape_interval
+
+        # Add basic_auth if provided
+        if username and password:
+            new_job['basic_auth'] = {
+                'username': username,
+                'password': password
+            }
+        # Append the new job to scrape_configs
+        config['scrape_configs'].append(new_job)
+
+    for index, j in enumerate(config['scrape_configs']):
+        config['scrape_configs'][index] = OrderedDict(j)
+
+    # Save the updated config
+    save_yaml(config, prometheus_yml_path)
+    flash('Target added successfully!', 'success')
+    # update_prometheus_container()
+    return redirect(url_for('configure_targets'))
+
+@app.route('/targets/remove_target', methods=['POST'])
+@admin_required
+def remove_target():
+    job_name = request.form.get('job_name')
+    target_to_remove = request.form.get('target_to_remove')
+    config = 
load_yaml(prometheus_yml_path) + + for scrape_config in config['scrape_configs']: + if scrape_config['job_name'] == job_name: + targets = scrape_config['static_configs'][0]['targets'] + if target_to_remove in targets: + targets.remove(target_to_remove) + flash(f'Target {target_to_remove} removed successfully!', 'success') + + # Check if this was the last target, then remove the job + if not targets: # If the list is now empty + config['scrape_configs'].remove(scrape_config) + flash(f'Job {job_name} removed because it had no targets left.', 'success') + else: + flash(f'Target {target_to_remove} not found in job {job_name}.', 'warning') + break + + for index, j in enumerate(config['scrape_configs']): + config['scrape_configs'][index] = OrderedDict(j) + + else: + flash(f'Job {job_name} not found.', 'warning') + + save_yaml(config, prometheus_yml_path) + # update_prometheus_container() + return redirect(url_for('configure_targets')) + +@app.route('/targets/change_interval', methods=['POST']) +@admin_required +def change_interval(): + job_name = request.form.get('job_name') + new_interval = request.form.get('new_interval') + 's' # New scrape interval + config = load_yaml(prometheus_yml_path) + + for scrape_config in config['scrape_configs']: + if scrape_config['job_name'] == job_name: + scrape_config['scrape_interval'] = new_interval + flash('Scrape interval updated successfully!', 'success') + break + + for index, j in enumerate(config['scrape_configs']): + config['scrape_configs'][index] = OrderedDict(j) + + save_yaml(config, prometheus_yml_path) + # update_prometheus_container() + return redirect(url_for('configure_targets')) diff --git a/src/scripts/install_influx.sh b/src/scripts/install_influx.sh new file mode 100644 index 0000000..83f53b1 --- /dev/null +++ b/src/scripts/install_influx.sh @@ -0,0 +1,125 @@ +#!/bin/bash + +# how to run the script | why not with the bash command +# source install_influx.sh +# as running with source update the environment variables 
in the current shell + +# Function to get the username of the current user or logname +get_user_name() { + if [ "$(whoami)" = "root" ]; then + LOGNAME_USER=$(logname 2>/dev/null) + if [ $? -ne 0 ]; then + USER_NAME=$(grep '/home' /etc/passwd | cut -d: -f1 | tail -n 1) + else + USER_NAME=$LOGNAME_USER + fi + else + USER_NAME=$(whoami) + fi + echo "$USER_NAME" +} + +# Variables +INFLUXDB_VERSION="latest" +CONTAINER_NAME="influxdb" +NETWORK_NAME="influx_network" +DATA_DIR="./influxdb_data" +INFLUXDB_USER="admin" # Desired username +INFLUXDB_PASSWORD="password" # Desired password +INFLUXDB_ORG="systemguard" # Organization name +INFLUXDB_BUCKET="system_metrics" # Initial bucket name +USER_NAME=$(get_user_name) +INFLUXDB_TOKEN=$(openssl rand -base64 48) +BAHRC_FILE="/home/$USER_NAME/.bashrc" +DATABASE_DIR="/home/$USER_NAME/.database/$DATA_DIR" + +# Check if INFLUXDB_TOKEN is already set in .bashrc, and update if necessary +if grep -q "export INFLUXDB_TOKEN=" "$BAHRC_FILE"; then + sed -i "s|export INFLUXDB_TOKEN=.*|export INFLUXDB_TOKEN=$INFLUXDB_TOKEN|" "$BAHRC_FILE" +else + echo "export INFLUXDB_TOKEN=$INFLUXDB_TOKEN" >> "$BAHRC_FILE" +fi + +# Apply changes to the current session by sourcing .bashrc +source "$BAHRC_FILE" + +# Verify if the token was applied to the environment +if [ -z "$INFLUXDB_TOKEN" ]; then + echo "Failed to export INFLUXDB_TOKEN" + exit 1 +else + echo "INFLUXDB_TOKEN successfully set." +fi + +# Remove and recreate the data directory for InfluxDB +rm -rf "$DATABASE_DIR" +if ! sudo -u "$USER_NAME" mkdir -p "$DATABASE_DIR"; then + echo "Failed to create data directory: $DATABASE_DIR" + exit 1 +fi + +# Stop and remove existing InfluxDB container if it exists +if [ "$(docker ps -aq -f name=$CONTAINER_NAME)" ]; then + echo "Stopping existing InfluxDB container..." + docker stop $CONTAINER_NAME || { echo "Failed to stop container"; exit 1; } + + echo "Removing existing InfluxDB container..." 
+ docker rm -f $CONTAINER_NAME || { echo "Failed to remove container"; exit 1; } +fi + +# Kill any process using port 8086 +if sudo lsof -i :8086; then + echo "Killing process using port 8086..." + sudo fuser -k 8086/tcp || { echo "Failed to kill process on port 8086"; exit 1; } +fi + +# Create a Docker network if it doesn't exist +if ! docker network inspect $NETWORK_NAME >/dev/null 2>&1; then + echo "Creating Docker network..." + docker network create $NETWORK_NAME || { echo "Failed to create network"; exit 1; } +fi + +# Pull the InfluxDB Docker image +echo "Pulling InfluxDB Docker image..." +if ! docker pull influxdb:$INFLUXDB_VERSION; then + echo "Failed to pull InfluxDB image" + exit 1 +fi + +# Run the InfluxDB container with setup options +echo "Running InfluxDB container..." +docker run -d \ + --name $CONTAINER_NAME \ + --network $NETWORK_NAME \ + --restart=always \ + -p 8086:8086 \ + -v "$DATABASE_DIR:/var/lib/influxdb2" \ + -e DOCKER_INFLUXDB_INIT_MODE=setup \ + -e DOCKER_INFLUXDB_INIT_USERNAME=$INFLUXDB_USER \ + -e DOCKER_INFLUXDB_INIT_PASSWORD=$INFLUXDB_PASSWORD \ + -e DOCKER_INFLUXDB_INIT_ORG=$INFLUXDB_ORG \ + -e DOCKER_INFLUXDB_INIT_BUCKET=$INFLUXDB_BUCKET \ + -e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=$INFLUXDB_TOKEN \ + influxdb:$INFLUXDB_VERSION + +# Check if the container is running +if [ "$(docker ps -q -f name=$CONTAINER_NAME)" ]; then + echo "InfluxDB container is running successfully." +else + echo "InfluxDB container failed to start." + docker logs $CONTAINER_NAME # Fetch logs to debug why it's not starting + exit 1 +fi + +# Print InfluxDB access information +echo "" +echo "InfluxDB setup completed! 
Access it at http://localhost:8086" +echo "" +echo "InfluxDB credentials:" +echo "--------------------------------" +echo "Username: $INFLUXDB_USER" +echo "Password: $INFLUXDB_PASSWORD" +echo "Organization: $INFLUXDB_ORG" +echo "Bucket: $INFLUXDB_BUCKET" +echo "INFLUXDB_TOKEN: $INFLUXDB_TOKEN" +echo "--------------------------------" diff --git a/src/scripts/prometheus.sh b/src/scripts/prometheus.sh new file mode 100644 index 0000000..a0f1f97 --- /dev/null +++ b/src/scripts/prometheus.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +# function to get the user name +get_user_name() { + if [ "$(whoami)" = "root" ]; then + LOGNAME_USER=$(logname 2>/dev/null) # Redirect any error output to /dev/null + if [ $? -ne 0 ]; then # Check if the exit status of the last command is not 0 + USER_NAME=$(cat /etc/passwd | grep '/home' | cut -d: -f1 | tail -n 1) + else + USER_NAME=$LOGNAME_USER + fi + else + USER_NAME=$(whoami) + fi + echo "$USER_NAME" +} + +USER_NAME=$(get_user_name) +# Configuration +NETWORK_NAME="flask-prometheus-net" +CONTAINER_NAME="prometheus" +PROMETHEUS_IMAGE="prom/prometheus" +PROMETHEUS_PORT="9090" +PROMETHEUS_CONFIG_DIR="$(pwd)/prometheus_config" +PROMETHEUS_CONFIG_FILE="$PROMETHEUS_CONFIG_DIR/prometheus.yml" +PROMETHEUS_DATA_DIR="/home/$USER_NAME/.database/prometheus" +FLASK_APP_IP=$(hostname -I | cut -d' ' -f1) +FLASK_APP_PORT="5050" +SCRAPING_INTERVAL="10s" +monitor='systemguard-metrics' + +# Logging function for better readability +log() { + echo "[INFO] $1" +} + +# Ensure the config directory exists +log "Creating Prometheus config directory if it doesn't exist." +mkdir -p "$PROMETHEUS_CONFIG_DIR" +mkdir -p "$PROMETHEUS_DATA_DIR" + +# Create the prometheus.yml configuration file +log "Generating prometheus.yml configuration file." +cat > "$PROMETHEUS_CONFIG_FILE" < /dev/null + docker rm "$CONTAINER_NAME" &> /dev/null +else + log "No existing container found. Proceeding to start a new one." 
+fi
+
+# Run Prometheus container
+log "Starting Prometheus container: $CONTAINER_NAME"
+run_output=$(docker run -d \
+    --name "$CONTAINER_NAME" \
+    --network "$NETWORK_NAME" \
+    -p "$PROMETHEUS_PORT:$PROMETHEUS_PORT" \
+    --restart always \
+    -v "$PROMETHEUS_CONFIG_FILE:/etc/prometheus/prometheus.yml" \
+    -v "$PROMETHEUS_DATA_DIR:/prometheus" \
+    "$PROMETHEUS_IMAGE" 2>&1) # Capture both stdout and stderr
+
+# Check if Prometheus started successfully
+if [ $? -eq 0 ]; then
+    log "Prometheus container $CONTAINER_NAME started successfully on port $PROMETHEUS_PORT."
+    log "Prometheus config file located at $PROMETHEUS_CONFIG_FILE"
+else
+    echo "[ERROR] Failed to start Prometheus container: $run_output"
+    echo "[ERROR] Checking logs for container: $CONTAINER_NAME"
+    exit 1
+fi
diff --git a/src/scripts/update_prometheus.sh b/src/scripts/update_prometheus.sh
new file mode 100644
index 0000000..c8054f2
--- /dev/null
+++ b/src/scripts/update_prometheus.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# function to get the user name
+get_user_name() {
+    if [ "$(whoami)" = "root" ]; then
+        LOGNAME_USER=$(logname 2>/dev/null) # Redirect any error output to /dev/null
+        if [ $? 
-ne 0 ]; then # Check if the exit status of the last command is not 0
+            USER_NAME=$(cat /etc/passwd | grep '/home' | cut -d: -f1 | tail -n 1)
+        else
+            USER_NAME=$LOGNAME_USER
+        fi
+    else
+        USER_NAME=$(whoami)
+    fi
+    echo "$USER_NAME"
+}
+
+USER_NAME=$(get_user_name)
+# Configuration
+CURRENT_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+CURRENT_DIR="$CURRENT_SCRIPT_DIR"
+ROOT_DIR="$(dirname "$(dirname "$CURRENT_DIR")")"
+PROMETHEUS_CONFIG_DIR="$ROOT_DIR/prometheus_config"
+CONTAINER_NAME="prometheus"
+PROMETHEUS_CONFIG="$PROMETHEUS_CONFIG_DIR/prometheus.yml"
+PROMETHEUS_DATA_DIR="/home/$USER_NAME/.database/prometheus"
+PROMETHEUS_IMAGE="prom/prometheus" # Add your image name if needed
+NETWORK_NAME="flask-prometheus-net" # Specify your network name
+PROMETHEUS_PORT="9090" # Specify your port
+
+# Logging function for better readability
+log() {
+    echo "[INFO] $1"
+}
+
+# Check if Prometheus container is running
+if [ "$(docker ps -q -f name="$CONTAINER_NAME")" ]; then
+    log "Stopping container: $CONTAINER_NAME"
+    docker stop "$CONTAINER_NAME"
+
+    log "Starting container: $CONTAINER_NAME with new configuration"
+    docker start "$CONTAINER_NAME"
+
+    log "Reloading configuration for container: $CONTAINER_NAME"
+    docker exec "$CONTAINER_NAME" kill -HUP 1 # Send SIGHUP to reload config
+else
+    log "Container $CONTAINER_NAME is not running. Starting a new container."
+    docker run -d \
+        --name "$CONTAINER_NAME" \
+        --network "$NETWORK_NAME" \
+        -p "$PROMETHEUS_PORT:$PROMETHEUS_PORT" \
+        --restart always \
+        -v "$PROMETHEUS_CONFIG:/etc/prometheus/prometheus.yml" \
+        -v "$PROMETHEUS_DATA_DIR:/prometheus" \
+        "$PROMETHEUS_IMAGE" &> /dev/null
+
+    log "Prometheus container started successfully."
+fi
+
+log "Prometheus container has been updated with the new configuration."
diff --git a/src/static/css/dashboard_network.css b/src/static/css/dashboard_network.css index bb30156..15e47c6 100644 --- a/src/static/css/dashboard_network.css +++ b/src/static/css/dashboard_network.css @@ -1,101 +1,97 @@ -.container { - max-width: 1200px; - margin: 0 auto; - padding: 20px; + + +h1 { text-align: center; - color: #fff; + color: #2c3e50; + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + margin-bottom: 20px; } -.card-container { - display: flex; - flex-wrap: wrap; - gap: 20px; - justify-content: center; +table { + width: 100%; + border-collapse: collapse; + margin-bottom: 20px; + background-color: white; } -.card { - background-color: var(--color-username); - border: 1px solid #ddd; - border-radius: 8px; - padding: 20px; - width: calc(33.333% - 40px); /* Adjust based on gaps */ - box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); - text-decoration: none; - color: #333; - transition: transform 0.2s; +table th, table td { + padding: 15px; + border: 1px solid #e1e4e8; + text-align: left; + font-family: 'Arial', sans-serif; } -.card:hover { - transform: translateX(-10px); /* Slight lift effect */ - box-shadow: 0 10px 20px #378fe7; - transition: transform 0.3s ease, box-shadow 0.3s ease; /* Smooth transition */ +table th { + background-color: #007bff; + color: white; + font-weight: bold; + text-transform: uppercase; } -.card h2 { - margin-top: 0; - font-size: 1.5em; - color: #fff; +table tr:hover { + background-color: #f1f1f1; } -.card p { - margin: 5px 0; - color: #fff; +table td { + font-size: 14px; + color: #333; } -.card a { - color: #fff; - text-decoration: none; +table td.health-up { + color: green; + font-weight: bold; } -.add-server-link { - margin-top: 20px; +table td.health-down { + color: red; + font-weight: bold; } -.add-server-link .btn { - font-size: 1.2em; - padding: 10px 20px; - border-radius: 5px; - background-color: #007bff; - color: white; - text-decoration: none; - transition: background-color 0.3s; -} - -.add-server-link 
.btn:hover { - background-color: #0056b3; +.error-message { + color: red; + font-style: italic; + text-align: center; } -/* Responsive layout */ -@media (max-width: 992px) { - .card { - width: calc(50% - 30px); /* Two cards per line on medium screens */ - } +.text-center { + text-align: center; + margin-top: 30px; } -@media (max-width: 768px) { - .card { - width: calc(100% - 40px); /* One card per line on small screens */ - } +.text-center h2 { + color: #2c3e50; + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + margin-bottom: 20px; } -.card-actions { - margin-top: 10px; +form input[type="text"] { + padding: 10px; + margin: 5px; + width: 200px; + border: 1px solid #ced4da; + border-radius: 4px; + font-size: 14px; } -.card-actions .btn { - margin-right: 5px; - padding: 5px 10px; +form input[type="submit"] { + padding: 10px 20px; + background-color: #28a745; + color: white; + border: none; + border-radius: 4px; font-size: 14px; + cursor: pointer; + transition: background-color 0.3s ease; } -.card-actions .btn-warning { - background-color: #ffc107; - border-color: #ffc107; - color: #fff; +form input[type="submit"]:hover { + background-color: #218838; } -.card-actions .btn-danger { - background-color: #dc3545; - border-color: #dc3545; - color: #fff; +footer { + text-align: center; + margin-top: 20px; + color: #666; + font-size: 12px; } + diff --git a/src/static/css/external_monitoring.css b/src/static/css/external_monitoring.css new file mode 100644 index 0000000..40a9575 --- /dev/null +++ b/src/static/css/external_monitoring.css @@ -0,0 +1,41 @@ +body { + background-color: #f8f9fa; +} + +.container { + max-width: 600px; + margin: auto; +} + +.card { + border: 1px solid #ced4da; + border-radius: 0.5rem; +} + +.card-title { + font-weight: bold; +} + +.message { + color: #28a745; /* Success message color */ +} + +.list-group-item { + background-color: #ffffff; + border: 1px solid #e9ecef; + border-radius: 0.5rem; +} + +.list-group-item:hover { + 
background-color: #f1f1f1; +} + +.btn-primary { + background-color: #007bff; + border: none; +} + +.btn-danger { + background-color: #dc3545; + border: none; +} diff --git a/src/static/css/graphs.css b/src/static/css/graphs.css index 258b39d..219ae42 100644 --- a/src/static/css/graphs.css +++ b/src/static/css/graphs.css @@ -278,3 +278,11 @@ button i.fas { #refreshCpuFrequencyTime:hover, #refreshCurrentTempTime:hover { background-color: #2980b9; /* Darker blue on hover for specific buttons */ } + + +.graph { + display: flex; + justify-content: center; + align-items: center; + margin-top: 20px; +} \ No newline at end of file diff --git a/src/static/css/os_info.css b/src/static/css/os_info.css index 950d07b..72d8a72 100644 --- a/src/static/css/os_info.css +++ b/src/static/css/os_info.css @@ -1,46 +1,62 @@ -/* - +/* General container styles */ .container { - background-color: #ffffff; - padding: 20px; - border-radius: 8px; - box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); -} */ + background-color: #f8f9fa; + padding: 30px; + border-radius: 12px; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); +} +/* Title styling */ h1 { - font-size: 1.75rem; - font-weight: 600; - color: #333; + font-size: 2.25rem; + font-weight: 700; + color: #495057; + text-align: center; + margin-bottom: 30px; } +/* Table styles */ .table { + background-color: #ffffff; + border-radius: 8px; + overflow: hidden; + box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); margin-top: 20px; border: none; } -.table th { - background-color: #343a40; +/* Header of the table */ +.table thead th { + background-color: #6c757d; color: #fff; - border: none; text-transform: uppercase; font-size: 0.875rem; + letter-spacing: 0.05em; + border-bottom: none; + padding: 15px; + text-align: left; } -.table td { +/* Table body cells */ +.table tbody td { + padding: 15px; vertical-align: middle; font-size: 0.875rem; border-color: #dee2e6; } -.table td i { +/* Table icons */ +.table tbody td i { margin-right: 8px; color: #007bff; /* Icon color 
*/ } +/* Hover effect for table rows */ .table-hover tbody tr:hover { - background-color: #f1f1f1; /* Hover effect */ + background-color: #f1f1f1; } +/* Alternating row colors for table */ .table-striped tbody tr:nth-of-type(odd) { - background-color: #f9f9f9; /* Light gray for alternating rows */ + background-color: #f9f9f9; } diff --git a/src/static/css/style.css b/src/static/css/style.css index a06d4bd..6ae24b6 100644 --- a/src/static/css/style.css +++ b/src/static/css/style.css @@ -46,6 +46,14 @@ body { background-position: center center; } +.content { + margin: 0 auto; /* Center the content */ + padding: 1rem 4rem; + border-radius: 8px; /* Rounded corners */ + font-family: 'Arial', sans-serif; /* Font family */ + line-height: 1.6; /* Improve readability */ +} + .header { background-color: var(--color-dark); color: var(--color-white); diff --git a/src/static/css/targets.css b/src/static/css/targets.css new file mode 100644 index 0000000..5310c6c --- /dev/null +++ b/src/static/css/targets.css @@ -0,0 +1,145 @@ +/* General page styles */ +.container { + max-width: 1200px; + margin: 20px auto; + padding: 20px; + border-radius: 8px; +} + +h1, h2 { + font-family: 'Roboto', sans-serif; + color: #333; + margin-bottom: 20px; + text-align: center; +} + +.section { + margin-top: 30px; + text-align: center; +} + +/* Table styles */ +.table-wrapper { + overflow-x: auto; +} + +.modern-table { + width: 100%; + border-collapse: collapse; + font-family: 'Roboto', sans-serif; +} + +.modern-table th, .modern-table td { + padding: 12px 15px; + text-align: left; + border-bottom: 1px solid #ddd; +} + +.modern-table th { + background-color: #007bff; + color: #fff; + text-transform: uppercase; + letter-spacing: 0.03em; +} + +.modern-table tr:nth-child(even) { + background-color: #f9f9f9; +} + +.modern-table tr:hover { + background-color: #f1f1f1; +} + +.target-list { + list-style-type: none; + padding-left: 0; +} + +.target-list li { + margin-bottom: 8px; +} + +/* Form styles */ 
+form.inline-form { + display: inline; +} + +.input-field { + padding: 8px; + margin: 5px; + border: 1px solid #ccc; + border-radius: 4px; + width: 200px; +} + +.input-field:focus { + border-color: #007bff; + outline: none; + transition: border 0.2s; +} + +/* Button styles */ +.btn-primary, .btn-danger, .btn-warning { + padding: 10px 15px; + border: none; + border-radius: 4px; + color: white; + cursor: pointer; + transition: background-color 0.3s ease; +} + +.btn-primary { + background-color: #007bff; +} + +.btn-primary:hover { + background-color: #0056b3; +} + +.btn-danger { + background-color: #dc3545; +} + +.btn-danger:hover { + background-color: #c82333; +} + +.btn-warning { + background-color: #ffc107; +} + +.btn-warning:hover { + background-color: #e0a800; +} + +/* Messages */ +.flash { + padding: 10px; + margin: 10px 0; + border-radius: 4px; +} + +.flash.success { + background-color: #d4edda; + color: #155724; +} + +.flash.danger { + background-color: #f8d7da; + color: #721c24; +} + +/* Add New Target Form */ +.add-target-form { + margin: 0 auto; + max-width: 600px; + text-align: center; +} + +.add-target-form .input-field { + width: 70%; +} + +.add-target-form input[type="submit"] { + width: 150px; +} diff --git a/src/static/images/tracking.png b/src/static/images/tracking.png new file mode 100644 index 0000000..a6a0dcc Binary files /dev/null and b/src/static/images/tracking.png differ diff --git a/src/static/js/cardChart.js b/src/static/js/cardChart.js new file mode 100644 index 0000000..9838dd5 --- /dev/null +++ b/src/static/js/cardChart.js @@ -0,0 +1,63 @@ +const maxDataPoints = 500; // Number of data points to show on the chart + +// Generalized function to create and update a line chart +function createLineChart(canvasId, label, dataStorage, updateFunc) { + console.log(dataStorage); + const ctx = document.getElementById(canvasId).getContext('2d'); + const chart = new Chart(ctx, { + type: 'line', + data: { + labels: Array(maxDataPoints).fill(''), // 
Empty labels for time intervals + datasets: [{ + label: label, + data: dataStorage, + borderColor: 'red', + borderWidth: 2, + fill: true, + opacity: 0.5, + tension: 0.6, // Smooth line + pointRadius: 0 // Removes the round tip (data points) on the line + }] + }, + options: { + scales: { + x: { + display: false // Hide the x-axis labels and grid + }, + y: { + display: false, // Hide the y-axis labels and grid + beginAtZero: true, + max: 100 // Assuming max value is 100 for CPU and memory usage + } + }, + plugins: { + legend: { + display: false // Hide the legend + } + }, + animation: false, // Disable animation for smooth updates + responsive: true + } + }); + + // Function to update the chart with new data + function updateChart(newUsage) { + // Add the new data point + dataStorage.push(newUsage); + + // Keep the data array length within the maxDataPoints + if (dataStorage.length > maxDataPoints) { + dataStorage.shift(); + } + + // Update the chart + chart.update(); + } + + // Set interval to fetch and update data every 2 seconds + setInterval(() => { + const newUsage = updateFunc(); // Call the update function to get the current usage + console.log(`${label} Usage:`, newUsage); + updateChart(newUsage); + }, 300); +} diff --git a/src/static/js/graphs.js b/src/static/js/graphs.js index ba2f32a..3736b22 100644 --- a/src/static/js/graphs.js +++ b/src/static/js/graphs.js @@ -1,11 +1,7 @@ // Variables to store chart instances let cpuTimeChart, memoryTimeChart, batteryTimeChart, networkTimeChart, dashboardMemoryTimeChart, cpuFrequencyTimeChart, currentTempTimeChart; -function stringToDate(dateString) { - const date = new Date(dateString); - return date; -} - +// Function to fetch data and render charts function fetchDataAndRenderCharts() { // Retrieve stored filter value from local storage or set default value const storedFilterValue = localStorage.getItem('filterValue') || 5; @@ -16,7 +12,7 @@ function fetchDataAndRenderCharts() { console.log('Stored Filter Value:', 
storedFilterValue); // Fetch data with the selected time filter - fetch(`/api/v1/graphs_data?filter=${storedFilterValue}`) + fetch(`/api/v1/prometheus/graphs_data?filter=${storedFilterValue}`) .then(response => response.json()) .then(data => { const cpuData = data.cpu; @@ -27,13 +23,14 @@ function fetchDataAndRenderCharts() { const dashboardMemoryUsageData = data.dashboard_memory_usage; const cpuFrequencyData = data.cpu_frequency; const currentTempData = data.current_temp; + + // current time from backend to display const currentTime = data.current_time; const timeZoneName = Intl.DateTimeFormat().resolvedOptions().timeZone; + displayTimeAndTimeZone(currentTime, timeZoneName); // Format the time data using the currentTime from backend - const timeData = data.time.map(time => formatDate(time, currentTime)); - - displayTimeAndTimeZone(currentTime, timeZoneName); + const timeData = data.time.map(time => formatDate(time, timeZoneName)); // Use timeZoneName from displayTimeAndTimeZone function createCharts(cpuData, timeData, memoryData, batteryData, networkSentData, networkReceivedData, dashboardMemoryUsageData, cpuFrequencyData, currentTempData); }) @@ -86,52 +83,25 @@ document.getElementById('refreshCurrentTempTime').addEventListener('click', () = fetchDataAndRenderCharts(); }); - -function formatDate(dateString, currentTime) { - const date = new Date(dateString); - const now = new Date(currentTime); // Use currentTime from backend - - // Helper function to format with leading zeros - const pad = (num) => String(num).padStart(2, '0'); - - // Manually extract UTC components - const day = pad(date.getUTCDate()); // e.g., 09 - const month = pad(date.getUTCMonth() + 1); // e.g., 04 - const year = date.getUTCFullYear(); // e.g., 2021 - const hours = pad(date.getUTCHours()); // e.g., 11 - const minutes = pad(date.getUTCMinutes()); // e.g., 33 - - - // Calculate time differences - const diffDays = Math.floor((now - date) / (1000 * 60 * 60 * 24)); - const diffWeeks = 
Math.floor(diffDays / 7); - const diffMonths = now.getMonth() - date.getUTCMonth() + (12 * (now.getFullYear() - date.getUTCFullYear())); - const diffYears = now.getFullYear() - date.getUTCFullYear(); - - // Determine the label based on time differences - // // Reset the time to 12am for the date comparison - // date.setUTCHours(0, 0, 0, 0); - // now.setUTCHours(0, 0, 0, 0); - - // if (diffDays === 0) { - // return `Today ${hours}:${minutes}`; - // } else if (diffDays === 1) { - // return `Yesterday ${hours}:${minutes}`; - // } else if (diffDays <= 3) { - // return `${diffDays} Days Ago ${hours}:${minutes}`; - // } else if (diffDays <= 7) { - // return `${Math.ceil(diffDays / 7)} Week${diffDays > 7 ? 's' : ''} Ago ${hours}:${minutes}`; - // } else if (diffDays <= 30) { - // return `${Math.ceil(diffDays / 7)} Weeks Ago ${hours}:${minutes}`; - // } else if (diffMonths < 12) { - // return `${diffMonths} Month${diffMonths > 1 ? 's' : ''} Ago ${hours}:${minutes}`; - // } else if (diffYears < 2) { - // return `Last Year ${hours}:${minutes}`; - // } else { - // return `${year}/${month}/${day} ${hours}:${minutes}`; - // } - - return `${year}/${month}/${day} ${hours}:${minutes}`; +function formatDate(utcTime, timeZone) { + const date = new Date(utcTime); + + // Format options can be adjusted for your needs + const options = { + timeZone: timeZone, + year: 'numeric', + month: '2-digit', + day: '2-digit', + hour: '2-digit', + minute: '2-digit', + hour12: false // Change to true if you prefer 12-hour format + }; + + // Generate formatted string + const formattedDate = date.toLocaleString('en-US', options); + + // For better graph display, you might want just the date and hour + return formattedDate.replace(/, (\d{2}:\d{2})/, ' $1'); // Example: "09/22/2024 14:30" } function displayTimeAndTimeZone(currentTime, timeZoneName) { @@ -159,34 +129,47 @@ function createChart(ctx, labels, datasets, yLabel) { ctx.chart.destroy(); // Destroy the existing chart if it exists } + const 
allDataPoints = datasets.flatMap(dataset => dataset.data); + const minY = Math.min(...allDataPoints.filter(value => typeof value === 'number')); + const maxY = Math.max(...allDataPoints.filter(value => typeof value === 'number')); + ctx.chart = new Chart(ctx, { type: 'line', data: { - labels: labels, // Use your timeData directly as labels - datasets: datasets + labels: labels, + datasets: datasets.map(dataset => ({ + ...dataset, + borderWidth: 2, + fill: false, + tension: 0.3, + pointRadius: 5, + pointHoverRadius: 7, + backgroundColor: dataset.backgroundColor || 'rgba(75, 192, 192, 0.2)', + borderColor: dataset.borderColor || 'rgba(75, 192, 192, 1)', + })), }, options: { + scales: { x: { - type: 'category', // Use 'category' scale to treat time as strings + type: 'category', title: { display: true, text: 'Time' }, ticks: { - autoSkip: true, // Automatically skip some labels to prevent overlap - maxTicksLimit: 10, // Maximum number of ticks to display - maxRotation: 20, // Prevent rotating the labels for better readability + autoSkip: true, + maxTicksLimit: 6, + maxRotation: 0, minRotation: 0, - } }, y: { - beginAtZero: true, + beginAtZero: minY < 0 ? 
false : true, // Adjust y-axis based on data title: { display: true, - text: yLabel - } + text: yLabel, + }, } } } diff --git a/src/static/js/graphs_experimental.js b/src/static/js/graphs_experimental.js new file mode 100644 index 0000000..787c848 --- /dev/null +++ b/src/static/js/graphs_experimental.js @@ -0,0 +1,330 @@ +// Variables to store chart instances +let cpuTimeChart, memoryTimeChart, batteryTimeChart, networkTimeChart, dashboardMemoryTimeChart, cpuFrequencyTimeChart, currentTempTimeChart; + +// Function to fetch data and render charts +function fetchDataAndRenderCharts() { + // Retrieve stored filter value from local storage or set default value + const storedFilterValue = localStorage.getItem('filterValue') || 5; + + // Set the filter element value to the stored filter value + document.getElementById('timeFilter').value = storedFilterValue; + + console.log('Stored Filter Value:', storedFilterValue); + + // Fetch data with the selected time filter + fetch(`/api/v1/prometheus/graphs_data/targets?filter=${storedFilterValue}`) + .then(response => response.json()) + .then(data => { + const cpuData = data.cpu; + const memoryData = data.memory; + const batteryData = data.battery; + const networkSentData = data.network_sent; + const networkReceivedData = data.network_received; + const dashboardMemoryUsageData = data.dashboard_memory_usage; + const cpuFrequencyData = data.cpu_frequency; + const currentTempData = data.current_temp; + const currentTime = data.current_time; + const timeZoneName = Intl.DateTimeFormat().resolvedOptions().timeZone; + + // Format the time data using the currentTime from backend + const timeData = data.time.map(time => formatDate(time, timeZoneName)); // Use timeZoneName from displayTimeAndTimeZone function + + displayTimeAndTimeZone(currentTime, timeZoneName); + + createCharts(cpuData, timeData, memoryData, batteryData, networkSentData, networkReceivedData, dashboardMemoryUsageData, cpuFrequencyData, currentTempData); + }) + 
.catch(error => console.error('Error fetching data:', error)); +} + +// Add event listener to refresh data when filter value changes +document.getElementById('timeFilter').addEventListener('change', (event) => { + localStorage.setItem('filterValue', event.target.value); + fetchDataAndRenderCharts(); +}); + +function formatDate(utcTime, timeZone) { + const date = new Date(utcTime); + + // Format options can be adjusted for your needs + const options = { + timeZone: timeZone, + year: 'numeric', + month: '2-digit', + day: '2-digit', + hour: '2-digit', + minute: '2-digit', + hour12: false // Change to true if you prefer 12-hour format + }; + + // Generate formatted string + const formattedDate = date.toLocaleString('en-US', options); + + // For better graph display, you might want just the date and hour + return formattedDate.replace(/, (\d{2}:\d{2})/, ' $1'); // Example: "09/22/2024 14:30" +} + +function displayTimeAndTimeZone(currentTime, timeZoneName) { + // Display the current time and timezone + document.getElementById('currentTime').textContent = `Current Time: ${currentTime}`; + document.getElementById('timeZoneName').textContent = `Time Zone: ${timeZoneName}`; + // Update currentTime by 1 second every second + setInterval(() => { + const date = new Date(currentTime); + date.setSeconds(date.getSeconds() + 1); + currentTime = date.toISOString(); + document.getElementById('currentTime').textContent = `Current Time: ${currentTime}`; + }, 1000); +} + +// add the refresh button to fetch the data +document.getElementById('refreshData').addEventListener('click', () => { + fetchDataAndRenderCharts(); +}); + + +// Function to create a chart with multiple datasets +function createChart(ctx, labels, datasets, yLabel) { + if (ctx.chart) { + ctx.chart.destroy(); // Destroy the existing chart if it exists + } + + // Ensure the parent element is positioned relatively + ctx.canvas.parentNode.style.position = 'relative'; + + // add h2 element to the parent node + const h2 = 
document.createElement('h2'); + h2.innerHTML = ` ${yLabel}`; + //css top and left + h2.style.position = 'absolute'; + h2.style.top = '25px'; + h2.style.left = '30px'; + ctx.canvas.parentNode.insertBefore(h2, ctx.canvas); + + + // Create or update download button + getOrCreateButton(ctx.canvas.parentNode, 'Download Chart', 'download-button', (e) => { + const fileName = `${yLabel.replace(/\s+/g, '_')}_chart.png`; // Dynamic filename + console.log('Download button clicked'); + const link = document.createElement('a'); + link.href = ctx.chart.toBase64Image(); + link.download = fileName; + link.click(); + }, { top: '10px', right: '10px' }); + + // Create or update refresh button + getOrCreateButton(ctx.canvas.parentNode, 'Refresh Data', 'refresh-button', () => { + fetchDataAndRenderCharts(); + }, { top: '10px', right: '200px' }); + + const allDataPoints = datasets.flatMap(dataset => dataset.data); + const minY = Math.min(...allDataPoints.filter(value => typeof value === 'number')); + const maxY = Math.max(...allDataPoints.filter(value => typeof value === 'number')); + + ctx.chart = new Chart(ctx, { + type: 'line', + data: { + labels: labels, + datasets: datasets.map(dataset => ({ + ...dataset, + borderWidth: 2, + fill: false, + tension: 0.3, + pointRadius: 5, + pointHoverRadius: 7, + backgroundColor: dataset.backgroundColor || 'rgba(75, 192, 192, 0.2)', + borderColor: dataset.borderColor || 'rgba(75, 192, 192, 1)', + })), + }, + options: { + responsive: true, + scales: { + x: { + type: 'category', + // title: { + // display: true, + // text: 'Time', + // font: { + // size: 16, + // weight: 'bold' + // }, + // padding: { top: 10, left: 0, right: 0, bottom: 0 } + // }, + ticks: { + autoSkip: true, + maxTicksLimit: 6, + maxRotation: 0, + minRotation: 0, + padding: 10, + font: { + size: 12, + weight: 'bold' + } + + } + }, + y: { + beginAtZero: minY < 0 ? 
false : true, + title: { + display: true, + text: yLabel, + font: { + size: 16, + weight: 'bold' + }, + }, + } + } + } + }); +} + +// Helper function to create or retrieve a button +function getOrCreateButton(parent, text, className, onClick, position) { + let button = parent.querySelector(`.${className}`); + if (!button) { + button = document.createElement('button'); + button.classList.add(className); + button.textContent = text; + button.style.position = 'absolute'; + button.style.zIndex = '5'; + Object.assign(button.style, position); // Apply positioning styles + parent.appendChild(button); + } + button.onclick = onClick; // Update the click handler + return button; +} + +// Function to create charts with the fetched data +function createCharts(cpuData, timeData, memoryData, batteryData, networkSentData, networkReceivedData, dashboardMemoryUsageData, cpuFrequencyData, currentTempData) { + + // Function to generate dynamic colors based on index + function generateColor(index) { + const hue = (index * 40) % 360; // Adjust hue for unique colors + return { + borderColor: `hsl(${hue}, 70%, 50%)`, // Border color + backgroundColor: `hsl(${hue}, 70%, 80%)` // Background color + }; + } + + // CPU Usage Chart + const ctxCpu = document.getElementById('cpuTimeChart').getContext('2d'); + const cpuDatasets = cpuData.map((cpu, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `CPU Usage (%) ${cpu.metric.instance}`, + data: cpu.values, + borderColor: borderColor, + backgroundColor: backgroundColor, + tension: 0.4 + }; + }); + + createChart(ctxCpu, timeData, cpuDatasets, 'CPU Usage (%)'); + + // Memory Usage Chart + const ctxMemory = document.getElementById('memoryTimeChart').getContext('2d'); + const memoryDatasets = memoryData.map((memory, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `Memory Usage (%) ${memory.metric.instance}`, + data: memory.values, + borderColor: borderColor, 
+ backgroundColor: backgroundColor, + tension: 0.4 + }; + }); + + createChart(ctxMemory, timeData, memoryDatasets, 'Memory Usage (%)'); + + // Battery Percentage Chart + const ctxBattery = document.getElementById('batteryTimeChart').getContext('2d'); + const batteryDatasets = batteryData.map((battery, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `Battery Usage (%) ${battery.metric.instance}`, + data: battery.values, + borderColor: borderColor, + backgroundColor: backgroundColor, + tension: 0.4 + }; + }); + + createChart(ctxBattery, timeData, batteryDatasets, 'Power Usage (%)'); + + // Network Sent & Received Chart + const ctxNetwork = document.getElementById('networkTimeChart').getContext('2d'); + const networkDatasets = [ + ...networkSentData.map((networkSent, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `Network Sent (MB) ${networkSent.metric.instance}`, + data: networkSent.values, + borderColor: borderColor, + backgroundColor: backgroundColor, + tension: 0.4 + }; + }), + ...networkReceivedData.map((networkReceived, index) => { + const { borderColor, backgroundColor } = generateColor(index + networkSentData.length); + return { + label: `Network Received (MB) ${networkReceived.metric.instance}`, + data: networkReceived.values, + borderColor: borderColor, + backgroundColor: backgroundColor, + tension: 0.4 + }; + }) + ]; + + createChart(ctxNetwork, timeData, networkDatasets, 'Data Transferred (MB)'); + + // Dashboard Memory Usage Chart + const ctxDashboardMemory = document.getElementById('dashboardMemoryTimeChart').getContext('2d'); + const dashboardMemoryDatasets = dashboardMemoryUsageData.map((dashboardMemory, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `Dashboard Memory Usage (%) ${dashboardMemory.metric.instance}`, + data: dashboardMemory.values, + borderColor: borderColor, + backgroundColor: 
backgroundColor, + tension: 0.4 + }; + }); + + createChart(ctxDashboardMemory, timeData, dashboardMemoryDatasets, 'Dashboard Memory Usage (%)'); + + // CPU Frequency Chart + const ctxCpuFrequency = document.getElementById('cpuFrequencyTimeChart').getContext('2d'); + const cpuFrequencyDatasets = cpuFrequencyData.map((cpuFrequency, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `CPU Frequency (GHz) ${cpuFrequency.metric.instance}`, + data: cpuFrequency.values, + borderColor: borderColor, + backgroundColor: backgroundColor, + tension: 0.4 + }; + }); + + createChart(ctxCpuFrequency, timeData, cpuFrequencyDatasets, 'CPU Frequency (GHz)'); + + // Current Temperature Chart + const ctxCurrentTemp = document.getElementById('currentTempTimeChart').getContext('2d'); + const currentTempDatasets = currentTempData.map((currentTemp, index) => { + const { borderColor, backgroundColor } = generateColor(index); + return { + label: `Current Temperature (°C) ${currentTemp.metric.instance}`, + data: currentTemp.values, + borderColor: borderColor, + backgroundColor: backgroundColor, + tension: 0.4 + }; + }); + + createChart(ctxCurrentTemp, timeData, currentTempDatasets, 'Current Temperature (°C)'); +} + +// Fetch initial data when the page loads +document.addEventListener('DOMContentLoaded', () => { + fetchDataAndRenderCharts(); +}); diff --git a/src/static/js/refreshCardData.js b/src/static/js/refreshCardData.js index e28743b..b86065c 100644 --- a/src/static/js/refreshCardData.js +++ b/src/static/js/refreshCardData.js @@ -174,7 +174,7 @@ async function updateBatteryIcon(iconSelector, batteryStatusKey, batteryPercentK // Refresh all card data async function refreshData() { - const data = await queueRequest('/api/system-info'); + const data = await queueRequest('/api/v1/system-info'); if (!data) return; updateCard('.bg-disk', 'disk_percent', data, '%', '.disk-bar'); diff --git a/src/static/js/targets.js b/src/static/js/targets.js new file 
mode 100644 index 0000000..2ae6b39 --- /dev/null +++ b/src/static/js/targets.js @@ -0,0 +1,107 @@ +// Function to fetch and populate target data +async function fetchTargetData() { + try { + const response = await fetch('/api/v1/targets'); // Replace with the actual URL if different + const data = await response.json(); + + const targetTableBody = document.getElementById('target-table-body'); + + // Check if there are active targets + if (data.active_targets && data.active_targets.length > 0) { + data.active_targets.forEach(target => { + const row = document.createElement('tr'); + + const jobCell = document.createElement('td'); + jobCell.textContent = target.labels.job; + row.appendChild(jobCell); + + const instanceCell = document.createElement('td'); + instanceCell.textContent = target.labels.instance; + row.appendChild(instanceCell); + + const healthCell = document.createElement('td'); + healthCell.textContent = target.health; + healthCell.className = target.health === 'up' ? 'health-up' : 'health-down'; + row.appendChild(healthCell); + + const lastScrapeCell = document.createElement('td'); + lastScrapeCell.textContent = new Date(target.lastScrape).toLocaleString(); + row.appendChild(lastScrapeCell); + + const lastErrorCell = document.createElement('td'); + lastErrorCell.textContent = target.lastError ? 
target.lastError : 'No Errors'; + row.appendChild(lastErrorCell); + + const scrapeUrlCell = document.createElement('td'); + scrapeUrlCell.textContent = target.scrapeUrl; + row.appendChild(scrapeUrlCell); + + const scrapeDurationCell = document.createElement('td'); + scrapeDurationCell.textContent = target.lastScrapeDuration.toFixed(3); + row.appendChild(scrapeDurationCell); + + // __scrape_interval__ + const scrapeIntervalCell = document.createElement('td'); + scrapeIntervalCell.textContent = target.discoveredLabels.__scrape_interval__; + row.appendChild(scrapeIntervalCell); + + if (target.health === 'up') { + const dashboardCell = document.createElement('td'); + const dashboardLink = document.createElement('a'); + dashboardLink.href = target.scrapeUrl.replace(/\/metrics$/, ''); + dashboardLink.textContent = 'View Dashboard'; + dashboardLink.target = '_blank'; // Open link in a new tab + dashboardLink.rel = 'noopener noreferrer'; // Security measure for external links + dashboardCell.appendChild(dashboardLink); + row.appendChild(dashboardCell); + } else { + const emptyCell = document.createElement('td'); + row.appendChild(emptyCell); + } + + // remove the instance from the prometheus + + const removeCell = document.createElement('td'); + const removeForm = document.createElement('form'); + removeForm.action = '/targets/remove_target'; // Replace with actual endpoint if needed + removeForm.method = 'POST'; + removeForm.style.display = 'inline'; + + const jobNameInput = document.createElement('input'); + jobNameInput.type = 'hidden'; + jobNameInput.name = 'job_name'; + jobNameInput.value = target.labels.job; + removeForm.appendChild(jobNameInput); + + // const targetToRemoveInput = document.createElement('input'); + // targetToRemoveInput.type = 'hidden'; + // targetToRemoveInput.name = 'target_to_remove'; + // targetToRemoveInput.value = target.labels.instance; + // removeForm.appendChild(targetToRemoveInput); + + // const submitButton = 
document.createElement('input'); + // submitButton.type = 'submit'; + // submitButton.value = 'Remove'; + // submitButton.onclick = function () { + // return confirm('Are you sure you want to remove this target?'); + // }; + // removeForm.appendChild(submitButton); + + // removeCell.appendChild(removeForm); + // row.appendChild(removeCell); + + // Append the row to the table body + targetTableBody.appendChild(row); + }); + } else { + targetTableBody.innerHTML = 'No active targets found'; + } + } catch (error) { + console.error('Error fetching target data:', error); + const targetTableBody = document.getElementById('target-table-body'); + targetTableBody.innerHTML = 'Failed to fetch targets'; + } +} + +// Fetch the data when the page loads +document.addEventListener('DOMContentLoaded', fetchTargetData); \ No newline at end of file diff --git a/src/static/js/update.js b/src/static/js/update.js new file mode 100644 index 0000000..749d8e1 --- /dev/null +++ b/src/static/js/update.js @@ -0,0 +1,23 @@ +document.getElementById('updateButton').addEventListener('click', function (event) { + event.preventDefault(); // Prevent the default link behavior + + fetch('{{ url_for("update_git_version") }}', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + // Add any additional headers if needed + }, + }) + .then(response => response.json()) + .then(data => { + if (data.status === 'success') { + alert('Update successful: ' + data.message); + } else { + alert('Update failed: ' + data.message); + } + }) + .catch(error => { + console.error('Error:', error); + alert('An error occurred while updating.'); + }); +}); \ No newline at end of file diff --git a/src/templates/base/base.html b/src/templates/base/base.html index 19b960a..2c4cd8f 100644 --- a/src/templates/base/base.html +++ b/src/templates/base/base.html @@ -15,7 +15,7 @@ {% include 'ext/navbar.html' %} -
+
{% block content %} {% endblock %}
diff --git a/src/templates/card_comp/cpu/current_temp.html b/src/templates/card_comp/cpu/current_temp.html index 0a2b394..e6035ff 100644 --- a/src/templates/card_comp/cpu/current_temp.html +++ b/src/templates/card_comp/cpu/current_temp.html @@ -8,6 +8,16 @@
CPU Temperature
+ {% endif %} + + + diff --git a/src/templates/card_comp/cpu/frequency.html b/src/templates/card_comp/cpu/frequency.html index b50c8df..8833ca1 100644 --- a/src/templates/card_comp/cpu/frequency.html +++ b/src/templates/card_comp/cpu/frequency.html @@ -1,16 +1,26 @@ -
-
-
CPU Frequency
-

- {{ system_info['cpu_frequency'] }} -

-
-
-
+
+
+
CPU Frequency
+

+ {{ system_info['cpu_frequency'] }} +

+
+
-
-
\ No newline at end of file + + +
+
+ + diff --git a/src/templates/card_comp/cpu/usages.html b/src/templates/card_comp/cpu/usages.html index a1a3641..04d56be 100644 --- a/src/templates/card_comp/cpu/usages.html +++ b/src/templates/card_comp/cpu/usages.html @@ -1,13 +1,23 @@ {% if card_settings.is_cpu_usage_card_enabled %} -
-
-
CPU Usage
-

- {{ system_info['cpu_percent'] }}% -

-
-
-
+
+
+
CPU Usage
+

+ {{ system_info['cpu_percent'] }}% +

+
+
+ +
+
{% endif %} + diff --git a/src/templates/card_comp/disk/usage.html b/src/templates/card_comp/disk/usage.html index a2d43ba..4cb8c28 100644 --- a/src/templates/card_comp/disk/usage.html +++ b/src/templates/card_comp/disk/usage.html @@ -2,13 +2,22 @@
Disk Usage
-

+

{{ system_info['disk_percent'] }}%

+
{% endif %} + + diff --git a/src/templates/card_comp/memory/usage.html b/src/templates/card_comp/memory/usage.html index 4c960a8..30a45bf 100644 --- a/src/templates/card_comp/memory/usage.html +++ b/src/templates/card_comp/memory/usage.html @@ -1,15 +1,25 @@ {% if card_settings.is_memory_usage_card_enabled %} -
-
-
Memory Usage
-

- {{ system_info['memory_percent'] }}% -

-
-
-
- +
+
+
Memory Usage
+

+ {{ system_info['memory_percent'] }}% +

+
+
+ +
+
{% endif %} + + \ No newline at end of file diff --git a/src/templates/card_comp/selector/refresh_button.html b/src/templates/card_comp/selector/refresh_button.html index e7ac6ff..1c42477 100644 --- a/src/templates/card_comp/selector/refresh_button.html +++ b/src/templates/card_comp/selector/refresh_button.html @@ -3,15 +3,12 @@
diff --git a/src/templates/dashboard/developer.html b/src/templates/dashboard/developer.html index 8e3db9a..0f46d3f 100644 --- a/src/templates/dashboard/developer.html +++ b/src/templates/dashboard/developer.html @@ -1,7 +1,9 @@ {% extends 'base/base.html' %} -{% block title %}Server Dashboard{% endblock %} +{% block title %}{{ system_info['system_username'] }}@{{ system_info['nodename'] }} - SystemGuard{% endblock %} {% block extra_head %} + + {% endblock %} {% block content %} {% include 'card_comp/selector/refresh_button.html' %} @@ -26,19 +28,19 @@ {% include 'card_comp/battery/percentage.html' %}
-
+
{% include 'card_comp/cpu/usages.html' %}
-
+
{% include 'card_comp/memory/usage.html' %}
{% include 'card_comp/disk/usage.html' %}
-
+
{% include 'card_comp/cpu/current_temp.html' %}
-
+
{% include 'card_comp/cpu/frequency.html' %}
@@ -56,4 +58,5 @@ {% block extra_scripts %} + {% endblock %} \ No newline at end of file diff --git a/src/templates/experimental/graphs.html b/src/templates/experimental/graphs.html new file mode 100644 index 0000000..ff5c449 --- /dev/null +++ b/src/templates/experimental/graphs.html @@ -0,0 +1,103 @@ +{% extends 'base/base.html' %} +{% block title %}Dashboard Network{% endblock %} +{% block extra_head %} + + + + + + +{% endblock %} +{% block content %} + +

+ {{ title }} Metrics (Experimental) +

+
+ + +
+
+

Current Server Time & Time Zone

+ + Exit Experimental Graphs +
+

Current Time:

+

Time Zone:

+

Data Retention: 15 Days

+
+
+ + +
+
+
+ + +
+
+
+
+ +
+
+

+ +
+ +
+

+ +
+
+ +
+
+

+ +
+ +
+

+ +
+
+ +
+
+

+ +
+ +
+

+ +
+
+ +
+
+

+ +
+
+ +{% endblock %} +{% block extra_scripts %} + + +{% endblock %} \ No newline at end of file diff --git a/src/templates/ext/footer.html b/src/templates/ext/footer.html index 65db537..beefb3e 100644 --- a/src/templates/ext/footer.html +++ b/src/templates/ext/footer.html @@ -1,12 +1,12 @@ -
+ -
\ No newline at end of file + \ No newline at end of file diff --git a/src/templates/ext/navbar.html b/src/templates/ext/navbar.html index 3bc8e47..ac257d4 100644 --- a/src/templates/ext/navbar.html +++ b/src/templates/ext/navbar.html @@ -21,7 +21,7 @@
+ +
@@ -137,7 +160,9 @@
More Utilities
Ping Website - + + External API Monitoring +
diff --git a/src/utils.py b/src/utils.py index 14f3bb9..f9ea87d 100644 --- a/src/utils.py +++ b/src/utils.py @@ -482,11 +482,9 @@ def _get_system_info(): Returns: dict: System information dictionary with various system metrics. """ - boot_time = get_cached_value('boot_time', lambda: datetime.datetime.fromtimestamp(psutil.boot_time())) - uptime_dict = get_cached_value('uptime', lambda: format_uptime(datetime.datetime.now() - boot_time)) - disk_total = get_cached_value("disk_total", get_disk_total) - memory_available = get_cached_value("memory_available", get_memory_available) - # Gathering fresh system information + + disk_total = get_disk_total() + memory_available = get_memory_available() battery_data = check_battery_status() memory_info = psutil.virtual_memory() disk_info = psutil.disk_usage('/') @@ -518,7 +516,6 @@ def _get_system_info(): 'timestamp': datetime.datetime.now(), } # update uptime dictionary - info.update(uptime_dict) return info