From 0e098407e15bc1dc341c571cec8fd34ecb4b0943 Mon Sep 17 00:00:00 2001 From: nicolargo Date: Sun, 23 Oct 2022 11:14:41 +0200 Subject: [PATCH 01/21] First version. Container list and CPU ok, need others stats --- conf/glances.conf | 2 + glances/plugins/glances_docker.py | 465 ++++++++++++++++++++---------- optional-requirements.txt | 1 + 3 files changed, 313 insertions(+), 155 deletions(-) diff --git a/conf/glances.conf b/conf/glances.conf index 85326e33..6774b591 100644 --- a/conf/glances.conf +++ b/conf/glances.conf @@ -409,6 +409,8 @@ max_name_size=20 # By default, Glances only display running containers # Set the following key to True to display all containers all=False +# Define Podman sock +#podman_sock=unix:///run/user/1000/podman/podman.sock [amps] # AMPs configuration are defined in the bottom of this file diff --git a/glances/plugins/glances_docker.py b/glances/plugins/glances_docker.py index 65f8124e..37eaf83a 100644 --- a/glances/plugins/glances_docker.py +++ b/glances/plugins/glances_docker.py @@ -7,7 +7,7 @@ # SPDX-License-Identifier: LGPL-3.0-only # -"""Docker plugin.""" +"""Docker (and Podman) plugin.""" import os import threading @@ -26,11 +26,22 @@ try: import docker from dateutil import parser, tz except Exception as e: - import_error_tag = True + import_docker_error_tag = True # Display debug message if import KeyError - logger.warning("Error loading Docker deps Lib. Docker plugin is disabled ({})".format(e)) + logger.debug("Error loading Docker deps Lib. Docker plugin is disabled ({})".format(e)) else: - import_error_tag = False + import_docker_error_tag = False + +# Podman library (optional and Linux-only) +# https://pypi.org/project/podman/ +try: + from podman import PodmanClient +except Exception as e: + import_podman_error_tag = True + # Display debug message if import KeyError + logger.debug("Error loading Podman deps Lib. 
Podman feature in the Docker plugin is disabled ({})".format(e)) +else: + import_podman_error_tag = False # Define the items history list (list of items to add to history) # TODO: For the moment limited to the CPU. Had to change the graph exports @@ -77,7 +88,9 @@ class Plugin(GlancesPlugin): def __init__(self, args=None, config=None): """Init the plugin.""" - super(Plugin, self).__init__(args=args, config=config, items_history_list=items_history_list) + super(Plugin, self).__init__(args=args, + config=config, + items_history_list=items_history_list) # The plugin can be disabled using: args.disable_docker self.args = args @@ -89,12 +102,27 @@ class Plugin(GlancesPlugin): self.display_curse = True # Init the Docker API - self.docker_client = self.connect() + if not import_docker_error_tag: + self.docker_client = self.connect_docker() + else: + self.docker_client = None - # Dict of thread (to grab stats asynchronously, one thread is created per container) + # Init the Podman API + self._version_podman = {} + if not import_podman_error_tag: + self.podman_client = self.connect_podman() + else: + self.podman_client = None + + # Dict of Docker thread (to grab stats asynchronously, one thread is created per container) # key: Container Id - # value: instance of ThreadDockerGrabber - self.thread_list = {} + # value: instance of ThreadContainerGrabber + self.thread_docker_list = {} + + # Dict of Podman thread (to grab stats asynchronously, one thread is created per container) + # key: Container Id + # value: instance of ThreadContainerGrabber + self.thread_podman_list = {} # Dict of Network stats (Storing previous network stats to compute Rx/s and Tx/s) # key: Container Id @@ -115,7 +143,9 @@ class Plugin(GlancesPlugin): def exit(self): """Overwrite the exit method to close threads.""" - for t in itervalues(self.thread_list): + for t in itervalues(self.thread_docker_list): + t.stop() + for t in itervalues(self.thread_podman_list): t.stop() # Call the father class 
super(Plugin, self).exit() @@ -143,12 +173,10 @@ class Plugin(GlancesPlugin): return ret - def connect(self): + def connect_docker(self): """Connect to the Docker server.""" try: - # If the following line replace the next one, the issue #1878 - # is reproduced (Docker containers information missing with Docker 20.10.x) - # So, for the moment disable the timeout option + # Do not use the timeout option (see issue #1878) ret = docker.from_env() except Exception as e: logger.error("docker plugin - Can not connect to Docker ({})".format(e)) @@ -156,6 +184,39 @@ return ret + def connect_podman(self): + """Connect to Podman.""" + try: + ret = PodmanClient(base_url=self._podman_sock()) + except Exception as e: + logger.error("docker plugin - Can not connect to Podman ({})".format(e)) + ret = None + + try: + version_podman = ret.version() + except Exception as e: + logger.error("{} plugin - Cannot get Podman version ({})".format(self.plugin_name, e)) + ret = None + else: + self._version_podman = { + 'Version': version_podman['Version'], + 'ApiVersion': version_podman['ApiVersion'], + 'MinAPIVersion': version_podman['MinAPIVersion'], + } + + return ret + + def _podman_sock(self): + """Return the podman sock. + Could be defined in the [docker] section thanks to the podman_sock option. + Default value: unix:///run/user/1000/podman/podman.sock + """ + conf_podman_sock = self.get_conf_value('podman_sock') + if len(conf_podman_sock) == 0: + return "unix:///run/user/1000/podman/podman.sock" + else: + return conf_podman_sock[0] + def _all_tag(self): """Return the all tag of the Glances/Docker configuration file. 
@@ -172,154 +233,249 @@ class Plugin(GlancesPlugin): @GlancesPlugin._check_decorator @GlancesPlugin._log_result_decorator def update(self): - """Update Docker stats using the input method.""" - # Init new stats - stats = self.get_init_value() - - # The Docker-py lib is mandatory and connection should be ok - if import_error_tag or self.docker_client is None: - return self.stats + """Update Docker and podman stats using the input method.""" + # Connection should be ok + if self.docker_client is None and self.podman_client is None: + return self.get_init_value() if self.input_method == 'local': # Update stats - - # Docker version - # Example: { - # "KernelVersion": "3.16.4-tinycore64", - # "Arch": "amd64", - # "ApiVersion": "1.15", - # "Version": "1.3.0", - # "GitCommit": "c78088f", - # "Os": "linux", - # "GoVersion": "go1.3.3" - # } - try: - stats['version'] = self.docker_client.version() - except Exception as e: - # Correct issue#649 - logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e)) - # We may have lost connection remove version info - if 'version' in self.stats: - del self.stats['version'] - self.stats['containers'] = [] - return self.stats - - # Update current containers list - try: - # Issue #1152: Docker module doesn't export details about stopped containers - # The Docker/all key of the configuration file should be set to True - containers = self.docker_client.containers.list(all=self._all_tag()) or [] - except Exception as e: - logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e)) - # We may have lost connection empty the containers list. 
- self.stats['containers'] = [] - return self.stats - - # Start new thread for new container - for container in containers: - if container.id not in self.thread_list: - # Thread did not exist in the internal dict - # Create it, add it to the internal dict and start it - logger.debug( - "{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]) - ) - t = ThreadDockerGrabber(container) - self.thread_list[container.id] = t - t.start() - - # Stop threads for non-existing containers - absent_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers]) - for container_id in absent_containers: - # Stop the thread - logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12])) - self.thread_list[container_id].stop() - # Delete the item from the dict - del self.thread_list[container_id] - - # Get stats for all containers - stats['containers'] = [] - for container in containers: - # Shall we display the stats ? 
- if not self.is_display(nativestr(container.name)): - continue - - # Init the stats for the current container - container_stats = {} - # The key is the container name and not the Id - container_stats['key'] = self.get_key() - # Export name - container_stats['name'] = nativestr(container.name) - # Container Id - container_stats['Id'] = container.id - # Container Image - container_stats['Image'] = container.image.tags - # Global stats (from attrs) - # Container Status - container_stats['Status'] = container.attrs['State']['Status'] - # Container Command (see #1912) - container_stats['Command'] = [] - if container.attrs['Config'].get('Entrypoint', None): - container_stats['Command'].extend(container.attrs['Config'].get('Entrypoint', [])) - if container.attrs['Config'].get('Cmd', None): - container_stats['Command'].extend(container.attrs['Config'].get('Cmd', [])) - if not container_stats['Command']: - container_stats['Command'] = None - # Standards stats - # See https://docs.docker.com/engine/api/v1.41/#operation/ContainerStats - # Be aware that the API can change... 
(example see issue #1857) - if container_stats['Status'] in ('running', 'paused'): - # CPU - container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats) - container_stats['cpu_percent'] = container_stats['cpu'].get('total', None) - # MEM - container_stats['memory'] = self.get_docker_memory( - container.id, self.thread_list[container.id].stats - ) - container_stats['memory_usage'] = container_stats['memory'].get('usage', None) - if container_stats['memory'].get('cache', None) is not None: - container_stats['memory_usage'] -= container_stats['memory']['cache'] - # IO - container_stats['io'] = self.get_docker_io(container.id, self.thread_list[container.id].stats) - container_stats['io_r'] = container_stats['io'].get('ior', None) - container_stats['io_w'] = container_stats['io'].get('iow', None) - # NET - container_stats['network'] = self.get_docker_network( - container.id, self.thread_list[container.id].stats - ) - container_stats['network_rx'] = container_stats['network'].get('rx', None) - container_stats['network_tx'] = container_stats['network'].get('tx', None) - # Uptime - container_stats['Uptime'] = pretty_date( - # parser.parse(container.attrs['State']['StartedAt']).replace(tzinfo=None) - parser.parse(container.attrs['State']['StartedAt']) - .astimezone(tz.tzlocal()) - .replace(tzinfo=None) - ) - else: - container_stats['cpu'] = {} - container_stats['cpu_percent'] = None - container_stats['memory'] = {} - container_stats['memory_percent'] = None - container_stats['io'] = {} - container_stats['io_r'] = None - container_stats['io_w'] = None - container_stats['network'] = {} - container_stats['network_rx'] = None - container_stats['network_tx'] = None - container_stats['Uptime'] = None - # Add current container stats to the stats list - stats['containers'].append(container_stats) - + stats_docker = self.update_docker() + stats_podman = self.update_podman() + stats = { + 'version': stats_docker.get('version', {}), + 
'version_podman': stats_podman.get('version', {}), + 'containers': stats_docker.get('containers', []) + stats_podman.get('containers', []) + } elif self.input_method == 'snmp': # Update stats using SNMP # Not available pass # Sort and update the stats + # @TODO: Have a look because sort did not work for the moment (need memory stats ?) self.sort_key, self.stats = sort_docker_stats(stats) return self.stats + def update_docker(self): + """Update Docker stats using the input method.""" + # Init new docker stats + stats = self.get_init_value() + + # Docker version + # Example: { + # "KernelVersion": "3.16.4-tinycore64", + # "Arch": "amd64", + # "ApiVersion": "1.15", + # "Version": "1.3.0", + # "GitCommit": "c78088f", + # "Os": "linux", + # "GoVersion": "go1.3.3" + # } + try: + stats['version'] = self.docker_client.version() + except Exception as e: + # Correct issue#649 + logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e)) + return stats + + # Update current containers list + try: + # Issue #1152: Docker module doesn't export details about stopped containers + # The Docker/all key of the configuration file should be set to True + containers = self.docker_client.containers.list(all=self._all_tag()) or [] + except Exception as e: + logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e)) + return stats + + # Start new thread for new container + for container in containers: + if container.id not in self.thread_docker_list: + # Thread did not exist in the internal dict + # Create it, add it to the internal dict and start it + logger.debug( + "{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]) + ) + t = ThreadContainerGrabber(container) + self.thread_docker_list[container.id] = t + t.start() + + # Stop threads for non-existing containers + absent_containers = set(iterkeys(self.thread_docker_list)) - set([c.id for c in containers]) + for container_id in absent_containers: + 
# Stop the thread + logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12])) + self.thread_docker_list[container_id].stop() + # Delete the item from the dict + del self.thread_docker_list[container_id] + + # Get stats for all containers + stats['containers'] = [] + for container in containers: + # logger.info(['{}: {}'.format(key, container.attrs[key]) for key in sorted(container.attrs.keys())]) + # logger.info(container.attrs['State']['Status']) + # Shall we display the stats ? + if not self.is_display(nativestr(container.name)): + continue + + # Init the stats for the current container + container_stats = {} + # The key is the container name and not the Id + container_stats['key'] = self.get_key() + # Export name + container_stats['name'] = nativestr(container.name) + # Container Id + container_stats['Id'] = container.id + # Container Image + container_stats['Image'] = container.image.tags + # Global stats (from attrs) + # Container Status + container_stats['Status'] = container.attrs['State']['Status'] + # Container Command (see #1912) + container_stats['Command'] = [] + if container.attrs['Config'].get('Entrypoint', None): + container_stats['Command'].extend(container.attrs['Config'].get('Entrypoint', [])) + if container.attrs['Config'].get('Cmd', None): + container_stats['Command'].extend(container.attrs['Config'].get('Cmd', [])) + if not container_stats['Command']: + container_stats['Command'] = None + # Standards stats + # See https://docs.docker.com/engine/api/v1.41/#operation/ContainerStats + # Be aware that the API can change... 
(example see issue #1857) + if container_stats['Status'] in ('running', 'paused'): + # CPU + container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_docker_list[container.id].stats) + container_stats['cpu_percent'] = container_stats['cpu'].get('total', None) + # MEM + container_stats['memory'] = self.get_docker_memory( + container.id, self.thread_docker_list[container.id].stats + ) + container_stats['memory_usage'] = container_stats['memory'].get('usage', None) + if container_stats['memory'].get('cache', None) is not None: + container_stats['memory_usage'] -= container_stats['memory']['cache'] + # IO + container_stats['io'] = self.get_docker_io(container.id, self.thread_docker_list[container.id].stats) + container_stats['io_r'] = container_stats['io'].get('ior', None) + container_stats['io_w'] = container_stats['io'].get('iow', None) + # NET + container_stats['network'] = self.get_docker_network( + container.id, self.thread_docker_list[container.id].stats + ) + container_stats['network_rx'] = container_stats['network'].get('rx', None) + container_stats['network_tx'] = container_stats['network'].get('tx', None) + # Uptime + container_stats['Uptime'] = pretty_date( + # parser.parse(container.attrs['State']['StartedAt']).replace(tzinfo=None) + parser.parse(container.attrs['State']['StartedAt']) + .astimezone(tz.tzlocal()) + .replace(tzinfo=None) + ) + else: + container_stats['cpu'] = {} + container_stats['cpu_percent'] = None + container_stats['memory'] = {} + container_stats['memory_percent'] = None + container_stats['io'] = {} + container_stats['io_r'] = None + container_stats['io_w'] = None + container_stats['network'] = {} + container_stats['network_rx'] = None + container_stats['network_tx'] = None + container_stats['Uptime'] = None + # Add current container stats to the stats list + stats['containers'].append(container_stats) + + return stats + + def update_podman(self): + """Update Podman stats.""" + # Init new docker stats + stats = 
self.get_init_value() + + # Podman version + # Request very long so it is only done once in the connect_podman method + stats['version'] = self._version_podman + + # Update current containers list + try: + containers = self.podman_client.containers.list() or [] + except Exception as e: + logger.error("{} plugin - Cannot get Podman containers list ({})".format(self.plugin_name, e)) + return stats + + # And the stats for each container + try: + # Return example: + # [{'CPU': '3.21%', + # 'MemUsage': '352.3kB / 7.836GB', 'MemUsageBytes': '344KiB / 7.298GiB', 'Mem': '0.00%', + # 'NetIO': '-- / --', + # 'BlockIO': '-- / --', + # 'PIDS': '1', 'Pod': '8d0f1c783def', 'CID': '9491515251ed', + # 'Name': '8d0f1c783def-infra'}, ... ] + podman_stats = {s['CID'][:12]: s for s in self.podman_client.pods.stats()} + except Exception as e: + logger.error("{} plugin - Cannot get Podman containers list ({})".format(self.plugin_name, e)) + return stats + + # Get stats for all containers + stats['containers'] = [] + for container in containers: + # Shall we display the stats ? 
+ if not self.is_display(nativestr(container.name)): + continue + + # Init the stats for the current container + container_stats = {} + # The key is the container name and not the Id + container_stats['key'] = self.get_key() + # Export name + container_stats['name'] = nativestr(container.name) + # Container Id + container_stats['Id'] = container.id + container_stats['IdShort'] = container.id[:12] + # Container Image + container_stats['Image'] = container.image.tags + # Container Status (from attrs) + container_stats['Status'] = container.attrs['State'] + # Container Command + container_stats['Command'] = container.attrs['Command'] + # Standards stats + if container_stats['Status'] in ('running', 'paused'): + # CPU + # '3.21%' to 3.21 + container_stats['cpu_percent'] = float(podman_stats[container_stats['IdShort']]['CPU'][:-1]) + container_stats['cpu'] = {'total': container_stats['cpu_percent']} + # MEMORY + # @TODO + # Should convert 'MemUsage': '352.3kB / 7.836GB' to bytes... + container_stats['memory'] = {} + container_stats['memory_percent'] = float(podman_stats[container_stats['IdShort']]['Mem'][:-1]) + # Is it possible ? + container_stats['io'] = {} + container_stats['io_r'] = None + container_stats['io_w'] = None + container_stats['network'] = {} + container_stats['network_rx'] = None + container_stats['network_tx'] = None + container_stats['Uptime'] = None + else: + container_stats['cpu'] = {} + container_stats['cpu_percent'] = None + container_stats['memory'] = {} + container_stats['memory_percent'] = None + container_stats['io'] = {} + container_stats['io_r'] = None + container_stats['io_w'] = None + container_stats['network'] = {} + container_stats['network_rx'] = None + container_stats['network_tx'] = None + container_stats['Uptime'] = None + # Add current container stats to the stats list + stats['containers'].append(container_stats) + + return stats + def get_docker_cpu(self, container_id, all_stats): """Return the container CPU usage. 
@@ -684,9 +840,9 @@ class Plugin(GlancesPlugin): return 'CAREFUL' -class ThreadDockerGrabber(threading.Thread): +class ThreadContainerGrabber(threading.Thread): """ - Specific thread to grab docker stats. + Specific thread to grab container stats. stats is a dict """ @@ -694,14 +850,13 @@ class ThreadDockerGrabber(threading.Thread): def __init__(self, container): """Init the class. - container: instance of Docker-py Container + container: instance of Container returned by Docker or Podman client """ - super(ThreadDockerGrabber, self).__init__() + super(ThreadContainerGrabber, self).__init__() # Event needed to stop properly the thread self._stopper = threading.Event() # The docker-py return stats as a stream self._container = container - self._stats_stream = container.stats(decode=True) # The class return the stats as a dict self._stats = {} logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) @@ -712,7 +867,7 @@ class ThreadDockerGrabber(threading.Thread): Infinite loop, should be stopped by calling the stop() method """ try: - for i in self._stats_stream: + for i in self._container.stats(decode=True): self._stats = i time.sleep(0.1) if self.stopped(): diff --git a/optional-requirements.txt b/optional-requirements.txt index 944f55c7..18d4128e 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -18,6 +18,7 @@ netifaces py3nvml; python_version >= "3.0" paho-mqtt pika +podman; python_version >= "3.6" potsdb prometheus_client pygal From 5dd133af7d8d170938829524199d08006777f8a3 Mon Sep 17 00:00:00 2001 From: nicolargo Date: Sat, 21 Jan 2023 15:40:48 +0100 Subject: [PATCH 02/21] Add memory info for Podman --- glances/compat.py | 31 +++++++++++++++++++++++++++++++ glances/plugins/glances_docker.py | 7 +++++-- unitest.py | 15 ++++++++++++++- 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/glances/compat.py b/glances/compat.py index 81fad5ad..95955794 100644 --- a/glances/compat.py +++ 
b/glances/compat.py @@ -20,6 +20,7 @@ import types import subprocess import os from datetime import datetime +import re from glances.logger import logger @@ -355,3 +356,33 @@ def urlopen_auth(url, username, password): headers={'Authorization': 'Basic ' + base64.b64encode(('%s:%s' % (username, password)).encode()).decode()}, ) ) + + +def string_value_to_float(s): + """Convert a string with a value and an unit to a float. + Example: + '12.5 MB' -> 12500000.0 + '32.5 GB' -> 32500000000.0 + Args: + s (string): Input string with value and unit + Output: + float: The value in float + """ + convert_dict = { + None: 1, + 'B': 1, + 'KB': 1000, + 'MB': 1000000, + 'GB': 1000000000, + 'TB': 1000000000000, + 'PB': 1000000000000000, + } + unpack_string = [float(i[0]) if i[1] == '' else i[1].upper() for i in re.findall(r'([\d.]+)|([^\d.]+)', s.replace(' ', ''))] + if len(unpack_string) == 2: + value, unit = unpack_string + elif len(unpack_string) == 1: + value = unpack_string[0] + unit = None + else: + return None + return value * convert_dict[unit] diff --git a/glances/plugins/glances_docker.py b/glances/plugins/glances_docker.py index 37eaf83a..0758e788 100644 --- a/glances/plugins/glances_docker.py +++ b/glances/plugins/glances_docker.py @@ -14,7 +14,7 @@ import threading import time from copy import deepcopy -from glances.compat import iterkeys, itervalues, nativestr, pretty_date +from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float from glances.logger import logger from glances.plugins.glances_plugin import GlancesPlugin from glances.processes import sort_stats as sort_stats_processes, glances_processes @@ -449,7 +449,10 @@ class Plugin(GlancesPlugin): # MEMORY # @TODO # Should convert 'MemUsage': '352.3kB / 7.836GB' to bytes... 
- container_stats['memory'] = {} + container_stats['memory'] = { + 'usage': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[0]), + 'limit': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[1]), + } container_stats['memory_percent'] = float(podman_stats[container_stats['IdShort']]['Mem'][:-1]) # Is it possible ? container_stats['io'] = {} diff --git a/unitest.py b/unitest.py index 07bab142..25193d9a 100755 --- a/unitest.py +++ b/unitest.py @@ -10,6 +10,10 @@ """Glances unitary tests suite.""" +# +# ./venv/bin/python unitest.py +# + import time import unittest @@ -25,7 +29,7 @@ from glances.thresholds import GlancesThresholdCritical from glances.thresholds import GlancesThresholds from glances.plugins.glances_plugin import GlancesPlugin from glances.programs import processes_to_programs -from glances.compat import subsample, range +from glances.compat import subsample, range, string_value_to_float from glances.secure import secure_popen from glances.compat import PY3 @@ -284,6 +288,15 @@ class TestGlances(unittest.TestCase): # Check if number of processes in the list equal counter # self.assertEqual(total, len(stats_grab)) + def test_018_string_value_to_float(self): + """Check string_value_to_float function""" + print('INFO: [TEST_018] Check string_value_to_float function') + self.assertEqual(string_value_to_float('32kB'), 32000.0) + self.assertEqual(string_value_to_float('32 KB'), 32000.0) + self.assertEqual(string_value_to_float('15.5MB'), 15500000.0) + self.assertEqual(string_value_to_float('25.9'), 25.9) + self.assertEqual(string_value_to_float('12'), 12) + def test_094_thresholds(self): """Test thresholds classes""" print('INFO: [TEST_094] Thresholds') From b43bf2bcb0c21627010a401758d8a99b2ee17ec0 Mon Sep 17 00:00:00 2001 From: nicolargo Date: Sun, 22 Jan 2023 09:13:06 +0100 Subject: [PATCH 03/21] Code should be refactor in order to make it more easy to update if a new container 
engine is added --- glances/plugins/glances_docker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/glances/plugins/glances_docker.py b/glances/plugins/glances_docker.py index 0758e788..f2a281b9 100644 --- a/glances/plugins/glances_docker.py +++ b/glances/plugins/glances_docker.py @@ -443,12 +443,12 @@ class Plugin(GlancesPlugin): # Standards stats if container_stats['Status'] in ('running', 'paused'): # CPU - # '3.21%' to 3.21 + # Convert: '3.21%' to 3.21 container_stats['cpu_percent'] = float(podman_stats[container_stats['IdShort']]['CPU'][:-1]) container_stats['cpu'] = {'total': container_stats['cpu_percent']} # MEMORY - # @TODO - # Should convert 'MemUsage': '352.3kB / 7.836GB' to bytes... + # Convert 'MemUsage': '352.3kB / 7.836GB' to bytes + # Yes it is ugly but the API does not expose the memory limit in bytes... container_stats['memory'] = { 'usage': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[0]), 'limit': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[1]), From a8053862550058fde46fe1a0a40b6596cca9109b Mon Sep 17 00:00:00 2001 From: nicolargo Date: Sun, 22 Jan 2023 09:54:56 +0100 Subject: [PATCH 04/21] Add Podman IO, but not working for the moment because https://github.com/containers/podman/issues/11695 --- glances/compat.py | 6 +++++- glances/plugins/glances_docker.py | 11 ++++++----- unitest.py | 1 + 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/glances/compat.py b/glances/compat.py index 95955794..ac7c6240 100644 --- a/glances/compat.py +++ b/glances/compat.py @@ -377,7 +377,7 @@ def string_value_to_float(s): 'TB': 1000000000000, 'PB': 1000000000000000, } - unpack_string = [float(i[0]) if i[1] == '' else i[1].upper() for i in re.findall(r'([\d.]+)|([^\d.]+)', s.replace(' ', ''))] + unpack_string = [i[0] if i[1] == '' else i[1].upper() for i in re.findall(r'([\d.]+)|([^\d.]+)', s.replace(' ', ''))] if len(unpack_string) == 2: 
value, unit = unpack_string elif len(unpack_string) == 1: @@ -385,4 +385,8 @@ def string_value_to_float(s): unit = None else: return None + try: + value = float(unpack_string[0]) + except ValueError: + return None return value * convert_dict[unit] diff --git a/glances/plugins/glances_docker.py b/glances/plugins/glances_docker.py index f2a281b9..1eeb04ac 100644 --- a/glances/plugins/glances_docker.py +++ b/glances/plugins/glances_docker.py @@ -454,13 +454,14 @@ class Plugin(GlancesPlugin): 'limit': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[1]), } container_stats['memory_percent'] = float(podman_stats[container_stats['IdShort']]['Mem'][:-1]) - # Is it possible ? + # Not available for the moment: https://github.com/containers/podman/issues/11695 container_stats['io'] = {} - container_stats['io_r'] = None - container_stats['io_w'] = None + container_stats['io_r'] = string_value_to_float(podman_stats[container_stats['IdShort']]['BlockIO'].split(' / ')[0]) + container_stats['io_w'] = string_value_to_float(podman_stats[container_stats['IdShort']]['BlockIO'].split(' / ')[1]) container_stats['network'] = {} - container_stats['network_rx'] = None - container_stats['network_tx'] = None + container_stats['network_rx'] = string_value_to_float(podman_stats[container_stats['IdShort']]['NetIO'].split(' / ')[0]) + container_stats['network_tx'] = string_value_to_float(podman_stats[container_stats['IdShort']]['NetIO'].split(' / ')[1]) + # container_stats['Uptime'] = None else: container_stats['cpu'] = {} diff --git a/unitest.py b/unitest.py index 25193d9a..ca9703a9 100755 --- a/unitest.py +++ b/unitest.py @@ -296,6 +296,7 @@ class TestGlances(unittest.TestCase): self.assertEqual(string_value_to_float('15.5MB'), 15500000.0) self.assertEqual(string_value_to_float('25.9'), 25.9) self.assertEqual(string_value_to_float('12'), 12) + self.assertEqual(string_value_to_float('--'), None) def test_094_thresholds(self): """Test thresholds classes""" 
From 532df87e74a38eda07fe5ed5f1fcd58acc34ea0e Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 05/21] chg: Plugin name - docker -> containers --- conf/glances.conf | 2 +- glances/outputs/glances_curses.py | 6 +++--- .../plugins/{glances_docker.py => glances_containers.py} | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) rename glances/plugins/{glances_docker.py => glances_containers.py} (99%) diff --git a/conf/glances.conf b/conf/glances.conf index 923a92c9..25a22e10 100644 --- a/conf/glances.conf +++ b/conf/glances.conf @@ -386,7 +386,7 @@ port_default_gateway=True #web_4_url=https://blog.nicolargo.com/nonexist #web_4_description=Intranet -[docker] +[containers] disable=False # Only show specific containers (comma separated list of container name or regular expression) # Comment this line to display all containers (default configuration) diff --git a/glances/outputs/glances_curses.py b/glances/outputs/glances_curses.py index f3be8980..b11c6eb8 100644 --- a/glances/outputs/glances_curses.py +++ b/glances/outputs/glances_curses.py @@ -57,7 +57,7 @@ class _GlancesCurses(object): 'c': {'sort_key': 'cpu_percent'}, 'C': {'switch': 'disable_cloud'}, 'd': {'switch': 'disable_diskio'}, - 'D': {'switch': 'disable_docker'}, + 'D': {'switch': 'disable_containers'}, # 'e' > Enable/Disable process extended # 'E' > Erase the process filter # 'f' > Show/hide fs / folder stats @@ -124,7 +124,7 @@ class _GlancesCurses(object): _left_sidebar_max_width = 34 # Define right sidebar - _right_sidebar = ['docker', 'processcount', 'amps', 'processlist', 'alert'] + _right_sidebar = ['containers', 'processcount', 'amps', 'processlist', 'alert'] def __init__(self, config=None, args=None): # Init @@ -612,7 +612,7 @@ class _GlancesCurses(object): max_processes_displayed = ( self.term_window.getmaxyx()[0] - 11 - - (0 if 'docker' not in __stat_display else 
self.get_stats_display_height(__stat_display["docker"])) + - (0 if 'containers' not in __stat_display else self.get_stats_display_height(__stat_display["containers"])) - ( 0 if 'processcount' not in __stat_display diff --git a/glances/plugins/glances_docker.py b/glances/plugins/glances_containers.py similarity index 99% rename from glances/plugins/glances_docker.py rename to glances/plugins/glances_containers.py index 1eeb04ac..b316c78e 100644 --- a/glances/plugins/glances_docker.py +++ b/glances/plugins/glances_containers.py @@ -716,7 +716,7 @@ class Plugin(GlancesPlugin): # Get the maximum containers name # Max size is configurable. See feature request #1723. name_max_width = min( - self.config.get_int_value('docker', 'max_name_size', default=20) if self.config is not None else 20, + self.config.get_int_value('containers', 'max_name_size', default=20) if self.config is not None else 20, len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']), ) msg = ' {:{width}}'.format('Name', width=name_max_width) From b65f8006318921d641dcee520687cf87061cd3ef Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 06/21] add: containers Plugin - StatsFetcher --- glances/plugins/containers/__init__.py | 0 glances/plugins/containers/stats_fetcher.py | 72 +++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 glances/plugins/containers/__init__.py create mode 100644 glances/plugins/containers/stats_fetcher.py diff --git a/glances/plugins/containers/__init__.py b/glances/plugins/containers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/glances/plugins/containers/stats_fetcher.py b/glances/plugins/containers/stats_fetcher.py new file mode 100644 index 00000000..ed08f4ce --- /dev/null +++ b/glances/plugins/containers/stats_fetcher.py @@ -0,0 +1,72 @@ +import threading +import time + +from glances.logger import logger + + +class 
StatsFetcher: + # Should be an Abstract Base Class + # Inherit from abc.ABC by Glancesv4 (not inheriting for compatibility with py2) + """ + Streams the container stats through threading + + Use `StatsFetcher.stats` to access the streamed results + """ + + def __init__(self, container): + """Init the class. + + container: instance of Container returned by Docker or Podman client + """ + # The docker-py return stats as a stream + self._container = container + # Container stats are maintained as dicts + self._raw_stats = {} + # Use a Thread to stream stats + self._thread = threading.Thread(target=self._fetch_stats, daemon=True) + # Event needed to stop properly the thread + self._stopper = threading.Event() + + self._thread.start() + logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) + + def _fetch_stats(self): + """Grab the stats. + + Infinite loop, should be stopped by calling the stop() method + """ + try: + for new_stats in self._container.stats(decode=True): + self._pre_raw_stats_update_hook() + self._raw_stats = new_stats + self._post_raw_stats_update_hook() + + time.sleep(0.1) + if self.stopped(): + break + + except Exception as e: + logger.debug("docker plugin - Exception thrown during run ({})".format(e)) + self.stop() + + def stopped(self): + """Return True is the thread is stopped.""" + return self._stopper.is_set() + + def stop(self, timeout=None): + """Stop the thread.""" + logger.debug("docker plugin - Close thread for container {}".format(self._container.name)) + self._stopper.set() + + @property + def stats(self): + """Raw Stats getter.""" + return self._raw_stats + + def _pre_raw_stats_update_hook(self): + """Hook that runs before worker thread updates the raw_stats""" + pass + + def _post_raw_stats_update_hook(self): + """Hook that runs after worker thread updates the raw_stats""" + pass From 16c3b4311251191d0081c404bb333cb682498a99 Mon Sep 17 00:00:00 2001 From: Raz Crimson 
<52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 07/21] chg: containers Plugin - switch to docker extension unit --- glances/plugins/containers/glances_docker.py | 353 +++++++++++++++++++ glances/plugins/glances_containers.py | 169 +-------- 2 files changed, 366 insertions(+), 156 deletions(-) create mode 100644 glances/plugins/containers/glances_docker.py diff --git a/glances/plugins/containers/glances_docker.py b/glances/plugins/containers/glances_docker.py new file mode 100644 index 00000000..11ea2ef9 --- /dev/null +++ b/glances/plugins/containers/glances_docker.py @@ -0,0 +1,353 @@ +"""Docker Extension unit for Glances' Containers plugin.""" +import threading +import time + +from glances.compat import iterkeys, itervalues, nativestr, pretty_date +from glances.logger import logger +from glances.plugins.containers.stats_fetcher import StatsFetcher + +# Docker-py library (optional and Linux-only) +# https://github.com/docker/docker-py +try: + import docker + from dateutil import parser, tz +except Exception as e: + import_docker_error_tag = True + # Display debug message if import KeyError + logger.debug("Error loading Docker deps Lib. 
Docker plugin is disabled ({})".format(e)) +else: + import_docker_error_tag = False + + +class DockerStatsFetcher(StatsFetcher): + MANDATORY_MEMORY_FIELDS = ["usage", 'limit'] + + def __init__(self, container): + super().__init__(container) + # Lock to avoid the daemon thread updating stats when main thread reads the stats + self._stats_lock = threading.Lock() + + # Previous computes stats are stored in the self._old_computed_stats variable + # By storing time data we enable IoR/s and IoW/s calculations in the XML/RPC API, which would otherwise + # be overly difficult work for users of the API + self._old_computed_stats = {} + + # Last time when output stats (results) were computed + self._last_stats_output_time = 0 + # Last time when the raw_stats were updated by worker thread + self._last_raws_stats_update_time = 1 + + @property + def activity_stats(self): + """Activity Stats + + Each successive access of activity_stats will cause computation of activity_stats from raw_stats + """ + computed_activity_stats = self._compute_activity_stats() + self._old_computed_stats = computed_activity_stats + self._last_stats_output_time = time.time() + return computed_activity_stats + + def _pre_raw_stats_update_hook(self): + self._stats_lock.acquire() + + def _post_raw_stats_update_hook(self): + self._last_raws_stats_update_time = time.time() + self._stats_lock.release() + + @property + def time_since_update(self): + return self._last_raws_stats_update_time - self._last_stats_output_time + + def _compute_activity_stats(self): + with self._stats_lock: + io_stats = self._get_io_stats() + cpu_stats = self._get_cpu_stats() + memory_stats = self._get_memory_stats() + network_stats = self._get_network_stats() + + computed_stats = { + "io": io_stats or {}, + "memory": memory_stats or {}, + "network": network_stats or {}, + "cpu": cpu_stats or {"total": 0.0}, + } + return computed_stats + + def _get_cpu_stats(self): + """Return the container CPU usage. 
+ + Output: a dict {'total': 1.49} + """ + stats = {'total': 0.0} + + try: + cpu_stats = self.stats['cpu_stats'] + precpu_stats = self.stats['precpu_stats'] + cpu = {'system': cpu_stats['system_cpu_usage'], 'total': cpu_stats['cpu_usage']['total_usage']} + precpu = {'system': precpu_stats['system_cpu_usage'], 'total': precpu_stats['cpu_usage']['total_usage']} + + # Issue #1857 + # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil + # then for compatibility with older daemons the length of + # the corresponding cpu_usage.percpu_usage array should be used. + cpu['nb_core'] = cpu_stats.get('online_cpus') or len(cpu_stats['cpu_usage']['percpu_usage'] or []) + except KeyError as e: + logger.debug("containers plugin - Can't grab CPU stat for container {} ({})".format(self._container.id, e)) + logger.debug(self.stats) + return None + + try: + cpu_delta = cpu['total'] - precpu['total'] + system_cpu_delta = cpu['system'] - precpu['system'] + # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0 + stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0 + except TypeError as e: + msg = "containers plugin - Can't compute CPU usage for container {} ({})".format(self._container.id, e) + logger.debug(msg) + logger.debug(self.stats) + return None + + # Return the stats + return stats + + def _get_memory_stats(self): + """Return the container MEMORY. 
+ + Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} + """ + memory_stats = self.stats.get('memory_stats') + + # Checks for memory_stats & mandatory fields + if not memory_stats or any(field not in memory_stats for field in self.MANDATORY_MEMORY_FIELDS): + logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + stats = {field: memory_stats[field] for field in self.MANDATORY_MEMORY_FIELDS} + try: + # Issue #1857 - Some stats are not always available in ['memory_stats']['stats'] + detailed_stats = memory_stats['stats'] + stats['rss'] = detailed_stats.get('rss') or detailed_stats.get('total_rss') + stats['max_usage'] = detailed_stats.get('max_usage') + stats['cache'] = detailed_stats.get('cache') + except (KeyError, TypeError) as e: + # self.stats do not have MEM information + logger.debug("containers plugin - Can't grab MEM usage for container {} ({})".format(self._container.id, e)) + logger.debug(self.stats) + return None + + # Return the stats + return stats + + def _get_network_stats(self): + """Return the container network usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + rx: Number of bytes received + tx: Number of bytes transmitted + """ + eth0_stats = self.stats.get('networks', {}).get('eth0') + + # Checks for net_stats & mandatory fields + if not eth0_stats or any(field not in eth0_stats for field in ['rx_bytes', 'tx_bytes']): + logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Read the rx/tx stats (in bytes) + stats = {'cumulative_rx': eth0_stats["rx_bytes"], 'cumulative_tx': eth0_stats["tx_bytes"]} + + # Using previous stats to calculate rates + old_network_stats = self._old_computed_stats.get("network") + if old_network_stats: + stats['time_since_update'] = round(self.time_since_update) + stats['rx'] = stats['cumulative_rx'] - old_network_stats["cumulative_rx"] + stats['tx'] = stats['cumulative_tx'] - old_network_stats['cumulative_tx'] + + # Return the stats + return stats + + def _get_io_stats(self): + """Return the container IO usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + ior: Number of bytes read + iow: Number of bytes written + """ + io_service_bytes_recursive = self.stats.get('blkio_stats', {}).get('io_service_bytes_recursive') + + # Checks for net_stats + if not io_service_bytes_recursive: + logger.debug("containers plugin - Missing blockIO usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Read the ior/iow stats (in bytes) + try: + # Read IOR and IOW value in the structure list of dict + cumulative_ior = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value'] + cumulative_iow = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value'] + except (TypeError, IndexError, KeyError, AttributeError) as e: + # self.stats do not have io information + logger.debug( + "containers plugin - Can't grab blockIO usage for container {} ({})".format(self._container.id, e) + ) + logger.debug(self.stats) + return None + + stats = {'cumulative_ior': cumulative_ior, 'cumulative_iow': cumulative_iow} + + # Using previous stats to calculate difference + old_io_stats = self._old_computed_stats.get("io") + if old_io_stats: + stats['time_since_update'] = round(self.time_since_update) + stats['ior'] = stats['cumulative_ior'] - old_io_stats["cumulative_ior"] + stats['iow'] = stats['cumulative_iow'] - old_io_stats["cumulative_iow"] + + # Return the stats + return stats + + +class DockerContainersExtension: + """Glances' Containers Plugin's Docker Extension unit""" + + CONTAINER_ACTIVE_STATUS = ['running', 'paused'] + + def __init__(self): + if import_docker_error_tag: + raise Exception("Missing libs required to run Docker Extension (Containers) ") + + self.client = None + self.ext_name = "Docker Ext" + self.stats_fetchers = {} + self.connect() + + def connect(self): + """Connect to the Docker server.""" + # Init the Docker API Client + try: + # Do not use the timeout option 
(see issue #1878) + self.client = docker.from_env() + except Exception as e: + logger.error("docker plugin - Can not connect to Docker ({})".format(e)) + self.client = None + + def stop(self): + # Stop all streaming threads + for t in itervalues(self.stats_fetchers): + t.stop() + + def update(self, all_tag): + """Update Docker stats using the input method.""" + # Docker version + # Example: { + # "KernelVersion": "3.16.4-tinycore64", + # "Arch": "amd64", + # "ApiVersion": "1.15", + # "Version": "1.3.0", + # "GitCommit": "c78088f", + # "Os": "linux", + # "GoVersion": "go1.3.3" + # } + try: + version_stats = self.client.version() + except Exception as e: + # Correct issue#649 + logger.error("{} plugin - Cannot get Docker version ({})".format(self.ext_name, e)) + return {}, [] + + # Update current containers list + try: + # Issue #1152: Docker module doesn't export details about stopped containers + # The Containers/all key of the configuration file should be set to True + containers = self.client.containers.list(all=all_tag) + except Exception as e: + logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) + return version_stats, [] + + # Start new thread for new container + for container in containers: + if container.id not in self.stats_fetchers: + # StatsFetcher did not exist in the internal dict + # Create it, add it to the internal dict + logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) + self.stats_fetchers[container.id] = DockerStatsFetcher(container) + + # Stop threads for non-existing containers + absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) + for container_id in absent_containers: + # Stop the StatsFetcher + logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12])) + self.stats_fetchers[container_id].stop() + # Delete the StatsFetcher from the dict + del self.stats_fetchers[container_id] + + # 
Get stats for all containers + container_stats = [self.generate_stats(container) for container in containers] + return version_stats, container_stats + + @property + def key(self): + """Return the key of the list.""" + return 'name' + + def generate_stats(self, container): + # Init the stats for the current container + stats = { + 'key': self.key, + # Export name + 'name': nativestr(container.name), + # Container Id + 'Id': container.id, + # Container Image + 'Image': str(container.image.tags), + # Container Status (from attrs) + 'Status': container.attrs['State']['Status'], + 'Created': container.attrs['Created'], + 'Command': [], + } + + if container.attrs['Config'].get('Entrypoint', None): + stats['Command'].extend(container.attrs['Config'].get('Entrypoint', [])) + if container.attrs['Config'].get('Cmd', None): + stats['Command'].extend(container.attrs['Config'].get('Cmd', [])) + if not stats['Command']: + stats['Command'] = None + + if stats['Status'] in self.CONTAINER_ACTIVE_STATUS: + stats['StartedAt'] = container.attrs['State']['StartedAt'] + stats_fetcher = self.stats_fetchers[container.id] + activity_stats = stats_fetcher.activity_stats + stats.update(activity_stats) + + # Additional fields + stats['cpu_percent'] = stats["cpu"]['total'] + stats['memory_usage'] = stats["memory"].get('usage') + if stats['memory'].get('cache') is not None: + stats['memory_usage'] -= stats['memory']['cache'] + stats['io_r'] = stats['io'].get('ior') + stats['io_w'] = stats['io'].get('iow') + stats['network_rx'] = stats['network'].get('rx') + stats['network_tx'] = stats['network'].get('tx') + stats['Uptime'] = pretty_date( + parser.parse(stats['StartedAt']).astimezone(tz.tzlocal()).replace(tzinfo=None) + ) + else: + stats['io'] = {} + stats['cpu'] = {} + stats['memory'] = {} + stats['network'] = {} + stats['io_r'] = None + stats['io_w'] = None + stats['cpu_percent'] = None + stats['memory_percent'] = None + stats['network_rx'] = None + stats['network_tx'] = None + 
stats['Uptime'] = None + + return stats diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index b316c78e..74804c82 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -7,7 +7,7 @@ # SPDX-License-Identifier: LGPL-3.0-only # -"""Docker (and Podman) plugin.""" +"""Containers plugin.""" import os import threading @@ -18,19 +18,7 @@ from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_ from glances.logger import logger from glances.plugins.glances_plugin import GlancesPlugin from glances.processes import sort_stats as sort_stats_processes, glances_processes -from glances.timer import getTimeSinceLastUpdate - -# Docker-py library (optional and Linux-only) -# https://github.com/docker/docker-py -try: - import docker - from dateutil import parser, tz -except Exception as e: - import_docker_error_tag = True - # Display debug message if import KeyError - logger.debug("Error loading Docker deps Lib. 
Docker plugin is disabled ({})".format(e)) -else: - import_docker_error_tag = False +from glances.plugins.containers.glances_docker import import_docker_error_tag, DockerContainersExtension # Podman library (optional and Linux-only) # https://pypi.org/project/podman/ @@ -88,9 +76,7 @@ class Plugin(GlancesPlugin): def __init__(self, args=None, config=None): """Init the plugin.""" - super(Plugin, self).__init__(args=args, - config=config, - items_history_list=items_history_list) + super(Plugin, self).__init__(args=args, config=config, items_history_list=items_history_list) # The plugin can be disabled using: args.disable_docker self.args = args @@ -102,10 +88,8 @@ class Plugin(GlancesPlugin): self.display_curse = True # Init the Docker API - if not import_docker_error_tag: - self.docker_client = self.connect_docker() - else: - self.docker_client = None + self.docker_extension = DockerContainersExtension() if not import_docker_error_tag else None + self.docker_extension: DockerContainersExtension # Init the Podman API self._version_podman = {} @@ -143,10 +127,8 @@ class Plugin(GlancesPlugin): def exit(self): """Overwrite the exit method to close threads.""" - for t in itervalues(self.thread_docker_list): - t.stop() - for t in itervalues(self.thread_podman_list): - t.stop() + if self.docker_extension: + self.docker_extension.stop() # Call the father class super(Plugin, self).exit() @@ -235,17 +217,17 @@ class Plugin(GlancesPlugin): def update(self): """Update Docker and podman stats using the input method.""" # Connection should be ok - if self.docker_client is None and self.podman_client is None: + if self.docker_extension is None and self.podman_client is None: return self.get_init_value() if self.input_method == 'local': # Update stats - stats_docker = self.update_docker() - stats_podman = self.update_podman() + stats_docker = self.update_docker() if self.docker_extension else {} + stats_podman = self.update_podman() if self.podman_client else {} stats = { 
'version': stats_docker.get('version', {}), 'version_podman': stats_podman.get('version', {}), - 'containers': stats_docker.get('containers', []) + stats_podman.get('containers', []) + 'containers': stats_docker.get('containers', []) + stats_podman.get('containers', []), } elif self.input_method == 'snmp': # Update stats using SNMP @@ -260,133 +242,8 @@ class Plugin(GlancesPlugin): def update_docker(self): """Update Docker stats using the input method.""" - # Init new docker stats - stats = self.get_init_value() - - # Docker version - # Example: { - # "KernelVersion": "3.16.4-tinycore64", - # "Arch": "amd64", - # "ApiVersion": "1.15", - # "Version": "1.3.0", - # "GitCommit": "c78088f", - # "Os": "linux", - # "GoVersion": "go1.3.3" - # } - try: - stats['version'] = self.docker_client.version() - except Exception as e: - # Correct issue#649 - logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e)) - return stats - - # Update current containers list - try: - # Issue #1152: Docker module doesn't export details about stopped containers - # The Docker/all key of the configuration file should be set to True - containers = self.docker_client.containers.list(all=self._all_tag()) or [] - except Exception as e: - logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e)) - return stats - - # Start new thread for new container - for container in containers: - if container.id not in self.thread_docker_list: - # Thread did not exist in the internal dict - # Create it, add it to the internal dict and start it - logger.debug( - "{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]) - ) - t = ThreadContainerGrabber(container) - self.thread_docker_list[container.id] = t - t.start() - - # Stop threads for non-existing containers - absent_containers = set(iterkeys(self.thread_docker_list)) - set([c.id for c in containers]) - for container_id in absent_containers: - # Stop the thread - 
logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12])) - self.thread_docker_list[container_id].stop() - # Delete the item from the dict - del self.thread_docker_list[container_id] - - # Get stats for all containers - stats['containers'] = [] - for container in containers: - # logger.info(['{}: {}'.format(key, container.attrs[key]) for key in sorted(container.attrs.keys())]) - # logger.info(container.attrs['State']['Status']) - # Shall we display the stats ? - if not self.is_display(nativestr(container.name)): - continue - - # Init the stats for the current container - container_stats = {} - # The key is the container name and not the Id - container_stats['key'] = self.get_key() - # Export name - container_stats['name'] = nativestr(container.name) - # Container Id - container_stats['Id'] = container.id - # Container Image - container_stats['Image'] = container.image.tags - # Global stats (from attrs) - # Container Status - container_stats['Status'] = container.attrs['State']['Status'] - # Container Command (see #1912) - container_stats['Command'] = [] - if container.attrs['Config'].get('Entrypoint', None): - container_stats['Command'].extend(container.attrs['Config'].get('Entrypoint', [])) - if container.attrs['Config'].get('Cmd', None): - container_stats['Command'].extend(container.attrs['Config'].get('Cmd', [])) - if not container_stats['Command']: - container_stats['Command'] = None - # Standards stats - # See https://docs.docker.com/engine/api/v1.41/#operation/ContainerStats - # Be aware that the API can change... 
(example see issue #1857) - if container_stats['Status'] in ('running', 'paused'): - # CPU - container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_docker_list[container.id].stats) - container_stats['cpu_percent'] = container_stats['cpu'].get('total', None) - # MEM - container_stats['memory'] = self.get_docker_memory( - container.id, self.thread_docker_list[container.id].stats - ) - container_stats['memory_usage'] = container_stats['memory'].get('usage', None) - if container_stats['memory'].get('cache', None) is not None: - container_stats['memory_usage'] -= container_stats['memory']['cache'] - # IO - container_stats['io'] = self.get_docker_io(container.id, self.thread_docker_list[container.id].stats) - container_stats['io_r'] = container_stats['io'].get('ior', None) - container_stats['io_w'] = container_stats['io'].get('iow', None) - # NET - container_stats['network'] = self.get_docker_network( - container.id, self.thread_docker_list[container.id].stats - ) - container_stats['network_rx'] = container_stats['network'].get('rx', None) - container_stats['network_tx'] = container_stats['network'].get('tx', None) - # Uptime - container_stats['Uptime'] = pretty_date( - # parser.parse(container.attrs['State']['StartedAt']).replace(tzinfo=None) - parser.parse(container.attrs['State']['StartedAt']) - .astimezone(tz.tzlocal()) - .replace(tzinfo=None) - ) - else: - container_stats['cpu'] = {} - container_stats['cpu_percent'] = None - container_stats['memory'] = {} - container_stats['memory_percent'] = None - container_stats['io'] = {} - container_stats['io_r'] = None - container_stats['io_w'] = None - container_stats['network'] = {} - container_stats['network_rx'] = None - container_stats['network_tx'] = None - container_stats['Uptime'] = None - # Add current container stats to the stats list - stats['containers'].append(container_stats) - - return stats + version, containers = self.docker_extension.update(all_tag=self._all_tag()) + return {"version": 
version, "containers": containers} def update_podman(self): """Update Podman stats.""" From ebb26e6a708ff44a3809068b2d025a0993427dfa Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 08/21] chg: containers Plugin - switch to basic podman extension unit Pod support is still missing --- glances/plugins/containers/glances_podman.py | 258 ++++++++++++ glances/plugins/glances_containers.py | 419 +------------------ 2 files changed, 281 insertions(+), 396 deletions(-) create mode 100644 glances/plugins/containers/glances_podman.py diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py new file mode 100644 index 00000000..992ca2a1 --- /dev/null +++ b/glances/plugins/containers/glances_podman.py @@ -0,0 +1,258 @@ +"""Podman Extension unit for Glances' Containers plugin.""" +from datetime import datetime + +from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float +from glances.logger import logger +from glances.plugins.containers.stats_fetcher import StatsFetcher + +# Podman library (optional and Linux-only) +# https://pypi.org/project/podman/ +try: + import podman +except Exception as e: + import_podman_error_tag = True + # Display debug message if import KeyError + logger.debug("Error loading Podman deps Lib. 
Podman feature in the Containers plugin is disabled ({})".format(e)) +else: + import_podman_error_tag = False + + +class PodmanStatsFetcher(StatsFetcher): + @property + def activity_stats(self): + io_stats = self._get_io_stats() + cpu_stats = self._get_cpu_stats() + memory_stats = self._get_memory_stats() + network_stats = self._get_network_stats() + + computed_stats = { + "io": io_stats or {}, + "memory": memory_stats or {}, + "network": network_stats or {}, + "cpu": cpu_stats or {"total": 0.0}, + } + return computed_stats + + def _get_cpu_stats(self): + """Return the container CPU usage. + + Output: a dict {'total': 1.49} + """ + if "cpu_percent" not in self.stats: + logger.debug("containers plugin - Missing CPU usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + cpu_usage = string_value_to_float(self.stats["cpu_percent"].rstrip("%")) + return {"total": cpu_usage} + + def _get_memory_stats(self): + """Return the container MEMORY. + + Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} + """ + if "mem_usage" not in self.stats or "/" not in self.stats["mem_usage"]: + logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + memory_usage_str = self.stats["mem_usage"] + usage_str, limit_str = memory_usage_str.split("/") + + try: + usage = string_value_to_float(usage_str) + limit = string_value_to_float(limit_str) + except ValueError as e: + logger.debug("containers plugin - Compute MEM usage failed for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + return {"usage": usage, "limit": limit} + + def _get_network_stats(self): + """Return the container network usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + rx: Number of bytes received + tx: Number of bytes transmitted + """ + if "net_io" not in self.stats or "/" not in self.stats["net_io"]: + logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + net_io_str = self.stats["net_io"] + rx_str, tx_str = net_io_str.split("/") + + try: + rx = string_value_to_float(rx_str) + tx = string_value_to_float(tx_str) + except ValueError as e: + logger.debug("containers plugin - Compute Network usage failed for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure + return {"rx": rx, "tx": tx, "time_since_update": 1} + + def _get_io_stats(self): + """Return the container IO usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + ior: Number of bytes read + iow: Number of bytes written + """ + if "block_io" not in self.stats or "/" not in self.stats["block_io"]: + logger.debug("containers plugin - Missing BlockIO usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + block_io_str = self.stats["block_io"] + ior_str, iow_str = block_io_str.split("/") + + try: + ior = string_value_to_float(ior_str) + iow = string_value_to_float(iow_str) + except ValueError as e: + logger.debug("containers plugin - Compute BlockIO usage failed for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure + return {"ior": ior, "iow": iow, "time_since_update": 1} + + +class PodmanContainersExtension: + """Glances' Containers Plugin's Docker Extension unit""" + + CONTAINER_ACTIVE_STATUS = ['running', 'paused'] + + def __init__(self, podman_sock): + if import_podman_error_tag: + raise Exception("Missing libs required to run Podman Extension (Containers)") + + self.client = None + self.ext_name = "Podman (Containers)" + self.podman_sock = podman_sock + self.stats_fetchers = {} + self._version = {} + self.connect() + + def connect(self): + """Connect to Podman.""" + try: + self.client = podman.PodmanClient(base_url=self.podman_sock) + except Exception as e: + logger.error("{} plugin - Can not connect to Podman ({})".format(self.ext_name, e)) + + try: + version_podman = self.client.version() + except Exception as e: + logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) + else: + self._version = { + 'Version': version_podman['Version'], + 'ApiVersion': version_podman['ApiVersion'], + 'MinAPIVersion': version_podman['MinAPIVersion'], + } + + def stop(self): + # Stop all streaming threads + for t in itervalues(self.stats_fetchers): + 
t.stop() + + def update(self, all_tag): + """Update Podman stats using the input method.""" + + try: + version_stats = self.client.version() + except Exception as e: + # Correct issue#649 + logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) + return {}, [] + + # Update current containers list + try: + # Issue #1152: Podman module doesn't export details about stopped containers + # The Containers/all key of the configuration file should be set to True + containers = self.client.containers.list(all=all_tag) + except Exception as e: + logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) + return version_stats, [] + + # Start new thread for new container + for container in containers: + if container.id not in self.stats_fetchers: + # StatsFetcher did not exist in the internal dict + # Create it, add it to the internal dict + logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) + self.stats_fetchers[container.id] = PodmanStatsFetcher(container) + + # Stop threads for non-existing containers + absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) + for container_id in absent_containers: + # Stop the StatsFetcher + logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12])) + self.stats_fetchers[container_id].stop() + # Delete the StatsFetcher from the dict + del self.stats_fetchers[container_id] + + # Get stats for all containers + container_stats = [self.generate_stats(container) for container in containers] + return version_stats, container_stats + + @property + def key(self): + """Return the key of the list.""" + return 'name' + + def generate_stats(self, container): + # Init the stats for the current container + stats = { + 'key': self.key, + # Export name + 'name': nativestr(container.name), + # Container Id + 'Id': container.id, + # Container Image + 'Image': 
str(container.image.tags), + # Container Status (from attrs) + 'Status': container.attrs['State'], + 'Created': container.attrs['Created'], + 'Command': container.attrs.get('Command') or [], + } + + if stats['Status'] in self.CONTAINER_ACTIVE_STATUS: + stats['StartedAt'] = datetime.fromtimestamp(container.attrs['StartedAt']) + stats_fetcher = self.stats_fetchers[container.id] + activity_stats = stats_fetcher.activity_stats + stats.update(activity_stats) + + # Additional fields + stats['cpu_percent'] = stats["cpu"]['total'] + stats['memory_usage'] = stats["memory"].get('usage') + if stats['memory'].get('cache') is not None: + stats['memory_usage'] -= stats['memory']['cache'] + stats['io_r'] = stats['io'].get('ior') + stats['io_w'] = stats['io'].get('iow') + stats['network_rx'] = stats['network'].get('rx') + stats['network_tx'] = stats['network'].get('tx') + stats['Uptime'] = pretty_date(stats['StartedAt']) + else: + stats['io'] = {} + stats['cpu'] = {} + stats['memory'] = {} + stats['network'] = {} + stats['io_r'] = None + stats['io_w'] = None + stats['cpu_percent'] = None + stats['memory_percent'] = None + stats['network_rx'] = None + stats['network_tx'] = None + stats['Uptime'] = None + + return stats diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index 74804c82..de1a89fc 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -10,26 +10,17 @@ """Containers plugin.""" import os -import threading -import time from copy import deepcopy +from typing import Optional -from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float from glances.logger import logger +from glances.plugins.containers.glances_docker import ( + DockerContainersExtension, import_docker_error_tag) +from glances.plugins.containers.glances_podman import ( + PodmanContainersExtension, import_podman_error_tag) from glances.plugins.glances_plugin import GlancesPlugin -from 
glances.processes import sort_stats as sort_stats_processes, glances_processes -from glances.plugins.containers.glances_docker import import_docker_error_tag, DockerContainersExtension - -# Podman library (optional and Linux-only) -# https://pypi.org/project/podman/ -try: - from podman import PodmanClient -except Exception as e: - import_podman_error_tag = True - # Display debug message if import KeyError - logger.debug("Error loading Podman deps Lib. Podman feature in the Docker plugin is disabled ({})".format(e)) -else: - import_podman_error_tag = False +from glances.processes import glances_processes +from glances.processes import sort_stats as sort_stats_processes # Define the items history list (list of items to add to history) # TODO: For the moment limited to the CPU. Had to change the graph exports @@ -89,34 +80,12 @@ class Plugin(GlancesPlugin): # Init the Docker API self.docker_extension = DockerContainersExtension() if not import_docker_error_tag else None - self.docker_extension: DockerContainersExtension # Init the Podman API - self._version_podman = {} - if not import_podman_error_tag: - self.podman_client = self.connect_podman() - else: + if import_podman_error_tag: self.podman_client = None - - # Dict of Docker thread (to grab stats asynchronously, one thread is created per container) - # key: Container Id - # value: instance of ThreadContainerGrabber - self.thread_docker_list = {} - - # Dict of Podman thread (to grab stats asynchronously, one thread is created per container) - # key: Container Id - # value: instance of ThreadContainerGrabber - self.thread_podman_list = {} - - # Dict of Network stats (Storing previous network stats to compute Rx/s and Tx/s) - # key: Container Id - # value: network stats dict - self.network_old = {} - - # Dict of Disk IO stats (Storing previous disk_io stats to compute Rx/s and Tx/s) - # key: Container Id - # value: network stats dict - self.io_old = {} + else: + self.podman_client = 
PodmanContainersExtension(podman_sock=self._podman_sock()) # Sort key self.sort_key = None @@ -125,6 +94,17 @@ class Plugin(GlancesPlugin): self.update() self.refresh_timer.set(0) + def _podman_sock(self): + """Return the podman sock. + Could be desfined in the [docker] section thanks to the podman_sock option. + Default value: unix:///run/user/1000/podman/podman.sock + """ + conf_podman_sock = self.get_conf_value('podman_sock') + if len(conf_podman_sock) == 0: + return "unix:///run/user/1000/podman/podman.sock" + else: + return conf_podman_sock[0] + def exit(self): """Overwrite the exit method to close threads.""" if self.docker_extension: @@ -155,50 +135,6 @@ class Plugin(GlancesPlugin): return ret - def connect_docker(self): - """Connect to the Docker server.""" - try: - # Do not use the timeout option (see issue #1878) - ret = docker.from_env() - except Exception as e: - logger.error("docker plugin - Can not connect to Docker ({})".format(e)) - ret = None - - return ret - - def connect_podman(self): - """Connect to Podman.""" - try: - ret = PodmanClient(base_url=self._podman_sock()) - except Exception as e: - logger.error("docker plugin - Can not connect to Podman ({})".format(e)) - ret = None - - try: - version_podman = ret.version() - except Exception as e: - logger.error("{} plugin - Cannot get Podman version ({})".format(self.plugin_name, e)) - ret = None - else: - self._version_podman = { - 'Version': version_podman['Version'], - 'ApiVersion': version_podman['ApiVersion'], - 'MinAPIVersion': version_podman['MinAPIVersion'], - } - - return ret - - def _podman_sock(self): - """Return the podman sock. - Could be desfined in the [docker] section thanks to the podman_sock option. 
- Default value: unix:///run/user/1000/podman/podman.sock - """ - conf_podman_sock = self.get_conf_value('podman_sock') - if len(conf_podman_sock) == 0: - return "unix:///run/user/1000/podman/podman.sock" - else: - return conf_podman_sock[0] - def _all_tag(self): """Return the all tag of the Glances/Docker configuration file. @@ -247,261 +183,8 @@ class Plugin(GlancesPlugin): def update_podman(self): """Update Podman stats.""" - # Init new docker stats - stats = self.get_init_value() - - # Podman version - # Request very long so it is only done once in the connect_podman method - stats['version'] = self._version_podman - - # Update current containers list - try: - containers = self.podman_client.containers.list() or [] - except Exception as e: - logger.error("{} plugin - Cannot get Podman containers list ({})".format(self.plugin_name, e)) - return stats - - # And the stats for each container - try: - # Return example: - # [{'CPU': '3.21%', - # 'MemUsage': '352.3kB / 7.836GB', 'MemUsageBytes': '344KiB / 7.298GiB', 'Mem': '0.00%', - # 'NetIO': '-- / --', - # 'BlockIO': '-- / --', - # 'PIDS': '1', 'Pod': '8d0f1c783def', 'CID': '9491515251ed', - # 'Name': '8d0f1c783def-infra'}, ... ] - podman_stats = {s['CID'][:12]: s for s in self.podman_client.pods.stats()} - except Exception as e: - logger.error("{} plugin - Cannot get Podman containers list ({})".format(self.plugin_name, e)) - return stats - - # Get stats for all containers - stats['containers'] = [] - for container in containers: - # Shall we display the stats ? 
- if not self.is_display(nativestr(container.name)): - continue - - # Init the stats for the current container - container_stats = {} - # The key is the container name and not the Id - container_stats['key'] = self.get_key() - # Export name - container_stats['name'] = nativestr(container.name) - # Container Id - container_stats['Id'] = container.id - container_stats['IdShort'] = container.id[:12] - # Container Image - container_stats['Image'] = container.image.tags - # Container Status (from attrs) - container_stats['Status'] = container.attrs['State'] - # Container Command - container_stats['Command'] = container.attrs['Command'] - # Standards stats - if container_stats['Status'] in ('running', 'paused'): - # CPU - # Convert: '3.21%' to 3.21 - container_stats['cpu_percent'] = float(podman_stats[container_stats['IdShort']]['CPU'][:-1]) - container_stats['cpu'] = {'total': container_stats['cpu_percent']} - # MEMORY - # Convert 'MemUsage': '352.3kB / 7.836GB' to bytes - # Yes it is ungly but the API do not expose the memory limit in bytes... 
- container_stats['memory'] = { - 'usage': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[0]), - 'limit': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[1]), - } - container_stats['memory_percent'] = float(podman_stats[container_stats['IdShort']]['Mem'][:-1]) - # Not available for the moment: https://github.com/containers/podman/issues/11695 - container_stats['io'] = {} - container_stats['io_r'] = string_value_to_float(podman_stats[container_stats['IdShort']]['BlockIO'].split(' / ')[0]) - container_stats['io_w'] = string_value_to_float(podman_stats[container_stats['IdShort']]['BlockIO'].split(' / ')[1]) - container_stats['network'] = {} - container_stats['network_rx'] = string_value_to_float(podman_stats[container_stats['IdShort']]['NetIO'].split(' / ')[0]) - container_stats['network_tx'] = string_value_to_float(podman_stats[container_stats['IdShort']]['NetIO'].split(' / ')[1]) - # - container_stats['Uptime'] = None - else: - container_stats['cpu'] = {} - container_stats['cpu_percent'] = None - container_stats['memory'] = {} - container_stats['memory_percent'] = None - container_stats['io'] = {} - container_stats['io_r'] = None - container_stats['io_w'] = None - container_stats['network'] = {} - container_stats['network_rx'] = None - container_stats['network_tx'] = None - container_stats['Uptime'] = None - # Add current container stats to the stats list - stats['containers'].append(container_stats) - - return stats - - def get_docker_cpu(self, container_id, all_stats): - """Return the container CPU usage. 
- - Input: id is the full container id - all_stats is the output of the stats method of the Docker API - Output: a dict {'total': 1.49} - """ - cpu_stats = {'total': 0.0} - - try: - cpu = { - 'system': all_stats['cpu_stats']['system_cpu_usage'], - 'total': all_stats['cpu_stats']['cpu_usage']['total_usage'], - } - precpu = { - 'system': all_stats['precpu_stats']['system_cpu_usage'], - 'total': all_stats['precpu_stats']['cpu_usage']['total_usage'], - } - # Issue #1857 - # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil - # then for compatibility with older daemons the length of - # the corresponding cpu_usage.percpu_usage array should be used. - cpu['nb_core'] = all_stats['cpu_stats'].get('online_cpus', None) - if cpu['nb_core'] is None: - cpu['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or []) - except KeyError as e: - logger.debug("docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - else: - try: - cpu_delta = cpu['total'] - precpu['total'] - system_cpu_delta = cpu['system'] - precpu['system'] - # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0 - cpu_stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0 - except TypeError as e: - logger.debug("docker plugin - Cannot compute CPU usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - - # Return the stats - return cpu_stats - - def get_docker_memory(self, container_id, all_stats): - """Return the container MEMORY. 
- - Input: id is the full container id - all_stats is the output of the stats method of the Docker API - Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} - """ - memory_stats = {} - # Read the stats - try: - # Mandatory fields - memory_stats['usage'] = all_stats['memory_stats']['usage'] - memory_stats['limit'] = all_stats['memory_stats']['limit'] - # Issue #1857 - # Some stats are not always available in ['memory_stats']['stats'] - if 'rss' in all_stats['memory_stats']['stats']: - memory_stats['rss'] = all_stats['memory_stats']['stats']['rss'] - elif 'total_rss' in all_stats['memory_stats']['stats']: - memory_stats['rss'] = all_stats['memory_stats']['stats']['total_rss'] - else: - memory_stats['rss'] = None - memory_stats['cache'] = all_stats['memory_stats']['stats'].get('cache', None) - memory_stats['max_usage'] = all_stats['memory_stats'].get('max_usage', None) - except (KeyError, TypeError) as e: - # all_stats do not have MEM information - logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - # Return the stats - return memory_stats - - def get_docker_network(self, container_id, all_stats): - """Return the container network usage using the Docker API (v1.0 or higher). - - Input: id is the full container id - Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. - with: - time_since_update: number of seconds elapsed between the latest grab - rx: Number of bytes received - tx: Number of bytes transmitted - """ - # Init the returned dict - network_new = {} - - # Read the rx/tx stats (in bytes) - try: - net_stats = all_stats["networks"] - except KeyError as e: - # all_stats do not have NETWORK information - logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - # No fallback available... 
- return network_new - - # Previous network interface stats are stored in the self.network_old variable - # By storing time data we enable Rx/s and Tx/s calculations in the XML/RPC API, which would otherwise - # be overly difficult work for users of the API - try: - network_new['cumulative_rx'] = net_stats["eth0"]["rx_bytes"] - network_new['cumulative_tx'] = net_stats["eth0"]["tx_bytes"] - except KeyError as e: - # all_stats do not have INTERFACE information - logger.debug( - "docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e) - ) - logger.debug(all_stats) - else: - network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id)) - if container_id in self.network_old: - network_new['rx'] = network_new['cumulative_rx'] - self.network_old[container_id]['cumulative_rx'] - network_new['tx'] = network_new['cumulative_tx'] - self.network_old[container_id]['cumulative_tx'] - - # Save stats to compute next bitrate - self.network_old[container_id] = network_new - - # Return the stats - return network_new - - def get_docker_io(self, container_id, all_stats): - """Return the container IO usage using the Docker API (v1.0 or higher). - - Input: id is the full container id - Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. - with: - time_since_update: number of seconds elapsed between the latest grab - ior: Number of bytes read - iow: Number of bytes written - """ - # Init the returned dict - io_new = {} - - # Read the ior/iow stats (in bytes) - try: - io_stats = all_stats["blkio_stats"] - except KeyError as e: - # all_stats do not have io information - logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - # No fallback available... 
- return io_new - - # Previous io interface stats are stored in the self.io_old variable - # By storing time data we enable IoR/s and IoW/s calculations in the - # XML/RPC API, which would otherwise be overly difficult work - # for users of the API - try: - io_service_bytes_recursive = io_stats['io_service_bytes_recursive'] - - # Read IOR and IOW value in the structure list of dict - io_new['cumulative_ior'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value'] - io_new['cumulative_iow'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value'] - except (TypeError, IndexError, KeyError, AttributeError) as e: - # all_stats do not have io information - logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e)) - else: - io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id)) - if container_id in self.io_old: - io_new['ior'] = io_new['cumulative_ior'] - self.io_old[container_id]['cumulative_ior'] - io_new['iow'] = io_new['cumulative_iow'] - self.io_old[container_id]['cumulative_iow'] - - # Save stats to compute next bitrate - self.io_old[container_id] = io_new - - # Return the stats - return io_new + version, containers = self.podman_client.update(all_tag=self._all_tag()) + return {"version": version, "containers": containers} def get_user_ticks(self): """Return the user ticks by reading the environment variable.""" @@ -701,62 +384,6 @@ class Plugin(GlancesPlugin): return 'CAREFUL' -class ThreadContainerGrabber(threading.Thread): - """ - Specific thread to grab container stats. - - stats is a dict - """ - - def __init__(self, container): - """Init the class. 
- - container: instance of Container returned by Docker or Podman client - """ - super(ThreadContainerGrabber, self).__init__() - # Event needed to stop properly the thread - self._stopper = threading.Event() - # The docker-py return stats as a stream - self._container = container - # The class return the stats as a dict - self._stats = {} - logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) - - def run(self): - """Grab the stats. - - Infinite loop, should be stopped by calling the stop() method - """ - try: - for i in self._container.stats(decode=True): - self._stats = i - time.sleep(0.1) - if self.stopped(): - break - except Exception as e: - logger.debug("docker plugin - Exception thrown during run ({})".format(e)) - self.stop() - - @property - def stats(self): - """Stats getter.""" - return self._stats - - @stats.setter - def stats(self, value): - """Stats setter.""" - self._stats = value - - def stop(self, timeout=None): - """Stop the thread.""" - logger.debug("docker plugin - Close thread for container {}".format(self._container.name)) - self._stopper.set() - - def stopped(self): - """Return True is the thread is stopped.""" - return self._stopper.is_set() - - def sort_docker_stats(stats): # Sort Docker stats using the same function than processes sort_by = glances_processes.sort_key From 928752a453cc027a9fae1f49f6cf00be66425d64 Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Wed, 15 Feb 2023 00:08:02 +0530 Subject: [PATCH 09/21] fix: containers (Podman) - wrong response format Ref: https://docs.podman.io/en/latest/Reference.html --- glances/plugins/containers/glances_podman.py | 136 +++++-------------- 1 file changed, 37 insertions(+), 99 deletions(-) diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index 992ca2a1..ff2119d4 100644 --- a/glances/plugins/containers/glances_podman.py +++ 
b/glances/plugins/containers/glances_podman.py @@ -1,7 +1,7 @@ """Podman Extension unit for Glances' Containers plugin.""" from datetime import datetime -from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float +from glances.compat import iterkeys, itervalues, nativestr, pretty_date from glances.logger import logger from glances.plugins.containers.stats_fetcher import StatsFetcher @@ -18,112 +18,50 @@ else: class PodmanStatsFetcher(StatsFetcher): + MANDATORY_FIELDS = ["CPU", "MemUsage", "MemLimit", "NetInput", "NetOutput", "BlockInput", "BlockOutput"] + + @property + def stats(self): + if self._raw_stats["Error"]: + logger.error("containers plugin - Stats fetching failed: {}".format(self._raw_stats["Error"])) + logger.error(self._raw_stats) + + return self._raw_stats["Stats"][0] + @property def activity_stats(self): - io_stats = self._get_io_stats() - cpu_stats = self._get_cpu_stats() - memory_stats = self._get_memory_stats() - network_stats = self._get_network_stats() + result_stats = {"cpu": {}, "memory": {}, "io": {}, "network": {}} - computed_stats = { - "io": io_stats or {}, - "memory": memory_stats or {}, - "network": network_stats or {}, - "cpu": cpu_stats or {"total": 0.0}, - } - return computed_stats - - def _get_cpu_stats(self): - """Return the container CPU usage. - - Output: a dict {'total': 1.49} - """ - if "cpu_percent" not in self.stats: - logger.debug("containers plugin - Missing CPU usage fields for container {}".format(self._container.id)) + if any(field not in self.stats for field in self.MANDATORY_FIELDS): + logger.debug("containers plugin - Missing mandatory fields for container {}".format(self._container.id)) logger.debug(self.stats) - return None - - cpu_usage = string_value_to_float(self.stats["cpu_percent"].rstrip("%")) - return {"total": cpu_usage} - - def _get_memory_stats(self): - """Return the container MEMORY. 
- - Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} - """ - if "mem_usage" not in self.stats or "/" not in self.stats["mem_usage"]: - logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - memory_usage_str = self.stats["mem_usage"] - usage_str, limit_str = memory_usage_str.split("/") + return result_stats try: - usage = string_value_to_float(usage_str) - limit = string_value_to_float(limit_str) - except ValueError as e: - logger.debug("containers plugin - Compute MEM usage failed for container {}".format(self._container.id)) + cpu_usage = float(self.stats.get("CPU", 0)) + + mem_usage = float(self.stats["MemUsage"]) + mem_limit = float(self.stats["MemLimit"]) + + rx = float(self.stats["NetInput"]) + tx = float(self.stats["NetOutput"]) + + ior = float(self.stats["BlockInput"]) + iow = float(self.stats["BlockOutput"]) + + # Hardcode `time_since_update` to 1 as podman already sends the calculated rate + result_stats = { + "cpu": {"total": cpu_usage}, + "memory": {"usage": mem_usage, "limit": mem_limit}, + "io": {"ior": ior, "iow": iow, "time_since_update": 1}, + "network": {"rx": rx, "tx": tx, "time_since_update": 1}, + } + except ValueError: + logger.debug("containers plugin - Non float stats values found for container {}".format(self._container.id)) logger.debug(self.stats) - return None + return result_stats - return {"usage": usage, "limit": limit} - - def _get_network_stats(self): - """Return the container network usage using the Docker API (v1.0 or higher). - - Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. 
- with: - time_since_update: number of seconds elapsed between the latest grab - rx: Number of bytes received - tx: Number of bytes transmitted - """ - if "net_io" not in self.stats or "/" not in self.stats["net_io"]: - logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - net_io_str = self.stats["net_io"] - rx_str, tx_str = net_io_str.split("/") - - try: - rx = string_value_to_float(rx_str) - tx = string_value_to_float(tx_str) - except ValueError as e: - logger.debug("containers plugin - Compute Network usage failed for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure - return {"rx": rx, "tx": tx, "time_since_update": 1} - - def _get_io_stats(self): - """Return the container IO usage using the Docker API (v1.0 or higher). - - Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. 
- with: - time_since_update: number of seconds elapsed between the latest grab - ior: Number of bytes read - iow: Number of bytes written - """ - if "block_io" not in self.stats or "/" not in self.stats["block_io"]: - logger.debug("containers plugin - Missing BlockIO usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - block_io_str = self.stats["block_io"] - ior_str, iow_str = block_io_str.split("/") - - try: - ior = string_value_to_float(ior_str) - iow = string_value_to_float(iow_str) - except ValueError as e: - logger.debug("containers plugin - Compute BlockIO usage failed for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure - return {"ior": ior, "iow": iow, "time_since_update": 1} + return result_stats class PodmanContainersExtension: From 7c3fe93226484ceb3d4ff4b13978593b67a1cd89 Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Sat, 18 Feb 2023 23:37:16 +0530 Subject: [PATCH 10/21] chg: containers Plugin - basic pod support StatsFetcher -> StatsStreamer --- glances/plugins/containers/glances_docker.py | 85 ++++----- glances/plugins/containers/glances_podman.py | 191 ++++++++++++++++--- glances/plugins/containers/stats_fetcher.py | 72 ------- glances/plugins/containers/stats_streamer.py | 76 ++++++++ glances/plugins/glances_containers.py | 26 ++- 5 files changed, 299 insertions(+), 151 deletions(-) delete mode 100644 glances/plugins/containers/stats_fetcher.py create mode 100644 glances/plugins/containers/stats_streamer.py diff --git a/glances/plugins/containers/glances_docker.py b/glances/plugins/containers/glances_docker.py index 11ea2ef9..a642db9f 100644 --- a/glances/plugins/containers/glances_docker.py +++ b/glances/plugins/containers/glances_docker.py @@ -1,10 +1,9 @@ """Docker Extension unit for Glances' Containers plugin.""" 
-import threading import time from glances.compat import iterkeys, itervalues, nativestr, pretty_date from glances.logger import logger -from glances.plugins.containers.stats_fetcher import StatsFetcher +from glances.plugins.containers.stats_streamer import StatsStreamer # Docker-py library (optional and Linux-only) # https://github.com/docker/docker-py @@ -19,48 +18,43 @@ else: import_docker_error_tag = False -class DockerStatsFetcher(StatsFetcher): +class DockerStatsFetcher: MANDATORY_MEMORY_FIELDS = ["usage", 'limit'] def __init__(self, container): - super().__init__(container) - # Lock to avoid the daemon thread updating stats when main thread reads the stats - self._stats_lock = threading.Lock() + self._container = container # Previous computes stats are stored in the self._old_computed_stats variable - # By storing time data we enable IoR/s and IoW/s calculations in the XML/RPC API, which would otherwise - # be overly difficult work for users of the API + # We store time data to enable IoR/s & IoW/s calculations to avoid complexity for consumers of the APIs exposed. 
self._old_computed_stats = {} # Last time when output stats (results) were computed - self._last_stats_output_time = 0 - # Last time when the raw_stats were updated by worker thread - self._last_raws_stats_update_time = 1 + self._last_stats_computed_time = 0 + + # Threaded Streamer + stats_iterable = container.stats(decode=True) + self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) + + def _log_debug(self, msg, exception=None): + logger.debug("containers (Docker) ID: {} - {} ({}) ".format(self._container.id, msg, exception)) + logger.debug(self._streamer.stats) + + def stop(self): + self._streamer.stop() @property def activity_stats(self): """Activity Stats - Each successive access of activity_stats will cause computation of activity_stats from raw_stats + Each successive access of activity_stats will cause computation of activity_stats """ computed_activity_stats = self._compute_activity_stats() self._old_computed_stats = computed_activity_stats - self._last_stats_output_time = time.time() + self._last_stats_computed_time = time.time() return computed_activity_stats - def _pre_raw_stats_update_hook(self): - self._stats_lock.acquire() - - def _post_raw_stats_update_hook(self): - self._last_raws_stats_update_time = time.time() - self._stats_lock.release() - - @property - def time_since_update(self): - return self._last_raws_stats_update_time - self._last_stats_output_time - def _compute_activity_stats(self): - with self._stats_lock: + with self._streamer.result_lock: io_stats = self._get_io_stats() cpu_stats = self._get_cpu_stats() memory_stats = self._get_memory_stats() @@ -74,6 +68,11 @@ class DockerStatsFetcher(StatsFetcher): } return computed_stats + @property + def time_since_update(self): + # In case no update, default to 1 + return max(1, self._streamer.last_update_time - self._last_stats_computed_time) + def _get_cpu_stats(self): """Return the container CPU usage. 
@@ -82,8 +81,8 @@ class DockerStatsFetcher(StatsFetcher): stats = {'total': 0.0} try: - cpu_stats = self.stats['cpu_stats'] - precpu_stats = self.stats['precpu_stats'] + cpu_stats = self._streamer.stats['cpu_stats'] + precpu_stats = self._streamer.stats['precpu_stats'] cpu = {'system': cpu_stats['system_cpu_usage'], 'total': cpu_stats['cpu_usage']['total_usage']} precpu = {'system': precpu_stats['system_cpu_usage'], 'total': precpu_stats['cpu_usage']['total_usage']} @@ -93,8 +92,7 @@ class DockerStatsFetcher(StatsFetcher): # the corresponding cpu_usage.percpu_usage array should be used. cpu['nb_core'] = cpu_stats.get('online_cpus') or len(cpu_stats['cpu_usage']['percpu_usage'] or []) except KeyError as e: - logger.debug("containers plugin - Can't grab CPU stat for container {} ({})".format(self._container.id, e)) - logger.debug(self.stats) + self._log_debug("Can't grab CPU stats", e) return None try: @@ -103,9 +101,7 @@ class DockerStatsFetcher(StatsFetcher): # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0 stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0 except TypeError as e: - msg = "containers plugin - Can't compute CPU usage for container {} ({})".format(self._container.id, e) - logger.debug(msg) - logger.debug(self.stats) + self._log_debug("Can't compute CPU usage", e) return None # Return the stats @@ -116,12 +112,11 @@ class DockerStatsFetcher(StatsFetcher): Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} """ - memory_stats = self.stats.get('memory_stats') + memory_stats = self._streamer.stats.get('memory_stats') # Checks for memory_stats & mandatory fields if not memory_stats or any(field not in memory_stats for field in self.MANDATORY_MEMORY_FIELDS): - logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) + self._log_debug("Missing MEM usage fields") return None stats = {field: memory_stats[field] for 
field in self.MANDATORY_MEMORY_FIELDS} @@ -132,9 +127,7 @@ class DockerStatsFetcher(StatsFetcher): stats['max_usage'] = detailed_stats.get('max_usage') stats['cache'] = detailed_stats.get('cache') except (KeyError, TypeError) as e: - # self.stats do not have MEM information - logger.debug("containers plugin - Can't grab MEM usage for container {} ({})".format(self._container.id, e)) - logger.debug(self.stats) + self._log_debug("Can't grab MEM usage", e) # stats do not have MEM information return None # Return the stats @@ -149,12 +142,11 @@ class DockerStatsFetcher(StatsFetcher): rx: Number of bytes received tx: Number of bytes transmitted """ - eth0_stats = self.stats.get('networks', {}).get('eth0') + eth0_stats = self._streamer.stats.get('networks', {}).get('eth0') # Checks for net_stats & mandatory fields if not eth0_stats or any(field not in eth0_stats for field in ['rx_bytes', 'tx_bytes']): - logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) + self._log_debug("Missing Network usage fields") return None # Read the rx/tx stats (in bytes) @@ -179,12 +171,11 @@ class DockerStatsFetcher(StatsFetcher): ior: Number of bytes read iow: Number of bytes written """ - io_service_bytes_recursive = self.stats.get('blkio_stats', {}).get('io_service_bytes_recursive') + io_service_bytes_recursive = self._streamer.stats.get('blkio_stats', {}).get('io_service_bytes_recursive') # Checks for net_stats if not io_service_bytes_recursive: - logger.debug("containers plugin - Missing blockIO usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) + self._log_debug("Missing blockIO usage fields") return None # Read the ior/iow stats (in bytes) @@ -193,11 +184,7 @@ class DockerStatsFetcher(StatsFetcher): cumulative_ior = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value'] cumulative_iow = [i for i in io_service_bytes_recursive if i['op'].lower() 
== 'write'][0]['value'] except (TypeError, IndexError, KeyError, AttributeError) as e: - # self.stats do not have io information - logger.debug( - "containers plugin - Can't grab blockIO usage for container {} ({})".format(self._container.id, e) - ) - logger.debug(self.stats) + self._log_debug("Can't grab blockIO usage", e) # stats do not have io information return None stats = {'cumulative_ior': cumulative_ior, 'cumulative_iow': cumulative_iow} diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index ff2119d4..602d6d52 100644 --- a/glances/plugins/containers/glances_podman.py +++ b/glances/plugins/containers/glances_podman.py @@ -1,9 +1,10 @@ """Podman Extension unit for Glances' Containers plugin.""" +import json from datetime import datetime -from glances.compat import iterkeys, itervalues, nativestr, pretty_date +from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float from glances.logger import logger -from glances.plugins.containers.stats_fetcher import StatsFetcher +from glances.plugins.containers.stats_streamer import StatsStreamer # Podman library (optional and Linux-only) # https://pypi.org/project/podman/ @@ -17,37 +18,51 @@ else: import_podman_error_tag = False -class PodmanStatsFetcher(StatsFetcher): +class PodmanContainerStatsFetcher: MANDATORY_FIELDS = ["CPU", "MemUsage", "MemLimit", "NetInput", "NetOutput", "BlockInput", "BlockOutput"] + def __init__(self, container): + self._container = container + + # Threaded Streamer + stats_iterable = container.stats(decode=True) + self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) + + def _log_debug(self, msg, exception=None): + logger.debug("containers (Podman) ID: {} - {} ({})".format(self._container.id, msg, exception)) + logger.debug(self._streamer.stats) + + def stop(self): + self._streamer.stop() + @property def stats(self): - if self._raw_stats["Error"]: - logger.error("containers plugin - 
Stats fetching failed: {}".format(self._raw_stats["Error"])) - logger.error(self._raw_stats) + stats = self._streamer.stats + if stats["Error"]: + self._log_debug("Stats fetching failed", stats["Error"]) - return self._raw_stats["Stats"][0] + return stats["Stats"][0] @property def activity_stats(self): result_stats = {"cpu": {}, "memory": {}, "io": {}, "network": {}} + api_stats = self.stats - if any(field not in self.stats for field in self.MANDATORY_FIELDS): - logger.debug("containers plugin - Missing mandatory fields for container {}".format(self._container.id)) - logger.debug(self.stats) + if any(field not in api_stats for field in self.MANDATORY_FIELDS): + self._log_debug("Missing mandatory fields") return result_stats try: - cpu_usage = float(self.stats.get("CPU", 0)) + cpu_usage = float(api_stats.get("CPU", 0)) - mem_usage = float(self.stats["MemUsage"]) - mem_limit = float(self.stats["MemLimit"]) + mem_usage = float(api_stats["MemUsage"]) + mem_limit = float(api_stats["MemLimit"]) - rx = float(self.stats["NetInput"]) - tx = float(self.stats["NetOutput"]) + rx = float(api_stats["NetInput"]) + tx = float(api_stats["NetOutput"]) - ior = float(self.stats["BlockInput"]) - iow = float(self.stats["BlockOutput"]) + ior = float(api_stats["BlockInput"]) + iow = float(api_stats["BlockOutput"]) # Hardcode `time_since_update` to 1 as podman already sends the calculated rate result_stats = { @@ -56,14 +71,136 @@ class PodmanStatsFetcher(StatsFetcher): "io": {"ior": ior, "iow": iow, "time_since_update": 1}, "network": {"rx": rx, "tx": tx, "time_since_update": 1}, } - except ValueError: - logger.debug("containers plugin - Non float stats values found for container {}".format(self._container.id)) - logger.debug(self.stats) - return result_stats + except ValueError as e: + self._log_debug("Non float stats values found", e) return result_stats +class PodmanPodStatsFetcher: + def __init__(self, pod_manager): + self._pod_manager = pod_manager + + # Threaded Streamer + 
stats_iterable = pod_manager.stats(stream=True, decode=True) + self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) + + def _log_debug(self, msg, exception=None): + logger.debug("containers (Podman): Pod Manager - {} ({})".format(msg, exception)) + logger.debug(self._streamer.stats) + + def stop(self): + self._streamer.stop() + + @property + def activity_stats(self): + result_stats = {} + container_stats = self._streamer.stats + for stat in container_stats: + io_stats = self._get_io_stats(stat) + cpu_stats = self._get_cpu_stats(stat) + memory_stats = self._get_memory_stats(stat) + network_stats = self._get_network_stats(stat) + + computed_stats = { + "name": stat["Name"], + "cid": stat["CID"], + "pod_id": stat["Pod"], + "io": io_stats or {}, + "memory": memory_stats or {}, + "network": network_stats or {}, + "cpu": cpu_stats or {"total": 0.0}, + } + result_stats[stat["CID"]] = computed_stats + + return result_stats + + def _get_cpu_stats(self, stats): + """Return the container CPU usage. + + Output: a dict {'total': 1.49} + """ + if "CPU" not in stats: + self._log_debug("Missing CPU usage fields") + return None + + cpu_usage = string_value_to_float(stats["CPU"].rstrip("%")) + return {"total": cpu_usage} + + def _get_memory_stats(self, stats): + """Return the container MEMORY. + + Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} + """ + if "MemUsage" not in stats or "/" not in stats["MemUsage"]: + self._log_debug("Missing MEM usage fields") + return None + + memory_usage_str = stats["MemUsage"] + usage_str, limit_str = memory_usage_str.split("/") + + try: + usage = string_value_to_float(usage_str) + limit = string_value_to_float(limit_str) + except ValueError as e: + self._log_debug("Compute MEM usage failed", e) + return None + + return {"usage": usage, "limit": limit} + + def _get_network_stats(self, stats): + """Return the container network usage using the Docker API (v1.0 or higher). 
+ + Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. + with: + time_since_update: number of seconds elapsed between the latest grab + rx: Number of bytes received + tx: Number of bytes transmitted + """ + if "NetIO" not in stats or "/" not in stats["NetIO"]: + self._log_debug("Compute MEM usage failed") + return None + + net_io_str = stats["NetIO"] + rx_str, tx_str = net_io_str.split("/") + + try: + rx = string_value_to_float(rx_str) + tx = string_value_to_float(tx_str) + except ValueError as e: + self._log_debug("Compute MEM usage failed", e) + return None + + # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure + return {"rx": rx, "tx": tx, "time_since_update": 1} + + def _get_io_stats(self, stats): + """Return the container IO usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. + with: + time_since_update: number of seconds elapsed between the latest grab + ior: Number of bytes read + iow: Number of bytes written + """ + if "BlockIO" not in stats or "/" not in stats["BlockIO"]: + self._log_debug("Missing BlockIO usage fields") + return None + + block_io_str = stats["BlockIO"] + ior_str, iow_str = block_io_str.split("/") + + try: + ior = string_value_to_float(ior_str) + iow = string_value_to_float(iow_str) + except ValueError as e: + self._log_debug("Compute BlockIO usage failed", e) + return None + + # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure + return {"ior": ior, "iow": iow, "time_since_update": 1} + + class PodmanContainersExtension: """Glances' Containers Plugin's Docker Extension unit""" @@ -77,6 +214,7 @@ class PodmanContainersExtension: self.ext_name = "Podman (Containers)" self.podman_sock = podman_sock self.stats_fetchers = {} + self.pod_fetcher = None self._version = {} self.connect() @@ -118,6 +256,8 @@ class PodmanContainersExtension: # Issue #1152: Podman module doesn't export 
details about stopped containers # The Containers/all key of the configuration file should be set to True containers = self.client.containers.list(all=all_tag) + if not self.pod_fetcher: + self.pod_fetcher = PodmanPodStatsFetcher(self.client.pods) except Exception as e: logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) return version_stats, [] @@ -128,7 +268,7 @@ class PodmanContainersExtension: # StatsFetcher did not exist in the internal dict # Create it, add it to the internal dict logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) - self.stats_fetchers[container.id] = PodmanStatsFetcher(container) + self.stats_fetchers[container.id] = PodmanContainerStatsFetcher(container) # Stop threads for non-existing containers absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) @@ -141,6 +281,13 @@ class PodmanContainersExtension: # Get stats for all containers container_stats = [self.generate_stats(container) for container in containers] + + pod_stats = self.pod_fetcher.activity_stats + for stats in container_stats: + if stats["Id"][:12] in pod_stats: + stats["pod_name"] = pod_stats[stats["Id"][:12]]["name"] + stats["pod_id"] = pod_stats[stats["Id"][:12]]["pod_id"] + return version_stats, container_stats @property diff --git a/glances/plugins/containers/stats_fetcher.py b/glances/plugins/containers/stats_fetcher.py deleted file mode 100644 index ed08f4ce..00000000 --- a/glances/plugins/containers/stats_fetcher.py +++ /dev/null @@ -1,72 +0,0 @@ -import threading -import time - -from glances.logger import logger - - -class StatsFetcher: - # Should be an Abstract Base Class - # Inherit from abc.ABC by Glancesv4 (not inheriting for compatibility with py2) - """ - Streams the container stats through threading - - Use `StatsFetcher.stats` to access the streamed results - """ - - def __init__(self, container): - """Init the class. 
- - container: instance of Container returned by Docker or Podman client - """ - # The docker-py return stats as a stream - self._container = container - # Container stats are maintained as dicts - self._raw_stats = {} - # Use a Thread to stream stats - self._thread = threading.Thread(target=self._fetch_stats, daemon=True) - # Event needed to stop properly the thread - self._stopper = threading.Event() - - self._thread.start() - logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) - - def _fetch_stats(self): - """Grab the stats. - - Infinite loop, should be stopped by calling the stop() method - """ - try: - for new_stats in self._container.stats(decode=True): - self._pre_raw_stats_update_hook() - self._raw_stats = new_stats - self._post_raw_stats_update_hook() - - time.sleep(0.1) - if self.stopped(): - break - - except Exception as e: - logger.debug("docker plugin - Exception thrown during run ({})".format(e)) - self.stop() - - def stopped(self): - """Return True is the thread is stopped.""" - return self._stopper.is_set() - - def stop(self, timeout=None): - """Stop the thread.""" - logger.debug("docker plugin - Close thread for container {}".format(self._container.name)) - self._stopper.set() - - @property - def stats(self): - """Raw Stats getter.""" - return self._raw_stats - - def _pre_raw_stats_update_hook(self): - """Hook that runs before worker thread updates the raw_stats""" - pass - - def _post_raw_stats_update_hook(self): - """Hook that runs after worker thread updates the raw_stats""" - pass diff --git a/glances/plugins/containers/stats_streamer.py b/glances/plugins/containers/stats_streamer.py new file mode 100644 index 00000000..0bf7d38e --- /dev/null +++ b/glances/plugins/containers/stats_streamer.py @@ -0,0 +1,76 @@ +import threading +import time + +from glances.logger import logger + + +class StatsStreamer: + """ + Utility class to stream an iterable using a background / daemon Thread + + Use 
`StatsStreamer.stats` to access the latest streamed results
+    """
+
+    def __init__(self, iterable, initial_stream_value=None):
+        """
+        iterable: an Iterable instance that needs to be streamed
+        """
+        self._iterable = iterable
+        # Iterable results are stored here
+        self._raw_result = initial_stream_value
+        # Use a Thread to stream iterable (daemon=True to automatically kill thread when main process dies)
+        self._thread = threading.Thread(target=self._stream_results, daemon=True)
+        # Event needed to stop the thread manually
+        self._stopper = threading.Event()
+        # Lock to avoid the daemon thread updating stats when main thread reads the stats
+        self.result_lock = threading.Lock()
+        # Last result streamed time (initial val 0)
+        self._last_update_time = 0
+
+        self._thread.start()
+
+    def stop(self):
+        """Stop the thread."""
+        self._stopper.set()
+
+    def stopped(self):
+        """Return True if the thread is stopped."""
+        return self._stopper.is_set()
+
+    def _stream_results(self):
+        """Grab the stats. 
+
+        Infinite loop, should be stopped by calling the stop() method
+        """
+        try:
+            for res in self._iterable:
+                self._pre_update_hook()
+                self._raw_result = res
+                self._post_update_hook()
+
+                time.sleep(0.1)
+                if self.stopped():
+                    break
+
+        except Exception as e:
+            logger.debug("docker plugin - Exception thrown during run ({})".format(e))
+            self.stop()
+
+    def _pre_update_hook(self):
+        """Hook that runs before worker thread updates the raw_stats"""
+        self.result_lock.acquire()
+
+    def _post_update_hook(self):
+        """Hook that runs after worker thread updates the raw_stats"""
+        self._last_update_time = time.time()
+        self.result_lock.release()
+
+    @property
+    def stats(self):
+        """Raw Stats getter."""
+        return self._raw_result
+
+    @property
+    def last_update_time(self):
+        """Last update time getter."""
+        return self._last_update_time
diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py
index de1a89fc..ddd6c837 100644
--- a/glances/plugins/glances_containers.py
+++ b/glances/plugins/glances_containers.py
@@ -240,6 +240,10 @@ class Plugin(GlancesPlugin):
         if not self.stats or 'containers' not in self.stats or len(self.stats['containers']) == 0 or self.is_disabled():
             return ret
 
+        show_pod_name = False
+        if any(ct.get("pod_name") for ct in self.stats["containers"]):
+            show_pod_name = True
+
         # Build the string message
         # Title
         msg = '{}'.format('CONTAINERS')
@@ -259,6 +263,10 @@ class Plugin(GlancesPlugin):
             self.config.get_int_value('containers', 'max_name_size', default=20) if self.config is not None else 20,
             len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']),
         )
+
+        if show_pod_name:
+            msg = ' {:{width}}'.format('Pod', width=12)
+            ret.append(self.curse_add_line(msg))
         msg = ' {:{width}}'.format('Name', width=name_max_width)
         ret.append(self.curse_add_line(msg, 'SORT' if self.sort_key == 'name' else 'DEFAULT'))
         msg = '{:>10}'.format('Status')
@@ -284,6 +292,8 @@ class Plugin(GlancesPlugin):
         # Data
         for container in 
self.stats['containers']: ret.append(self.curse_new_line()) + if show_pod_name: + ret.append(self.curse_add_line(' {:{width}}'.format(container.get("pod_id", " - "), width=12))) # Name ret.append(self.curse_add_line(self._msg_name(container=container, max_width=name_max_width))) # Status @@ -338,10 +348,10 @@ class Plugin(GlancesPlugin): unit = 'b' try: value = ( - self.auto_unit( - int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = '{:>7}'.format(value) except KeyError: @@ -349,10 +359,10 @@ class Plugin(GlancesPlugin): ret.append(self.curse_add_line(msg)) try: value = ( - self.auto_unit( - int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = ' {:<7}'.format(value) except KeyError: From 95b7b94b1f9345e0443aa8527f6e4ee052958e3c Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 19 Feb 2023 01:08:16 +0530 Subject: [PATCH 11/21] chg: containers Plugin - include engine name --- glances/plugins/glances_containers.py | 36 ++++++++++++++++++--------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index ddd6c837..f68e38be 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -14,10 +14,8 @@ from copy import deepcopy from typing import Optional from glances.logger import logger -from glances.plugins.containers.glances_docker import ( - DockerContainersExtension, import_docker_error_tag) -from glances.plugins.containers.glances_podman import ( - PodmanContainersExtension, import_podman_error_tag) +from glances.plugins.containers.glances_docker 
import DockerContainersExtension, import_docker_error_tag +from glances.plugins.containers.glances_podman import PodmanContainersExtension, import_podman_error_tag from glances.plugins.glances_plugin import GlancesPlugin from glances.processes import glances_processes from glances.processes import sort_stats as sort_stats_processes @@ -179,11 +177,15 @@ class Plugin(GlancesPlugin): def update_docker(self): """Update Docker stats using the input method.""" version, containers = self.docker_extension.update(all_tag=self._all_tag()) + for container in containers: + container["engine"] = 'docker' return {"version": version, "containers": containers} def update_podman(self): """Update Podman stats.""" version, containers = self.podman_client.update(all_tag=self._all_tag()) + for container in containers: + container["engine"] = 'podman' return {"version": version, "containers": containers} def get_user_ticks(self): @@ -244,6 +246,10 @@ class Plugin(GlancesPlugin): if any(ct.get("pod_name") for ct in self.stats["containers"]): show_pod_name = True + show_engine_name = False + if len(set(ct["engine"] for ct in self.stats["containers"])) > 1: + show_engine_name = True + # Build the string message # Title msg = '{}'.format('CONTAINERS') @@ -264,6 +270,9 @@ class Plugin(GlancesPlugin): len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']), ) + if show_engine_name: + msg = ' {:{width}}'.format('Engine', width=6) + ret.append(self.curse_add_line(msg)) if show_pod_name: msg = ' {:{width}}'.format('Pod', width=12) ret.append(self.curse_add_line(msg)) @@ -289,9 +298,12 @@ class Plugin(GlancesPlugin): ret.append(self.curse_add_line(msg)) msg = ' {:8}'.format('Command') ret.append(self.curse_add_line(msg)) + # Data for container in self.stats['containers']: ret.append(self.curse_new_line()) + if show_engine_name: + ret.append(self.curse_add_line(' {:{width}}'.format(container["engine"], width=6))) if show_pod_name: ret.append(self.curse_add_line(' 
{:{width}}'.format(container.get("pod_id", " - "), width=12))) # Name @@ -348,10 +360,10 @@ class Plugin(GlancesPlugin): unit = 'b' try: value = ( - self.auto_unit( - int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = '{:>7}'.format(value) except KeyError: @@ -359,10 +371,10 @@ class Plugin(GlancesPlugin): ret.append(self.curse_add_line(msg)) try: value = ( - self.auto_unit( - int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = ' {:<7}'.format(value) except KeyError: From e56b29292818d324af3d51054101a10c460cf385 Mon Sep 17 00:00:00 2001 From: nicolargo Date: Sat, 25 Feb 2023 15:17:29 +0100 Subject: [PATCH 12/21] Refactor some deps --- optional-requirements.txt | 2 +- setup.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/optional-requirements.txt b/optional-requirements.txt index 4c343807..45152524 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -35,4 +35,4 @@ sparklines statsd wifi zeroconf==0.47.3; python_version < "3.7" -zeroconf; python_version >= "3.7" +zeroconf>=0.19.1; python_version >= "3.7" diff --git a/setup.py b/setup.py index 1d5d62ec..4e622a23 100755 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ def get_install_extras_require(): 'cloud': ['requests'], 'docker': ['docker>=2.0.0', 'python-dateutil', 'six'], 'export': ['bernhard', 'cassandra-driver', 'couchdb', 'elasticsearch', - 'graphitesender', 'influxdb>=1.0.0', 'kafka-python', 'pymongo', + 'graphitesender', 'influxdb>=1.0.0', 'kafka-python', 'pika', 'paho-mqtt', 'potsdb', 'prometheus_client', 'pyzmq', 'statsd'], 'folders': ['scandir'], # python_version<"3.5" @@ -81,7 +81,9 @@ def 
get_install_extras_require(): } if PY3: extras_require['export'].append('influxdb-client') + extras_require['export'].append('pymongo') extras_require['gpu'] = ['py3nvml'] + extras_require['podman'] = ['podman'] if sys.platform.startswith('linux'): extras_require['sensors'] = ['batinfo'] From f7f4f389acc0ee9cda9df701c7287298671cffab Mon Sep 17 00:00:00 2001 From: Bharath Vignesh J K <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 7 May 2023 03:49:58 +0530 Subject: [PATCH 13/21] chg: containers - tmp fix to make podman work --- glances/plugins/containers/glances_podman.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index 602d6d52..0a169ba2 100644 --- a/glances/plugins/containers/glances_podman.py +++ b/glances/plugins/containers/glances_podman.py @@ -82,7 +82,8 @@ class PodmanPodStatsFetcher: self._pod_manager = pod_manager # Threaded Streamer - stats_iterable = pod_manager.stats(stream=True, decode=True) + # Temporary patch to get podman extension working + stats_iterable = (pod_manager.stats(decode=True) for _ in iter(int, 1)) self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) def _log_debug(self, msg, exception=None): From 14e6510e794c26f747508558b32be3ea4184251a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 7 May 2023 15:45:33 +0000 Subject: [PATCH 14/21] chore(deps): update dependency webpack-dev-server to v4.15.0 --- glances/outputs/static/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/glances/outputs/static/package-lock.json b/glances/outputs/static/package-lock.json index d487240c..9549ed8e 100644 --- a/glances/outputs/static/package-lock.json +++ b/glances/outputs/static/package-lock.json @@ -7744,9 +7744,9 @@ } }, "node_modules/webpack-dev-server": { - "version": "4.14.0", - "resolved": 
"https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.14.0.tgz", - "integrity": "sha512-KUgiUNUZldyx5xz3uK0dnXmvsSz03TAMCLtO1cUOb5oishh9sfP3vaI4XNY3EztrPUu98WKzamNfuaydTedYWQ==", + "version": "4.15.0", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.0.tgz", + "integrity": "sha512-HmNB5QeSl1KpulTBQ8UT4FPrByYyaLxpJoQ0+s7EvUrMc16m0ZS1sgb1XGqzmgCPk0c9y+aaXxn11tbLzuM7NQ==", "dev": true, "dependencies": { "@types/bonjour": "^3.5.9", @@ -14014,9 +14014,9 @@ } }, "webpack-dev-server": { - "version": "4.14.0", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.14.0.tgz", - "integrity": "sha512-KUgiUNUZldyx5xz3uK0dnXmvsSz03TAMCLtO1cUOb5oishh9sfP3vaI4XNY3EztrPUu98WKzamNfuaydTedYWQ==", + "version": "4.15.0", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.0.tgz", + "integrity": "sha512-HmNB5QeSl1KpulTBQ8UT4FPrByYyaLxpJoQ0+s7EvUrMc16m0ZS1sgb1XGqzmgCPk0c9y+aaXxn11tbLzuM7NQ==", "dev": true, "requires": { "@types/bonjour": "^3.5.9", From 58a97cb4eb91704b019d91fb0c798b754c03b203 Mon Sep 17 00:00:00 2001 From: Bharath Vignesh J K <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 7 May 2023 21:52:57 +0530 Subject: [PATCH 15/21] chore: leftover formatting --- glances/compat.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/glances/compat.py b/glances/compat.py index feb94e1f..4626b8c0 100644 --- a/glances/compat.py +++ b/glances/compat.py @@ -388,7 +388,9 @@ def string_value_to_float(s): 'TB': 1000000000000, 'PB': 1000000000000000, } - unpack_string = [i[0] if i[1] == '' else i[1].upper() for i in re.findall(r'([\d.]+)|([^\d.]+)', s.replace(' ', ''))] + unpack_string = [ + i[0] if i[1] == '' else i[1].upper() for i in re.findall(r'([\d.]+)|([^\d.]+)', s.replace(' ', '')) + ] if len(unpack_string) == 2: value, unit = unpack_string elif len(unpack_string) == 1: From 9e52775929efb035be2659cc0ad21e0e265f17be Mon Sep 17 00:00:00 2001 
From: Bharath Vignesh J K <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 7 May 2023 21:44:52 +0530 Subject: [PATCH 16/21] chg: containers (Podman) - cache version calls To reduce update times --- glances/plugins/containers/glances_podman.py | 62 ++++++++++---------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index 0a169ba2..e8545ac8 100644 --- a/glances/plugins/containers/glances_podman.py +++ b/glances/plugins/containers/glances_podman.py @@ -1,5 +1,6 @@ """Podman Extension unit for Glances' Containers plugin.""" import json +import time from datetime import datetime from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float @@ -9,7 +10,7 @@ from glances.plugins.containers.stats_streamer import StatsStreamer # Podman library (optional and Linux-only) # https://pypi.org/project/podman/ try: - import podman + from podman import PodmanClient except Exception as e: import_podman_error_tag = True # Display debug message if import KeyError @@ -211,85 +212,86 @@ class PodmanContainersExtension: if import_podman_error_tag: raise Exception("Missing libs required to run Podman Extension (Containers)") + self.ext_name = "containers (Podman)" + self.client = None - self.ext_name = "Podman (Containers)" self.podman_sock = podman_sock - self.stats_fetchers = {} - self.pod_fetcher = None + self.pods_stats_fetcher = None + self.container_stats_fetchers = {} + + # Cache version details as the version call is costly (in terms of time) self._version = {} + self._last_version_update = 0 + self.connect() def connect(self): """Connect to Podman.""" try: - self.client = podman.PodmanClient(base_url=self.podman_sock) + self.client = PodmanClient(base_url=self.podman_sock) except Exception as e: logger.error("{} plugin - Can not connect to Podman ({})".format(self.ext_name, e)) + def update_version(self): try: - version_podman = 
self.client.version() + self._version = self.client.version() + self._last_version_update = time.time() except Exception as e: logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) - else: - self._version = { - 'Version': version_podman['Version'], - 'ApiVersion': version_podman['ApiVersion'], - 'MinAPIVersion': version_podman['MinAPIVersion'], - } def stop(self): # Stop all streaming threads - for t in itervalues(self.stats_fetchers): + for t in itervalues(self.container_stats_fetchers): t.stop() + if self.pods_stats_fetcher: + self.pods_stats_fetcher.stop() + def update(self, all_tag): """Update Podman stats using the input method.""" - try: - version_stats = self.client.version() - except Exception as e: - # Correct issue#649 - logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) - return {}, [] + curr_time = time.time() + if curr_time - self._last_version_update > 300: # 300 seconds + self.update_version() # Update current containers list try: # Issue #1152: Podman module doesn't export details about stopped containers # The Containers/all key of the configuration file should be set to True containers = self.client.containers.list(all=all_tag) - if not self.pod_fetcher: - self.pod_fetcher = PodmanPodStatsFetcher(self.client.pods) + if not self.pods_stats_fetcher: + self.pods_stats_fetcher = PodmanPodStatsFetcher(self.client.pods) except Exception as e: logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) - return version_stats, [] + return self._version, [] # Start new thread for new container for container in containers: - if container.id not in self.stats_fetchers: + if container.id not in self.container_stats_fetchers: # StatsFetcher did not exist in the internal dict # Create it, add it to the internal dict logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) - self.stats_fetchers[container.id] = 
PodmanContainerStatsFetcher(container) + self.container_stats_fetchers[container.id] = PodmanContainerStatsFetcher(container) # Stop threads for non-existing containers - absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) + absent_containers = set(iterkeys(self.container_stats_fetchers)) - set(c.id for c in containers) for container_id in absent_containers: # Stop the StatsFetcher logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12])) - self.stats_fetchers[container_id].stop() + self.container_stats_fetchers[container_id].stop() # Delete the StatsFetcher from the dict - del self.stats_fetchers[container_id] + del self.container_stats_fetchers[container_id] # Get stats for all containers container_stats = [self.generate_stats(container) for container in containers] - pod_stats = self.pod_fetcher.activity_stats + pod_stats = self.pods_stats_fetcher.activity_stats for stats in container_stats: if stats["Id"][:12] in pod_stats: stats["pod_name"] = pod_stats[stats["Id"][:12]]["name"] stats["pod_id"] = pod_stats[stats["Id"][:12]]["pod_id"] - return version_stats, container_stats + return self._version, container_stats @property def key(self): @@ -314,7 +316,7 @@ class PodmanContainersExtension: if stats['Status'] in self.CONTAINER_ACTIVE_STATUS: stats['StartedAt'] = datetime.fromtimestamp(container.attrs['StartedAt']) - stats_fetcher = self.stats_fetchers[container.id] + stats_fetcher = self.container_stats_fetchers[container.id] activity_stats = stats_fetcher.activity_stats stats.update(activity_stats) From 8457d2f244583577aab72aeb900e8047e8ec9fec Mon Sep 17 00:00:00 2001 From: Bharath Vignesh J K <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 7 May 2023 21:47:56 +0530 Subject: [PATCH 17/21] chg: containers - use proper names Other minor syntactic sugar changes --- glances/plugins/containers/glances_docker.py | 8 ++++---- glances/plugins/containers/glances_podman.py | 7 +++---- 
2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/glances/plugins/containers/glances_docker.py b/glances/plugins/containers/glances_docker.py index a642db9f..61f3623e 100644 --- a/glances/plugins/containers/glances_docker.py +++ b/glances/plugins/containers/glances_docker.py @@ -210,7 +210,7 @@ class DockerContainersExtension: raise Exception("Missing libs required to run Docker Extension (Containers) ") self.client = None - self.ext_name = "Docker Ext" + self.ext_name = "containers (Docker)" self.stats_fetchers = {} self.connect() @@ -221,7 +221,7 @@ class DockerContainersExtension: # Do not use the timeout option (see issue #1878) self.client = docker.from_env() except Exception as e: - logger.error("docker plugin - Can not connect to Docker ({})".format(e)) + logger.error("{} plugin - Can't connect to Docker ({})".format(self.ext_name, e)) self.client = None def stop(self): @@ -245,7 +245,7 @@ class DockerContainersExtension: version_stats = self.client.version() except Exception as e: # Correct issue#649 - logger.error("{} plugin - Cannot get Docker version ({})".format(self.ext_name, e)) + logger.error("{} plugin - Can't get Docker version ({})".format(self.ext_name, e)) return {}, [] # Update current containers list @@ -254,7 +254,7 @@ class DockerContainersExtension: # The Containers/all key of the configuration file should be set to True containers = self.client.containers.list(all=all_tag) except Exception as e: - logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) + logger.error("{} plugin - Can't get containers list ({})".format(self.ext_name, e)) return version_stats, [] # Start new thread for new container diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index e8545ac8..c1bc3801 100644 --- a/glances/plugins/containers/glances_podman.py +++ b/glances/plugins/containers/glances_podman.py @@ -1,5 +1,4 @@ """Podman Extension unit for Glances' Containers 
plugin.""" -import json import time from datetime import datetime @@ -230,14 +229,14 @@ class PodmanContainersExtension: try: self.client = PodmanClient(base_url=self.podman_sock) except Exception as e: - logger.error("{} plugin - Can not connect to Podman ({})".format(self.ext_name, e)) + logger.error("{} plugin - Can't connect to Podman ({})".format(self.ext_name, e)) def update_version(self): try: self._version = self.client.version() self._last_version_update = time.time() except Exception as e: - logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) + logger.error("{} plugin - Can't get Podman version ({})".format(self.ext_name, e)) def stop(self): # Stop all streaming threads @@ -262,7 +261,7 @@ class PodmanContainersExtension: if not self.pods_stats_fetcher: self.pods_stats_fetcher = PodmanPodStatsFetcher(self.client.pods) except Exception as e: - logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) + logger.error("{} plugin - Can't get containers list ({})".format(self.ext_name, e)) return self._version, [] # Start new thread for new container From 8972880b77de642708551d24cf13e18898e4e39f Mon Sep 17 00:00:00 2001 From: Bharath Vignesh J K <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 7 May 2023 19:44:00 +0530 Subject: [PATCH 18/21] chg: Dockerfile - fix alpine ARM build failures for cryptography Issue: #2368 --- docker-files/alpine.Dockerfile | 13 ++++++++++--- optional-requirements.txt | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docker-files/alpine.Dockerfile b/docker-files/alpine.Dockerfile index 2b71b2ca..0c470839 100644 --- a/docker-files/alpine.Dockerfile +++ b/docker-files/alpine.Dockerfile @@ -29,8 +29,10 @@ RUN apk add --no-cache \ smartmontools \ iputils \ tzdata \ - # Required for 'cryptography' dependency - gcc libffi-dev openssl-dev cargo pkgconfig + # Required for 'cryptography' dependency of optional requirement 'cassandra-driver' \ + # Refer: 
https://cryptography.io/en/latest/installation/#alpine \ + # `git` required to clone cargo crates (dependencies) + gcc libffi-dev openssl-dev cargo pkgconfig git ############################################################################## # Install the dependencies beforehand to make them cacheable @@ -58,9 +60,14 @@ RUN pip3 install --no-cache-dir --user glances FROM build as buildOptionalRequirements ARG PYTHON_VERSION +# Required for optional dependency cassandra-driver +ENV CASS_DRIVER_NO_CYTHON=1 +# See issue 2368 +ENV CARGO_NET_GIT_FETCH_WITH_CLI=true + COPY requirements.txt . COPY optional-requirements.txt . -RUN CASS_DRIVER_NO_CYTHON=1 pip3 install --no-cache-dir --user -r optional-requirements.txt +RUN pip3 install --no-cache-dir --user -r optional-requirements.txt ############################################################################## # full image diff --git a/optional-requirements.txt b/optional-requirements.txt index a331b53e..138f3ceb 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -4,7 +4,7 @@ batinfo bernhard bottle -#cassandra-driver # cassandra-driver breaks Glances CI (Alpine Docker). 
See detail in issue #2368 +cassandra-driver chevron couchdb docker==6.0.1 From f95ed3723e69e990628baaa945b653fc1d5c0f12 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 7 May 2023 17:43:09 +0000 Subject: [PATCH 19/21] chore(deps): update dependency eslint-plugin-vue to v9.11.1 --- glances/outputs/static/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/glances/outputs/static/package-lock.json b/glances/outputs/static/package-lock.json index d487240c..f9f97e62 100644 --- a/glances/outputs/static/package-lock.json +++ b/glances/outputs/static/package-lock.json @@ -2657,9 +2657,9 @@ } }, "node_modules/eslint-plugin-vue": { - "version": "9.11.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.11.0.tgz", - "integrity": "sha512-bBCJAZnkBV7ATH4Z1E7CvN3nmtS4H7QUU3UBxPdo8WohRU+yHjnQRALpTbxMVcz0e4Mx3IyxIdP5HYODMxK9cQ==", + "version": "9.11.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.11.1.tgz", + "integrity": "sha512-SNtBGDrRkPUFsREswPceqdvZ7YVdWY+iCYiDC+RoxwVieeQ7GJU1FLDlkcaYTOD2os/YuVgI1Fdu8YGM7fmoow==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.3.0", @@ -10276,9 +10276,9 @@ } }, "eslint-plugin-vue": { - "version": "9.11.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.11.0.tgz", - "integrity": "sha512-bBCJAZnkBV7ATH4Z1E7CvN3nmtS4H7QUU3UBxPdo8WohRU+yHjnQRALpTbxMVcz0e4Mx3IyxIdP5HYODMxK9cQ==", + "version": "9.11.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.11.1.tgz", + "integrity": "sha512-SNtBGDrRkPUFsREswPceqdvZ7YVdWY+iCYiDC+RoxwVieeQ7GJU1FLDlkcaYTOD2os/YuVgI1Fdu8YGM7fmoow==", "dev": true, "requires": { "@eslint-community/eslint-utils": "^4.3.0", From a29f33592690d5ed9dd2a6714bdcaf1013c8838d Mon Sep 17 00:00:00 2001 From: Bharath Vignesh J K <52282402+RazCrimson@users.noreply.github.com> 
Date: Sun, 7 May 2023 23:30:42 +0530 Subject: [PATCH 20/21] chore: containers - drop unused import --- glances/plugins/glances_containers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index f68e38be..3aee8e83 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -11,7 +11,6 @@ import os from copy import deepcopy -from typing import Optional from glances.logger import logger from glances.plugins.containers.glances_docker import DockerContainersExtension, import_docker_error_tag From 73e3a5147ab8cb7c6b23dc20e7fdaf24013351df Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 00:16:23 +0000 Subject: [PATCH 21/21] chore(deps): update dependency docker to v6.1.1 --- optional-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optional-requirements.txt b/optional-requirements.txt index 138f3ceb..7f6e5fcf 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -7,7 +7,7 @@ bottle cassandra-driver chevron couchdb -docker==6.0.1 +docker==6.1.1 elasticsearch graphitesender hddtemp