Solve conflict with develop

This commit is contained in:
nicolargo 2021-11-28 14:13:29 +01:00
commit d72f7b0928
34 changed files with 3072 additions and 591 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -114,7 +114,7 @@ Note: if you want to use SSL, please set 'protocol=https'.
Grafana
-------
For Grafana users, Glances provides a dedicated `dashboard`_.
For Grafana users, Glances provides a dedicated dashboard for `InfluxQL`_ or `Flux`_ InfluxDB datasource.
.. image:: ../_static/glances-influxdb.png
@ -122,4 +122,5 @@ To use it, just import the file in your ``Grafana`` web interface.
.. image:: ../_static/grafana.png
.. _dashboard: https://github.com/nicolargo/glances/blob/master/conf/glances-grafana.json
.. _InfluxQL: https://github.com/nicolargo/glances/blob/master/conf/glances-grafana-influxql.json
.. _Flux: https://github.com/nicolargo/glances/blob/master/conf/glances-grafana-flux.json

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "GLANCES" "1" "Nov 21, 2021" "3.2.4.1" "Glances"
.TH "GLANCES" "1" "Nov 28, 2021" "3.2.5_beta01" "Glances"
.SH NAME
glances \- An eye on your system
.

View File

@ -47,7 +47,7 @@ class GlancesAmp(object):
EMAIL = '?'
def __init__(self, name=None, args=None):
"""Init AMP classe."""
"""Init AMP class."""
logger.debug("AMP - Init {} version {}".format(self.NAME, self.VERSION))
# AMP name (= module name without glances_)
@ -69,7 +69,7 @@ class GlancesAmp(object):
def load_config(self, config):
"""Load AMP parameters from the configuration file."""
# Read AMP confifuration.
# Read AMP configuration.
# For ex, the AMP foo should have the following section:
#
# [foo]
@ -77,12 +77,11 @@ class GlancesAmp(object):
# regex=\/usr\/bin\/nginx
# refresh=60
#
# and optionnaly:
# and optionally:
#
# one_line=false
# option1=opt1
# ...
#
amp_section = 'amp_' + self.amp_name
if (hasattr(config, 'has_section') and
config.has_section(amp_section)):
@ -137,7 +136,7 @@ class GlancesAmp(object):
return self.get('refresh')
def one_line(self):
"""Return True|False if the AMP shoukd be displayed in oneline (one_lineline=true|false)."""
"""Return True|False if the AMP should be displayed in one line (one_line=true|false)."""
ret = self.get('one_line')
if ret is None:
return False
@ -149,7 +148,9 @@ class GlancesAmp(object):
return self.timer.get()
def should_update(self):
"""Return True is the AMP should be updated:
"""Return True is the AMP should be updated
Conditions for update:
- AMP is enabled
- only update every 'refresh' seconds
"""
@ -177,7 +178,8 @@ class GlancesAmp(object):
def set_result(self, result, separator=''):
"""Store the result (string) into the result key of the AMP
if one_line is true then replace \n by separator
If one_line is true then it replaces `\n` by the separator
"""
if self.one_line():
self.configs['result'] = u(result).replace('\n', separator)

View File

@ -21,7 +21,7 @@ r"""
Default AMP
=========
Monitor a process by executing a command line. This is the default AMP's behavor
Monitor a process by executing a command line. This is the default AMP's behavior
if no AMP script is found.
Configuration file example
@ -75,7 +75,7 @@ class Amp(GlancesAmp):
sum([p['memory_percent'] for p in process_list])))
return self.result()
# Run command(s)
# Comman separated commands can be executed
# Comma separated commands can be executed
try:
msg = ''
for cmd in res.split(';'):

View File

@ -26,8 +26,8 @@ Monitor the Nginx process using the status page.
How to read the stats
---------------------
Active connections Number of all open connections. This doesnt mean number of users.
A single user, for a single pageview can open many concurrent connections to your server.
Active connections Number of all open connections. This doesn't mean number of users.
A single user, for a single page-view can open many concurrent connections to your server.
Server accepts handled requests This shows three values.
First is total accepted connections.
Second is total handled connections. Usually first 2 values are same.

View File

@ -21,7 +21,7 @@ r"""
SystemV AMP
===========
Monitor the state of the Syste V init system and service.
Monitor the state of the System V init system and service.
How to read the stats
---------------------

View File

@ -41,10 +41,10 @@ class Export(GlancesExport):
"""Init the Cassandra export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.keyspace = None
# Optionals configuration keys
# Optional configuration keys
self.protocol_version = 3
self.replication_factor = 2
self.table = None

View File

@ -37,10 +37,10 @@ class Export(GlancesExport):
"""Init the CouchDB export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.db = None
# Optionals configuration keys
# Optional configuration keys
self.user = None
self.password = None
@ -102,8 +102,8 @@ class Export(GlancesExport):
data['type'] = name
data['time'] = couchdb.mapping.DateTimeField()._to_json(datetime.now())
# Write input to the CouchDB database
# Result can be view: http://127.0.0.1:5984/_utils
# Write data to the CouchDB database
# Result can be seen at: http://127.0.0.1:5984/_utils
try:
self.client[self.db].save(data)
except Exception as e:

View File

@ -49,7 +49,7 @@ class Export(GlancesExport):
else:
# A CSV file already exit, append new data
file_mode = 'a'
# Header will be check later
# Header will be checked later
# Get the existing one
try:
self.csv_file = open_csv_file(self.csv_filename, 'r')
@ -115,8 +115,8 @@ class Export(GlancesExport):
self.writer.writerow(csv_header)
# File already exist, check if header are compatible
if self.old_header != csv_header:
# Header are differents, log an error and do not write data
logger.error("Cannot append data to existing CSV file. Headers are differents.")
# Header are different, log an error and do not write data
logger.error("Cannot append data to existing CSV file. Headers are different.")
logger.debug("Old header: {}".format(self.old_header))
logger.debug("New header: {}".format(csv_header))
else:

View File

@ -37,7 +37,7 @@ class Export(GlancesExport):
"""Init the ES export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.index = None
# Load the ES configuration file
@ -76,14 +76,14 @@ class Export(GlancesExport):
# Create DB input
# https://elasticsearch-py.readthedocs.io/en/master/helpers.html
actions = []
dtnow = datetime.utcnow().isoformat('T')
dt_now = datetime.utcnow().isoformat('T')
action = {
"_index": index,
"_id": '{}.{}'.format(name, dtnow),
"_id": '{}.{}'.format(name, dt_now),
"_type": 'glances-{}'.format(name),
"_source": {
"plugin": name,
"timestamp": dtnow
"timestamp": dt_now
}
}
action['_source'].update(zip(columns, [str(p) for p in points]))

View File

@ -33,9 +33,9 @@ class GlancesExport(object):
"""Main class for Glances export IF."""
# For the moment, only thoses plugins can be exported
# TODO: remove this part and make all plugins exportable (see issue #1556)
# TODO: also make this list configurable by the user (see issue #1443)
# For the moment, only the below plugins can be exported
# @TODO: remove this part and make all plugins exportable (see issue #1556)
# @TODO: also make this list configurable by the user (see issue #1443)
exportable_plugins = ['cpu',
'percpu',
'load',
@ -62,8 +62,8 @@ class GlancesExport(object):
self.config = config
self.args = args
# By default export is disable
# Had to be set to True in the __init__ class of child
# By default export is disabled
# Needs to be set to True in the __init__ class of child
self.export_enable = False
# Mandatory for (most of) the export module
@ -92,8 +92,8 @@ class GlancesExport(object):
"""Load the export <section> configuration in the Glances configuration file.
:param section: name of the export section to load
:param mandatories: a list of mandatories parameters to load
:param options: a list of optionnals parameters to load
:param mandatories: a list of mandatory parameters to load
:param options: a list of optional parameters to load
:returns: Boolean -- True if section is found
"""
@ -139,29 +139,26 @@ class GlancesExport(object):
def parse_tags(self, tags):
"""Parse tags into a dict.
input tags: a comma separated list of 'key:value' pairs.
Example: foo:bar,spam:eggs
output dtags: a dict of tags.
Example: {'foo': 'bar', 'spam': 'eggs'}
:param tags: a comma separated list of 'key:value' pairs. Example: foo:bar,spam:eggs
:return: a dict of tags. Example: {'foo': 'bar', 'spam': 'eggs'}
"""
dtags = {}
d_tags = {}
if tags:
try:
dtags = dict([x.split(':') for x in tags.split(',')])
d_tags = dict([x.split(':') for x in tags.split(',')])
except ValueError:
# one of the 'key:value' pairs was missing
logger.info('Invalid tags passed: %s', tags)
dtags = {}
d_tags = {}
return dtags
return d_tags
def update(self, stats):
"""Update stats to a server.
The method builds two lists: names and values
and calls the export method to export the stats.
The method builds two lists: names and values and calls the export method to export the stats.
Note: this class can be overwrite (for example in CSV and Graph).
Note: this class can be overwritten (for example in CSV and Graph).
"""
if not self.export_enable:
return False

View File

@ -36,12 +36,12 @@ class Export(GlancesExport):
"""Init the InfluxDB export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.user = None
self.password = None
self.db = None
# Optionals configuration keys
# Optional configuration keys
self.protocol = 'http'
self.prefix = None
self.tags = None
@ -99,7 +99,9 @@ class Export(GlancesExport):
def _normalize(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
Output is a list of measurements."""
:return: a list of measurements.
"""
ret = []
# Build initial dict by crossing columns and point
@ -119,7 +121,7 @@ class Export(GlancesExport):
if k.startswith('{}.'.format(measurement))}
else:
fields = data_dict
# Transform to InfluxDB datamodel
# Transform to InfluxDB data model
# https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_reference/
for k in fields:
# Do not export empty (None) value

View File

@ -36,10 +36,10 @@ class Export(GlancesExport):
"""Init the Graphite export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
# N/A
# Optionals configuration keys
# Optional configuration keys
self.debug = False
self.prefix = None
self.system_name = None

View File

@ -35,12 +35,12 @@ class Export(GlancesExport):
"""Init the InfluxDB export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.org = None
self.bucket = None
self.token = None
# Optionals configuration keys
# Optional configuration keys
self.protocol = 'http'
self.prefix = None
self.tags = None
@ -94,7 +94,10 @@ class Export(GlancesExport):
return write_client
def _normalize(self, name, columns, points):
"""Normalize data for the InfluxDB's data model."""
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
ret = []
# Build initial dict by crossing columns and point

View File

@ -38,10 +38,10 @@ class Export(GlancesExport):
"""Init the Kafka export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.topic = None
# Optionals configuration keys
# Optional configuration keys
self.compression = None
self.tags = None

View File

@ -39,7 +39,7 @@ class Export(GlancesExport):
"""Init the MQTT export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.user = None
self.password = None
self.topic = None
@ -102,9 +102,9 @@ class Export(GlancesExport):
for sensor, value in zip(columns, points):
try:
sensor = [whitelisted(name) for name in sensor.split('.')]
tobeexport = [self.topic, self.hostname, name]
tobeexport.extend(sensor)
topic = '/'.join(tobeexport)
to_export = [self.topic, self.hostname, name]
to_export.extend(sensor)
topic = '/'.join(to_export)
self.client.publish(topic, value)
except Exception as e:

View File

@ -36,7 +36,7 @@ class Export(GlancesExport):
"""Init the OpenTSDB export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
# N/A
# Optionals configuration keys

View File

@ -39,7 +39,7 @@ class Export(GlancesExport):
"""Init the RabbitMQ export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.user = None
self.password = None
self.queue = None

View File

@ -37,7 +37,7 @@ class Export(GlancesExport):
"""Init the RESTful export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.protocol = None
self.path = None

View File

@ -38,10 +38,10 @@ class Export(GlancesExport):
"""Init the Riemann export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
# N/A
# Optionals configuration keys
# Optional configuration keys
# N/A
# Load the Riemann configuration

View File

@ -36,10 +36,10 @@ class Export(GlancesExport):
"""Init the Statsd export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
# N/A
# Optionals configuration keys
# Optional configuration keys
self.prefix = None
# Load the configuration file

View File

@ -38,7 +38,7 @@ class Export(GlancesExport):
"""Init the ZeroMQ export IF."""
super(Export, self).__init__(config=config, args=args)
# Mandatories configuration keys (additional to host and port)
# Mandatory configuration keys (additional to host and port)
self.prefix = None
# Optionals configuration keys

View File

@ -26,7 +26,7 @@ from math import modf
class Bar(object):
r"""Manage bar (progression or status).
"""Manage bar (progression or status).
import sys
import time

View File

@ -323,7 +323,7 @@ class _GlancesCurses(object):
}
def set_cursor(self, value):
"""Configure the curse cursor apparence.
"""Configure the curse cursor appearance.
0: invisible
1: visible
@ -526,8 +526,10 @@ class _GlancesCurses(object):
def __get_stat_display(self, stats, layer):
"""Return a dict of dict with all the stats display.
stats: Global stats dict
layer: ~ cs_status
# TODO: Drop extra parameter
:param stats: Global stats dict
:param layer: ~ cs_status
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
@ -562,16 +564,14 @@ class _GlancesCurses(object):
def display(self, stats, cs_status=None):
"""Display stats on the screen.
stats: Stats database to display
cs_status:
:param stats: Stats database to display
:param cs_status:
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
"Disconnected": Client is disconnected from the server
Return:
True if the stats have been displayed
False if the help have been displayed
:return: True if the stats have been displayed else False if the help has been displayed
"""
# Init the internal line/column for Glances Curses
self.init_line_column()
@ -579,7 +579,7 @@ class _GlancesCurses(object):
# Update the stats messages
###########################
# Get all the plugins but quicklook and proceslist
# Get all the plugins but quicklook and process list
self.args.cs_status = cs_status
__stat_display = self.__get_stat_display(stats, layer=cs_status)
@ -624,7 +624,7 @@ class _GlancesCurses(object):
# =====================================
# Display first line (system+ip+uptime)
# Optionnaly: Cloud on second line
# Optionally: Cloud on second line
# =====================================
self.__display_header(__stat_display)
@ -853,8 +853,8 @@ class _GlancesCurses(object):
"""
Display a centered popup.
popup_type='info'
Just an infotmation popup, no user interaction
popup_type='info'
Just an information popup, no user interaction
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
@ -905,19 +905,19 @@ class _GlancesCurses(object):
self.wait(duration * 1000)
return True
elif popup_type == 'input':
# Create a subwindow for the text field
subpop = popup.derwin(1, input_size, 2, 2 + len(m))
subpop.attron(self.colors_list['FILTER'])
# Create a sub-window for the text field
sub_pop = popup.derwin(1, input_size, 2, 2 + len(m))
sub_pop.attron(self.colors_list['FILTER'])
# Init the field with the current value
if input_value is not None:
subpop.addnstr(0, 0, input_value, len(input_value))
sub_pop.addnstr(0, 0, input_value, len(input_value))
# Display the popup
popup.refresh()
subpop.refresh()
# Create the textbox inside the subwindows
sub_pop.refresh()
# Create the textbox inside the sub-windows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextbox(subpop, insert_mode=True)
textbox = GlancesTextbox(sub_pop, insert_mode=True)
textbox.edit()
self.set_cursor(0)
# self.term_window.keypad(0)
@ -929,18 +929,18 @@ class _GlancesCurses(object):
logger.debug("User centers an empty string")
return None
elif popup_type == 'yesno':
# # Create a subwindow for the text field
subpop = popup.derwin(1, 2, len(sentence_list) + 1, len(m) + 2)
subpop.attron(self.colors_list['FILTER'])
# # Create a sub-window for the text field
sub_pop = popup.derwin(1, 2, len(sentence_list) + 1, len(m) + 2)
sub_pop.attron(self.colors_list['FILTER'])
# Init the field with the current value
subpop.addnstr(0, 0, '', 0)
sub_pop.addnstr(0, 0, '', 0)
# Display the popup
popup.refresh()
subpop.refresh()
# Create the textbox inside the subwindows
sub_pop.refresh()
# Create the textbox inside the sub-windows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextboxYesNo(subpop, insert_mode=False)
textbox = GlancesTextboxYesNo(sub_pop, insert_mode=False)
textbox.edit()
self.set_cursor(0)
# self.term_window.keypad(0)
@ -953,10 +953,11 @@ class _GlancesCurses(object):
add_space=0):
"""Display the plugin_stats on the screen.
If display_optional=True display the optional stats
If display_additional=True display additionnal stats
max_y: do not display line > max_y
add_space: add x space (line) after the plugin
:param plugin_stats:
:param display_optional: display the optional stats if True
:param display_additional: display additional stats if True
:param max_y: do not display line > max_y
:param add_space: add x space (line) after the plugin
"""
# Exit if:
# - the plugin_stats message is empty
@ -1011,11 +1012,11 @@ class _GlancesCurses(object):
if not display_additional and m['additional']:
continue
# Is it possible to display the stat with the current screen size
# !!! Crach if not try/except... Why ???
# !!! Crash if not try/except... Why ???
try:
self.term_window.addnstr(y, x,
m['msg'],
# Do not disply outside the screen
# Do not display outside the screen
screen_x - x,
self.colors_list[m['decoration']])
except Exception:
@ -1049,8 +1050,8 @@ class _GlancesCurses(object):
def flush(self, stats, cs_status=None):
"""Clear and update the screen.
stats: Stats database to display
cs_status:
:param stats: Stats database to display
:param cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
@ -1065,20 +1066,17 @@ class _GlancesCurses(object):
return_to_browser=False):
"""Update the screen.
INPUT
stats: Stats database to display
duration: duration of the loop
cs_status:
:param stats: Stats database to display
:param duration: duration of the loop
:param cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
return_to_browser:
:param return_to_browser:
True: Do not exit, return to the browser list
False: Exit and return to the shell
OUTPUT
True: Exit key has been pressed
False: Others cases...
:return: True if exit key has been pressed else False
"""
# Flush display
self.flush(stats, cs_status=cs_status)
@ -1139,7 +1137,7 @@ class _GlancesCurses(object):
return c
def get_stats_display_height(self, curse_msg):
r"""Return the height of the formatted curses message.
"""Return the height of the formatted curses message.
The height is defined by the number of '\n' (new line).
"""

View File

@ -97,18 +97,18 @@ class GlancesCursesBrowser(_GlancesCurses):
for item in stats:
color = item['status']
counts[color] = counts.get(color, 0) + 1
result = ''
for key in counts.keys():
result += key + ': ' + str(counts[key]) + ' '
return result
def _get_stats(self, stats):
stats_list = None
if self._stats_list is not None:
stats_list = self._stats_list
stats_list.sort(reverse = self._revesed_sorting,
stats_list.sort(reverse = self._revesed_sorting,
key = lambda x: { 'UNKNOWN' : 0,
'OFFLINE' : 1,
'PROTECTED' : 2,
@ -116,9 +116,9 @@ class GlancesCursesBrowser(_GlancesCurses):
'ONLINE': 4 }.get(x['status'], 99))
else:
stats_list = stats
return stats_list
def cursor_up(self, stats):
"""Set the cursor to position N-1 in the list."""
if 0 <= self.cursor_position - 1:
@ -126,14 +126,14 @@ class GlancesCursesBrowser(_GlancesCurses):
else:
if self._current_page - 1 < 0 :
self._current_page = self._page_max - 1
self.cursor_position = (len(stats) - 1) % self._page_max_lines
self.cursor_position = (len(stats) - 1) % self._page_max_lines
else:
self._current_page -= 1
self.cursor_position = self._page_max_lines - 1
def cursor_down(self, stats):
"""Set the cursor to position N-1 in the list."""
if self.cursor_position + 1 < self.get_pagelines(stats):
self.cursor_position += 1
else:
@ -186,7 +186,7 @@ class GlancesCursesBrowser(_GlancesCurses):
self.cursor_down(stats)
logger.debug("Server {}/{} selected".format(self.cursor + 1, len(stats)))
elif self.pressedkey == curses.KEY_PPAGE:
# 'Page UP' > Prev page in the server list
# 'Page UP' > Prev page in the server list
self.cursor_pageup(stats)
logger.debug("PageUP: Server ({}/{}) pages.".format(self._current_page + 1, self._page_max))
elif self.pressedkey == curses.KEY_NPAGE:
@ -197,7 +197,7 @@ class GlancesCursesBrowser(_GlancesCurses):
self._stats_list = None
refresh = True
elif self.pressedkey == ord('2'):
self._revesed_sorting = False
self._revesed_sorting = False
self._stats_list = stats.copy()
refresh = True
elif self.pressedkey == ord('3'):
@ -222,7 +222,10 @@ class GlancesCursesBrowser(_GlancesCurses):
Wait for __refresh_time sec / catch key every 100 ms.
stats: Dict of dict with servers stats
:param stats: Dict of dict with servers stats
:param cs_status:
:param duration:
:param return_to_browser:
"""
# Flush display
logger.debug('Servers list: {}'.format(stats))
@ -248,7 +251,7 @@ class GlancesCursesBrowser(_GlancesCurses):
def flush(self, stats):
"""Update the servers' list screen.
stats: List of dict with servers stats
:param stats: List of dict with servers stats
"""
self.erase()
self.display(stats)
@ -256,9 +259,7 @@ class GlancesCursesBrowser(_GlancesCurses):
def display(self, stats, cs_status=None):
"""Display the servers list.
Return:
True if the stats have been displayed
False if the stats have not been displayed (no server available)
:return: True if the stats have been displayed else False (no server available)
"""
# Init the internal line/column for Glances Curses
self.init_line_column()
@ -301,13 +302,13 @@ class GlancesCursesBrowser(_GlancesCurses):
if stats_len > stats_max and screen_y > 2:
msg = '{} servers displayed.({}/{}) {}'.format(self.get_pagelines(stats),
self._current_page + 1,
self._page_max,
self._current_page + 1,
self._page_max,
self._get_status_count(stats))
self.term_window.addnstr(y + 1, x,
msg,
screen_x - x)
if stats_len == 0:
return False
@ -347,10 +348,10 @@ class GlancesCursesBrowser(_GlancesCurses):
self.cursor = len(stats) - 1
stats_list = self._get_stats(stats)
start_line = self._page_max_lines * self._current_page
start_line = self._page_max_lines * self._current_page
end_line = start_line + self.get_pagelines(stats_list)
current_page = stats_list[start_line:end_line]
# Display table
line = 0
for v in current_page:

View File

@ -43,7 +43,7 @@ except (UnicodeEncodeError, TypeError) as e:
class Sparkline(object):
r"""Manage sparklines (see https://pypi.org/project/sparklines/)."""
"""Manage sparklines (see https://pypi.org/project/sparklines/)."""
def __init__(self, size, pre_char='[', post_char=']', empty_char=' ', with_text=True):
# If the sparklines python module available ?
@ -64,7 +64,7 @@ class Sparkline(object):
@property
def size(self, with_decoration=False):
# Return the sparkine size, with or without decoration
# Return the sparkline size, with or without decoration
if with_decoration:
return self.__size
if self.__with_text:

View File

@ -27,9 +27,7 @@ from glances.globals import printandflush
class GlancesStdout(object):
"""
This class manages the Stdout display.
"""
"""This class manages the Stdout display."""
def __init__(self, config=None, args=None):
# Init
@ -41,7 +39,9 @@ class GlancesStdout(object):
def build_list(self):
"""Return a list of tuples taken from self.args.stdout
[(plugin, attribute), ... ]"""
:return: A list of tuples. Example -[(plugin, attribute), ... ]
"""
ret = []
for p in self.args.stdout.split(','):
if '.' in p:
@ -58,6 +58,7 @@ class GlancesStdout(object):
stats,
duration=3):
"""Display stats to stdout.
Refresh every duration second.
"""
for plugin, attribute in self.plugins_list:

View File

@ -184,9 +184,7 @@ def print_limits(stats):
class GlancesStdoutApiDoc(object):
"""
This class manages the fields description display.
"""
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):
# Init
@ -199,8 +197,7 @@ class GlancesStdoutApiDoc(object):
def update(self,
stats,
duration=1):
"""Display issue
"""
"""Display issue"""
# Display header
print(APIDOC_HEADER)

View File

@ -27,9 +27,7 @@ from glances.globals import printandflush
class GlancesStdoutCsv(object):
"""
This class manages the StdoutCsv display.
"""
"""This class manages the StdoutCsv display."""
separator = ','
na = 'N/A'
@ -47,7 +45,9 @@ class GlancesStdoutCsv(object):
def build_list(self):
"""Return a list of tuples taken from self.args.stdout
[(plugin, attribute), ... ]"""
:return: A list of tuples. Example -[(plugin, attribute), ... ]
"""
ret = []
for p in self.args.stdout_csv.split(','):
if '.' in p:
@ -110,6 +110,7 @@ class GlancesStdoutCsv(object):
stats,
duration=3):
"""Display stats to stdout.
Refresh every duration second.
"""
# Build the stats list

View File

@ -54,9 +54,7 @@ class colors:
class GlancesStdoutIssue(object):
"""
This class manages the Issue display.
"""
"""This class manages the Issue display."""
def __init__(self, config=None, args=None):
# Init
@ -89,8 +87,7 @@ class GlancesStdoutIssue(object):
def update(self,
stats,
duration=3):
"""Display issue
"""
"""Display issue"""
self.print_version()
for plugin in sorted(stats._plugins):

View File

@ -29,6 +29,7 @@ from glances.globals import iterkeys, itervalues, nativestr
from glances.timer import getTimeSinceLastUpdate
from glances.plugins.plugin.model import GlancesPluginModel
from glances.processes import sort_stats as sort_stats_processes, glances_processes
from glances.timer import getTimeSinceLastUpdate
# Docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
@ -66,7 +67,6 @@ items_history_list = [{'name': 'cpu_percent',
'description': 'Container CPU consumption in %',
'y_unit': '%'}]
# List of key to remove before export
export_exclude_list = ['cpu', 'io', 'memory', 'network']
@ -95,11 +95,21 @@ class PluginModel(GlancesPluginModel):
# Init the Docker API
self.docker_client = self.connect()
# Dict of thread (to grab stats asynchronously, one thread is created by container)
# Dict of thread (to grab stats asynchronously, one thread is created per container)
# key: Container Id
# value: instance of ThreadDockerGrabber
self.thread_list = {}
# Dict of Network stats (Storing previous network stats to compute Rx/s and Tx/s)
# key: Container Id
# value: network stats dict
self.network_old = {}
# Dict of Disk IO stats (Storing previous disk_io stats to compute Rx/s and Tx/s)
# key: Container Id
# value: network stats dict
self.io_old = {}
# Force a first update because we need two update to have the first stat
self.update()
self.refresh_timer.set(0)
@ -217,8 +227,8 @@ class PluginModel(GlancesPluginModel):
t.start()
# Stop threads for non-existing containers
nonexisting_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
for container_id in nonexisting_containers:
absent_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
for container_id in absent_containers:
# Stop the thread
logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
self.thread_list[container_id].stop()
@ -267,7 +277,8 @@ class PluginModel(GlancesPluginModel):
container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats)
container_stats['cpu_percent'] = container_stats['cpu'].get('total', None)
# MEM
container_stats['memory'] = self.get_docker_memory(container.id, self.thread_list[container.id].stats)
container_stats['memory'] = self.get_docker_memory(container.id,
self.thread_list[container.id].stats)
container_stats['memory_usage'] = container_stats['memory'].get('usage', None)
if container_stats['memory'].get('cache', None) is not None:
container_stats['memory_usage'] -= container_stats['memory']['cache']
@ -276,7 +287,8 @@ class PluginModel(GlancesPluginModel):
container_stats['io_r'] = container_stats['io'].get('ior', None)
container_stats['io_w'] = container_stats['io'].get('iow', None)
# NET
container_stats['network'] = self.get_docker_network(container.id, self.thread_list[container.id].stats)
container_stats['network'] = self.get_docker_network(container.id,
self.thread_list[container.id].stats)
container_stats['network_rx'] = container_stats['network'].get('rx', None)
container_stats['network_tx'] = container_stats['network'].get('tx', None)
else:
@ -310,51 +322,41 @@ class PluginModel(GlancesPluginModel):
all_stats is the output of the stats method of the Docker API
Output: a dict {'total': 1.49}
"""
ret = {'total': 0.0}
cpu_stats = {'total': 0.0}
# Read the stats
# For each container, you will find a pseudo-file cpuacct.stat,
# containing the CPU usage accumulated by the processes of the container.
# Those times are expressed in ticks of 1/USER_HZ of a second.
# On x86 systems, USER_HZ is 100.
cpu_new = {}
precpu_new = {}
try:
cpu_new['total'] = all_stats['cpu_stats']['cpu_usage'].get(
'total_usage', None)
precpu_new['total'] = all_stats['precpu_stats']['cpu_usage'].get(
'total_usage', None)
cpu_new['system'] = all_stats['cpu_stats'].get(
'system_cpu_usage', None)
precpu_new['system'] = all_stats['precpu_stats'].get(
'system_cpu_usage', None)
cpu = {
'system': all_stats['cpu_stats']['system_cpu_usage'],
'total': all_stats['cpu_stats']['cpu_usage']['total_usage']
}
precpu = {
'system': all_stats['precpu_stats']['system_cpu_usage'],
'total': all_stats['precpu_stats']['cpu_usage']['total_usage']
}
# Issue #1857
# If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil
# then for compatibility with older daemons the length of
# the corresponding cpu_usage.percpu_usage array should be used.
if 'online_cpus' in all_stats['cpu_stats'] and \
all_stats['cpu_stats']['online_cpus'] is not None:
cpu_new['nb_core'] = all_stats['cpu_stats']['online_cpus']
else:
cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
cpu['nb_core'] = all_stats['cpu_stats'].get('online_cpus', None)
if cpu['nb_core'] is None:
cpu['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
except KeyError as e:
logger.debug(
"docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
else:
try:
cpu_delta = cpu_new['total'] - precpu_new['total']
system_cpu_delta = cpu_new['system'] - precpu_new['system']
cpu_delta = cpu['total'] - precpu['total']
system_cpu_delta = cpu['system'] - precpu['system']
# CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0
ret['total'] = (cpu_delta / system_cpu_delta) * \
cpu_new['nb_core'] * 100.0
cpu_stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0
except TypeError as e:
logger.debug(
"docker plugin - Cannot compute CPU usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
# Return the stats
return ret
return cpu_stats
def get_docker_memory(self, container_id, all_stats):
"""Return the container MEMORY.
@ -363,30 +365,28 @@ class PluginModel(GlancesPluginModel):
all_stats is the output of the stats method of the Docker API
Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
"""
ret = {}
memory_stats = {}
# Read the stats
try:
# Mandatory fields
memory_stats['usage'] = all_stats['memory_stats']['usage']
memory_stats['limit'] = all_stats['memory_stats']['limit']
# Issue #1857
# Some stats are not always available in ['memory_stats']['stats']
if 'rss' in all_stats['memory_stats']['stats']:
ret['rss'] = all_stats['memory_stats']['stats']['rss']
memory_stats['rss'] = all_stats['memory_stats']['stats']['rss']
elif 'total_rss' in all_stats['memory_stats']['stats']:
ret['rss'] = all_stats['memory_stats']['stats']['total_rss']
memory_stats['rss'] = all_stats['memory_stats']['stats']['total_rss']
else:
ret['rss'] = None
ret['cache'] = all_stats['memory_stats']['stats'].get(
'cache', None)
ret['max_usage'] = all_stats['memory_stats'].get(
'max_usage', None)
# Mandatory fields
ret['usage'] = all_stats['memory_stats']['usage']
ret['limit'] = all_stats['memory_stats']['limit']
memory_stats['rss'] = None
memory_stats['cache'] = all_stats['memory_stats']['stats'].get('cache', None)
memory_stats['max_usage'] = all_stats['memory_stats'].get('max_usage', None)
except (KeyError, TypeError) as e:
# all_stats do not have MEM information
logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
# Return the stats
return ret
return memory_stats
def get_docker_network(self, container_id, all_stats):
"""Return the container network usage using the Docker API (v1.0 or higher).
@ -403,7 +403,7 @@ class PluginModel(GlancesPluginModel):
# Read the rx/tx stats (in bytes)
try:
netcounters = all_stats["networks"]
net_stats = all_stats["networks"]
except KeyError as e:
# all_stats do not have NETWORK information
logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
@ -411,37 +411,24 @@ class PluginModel(GlancesPluginModel):
# No fallback available...
return network_new
# Previous network interface stats are stored in the network_old variable
if not hasattr(self, 'netcounters_old'):
# First call, we init the network_old var
self.netcounters_old = {}
try:
self.netcounters_old[container_id] = netcounters
except (IOError, UnboundLocalError):
pass
if container_id not in self.netcounters_old:
try:
self.netcounters_old[container_id] = netcounters
except (IOError, UnboundLocalError):
pass
# Previous network interface stats are stored in the self.network_old variable
# By storing time data we enable Rx/s and Tx/s calculations in the XML/RPC API, which would otherwise
# be overly difficult work for users of the API
try:
network_new['cumulative_rx'] = net_stats["eth0"]["rx_bytes"]
network_new['cumulative_tx'] = net_stats["eth0"]["tx_bytes"]
except KeyError as e:
# all_stats do not have INTERFACE information
logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
else:
# By storing time data we enable Rx/s and Tx/s calculations in the
# XML/RPC API, which would otherwise be overly difficult work
# for users of the API
try:
network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
except KeyError as e:
# all_stats do not have INTERFACE information
logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
if container_id in self.network_old:
network_new['rx'] = network_new['cumulative_rx'] - self.network_old[container_id]['cumulative_rx']
network_new['tx'] = network_new['cumulative_tx'] - self.network_old[container_id]['cumulative_tx']
# Save stats to compute next bitrate
self.netcounters_old[container_id] = netcounters
self.network_old[container_id] = network_new
# Return the stats
return network_new
@ -461,7 +448,7 @@ class PluginModel(GlancesPluginModel):
# Read the ior/iow stats (in bytes)
try:
iocounters = all_stats["blkio_stats"]
io_stats = all_stats["blkio_stats"]
except KeyError as e:
# all_stats do not have io information
logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
@ -469,45 +456,27 @@ class PluginModel(GlancesPluginModel):
# No fallback available...
return io_new
# Previous io interface stats are stored in the io_old variable
if not hasattr(self, 'iocounters_old'):
# First call, we init the io_old var
self.iocounters_old = {}
try:
self.iocounters_old[container_id] = iocounters
except (IOError, UnboundLocalError):
pass
# Previous io interface stats are stored in the self.io_old variable
# By storing time data we enable IoR/s and IoW/s calculations in the
# XML/RPC API, which would otherwise be overly difficult work
# for users of the API
try:
io_service_bytes_recursive = io_stats['io_service_bytes_recursive']
if container_id not in self.iocounters_old:
try:
self.iocounters_old[container_id] = iocounters
except (IOError, UnboundLocalError):
pass
# Read IOR and IOW value in the structure list of dict
io_new['cumulative_ior'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
io_new['cumulative_iow'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
except (TypeError, IndexError, KeyError, AttributeError) as e:
# all_stats do not have io information
logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
else:
# By storing time data we enable IoR/s and IoW/s calculations in the
# XML/RPC API, which would otherwise be overly difficult work
# for users of the API
try:
new_io_service_bytes_recursive = iocounters['io_service_bytes_recursive']
old_io_service_bytes_recursive = self.iocounters_old[container_id]['io_service_bytes_recursive']
io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
if container_id in self.io_old:
io_new['ior'] = io_new['cumulative_ior'] - self.io_old[container_id]['cumulative_ior']
io_new['iow'] = io_new['cumulative_iow'] - self.io_old[container_id]['cumulative_iow']
# Read IOR and IOW value in the structure list of dict
ior = [i for i in new_io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
iow = [i for i in new_io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
ior_old = [i for i in old_io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
iow_old = [i for i in old_io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
except (TypeError, IndexError, KeyError, AttributeError) as e:
# all_stats do not have io information
logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
else:
io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
io_new['ior'] = ior - ior_old
io_new['iow'] = iow - iow_old
io_new['cumulative_ior'] = ior
io_new['cumulative_iow'] = iow
# Save stats to compute next bitrate
self.iocounters_old[container_id] = iocounters
# Save stats to compute next bitrate
self.io_old[container_id] = io_new
# Return the stats
return io_new
@ -569,8 +538,8 @@ class PluginModel(GlancesPluginModel):
# Only process if stats exist (and non null) and display plugin enable...
if not self.stats \
or 'containers' not in self.stats or len(self.stats['containers']) == 0 \
or self.is_disabled():
or 'containers' not in self.stats or len(self.stats['containers']) == 0 \
or self.is_disabled():
return ret
# Build the string message
@ -591,7 +560,7 @@ class PluginModel(GlancesPluginModel):
default=20)
if self.config is not None else 20,
len(max(self.stats['containers'],
key=lambda x: len(x['name']))['name']))
key=lambda x: len(x['name']))['name']))
msg = ' {:{width}}'.format('Name', width=name_max_width)
ret.append(self.curse_add_line(msg))
msg = '{:>10}'.format('Status')
@ -663,7 +632,8 @@ class PluginModel(GlancesPluginModel):
unit = 'b'
for r in ['rx', 'tx']:
try:
value = self.auto_unit(int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
value = self.auto_unit(
int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
msg = '{:>7}'.format(value)
except KeyError:
msg = '{:>7}'.format('_')