diff --git a/infobright/python_modules/infobright.py b/infobright/python_modules/infobright.py index 5e60fee1..759a8a15 100644 --- a/infobright/python_modules/infobright.py +++ b/infobright/python_modules/infobright.py @@ -56,7 +56,7 @@ MAX_UPDATE_TIME = 15 -def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=True, get_slave=True): +def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_main=True, get_subordinate=True): """ """ @@ -114,28 +114,28 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T # try not to fail ? # BRIGHTHOUSE ENGINE status variables are pretty obscure get_brighthouse_engine = get_brighthouse_engine and variables.has_key('brighthouse_ini_controlmessages') - get_master = get_master and variables['log_bin'].lower() == 'on' + get_main = get_main and variables['log_bin'].lower() == 'on' if get_brighthouse_engine: logging.warn('get_brighthouse_engine status not implemented') - master_logs = tuple - if get_master: + main_logs = tuple + if get_main: cursor = conn.cursor(MySQLdb.cursors.Cursor) cursor.execute("SHOW MASTER LOGS") - master_logs = cursor.fetchall() + main_logs = cursor.fetchall() cursor.close() - slave_status = {} - if get_slave: + subordinate_status = {} + if get_subordinate: cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SHOW SLAVE STATUS") res = cursor.fetchone() if res: for (k,v) in res.items(): - slave_status[k.lower()] = v + subordinate_status[k.lower()] = v else: - get_slave = False + get_subordinate = False cursor.close() cursor = conn.cursor(MySQLdb.cursors.DictCursor) @@ -199,8 +199,8 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T 'select_range', 'select_range_check', 'select_scan', - 'slave_open_temp_tables', - 'slave_retried_transactions', + 'slave_open_temp_tables', + 'slave_retried_transactions', 'slow_launch_threads', 'slow_queries', 'sort_range', @@ -222,7 +222,7 @@ def 
update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T 'qcache_free_blocks', 'qcache_free_memory', 'qcache_total_blocks', - 'slave_open_temp_tables', + 'slave_open_temp_tables', 'threads_cached', 'threads_connected', 'threads_running', @@ -298,32 +298,32 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T infobright_stats['open_files_used'] = int(global_status['open_files']) / int(variables['open_files_limit']) - # process master logs - if get_master: - infobright_stats['binlog_count'] = len(master_logs) - infobright_stats['binlog_space_current'] = master_logs[-1][1] - #infobright_stats['binlog_space_total'] = sum((long(s[1]) for s in master_logs)) + # process main logs + if get_main: + infobright_stats['binlog_count'] = len(main_logs) + infobright_stats['binlog_space_current'] = main_logs[-1][1] + #infobright_stats['binlog_space_total'] = sum((long(s[1]) for s in main_logs)) infobright_stats['binlog_space_total'] = 0 - for s in master_logs: + for s in main_logs: infobright_stats['binlog_space_total'] += int(s[1]) - infobright_stats['binlog_space_used'] = float(master_logs[-1][1]) / float(variables['max_binlog_size']) * 100 - - # process slave status - if get_slave: - infobright_stats['slave_exec_master_log_pos'] = slave_status['exec_master_log_pos'] - #infobright_stats['slave_io'] = 1 if slave_status['slave_io_running'].lower() == "yes" else 0 - if slave_status['slave_io_running'].lower() == "yes": - infobright_stats['slave_io'] = 1 + infobright_stats['binlog_space_used'] = float(main_logs[-1][1]) / float(variables['max_binlog_size']) * 100 + + # process subordinate status + if get_subordinate: + infobright_stats['subordinate_exec_main_log_pos'] = subordinate_status['exec_master_log_pos'] + #infobright_stats['subordinate_io'] = 1 if subordinate_status['slave_io_running'].lower() == "yes" else 0 + if subordinate_status['slave_io_running'].lower() == "yes": + infobright_stats['subordinate_io'] = 1 
else: - infobright_stats['slave_io'] = 0 - #infobright_stats['slave_sql'] = 1 if slave_status['slave_sql_running'].lower() =="yes" else 0 - if slave_status['slave_sql_running'].lower() == "yes": - infobright_stats['slave_sql'] = 1 + infobright_stats['subordinate_io'] = 0 + #infobright_stats['subordinate_sql'] = 1 if subordinate_status['slave_sql_running'].lower() =="yes" else 0 + if subordinate_status['slave_sql_running'].lower() == "yes": + infobright_stats['subordinate_sql'] = 1 else: - infobright_stats['slave_sql'] = 0 - infobright_stats['slave_lag'] = slave_status['seconds_behind_master'] - infobright_stats['slave_relay_log_pos'] = slave_status['relay_log_pos'] - infobright_stats['slave_relay_log_space'] = slave_status['relay_log_space'] + infobright_stats['subordinate_sql'] = 0 + infobright_stats['subordinate_lag'] = subordinate_status['seconds_behind_master'] + infobright_stats['subordinate_relay_log_pos'] = subordinate_status['relay_log_pos'] + infobright_stats['subordinate_relay_log_space'] = subordinate_status['relay_log_space'] logging.debug('success updating stats') @@ -370,8 +370,8 @@ def metric_init(params): REPORT_BRIGHTHOUSE = str(params.get('get_brighthouse', True)) == "True" REPORT_BRIGHTHOUSE_ENGINE = str(params.get('get_brighthouse_engine', True)) == "True" - REPORT_MASTER = str(params.get('get_master', True)) == "True" - REPORT_SLAVE = str(params.get('get_slave', True)) == "True" + REPORT_MASTER = str(params.get('get_main', True)) == "True" + REPORT_SLAVE = str(params.get('get_subordinate', True)) == "True" logging.debug("init: " + str(params)) @@ -391,9 +391,9 @@ def metric_init(params): delta_per_second = True mysql_stats_descriptions = {} - master_stats_descriptions = {} + main_stats_descriptions = {} brighthouse_stats_descriptions = {} - slave_stats_descriptions = {} + subordinate_stats_descriptions = {} mysql_stats_descriptions = dict( aborted_clients = { @@ -652,15 +652,15 @@ def metric_init(params): 'units': 'joins', }, - 
slave_open_temp_tables = { - 'description': 'The number of temporary tables that the slave SQL thread currently has open', + slave_open_temp_tables = { + 'description': 'The number of temporary tables that the subordinate SQL thread currently has open', 'value_type': 'float', 'units': 'tables', 'slope': 'both', }, - slave_retried_transactions = { - 'description': 'The total number of times since startup that the replication slave SQL thread has retried transactions', + slave_retried_transactions = { + 'description': 'The total number of times since startup that the replication subordinate SQL thread has retried transactions', 'value_type': 'float', 'units': 'count', }, @@ -974,7 +974,7 @@ def metric_init(params): if REPORT_MASTER: - master_stats_descriptions = dict( + main_stats_descriptions = dict( binlog_count = { 'description': "Number of binary logs", 'units': 'logs', @@ -1002,34 +1002,34 @@ def metric_init(params): ) if REPORT_SLAVE: - slave_stats_descriptions = dict( - slave_exec_master_log_pos = { - 'description': "The position of the last event executed by the SQL thread from the master's binary log", + subordinate_stats_descriptions = dict( + subordinate_exec_main_log_pos = { + 'description': "The position of the last event executed by the SQL thread from the main's binary log", 'units': 'bytes', 'slope': 'both', }, - slave_io = { - 'description': "Whether the I/O thread is started and has connected successfully to the master", + subordinate_io = { + 'description': "Whether the I/O thread is started and has connected successfully to the main", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', }, - slave_lag = { + subordinate_lag = { 'description': "Replication Lag", 'units': 'secs', 'slope': 'both', }, - slave_relay_log_pos = { + subordinate_relay_log_pos = { 'description': "The position up to which the SQL thread has read and executed in the current relay log", 'units': 'bytes', 'slope': 'both', }, - slave_sql = { - 'description': 
"Slave SQL Running", + subordinate_sql = { + 'description': "Subordinate SQL Running", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', @@ -1042,7 +1042,7 @@ def metric_init(params): update_stats(REPORT_BRIGHTHOUSE, REPORT_BRIGHTHOUSE_ENGINE, REPORT_MASTER, REPORT_SLAVE) - for stats_descriptions in (brighthouse_stats_descriptions, master_stats_descriptions, mysql_stats_descriptions, slave_stats_descriptions): + for stats_descriptions in (brighthouse_stats_descriptions, main_stats_descriptions, mysql_stats_descriptions, subordinate_stats_descriptions): for label in stats_descriptions: if infobright_stats.has_key(label): format = '%u' @@ -1089,8 +1089,8 @@ def metric_cleanup(): parser.add_option("-S", "--socket", dest="unix_socket", help="unix_socket", default="") parser.add_option("--no-brighthouse", dest="get_brighthouse", action="store_false", default=True) parser.add_option("--no-brighthouse-engine", dest="get_brighthouse_engine", action="store_false", default=False) - parser.add_option("--no-master", dest="get_master", action="store_false", default=True) - parser.add_option("--no-slave", dest="get_slave", action="store_false", default=True) + parser.add_option("--no-main", dest="get_main", action="store_false", default=True) + parser.add_option("--no-subordinate", dest="get_subordinate", action="store_false", default=True) parser.add_option("-b", "--gmetric-bin", dest="gmetric_bin", help="path to gmetric binary", default="/usr/bin/gmetric") parser.add_option("-c", "--gmond-conf", dest="gmond_conf", help="path to gmond.conf", default="/etc/ganglia/gmond.conf") parser.add_option("-g", "--gmetric", dest="gmetric", help="submit via gmetric", action="store_true", default=False) @@ -1105,8 +1105,8 @@ def metric_cleanup(): 'port': options.port, 'get_brighthouse': options.get_brighthouse, 'get_brighthouse_engine': options.get_brighthouse_engine, - 'get_master': options.get_master, - 'get_slave': options.get_slave, + 'get_main': options.get_main, + 
'get_subordinate': options.get_subordinate, 'unix_socket': options.unix_socket, }) diff --git a/jenkins/python_modules/jenkins.py b/jenkins/python_modules/jenkins.py index 71324ab2..63817f60 100644 --- a/jenkins/python_modules/jenkins.py +++ b/jenkins/python_modules/jenkins.py @@ -169,17 +169,17 @@ def metric_init(params): 'value_type': 'float', 'format': '%.3f', 'units': 'executors', - 'description': 'Number of busy executors (master and slaves)'}, + 'description': 'Number of busy executors (main and subordinates)'}, jenkins_overallload_queue_length = { 'value_type': 'float', 'format': '%.3f', 'units': 'queued items', - 'description': 'Length of the queue (master and slaves)'}, + 'description': 'Length of the queue (main and subordinates)'}, jenkins_overallload_total_executors = { 'value_type': 'float', 'format': '%.3f', 'units': 'executors', - 'description': 'Number of executors (master and slaves)'}, + 'description': 'Number of executors (main and subordinates)'}, jenkins_jobs_total = { 'description': 'Total number of jobs'}, jenkins_jobs_blue = { diff --git a/mongodb/python_modules/mongodb.py b/mongodb/python_modules/mongodb.py index 146ba37d..bb87e5ca 100755 --- a/mongodb/python_modules/mongodb.py +++ b/mongodb/python_modules/mongodb.py @@ -133,10 +133,10 @@ def get_rate(name): def get_opcounter_rate(name): """Return change over time for an opcounter metric""" - master_rate = get_rate(name) + main_rate = get_rate(name) repl_rate = get_rate(name.replace('opcounters_', 'opcountersRepl_')) - return master_rate + repl_rate + return main_rate + repl_rate def get_globalLock_ratio(name): @@ -175,8 +175,8 @@ def get_connections_current_ratio(name): return result -def get_slave_delay(name): - """Return the replica set slave delay""" +def get_subordinate_delay(name): + """Return the replica set subordinate delay""" # get metrics metrics = get_metrics()[0] @@ -185,17 +185,17 @@ def get_slave_delay(name): if 'rs_status_myState' not in metrics['data'] or 
metrics['data']['rs_status_myState'] != 2: result = 0 - # compare my optime with the master's + # compare my optime with the main's else: - master = {} - slave = {} + main = {} + subordinate = {} try: for member in metrics['data']['rs_status_members']: if member['state'] == 1: - master = member + main = member if member['name'].split(':')[0] == socket.getfqdn(): - slave = member - result = max(0, master['optime']['t'] - slave['optime']['t']) / 1000 + subordinate = member + result = max(0, main['optime']['t'] - subordinate['optime']['t']) / 1000 except KeyError: result = 0 @@ -454,14 +454,14 @@ def metric_init(lparams): 'groups': groups }, { - 'name': NAME_PREFIX + 'slave_delay', - 'call_back': get_slave_delay, + 'name': NAME_PREFIX + 'subordinate_delay', + 'call_back': get_subordinate_delay, 'time_max': time_max, 'value_type': 'uint', 'units': 'Seconds', 'slope': 'both', 'format': '%u', - 'description': 'Replica Set Slave Delay', + 'description': 'Replica Set Subordinate Delay', 'groups': groups }, { diff --git a/mysqld/python_modules/mysql.py b/mysqld/python_modules/mysql.py index 67376dd9..080992a2 100644 --- a/mysqld/python_modules/mysql.py +++ b/mysqld/python_modules/mysql.py @@ -65,7 +65,7 @@ MAX_UPDATE_TIME = 15 -def update_stats(get_innodb=True, get_master=True, get_slave=True): +def update_stats(get_innodb=True, get_main=True, get_subordinate=True): """ """ @@ -130,7 +130,7 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): # try not to fail ? 
get_innodb = get_innodb and have_innodb - get_master = get_master and variables['log_bin'].lower() == 'on' + get_main = get_main and variables['log_bin'].lower() == 'on' innodb_status = defaultdict(int) if get_innodb: @@ -140,23 +140,23 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): cursor.close() logging.debug('innodb_status: ' + str(innodb_status)) - master_logs = tuple - if get_master: + main_logs = tuple + if get_main: cursor = conn.cursor(MySQLdb.cursors.Cursor) cursor.execute("SHOW MASTER LOGS") - master_logs = cursor.fetchall() + main_logs = cursor.fetchall() cursor.close() - slave_status = {} - if get_slave: + subordinate_status = {} + if get_subordinate: cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SHOW SLAVE STATUS") res = cursor.fetchone() if res: for (k,v) in res.items(): - slave_status[k.lower()] = v + subordinate_status[k.lower()] = v else: - get_slave = False + get_subordinate = False cursor.close() cursor = conn.cursor(MySQLdb.cursors.DictCursor) @@ -220,8 +220,8 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): 'select_range', 'select_range_check', 'select_scan', - 'slave_open_temp_tables', - 'slave_retried_transactions', + 'slave_open_temp_tables', + 'slave_retried_transactions', 'slow_launch_threads', 'slow_queries', 'sort_range', @@ -243,7 +243,7 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): 'qcache_free_blocks', 'qcache_free_memory', 'qcache_total_blocks', - 'slave_open_temp_tables', + 'slave_open_temp_tables', 'threads_cached', 'threads_connected', 'threads_running', @@ -301,32 +301,32 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): else: mysql_stats[key] = innodb_status[istat] - # process master logs - if get_master: - mysql_stats['binlog_count'] = len(master_logs) - mysql_stats['binlog_space_current'] = master_logs[-1][1] - #mysql_stats['binlog_space_total'] = sum((long(s[1]) for s in master_logs)) + # process 
main logs + if get_main: + mysql_stats['binlog_count'] = len(main_logs) + mysql_stats['binlog_space_current'] = main_logs[-1][1] + #mysql_stats['binlog_space_total'] = sum((long(s[1]) for s in main_logs)) mysql_stats['binlog_space_total'] = 0 - for s in master_logs: + for s in main_logs: mysql_stats['binlog_space_total'] += int(s[1]) - mysql_stats['binlog_space_used'] = float(master_logs[-1][1]) / float(variables['max_binlog_size']) * 100 - - # process slave status - if get_slave: - mysql_stats['slave_exec_master_log_pos'] = slave_status['exec_master_log_pos'] - #mysql_stats['slave_io'] = 1 if slave_status['slave_io_running'].lower() == "yes" else 0 - if slave_status['slave_io_running'].lower() == "yes": - mysql_stats['slave_io'] = 1 + mysql_stats['binlog_space_used'] = float(main_logs[-1][1]) / float(variables['max_binlog_size']) * 100 + + # process subordinate status + if get_subordinate: + mysql_stats['subordinate_exec_main_log_pos'] = subordinate_status['exec_master_log_pos'] + #mysql_stats['subordinate_io'] = 1 if subordinate_status['slave_io_running'].lower() == "yes" else 0 + if subordinate_status['slave_io_running'].lower() == "yes": + mysql_stats['subordinate_io'] = 1 else: - mysql_stats['slave_io'] = 0 - #mysql_stats['slave_sql'] = 1 if slave_status['slave_sql_running'].lower() =="yes" else 0 - if slave_status['slave_sql_running'].lower() == "yes": - mysql_stats['slave_sql'] = 1 + mysql_stats['subordinate_io'] = 0 + #mysql_stats['subordinate_sql'] = 1 if subordinate_status['slave_sql_running'].lower() =="yes" else 0 + if subordinate_status['slave_sql_running'].lower() == "yes": + mysql_stats['subordinate_sql'] = 1 else: - mysql_stats['slave_sql'] = 0 - mysql_stats['slave_lag'] = slave_status['seconds_behind_master'] - mysql_stats['slave_relay_log_pos'] = slave_status['relay_log_pos'] - mysql_stats['slave_relay_log_space'] = slave_status['relay_log_space'] + mysql_stats['subordinate_sql'] = 0 + mysql_stats['subordinate_lag'] = 
subordinate_status['seconds_behind_master'] + mysql_stats['subordinate_relay_log_pos'] = subordinate_status['relay_log_pos'] + mysql_stats['subordinate_relay_log_space'] = subordinate_status['relay_log_space'] logging.debug('success updating stats') @@ -369,8 +369,8 @@ def metric_init(params): global REPORT_SLAVE REPORT_INNODB = str(params.get('get_innodb', True)) == "True" - REPORT_MASTER = str(params.get('get_master', True)) == "True" - REPORT_SLAVE = str(params.get('get_slave', True)) == "True" + REPORT_MASTER = str(params.get('get_main', True)) == "True" + REPORT_SLAVE = str(params.get('get_subordinate', True)) == "True" logging.debug("init: " + str(params)) @@ -387,9 +387,9 @@ def metric_init(params): if params.get("delta_per_second", '') != '': delta_per_second = True - master_stats_descriptions = {} + main_stats_descriptions = {} innodb_stats_descriptions = {} - slave_stats_descriptions = {} + subordinate_stats_descriptions = {} misc_stats_descriptions = dict( aborted_clients = { @@ -648,15 +648,15 @@ def metric_init(params): 'units': 'joins', }, - slave_open_temp_tables = { - 'description': 'The number of temporary tables that the slave SQL thread currently has open', + slave_open_temp_tables = { + 'description': 'The number of temporary tables that the subordinate SQL thread currently has open', 'value_type': 'float', 'units': 'tables', 'slope': 'both', }, - slave_retried_transactions = { - 'description': 'The total number of times since startup that the replication slave SQL thread has retried transactions', + slave_retried_transactions = { + 'description': 'The total number of times since startup that the replication subordinate SQL thread has retried transactions', 'value_type': 'float', 'units': 'count', }, @@ -755,7 +755,7 @@ def metric_init(params): ) if REPORT_MASTER: - master_stats_descriptions = dict( + main_stats_descriptions = dict( binlog_count = { 'description': "Number of binary logs", 'units': 'logs', @@ -783,34 +783,34 @@ def 
metric_init(params): ) if REPORT_SLAVE: - slave_stats_descriptions = dict( - slave_exec_master_log_pos = { - 'description': "The position of the last event executed by the SQL thread from the master's binary log", + subordinate_stats_descriptions = dict( + subordinate_exec_main_log_pos = { + 'description': "The position of the last event executed by the SQL thread from the main's binary log", 'units': 'bytes', 'slope': 'both', }, - slave_io = { - 'description': "Whether the I/O thread is started and has connected successfully to the master", + subordinate_io = { + 'description': "Whether the I/O thread is started and has connected successfully to the main", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', }, - slave_lag = { + subordinate_lag = { 'description': "Replication Lag", 'units': 'secs', 'slope': 'both', }, - slave_relay_log_pos = { + subordinate_relay_log_pos = { 'description': "The position up to which the SQL thread has read and executed in the current relay log", 'units': 'bytes', 'slope': 'both', }, - slave_sql = { - 'description': "Slave SQL Running", + subordinate_sql = { + 'description': "Subordinate SQL Running", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', @@ -1094,7 +1094,7 @@ def metric_init(params): time.sleep(MAX_UPDATE_TIME) update_stats(REPORT_INNODB, REPORT_MASTER, REPORT_SLAVE) - for stats_descriptions in (innodb_stats_descriptions, master_stats_descriptions, misc_stats_descriptions, slave_stats_descriptions): + for stats_descriptions in (innodb_stats_descriptions, main_stats_descriptions, misc_stats_descriptions, subordinate_stats_descriptions): for label in stats_descriptions: if mysql_stats.has_key(label): format = '%u' @@ -1140,8 +1140,8 @@ def metric_cleanup(): parser.add_option("-P", "--port", dest="port", help="port", default=3306, type="int") parser.add_option("-S", "--socket", dest="unix_socket", help="unix_socket", default="") parser.add_option("--no-innodb", dest="get_innodb", 
action="store_false", default=True) - parser.add_option("--no-master", dest="get_master", action="store_false", default=True) - parser.add_option("--no-slave", dest="get_slave", action="store_false", default=True) + parser.add_option("--no-main", dest="get_main", action="store_false", default=True) + parser.add_option("--no-subordinate", dest="get_subordinate", action="store_false", default=True) parser.add_option("-b", "--gmetric-bin", dest="gmetric_bin", help="path to gmetric binary", default="/usr/bin/gmetric") parser.add_option("-c", "--gmond-conf", dest="gmond_conf", help="path to gmond.conf", default="/etc/ganglia/gmond.conf") parser.add_option("-g", "--gmetric", dest="gmetric", help="submit via gmetric", action="store_true", default=False) @@ -1155,8 +1155,8 @@ def metric_cleanup(): 'user': options.user, 'port': options.port, 'get_innodb': options.get_innodb, - 'get_master': options.get_master, - 'get_slave': options.get_slave, + 'get_main': options.get_main, + 'get_subordinate': options.get_subordinate, 'unix_socket': options.unix_socket, })