idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
28,300
def _pickle_batch ( self ) : payload = pickle . dumps ( self . batch ) header = struct . pack ( "!L" , len ( payload ) ) message = header + payload return message
Pickle the metrics into a form that can be understood by the graphite pickle connector .
28,301
def _send ( self , metric ) : metric_name = self . get_name_from_path ( metric . path ) tmax = "60" dmax = "0" slope = "both" metric_type = "double" units = "" group = "" self . gmetric . send ( metric_name , metric . value , metric_type , units , slope , tmax , dmax , group )
Send data to gmond .
28,302
def process ( self , metric ) : if not mosquitto : return line = str ( metric ) topic , value , timestamp = line . split ( ) if len ( self . prefix ) : topic = "%s/%s" % ( self . prefix , topic ) topic = topic . replace ( '.' , '/' ) topic = topic . replace ( '#' , '&' ) if self . timestamp == 0 : self . mqttc . publis...
Process a metric by converting metric name to MQTT topic name ; the payload is metric and timestamp .
28,303
def process ( self , metric ) : entry = { 'timestamp' : metric . timestamp , 'value' : metric . value , "tags" : { } } entry [ "tags" ] [ "hostname" ] = metric . host if self . cleanMetrics : metric = MetricWrapper ( metric , self . log ) if self . skipAggregates and metric . isAggregate ( ) : return for tagKey in metr...
Process a metric by sending it to TSDB
28,304
def _send ( self , content ) : retry = 0 success = False while retry < 3 and success is False : self . log . debug ( content ) try : request = urllib2 . Request ( "http://" + self . host + ":" + str ( self . port ) + "/api/put" , content , self . httpheader ) response = urllib2 . urlopen ( url = request , timeout = sel...
Send content to TSDB .
28,305
def get ( self , oid , host , port , community ) : ret = { } if not isinstance ( oid , tuple ) : oid = self . _convert_to_oid ( oid ) host = socket . gethostbyname ( host ) snmpAuthData = cmdgen . CommunityData ( 'agent-{}' . format ( community ) , community ) snmpTransportData = cmdgen . UdpTransportTarget ( ( host , ...
Perform SNMP get for a given OID
28,306
def walk ( self , oid , host , port , community ) : ret = { } if not isinstance ( oid , tuple ) : oid = self . _convert_to_oid ( oid ) host = socket . gethostbyname ( host ) snmpAuthData = cmdgen . CommunityData ( 'agent-{}' . format ( community ) , community ) snmpTransportData = cmdgen . UdpTransportTarget ( ( host ,...
Perform an SNMP walk on a given OID
28,307
def get_disk_statistics ( self ) : result = { } if os . access ( '/proc/diskstats' , os . R_OK ) : self . proc_diskstats = True fp = open ( '/proc/diskstats' ) try : for line in fp : try : columns = line . split ( ) if len ( columns ) < 14 : continue major = int ( columns [ 0 ] ) minor = int ( columns [ 1 ] ) device = ...
Create a map of disks in the machine .
28,308
def process(self, metric):
    """Translate *metric* into a Riemann event and ship it.

    Delivery is best-effort: send failures are logged and the metric
    is dropped rather than crashing the handler.
    """
    event = self._metric_to_riemann_event(metric)
    try:
        self.client.send_event(event)
    except Exception as exc:
        self.log.error("RiemannHandler: Error sending event to Riemann: %s", exc)
Send a metric to Riemann .
28,309
def _metric_to_riemann_event ( self , metric ) : path = '%s.%s.%s' % ( metric . getPathPrefix ( ) , metric . getCollectorPath ( ) , metric . getMetricPath ( ) ) return self . client . create_event ( { 'host' : metric . host , 'service' : path , 'time' : metric . timestamp , 'metric_f' : float ( metric . value ) , 'ttl'...
Convert a metric to a dictionary representing a Riemann event .
28,310
def collect ( self ) : if boto is None : self . log . error ( "Unable to import boto python module" ) return { } for s3instance in self . config [ 's3' ] : self . log . info ( "S3: byte_unit: %s" % self . config [ 'byte_unit' ] ) aws_access = self . config [ 's3' ] [ s3instance ] [ 'aws_access_key' ] aws_secret = self ...
Collect s3 bucket stats
28,311
def key_to_metric(self, key):
    """Replace every non-ASCII-letter character in *key* with '_'.

    Uses string.ascii_letters instead of the original string.letters:
    string.letters is Python-2-only and locale-dependent, so the same
    key could sanitize differently depending on the host locale.
    ascii_letters gives a stable, portable result.
    """
    return ''.join(ch if ch in string.ascii_letters else '_' for ch in key)
Replace all non - letter characters with underscores
28,312
def parse_value(self, value):
    """Convert a sensor value string to float; return None if unparseable.

    'na' maps to None. Plain decimal floats are tried first, then hex
    float literals (e.g. '0x1.8p1'). The original used bare ``except:``
    clauses, which would also hide unrelated errors; only ValueError
    (the parse failure) is swallowed now.
    """
    value = value.strip()
    if value == 'na':
        return None
    try:
        return float(value)
    except ValueError:
        pass
    try:
        # Second chance: hex float literals such as '0x1.8p1'.
        return float.fromhex(value)
    except ValueError:
        return None
Convert value string to float for reporting
28,313
def collect ( self ) : perfdata_dir = self . config [ 'perfdata_dir' ] try : filenames = os . listdir ( perfdata_dir ) except OSError : self . log . error ( "Cannot read directory `{dir}'" . format ( dir = perfdata_dir ) ) return for filename in filenames : self . _process_file ( os . path . join ( perfdata_dir , filen...
Collect statistics from a Nagios perfdata directory .
28,314
def _fields_valid ( self , d ) : if 'DATATYPE' not in d : return False datatype = d [ 'DATATYPE' ] if datatype == 'HOSTPERFDATA' : fields = self . GENERIC_FIELDS + self . HOST_FIELDS elif datatype == 'SERVICEPERFDATA' : fields = self . GENERIC_FIELDS + self . SERVICE_FIELDS else : return False for field in fields : if ...
Verify that all necessary fields are present
28,315
def _normalize_to_unit ( self , value , unit ) : if unit == 'ms' : return value / 1000.0 if unit == 'us' : return value / 1000000.0 if unit == 'KB' : return value * 1024 if unit == 'MB' : return value * 1024 * 1024 if unit == 'GB' : return value * 1024 * 1024 * 1024 if unit == 'TB' : return value * 1024 * 1024 * 1024 *...
Normalize the value to the unit returned .
28,316
def _parse_perfdata ( self , s ) : metrics = [ ] counters = re . findall ( self . TOKENIZER_RE , s ) if counters is None : self . log . warning ( "Failed to parse performance data: {s}" . format ( s = s ) ) return metrics for ( key , value , uom , warn , crit , min , max ) in counters : try : norm_value = self . _norma...
Parse performance data from a perfdata string
28,317
def _process_file ( self , path ) : try : f = open ( path ) for line in f : self . _process_line ( line ) os . remove ( path ) except IOError as ex : self . log . error ( "Could not open file `{path}': {error}" . format ( path = path , error = ex . strerror ) )
Parse and submit the metrics from a file
28,318
def _process_line ( self , line ) : fields = self . _extract_fields ( line ) if not self . _fields_valid ( fields ) : self . log . warning ( "Missing required fields for line: {line}" . format ( line = line ) ) metric_path_base = [ ] graphite_prefix = fields . get ( 'GRAPHITEPREFIX' ) graphite_postfix = fields . get ( ...
Parse and submit the metrics from a line of perfdata output
28,319
def _bind ( self ) : credentials = pika . PlainCredentials ( self . user , self . password ) params = pika . ConnectionParameters ( credentials = credentials , host = self . server , virtual_host = self . vhost , port = self . port ) self . connection = pika . BlockingConnection ( params ) self . channel = self . conne...
Create socket and bind
28,320
def process ( self , metric ) : if not pika : return routingKeyDic = { 'metric' : lambda : metric . path , 'custom' : lambda : self . custom_routing_key , 'host' : lambda : metric . host , 'metric.path' : metric . getMetricPath , 'path.prefix' : metric . getPathPrefix , 'collector.path' : metric . getCollectorPath , } ...
Process a metric and send it to RabbitMQ topic exchange
28,321
def _collect_for_instance ( self , instance , connection ) : with connection . cursor ( ) as cursor : for queue , metrics in self . get_queue_info ( instance , cursor ) : for name , metric in metrics . items ( ) : self . publish ( '.' . join ( ( instance , queue , name ) ) , metric ) with connection . cursor ( ) as cur...
Collects metrics for a named connection .
28,322
def get_queue_info(self, instance, cursor):
    """Yield (queue_name, metrics_dict) for every pgq queue.

    *instance* is accepted for signature parity with the other
    *_info collectors but is not used by the query itself.
    """
    cursor.execute(self.QUEUE_INFO_STATEMENT)
    for name, lag, rate in cursor:
        yield name, {'ticker_lag': lag, 'ev_per_sec': rate}
Collects metrics for all queues on the connected database .
28,323
def get_consumer_info(self, instance, cursor):
    """Yield (queue_name, consumer_name, metrics_dict) per pgq consumer.

    *instance* is unused; it mirrors get_queue_info's signature.
    """
    cursor.execute(self.CONSUMER_INFO_STATEMENT)
    for queue, consumer, lag, pending, last_seen in cursor:
        yield queue, consumer, {
            'lag': lag,
            'pending_events': pending,
            'last_seen': last_seen,
        }
Collects metrics for all consumers on the connected database .
28,324
def collect ( self ) : def traverse ( d , metric_name = '' ) : for key , value in d . iteritems ( ) : if isinstance ( value , dict ) : if metric_name == '' : metric_name_next = key else : metric_name_next = metric_name + '.' + key traverse ( value , metric_name_next ) else : metric_name_finished = metric_name + '.' + k...
Publish all mdstat metrics .
28,325
def _parse_array_member_state ( self , block ) : members = block . split ( '\n' ) [ 0 ] . split ( ' : ' ) [ 1 ] . split ( ' ' ) [ 2 : ] device_regexp = re . compile ( '^(?P<member_name>.*)' '\[(?P<member_role_number>\d*)\]' '\(?(?P<member_state>[FS])?\)?$' ) ret = { 'active' : 0 , 'faulty' : 0 , 'spare' : 0 } for membe...
Parse the state of the md array members .
28,326
def _parse_array_status ( self , block ) : array_status_regexp = re . compile ( '^ *(?P<blocks>\d*) blocks ' '(?:super (?P<superblock_version>\d\.\d) )?' '(?:level (?P<raid_level>\d), ' '(?P<chunk_size>\d*)k chunk, ' 'algorithm (?P<algorithm>\d) )?' '(?:\[(?P<total_members>\d*)/(?P<actual_members>\d*)\])?' '(?:(?P<roun...
Parse the status of the md array .
28,327
def _parse_array_bitmap ( self , block ) : array_bitmap_regexp = re . compile ( '^ *bitmap: (?P<allocated_pages>[0-9]*)/' '(?P<total_pages>[0-9]*) pages ' '\[(?P<page_size>[0-9]*)KB\], ' '(?P<chunk_size>[0-9]*)KB chunk.*$' , re . MULTILINE ) regexp_res = array_bitmap_regexp . search ( block ) if not regexp_res : return...
Parse the bitmap status of the md array .
28,328
def _parse_array_recovery ( self , block ) : array_recovery_regexp = re . compile ( '^ *\[.*\] *recovery = (?P<percent>\d*\.?\d*)%' ' \(\d*/\d*\) finish=(?P<remaining_time>\d*\.?\d*)min ' 'speed=(?P<speed>\d*)K/sec$' , re . MULTILINE ) regexp_res = array_recovery_regexp . search ( block ) if not regexp_res : return Non...
Parse the recovery progress of the md array .
28,329
def get_passenger_memory_stats ( self ) : command = [ self . config [ "passenger_memory_stats_bin" ] ] if str_to_bool ( self . config [ "use_sudo" ] ) : command . insert ( 0 , self . config [ "sudo_cmd" ] ) try : proc1 = subprocess . Popen ( command , stdout = subprocess . PIPE ) ( std_out , std_err ) = proc1 . communi...
Execute passenger - memory - stats parse its output return dictionary with stats .
28,330
def get_passenger_cpu_usage ( self , dict_stats ) : try : proc1 = subprocess . Popen ( [ "top" , "-b" , "-n" , "2" ] , stdout = subprocess . PIPE ) ( std_out , std_err ) = proc1 . communicate ( ) except OSError : return ( - 1 ) re_lspaces = re . compile ( "^\s*" ) re_digit = re . compile ( "^\d" ) overall_cpu = 0 for r...
Execute % top ; and return STDOUT .
28,331
def get_passenger_queue_stats ( self ) : queue_stats = { "top_level_queue_size" : 0.0 , "passenger_queue_size" : 0.0 , } command = [ self . config [ "passenger_status_bin" ] ] if str_to_bool ( self . config [ "use_sudo" ] ) : command . insert ( 0 , self . config [ "sudo_cmd" ] ) try : proc1 = subprocess . Popen ( comma...
Execute passenger - status , parse its output and return the requests in queue
28,332
def collect ( self ) : if not os . access ( self . config [ "bin" ] , os . X_OK ) : self . log . error ( "Path %s does not exist or is not executable" , self . config [ "bin" ] ) return { } dict_stats = self . get_passenger_memory_stats ( ) if len ( dict_stats . keys ( ) ) == 0 : return { } queue_stats = self . get_pas...
Collector Passenger stats
28,333
def _collect ( self , section = None ) : if self . config [ 'method' ] == 'http' : csv_data = self . http_get_csv_data ( section ) elif self . config [ 'method' ] == 'unix' : csv_data = self . unix_get_csv_data ( ) else : self . log . error ( "Unknown collection method: %s" , self . config [ 'method' ] ) csv_data = [ ]...
Collect HAProxy Stats
28,334
def match_process ( pid , name , cmdline , exe , cfg ) : if cfg [ 'selfmon' ] and pid == os . getpid ( ) : return True for exe_re in cfg [ 'exe' ] : if exe_re . search ( exe ) : return True for name_re in cfg [ 'name' ] : if name_re . search ( name ) : return True for cmdline_re in cfg [ 'cmdline' ] : if cmdline_re . s...
Decides whether a process matches with a given process descriptor
28,335
def collect ( self ) : if not psutil : self . log . error ( 'Unable to import psutil' ) self . log . error ( 'No process resource metrics retrieved' ) return None for process in psutil . process_iter ( ) : self . collect_process_info ( process ) for pg_name , counters in self . processes_info . iteritems ( ) : if count...
Collects resources usage of each process defined under the process subsection of the config file
28,336
def parse ( cls , string ) : match = re . match ( r'^(?P<name>[A-Za-z0-9\.\-_]+)\s+' + '(?P<value>[0-9\.]+)\s+' + '(?P<timestamp>[0-9\.]+)(\n?)$' , string ) try : groups = match . groupdict ( ) return Metric ( groups [ 'name' ] , groups [ 'value' ] , float ( groups [ 'timestamp' ] ) ) except : raise DiamondException ( ...
Parse a string and create a metric
28,337
def getPathPrefix(self):
    """Return the path prefix before the host segment.

    e.g. 'servers.host.cpu.total.idle' -> 'servers'.
    """
    if self.host is None:
        # No host recorded: the prefix is simply the first dotted segment.
        return self.path.split('.')[0]
    # Everything up to (not including) the '.' preceding the host.
    cut = self.path.index(self.host) - 1
    return self.path[:cut]
Returns the path prefix path servers . host . cpu . total . idle return servers
28,338
def getCollectorPath(self):
    """Return the collector segment of the metric path.

    e.g. 'servers.host.cpu.total.idle' -> 'cpu'.
    """
    if self.host is None:
        # Without a host, the collector is assumed to be segment #3.
        return self.path.split('.')[2]
    start = self.path.index(self.host) + len(self.host) + 1
    end = self.path.index('.', start)
    return self.path[start:end]
Returns collector path servers . host . cpu . total . idle return cpu
28,339
def getMetricPath(self):
    """Return the metric portion after prefix, host and collector.

    e.g. 'servers.host.cpu.total.idle' -> 'total.idle'.
    """
    if self.host is None:
        # No host: assume prefix + 2 fixed segments, keep the rest.
        return '.'.join(self.path.split('.')[3:])
    lead = '.'.join([self.getPathPrefix(), self.host, self.getCollectorPath()])
    return self.path[len(lead) + 1:]
Returns the metric path after the collector name servers . host . cpu . total . idle return total . idle
28,340
def _process ( self , metric ) : if not self . enabled : return try : try : self . lock . acquire ( ) self . process ( metric ) except Exception : self . log . error ( traceback . format_exc ( ) ) finally : if self . lock . locked ( ) : self . lock . release ( )
Decorator for processing handlers with a lock catching exceptions
28,341
def _flush ( self ) : if not self . enabled : return try : try : self . lock . acquire ( ) self . flush ( ) except Exception : self . log . error ( traceback . format_exc ( ) ) finally : if self . lock . locked ( ) : self . lock . release ( )
Decorator for flushing handlers with a lock catching exceptions
28,342
def _throttle_error ( self , msg , * args , ** kwargs ) : now = time . time ( ) if msg in self . _errors : if ( ( now - self . _errors [ msg ] ) >= self . server_error_interval ) : fn = self . log . error self . _errors [ msg ] = now else : fn = self . log . debug else : self . _errors [ msg ] = now fn = self . log . e...
Avoids sending errors repeatedly . Waits at least self . server_error_interval seconds before sending the same error string to the error logging facility . If not enough time has passed it calls log . debug instead
28,343
def _reset_errors ( self , msg = None ) : if msg is not None and msg in self . _errors : del self . _errors [ msg ] else : self . _errors = { }
Resets the logging throttle cache so the next error is emitted regardless of the value in self . server_error_interval
28,344
def signal_to_exception(signum, frame):
    """Translate a received POSIX signal into a typed exception.

    Installed as a signal handler so collector code can catch e.g.
    the SIGALRM timeout as an ordinary exception.
    """
    # Known signals map to dedicated no-arg exception classes.
    known = {
        signal.SIGALRM: SIGALRMException,
        signal.SIGHUP: SIGHUPException,
        signal.SIGUSR1: SIGUSR1Exception,
        signal.SIGUSR2: SIGUSR2Exception,
    }
    exc_cls = known.get(signum)
    if exc_cls is not None:
        raise exc_cls()
    # Anything else carries the signal number in a generic wrapper.
    raise SignalException(signum)
Called by the timeout alarm during the collector run time
28,345
def collect_snmp ( self , device , host , port , community ) : self . log . info ( "Collecting ServerTech PDU statistics from: %s" % device ) timestamp = time . time ( ) inputFeeds = { } for gaugeName , gaugeOid in self . PDU_SYSTEM_GAUGES . items ( ) : systemGauges = self . walk ( gaugeOid , host , port , community ) ...
Collect stats from device
28,346
def get_default_config_help(self):
    """Return the default collector help text (no extra options)."""
    help_text = super(UsersCollector, self).get_default_config_help()
    # No collector-specific options to document.
    help_text.update({})
    return help_text
Returns the default collector help text
28,347
def _send ( self ) : if not statsd : return for metric in self . metrics : ( prefix , name ) = metric . path . rsplit ( "." , 1 ) logging . debug ( "Sending %s %s|g" , name , metric . value ) if metric . metric_type == 'GAUGE' : if hasattr ( statsd , 'StatsClient' ) : self . connection . gauge ( metric . path , metric ...
Send data to statsd . Fire and forget . Cross fingers and it'll arrive .
28,348
def _connect(self):
    """Open a connection/pipeline to the statsd server.

    Supports both client libraries: 'statsd' (StatsClient, batched
    via a pipeline) and the legacy python-statsd Connection API.
    """
    if not statsd:
        return
    if hasattr(statsd, 'StatsClient'):
        # statsd >= 2.x: batch sends through a pipeline.
        self.connection = statsd.StatsClient(
            host=self.host, port=self.port).pipeline()
    else:
        # Legacy python-statsd API.
        self.connection = statsd.Connection(
            host=self.host, port=self.port, sample_rate=1.0)
Connect to the statsd server
28,349
def _match_metric ( self , metric ) : if len ( self . _compiled_filters ) == 0 : return True for ( collector , filter_regex ) in self . _compiled_filters : if collector != metric . getCollectorPath ( ) : continue if filter_regex . match ( metric . getMetricPath ( ) ) : return True return False
matches the metric path if the metrics are empty it shorts to True
28,350
def process(self, metric):
    """Queue *metric* if it passes the filters; flush when batch is full."""
    if not self._match_metric(metric):
        return
    self.metrics.append(metric)
    if self.should_flush():
        self._send()
Queue a metric . Flushing queue if batch size reached
28,351
def into_signalfx_point ( self , metric ) : dims = { "collector" : metric . getCollectorPath ( ) , "prefix" : metric . getPathPrefix ( ) , } if metric . host is not None and metric . host != "" : dims [ "host" ] = metric . host return { "metric" : metric . getMetricPath ( ) , "value" : metric . value , "dimensions" : d...
Convert diamond metric into something signalfx can understand
28,352
def gmetric_write ( NAME , VAL , TYPE , UNITS , SLOPE , TMAX , DMAX , GROUP ) : packer = Packer ( ) HOSTNAME = "test" SPOOF = 0 packer . pack_int ( 128 ) packer . pack_string ( HOSTNAME ) packer . pack_string ( NAME ) packer . pack_int ( SPOOF ) packer . pack_string ( TYPE ) packer . pack_string ( NAME ) packer . pack_...
Arguments are in all upper - case to match XML
28,353
def collect ( self ) : instances = { } for device in os . listdir ( '/dev/' ) : instances . update ( self . match_device ( device , '/dev/' ) ) for device_id in os . listdir ( '/dev/disk/by-id/' ) : instances . update ( self . match_device ( device , '/dev/disk/by-id/' ) ) metrics = { } for device , p in instances . it...
Collect and publish disk temperatures
28,354
def _get ( self , scheme , host , port , path , assert_key = None ) : url = '%s://%s:%i/%s' % ( scheme , host , port , path ) try : request = urllib2 . Request ( url ) if self . config [ 'user' ] and self . config [ 'password' ] : base64string = base64 . standard_b64encode ( '%s:%s' % ( self . config [ 'user' ] , self ...
Execute a ES API call . Convert response into JSON and optionally assert its structure .
28,355
def _set_or_sum_metric ( self , metrics , metric_path , value ) : if metric_path in metrics : metrics [ metric_path ] += value else : metrics [ metric_path ] = value
If we already have a datapoint for this metric lets add the value . This is used when the logstash mode is enabled .
28,356
def _client ( self , host , port , unix_socket , auth ) : db = int ( self . config [ 'db' ] ) timeout = int ( self . config [ 'timeout' ] ) try : cli = redis . Redis ( host = host , port = port , db = db , socket_timeout = timeout , password = auth , unix_socket_path = unix_socket ) cli . ping ( ) return cli except Exc...
Return a redis client for the configuration .
28,357
def _precision ( self , value ) : value = str ( value ) decimal = value . rfind ( '.' ) if decimal == - 1 : return 0 return len ( value ) - decimal - 1
Return the precision of the number
28,358
def _get_info ( self , host , port , unix_socket , auth ) : client = self . _client ( host , port , unix_socket , auth ) if client is None : return None info = client . info ( ) del client return info
Return info dict from specified Redis instance
28,359
def _get_config ( self , host , port , unix_socket , auth , config_key ) : client = self . _client ( host , port , unix_socket , auth ) if client is None : return None config_value = client . config_get ( config_key ) del client return config_value
Return config string from specified Redis instance and config key
28,360
def collect_instance ( self , nick , host , port , unix_socket , auth ) : info = self . _get_info ( host , port , unix_socket , auth ) if info is None : return data = dict ( ) if 'role' in info : if info [ 'role' ] == "master" : data [ 'replication.master' ] = 1 data [ 'replication.master_sync_in_progress' ] = 0 else :...
Collect metrics from a single Redis instance
28,361
def collect(self):
    """Collect and publish stats from every configured Redis instance."""
    if redis is None:
        self.log.error('Unable to import module redis')
        return {}
    for nick in self.instances.keys():
        host, port, unix_socket, auth = self.instances[nick]
        self.collect_instance(nick, host, int(port), unix_socket, auth)
Collect the stats from the redis instance and publish them .
28,362
def get_default_config(self):
    """Return the XFS collector's default settings."""
    config = super(XFSCollector, self).get_default_config()
    # Metrics from this collector are published under the 'xfs' path.
    config['path'] = 'xfs'
    return config
Returns the xfs collector settings
28,363
def _bind ( self ) : self . log . debug ( "CloudWatch: Attempting to connect to CloudWatch at Region: %s" , self . region ) try : self . connection = boto . ec2 . cloudwatch . connect_to_region ( self . region ) self . log . debug ( "CloudWatch: Succesfully Connected to CloudWatch at Region: %s" , self . region ) excep...
Create CloudWatch Connection
28,364
def process ( self , metric ) : if not boto : return collector = str ( metric . getCollectorPath ( ) ) metricname = str ( metric . getMetricPath ( ) ) for rule in self . rules : self . log . debug ( "Comparing Collector: [%s] with (%s) " "and Metric: [%s] with (%s)" , str ( rule [ 'collector' ] ) , collector , str ( ru...
Process a metric and send it to CloudWatch
28,365
def send_metrics_to_cloudwatch ( self , rule , metric , dimensions ) : timestamp = datetime . datetime . utcfromtimestamp ( metric . timestamp ) self . log . debug ( "CloudWatch: Attempting to publish metric: %s to %s " "with value (%s) for dimensions %s @%s" , rule [ 'name' ] , rule [ 'namespace' ] , str ( metric . va...
Send metrics to CloudWatch for the given dimensions
28,366
def _publish_replset ( self , data , base_prefix ) : prefix = base_prefix + [ 'replset' ] self . _publish_dict_with_prefix ( data , prefix ) total_nodes = len ( data [ 'members' ] ) healthy_nodes = reduce ( lambda value , node : value + node [ 'health' ] , data [ 'members' ] , 0 ) self . _publish_dict_with_prefix ( { '...
Given a response to replSetGetStatus publishes all numeric values of the instance aggregate stats of healthy nodes vs total nodes and the observed statuses of all nodes in the replica set .
28,367
def _send ( self ) : try : if self . influx is None : self . log . debug ( "InfluxdbHandler: Socket is not connected. " "Reconnecting." ) self . _connect ( ) if self . influx is None : self . log . debug ( "InfluxdbHandler: Reconnect failed." ) else : metrics = [ ] for path in self . batch : metrics . append ( { "point...
Send data to Influxdb . Data that can not be sent will be kept in queued .
28,368
def _connect ( self ) : try : self . influx = InfluxDBClient ( self . hostname , self . port , self . username , self . password , self . database , self . ssl ) self . log . debug ( "InfluxdbHandler: Established connection to " "%s:%d/%s." , self . hostname , self . port , self . database ) except Exception as ex : se...
Connect to the influxdb server
28,369
def get_config ( self ) : if 'rmq_port' in self . config : self . rmq_port = int ( self . config [ 'rmq_port' ] ) if 'rmq_user' in self . config : self . rmq_user = self . config [ 'rmq_user' ] if 'rmq_password' in self . config : self . rmq_password = self . config [ 'rmq_password' ] if 'rmq_vhost' in self . config : ...
Get and set config options from config file
28,370
def _unbind ( self , rmq_server = None ) : try : self . connections [ rmq_server ] . close ( ) except AttributeError : pass self . connections [ rmq_server ] = None self . channels [ rmq_server ] = None
Close AMQP connection and unset channel
28,371
def process ( self , metric ) : for rmq_server in self . connections . keys ( ) : try : if ( ( self . connections [ rmq_server ] is None or self . connections [ rmq_server ] . is_open is False ) ) : self . _bind ( rmq_server ) channel = self . channels [ rmq_server ] channel . basic_publish ( exchange = self . rmq_exch...
Process a metric and send it to RMQ pub socket
28,372
def _load():
    """Read the kernel's TCP connection table, dropping the header row."""
    with open(NetstatCollector.PROC_TCP, 'r') as handle:
        rows = handle.readlines()
    # First line is the column header, not a connection.
    return rows[1:]
Read the table of tcp connections & remove header
28,373
def get_default_config_help(self):
    """Return help text for the collector's configuration options."""
    help_text = super(MemoryLxcCollector, self).get_default_config_help()
    help_text["sys_path"] = "Defaults to '/sys/fs/cgroup/lxc'"
    return help_text
Return help text for collector configuration .
28,374
def get_default_config(self):
    """Return default settings for the LXC memory collector."""
    config = super(MemoryLxcCollector, self).get_default_config()
    config["path"] = "lxc"
    # Cgroup root under which each container's memory files live.
    config["sys_path"] = "/sys/fs/cgroup/lxc"
    return config
Returns default settings for collector .
28,375
def collect ( self ) : lxc_metrics = [ "memory.usage_in_bytes" , "memory.limit_in_bytes" ] if os . path . isdir ( self . config [ "sys_path" ] ) is False : self . log . debug ( "sys_path '%s' isn't directory." , self . config [ "sys_path" ] ) return { } collected = { } for item in os . listdir ( self . config [ "sys_pa...
Collect memory stats of LXCs .
28,376
def _read_file ( self , filename ) : try : with open ( filename , "r" ) as fhandle : stats = float ( fhandle . readline ( ) . rstrip ( "\n" ) ) except Exception : stats = None return stats
Read contents of given file .
28,377
def load_modules_from_path ( path ) : if path [ - 1 : ] != '/' : path += '/' if not os . path . exists ( path ) : raise OSError ( "Directory does not exist: %s" % path ) sys . path . append ( path ) for f in os . listdir ( path ) : if len ( f ) > 3 and f [ - 3 : ] == '.py' : modname = f [ : - 3 ] __import__ ( modname ,...
Import all modules from the given directory
28,378
def process(self, metric):
    """Buffer *metric*; flush the buffer once queue_size is reached."""
    self.queue.append(metric)
    if len(self.queue) >= self.queue_size:
        logging.debug("Queue is full, sending logs to Logentries")
        self._send()
Process metric by queueing it and sending the queue to the Logentries api when full
28,379
def _send ( self ) : while len ( self . queue ) > 0 : metric = self . queue . popleft ( ) topic , value , timestamp = str ( metric ) . split ( ) msg = json . dumps ( { "event" : { topic : value } } ) req = urllib2 . Request ( "https://js.logentries.com/v1/logs/" + self . log_token , msg ) try : urllib2 . urlopen ( req ...
Convert message to a json object and send to Logentries
28,380
def collect ( self ) : devices = re . compile ( self . config [ 'devices' ] ) for device in os . listdir ( '/dev' ) : if devices . match ( device ) : command = [ self . config [ 'bin' ] , "-A" , os . path . join ( '/dev' , device ) ] if str_to_bool ( self . config [ 'use_sudo' ] ) : command . insert ( 0 , self . config...
Collect and publish S . M . A . R . T . attributes
28,381
def find_attr_start_line ( self , lines , min_line = 4 , max_line = 9 ) : for idx , line in enumerate ( lines [ min_line : max_line ] ) : col = line . split ( ) if len ( col ) > 1 and col [ 1 ] == 'ATTRIBUTE_NAME' : return idx + min_line + 1 self . log . warn ( 'ATTRIBUTE_NAME not found in second column of' ' smartctl ...
Return line number of the first real attribute and value . The first line is 0 . If the ATTRIBUTE_NAME header is not found return the index after max_line .
28,382
def get_disk_labels(self):
    """Map device nodes (resolved real paths) to their filesystem labels."""
    path = '/dev/disk/by-label/'
    if not os.path.isdir(path):
        # Host has no labelled disks (or no udev by-label dir).
        return {}
    labels = {}
    for entry in os.listdir(path):
        # udev escapes '/' inside labels as '\x2f'; undo that.
        pretty = entry.replace('\\x2f', '/')
        labels[os.path.realpath(path + '/' + pretty)] = pretty
    return labels
Creates a mapping of device nodes to filesystem labels
28,383
def get_file_systems ( self ) : result = { } if os . access ( '/proc/mounts' , os . R_OK ) : file = open ( '/proc/mounts' ) for line in file : try : mount = line . split ( ) device = mount [ 0 ] mount_point = mount [ 1 ] fs_type = mount [ 2 ] except ( IndexError , ValueError ) : continue if fs_type not in self . filesy...
Creates a map of mounted filesystems on the machine .
28,384
def process ( self , metric ) : path = metric . getCollectorPath ( ) path += '.' path += metric . getMetricPath ( ) if self . config [ 'apply_metric_prefix' ] : path = metric . getPathPrefix ( ) + '.' + path if self . include_reg . match ( path ) : if metric . metric_type == 'GAUGE' : m_type = 'gauge' else : m_type = '...
Process a metric by sending it to Librato
28,385
def _send ( self ) : self . queue . submit ( ) self . queue_max_timestamp = int ( time . time ( ) + self . queue_max_interval ) self . current_n_measurements = 0
Send data to Librato .
28,386
def collect ( self ) : stats = self . parse_stats_file ( self . config [ "status_path" ] ) if len ( stats ) == 0 : return { } elif "info" not in stats . keys ( ) : return { } elif "programstatus" not in stats . keys ( ) : return { } metrics = self . get_icinga_stats ( stats [ "programstatus" ] ) if "hoststatus" in stat...
Collect and publish metrics
28,387
def get_default_config(self):
    """Return default settings for the Icinga stats collector."""
    config = super(IcingaStatsCollector, self).get_default_config()
    config["path"] = "icinga_stats"
    # Default location of Icinga's status dump file.
    config["status_path"] = "/var/lib/icinga/status.dat"
    return config
Returns default settings for collector
28,388
def get_icinga_stats ( self , app_stats ) : stats = { } stats = dict ( stats . items ( ) + self . _get_active_stats ( app_stats ) . items ( ) ) stats = dict ( stats . items ( ) + self . _get_cached_stats ( app_stats ) . items ( ) ) stats = dict ( stats . items ( ) + self . _get_command_execution ( app_stats ) . items (...
Extract metrics from programstatus
28,389
def parse_stats_file ( self , file_name ) : stats = { } try : with open ( file_name , "r" ) as fhandle : fbuffer = [ ] save_buffer = False for line in fhandle : line = line . rstrip ( "\n" ) line = self . _trim ( line ) if line == "" or line . startswith ( "#" ) : continue elif line . endswith ( "{" ) : save_buffer = T...
Read and parse given file_name return config as a dictionary
28,390
def get_host_stats ( self , hosts ) : stats = { "hosts.total" : 0 , "hosts.ok" : 0 , "hosts.down" : 0 , "hosts.unreachable" : 0 , "hosts.flapping" : 0 , "hosts.in_downtime" : 0 , "hosts.checked" : 0 , "hosts.scheduled" : 0 , "hosts.active_checks" : 0 , "hosts.passive_checks" : 0 , } for host in list ( hosts ) : if type...
Get statistics for Hosts resp . Host entities
28,391
def get_svc_stats ( self , svcs ) : stats = { "services.total" : 0 , "services.ok" : 0 , "services.warning" : 0 , "services.critical" : 0 , "services.unknown" : 0 , "services.flapping" : 0 , "services.in_downtime" : 0 , "services.checked" : 0 , "services.scheduled" : 0 , "services.active_checks" : 0 , "services.passive...
Get statistics for Services resp . Service entities
28,392
def _convert_tripplet ( self , tripplet ) : splitted = tripplet . split ( "," ) if len ( splitted ) != 3 : self . log . debug ( "Got %i chunks, expected 3." , len ( splitted ) ) return ( 0 , 0 , 0 ) try : x01 = int ( splitted [ 0 ] ) x05 = int ( splitted [ 1 ] ) x15 = int ( splitted [ 2 ] ) except Exception as exceptio...
Turn 10 178 528 into tuple of integers
28,393
def _get_uptime ( self , app_stats ) : if "program_start" not in app_stats . keys ( ) : return 0 if not app_stats [ "program_start" ] . isdigit ( ) : return 0 uptime = int ( time . time ( ) ) - int ( app_stats [ "program_start" ] ) if uptime < 0 : return 0 return uptime
Return Icinga s uptime
28,394
def _parse_config_buffer ( self , fbuffer ) : if len ( fbuffer ) < 1 or not fbuffer [ 0 ] . endswith ( "{" ) : return { } entity = { } entity_type = fbuffer . pop ( 0 ) entity_type = entity_type . rstrip ( "{" ) entity [ "_type" ] = self . _trim ( entity_type ) for chunk in fbuffer : splitted = chunk . split ( "=" ) if...
Parse buffered chunk of config into dict
28,395
def _sanitize_entity ( self , entity ) : aliases = { "current_state" : "state" , "is_flapping" : "flapping" , "scheduled_downtime_depth" : "in_downtime" , "has_been_checked" : "checked" , "should_be_scheduled" : "scheduled" , "active_checks_enabled" : "active_checks" , "passive_checks_enabled" : "passive_checks" , } sa...
Make given entity sane for further use .
28,396
def _trim(self, somestr):
    """Strip leading and trailing whitespace using the module regexes."""
    stripped = RE_TSPACES.sub("", RE_LSPACES.sub("", somestr))
    # str() normalizes unicode input to match the original behaviour.
    return str(stripped)
Trim left - right given string
28,397
def _list_request ( self ) : try : url = "http://%s:%s/%s%s?maxDepth=1&maxCollectionSize=0" % ( self . config [ 'host' ] , self . config [ 'port' ] , self . jolokia_path , self . LIST_URL ) timeout = max ( 2 , float ( self . config [ 'interval' ] ) * 2 / 3 ) with closing ( urllib2 . urlopen ( self . _create_request ( u...
Returns a dictionary with JMX domain names as keys
28,398
def _get_stats(self):
    """Run `ceph -s` and return parsed status; {} when the call fails."""
    try:
        output = subprocess.check_output(['ceph', '-s'])
    except subprocess.CalledProcessError as err:
        self.log.info('Could not get stats: %s' % err)
        self.log.exception('Could not get stats')
        return {}
    return process_ceph_status(output)
Get ceph stats
28,399
def process_lsof(self, users, types):
    """Collect per-user open-file counts from lsof, keyed by file type.

    For each user in *users*, runs ``lsof -wbu <user>`` and counts how
    many open files fall into each TYPE in *types* (e.g. REG, DIR).
    Returns {user: {type: count}}.

    SECURITY: the original interpolated the user name into a shell
    pipeline via os.popen (shell-injection hazard); this version
    invokes lsof directly with an argument list and replicates the
    old "awk '{ print $5 }'" stage in Python.
    """
    result = {}
    for user in users:
        try:
            output = subprocess.check_output(
                ['lsof', '-wbu', user], universal_newlines=True)
        except (OSError, subprocess.CalledProcessError):
            # lsof missing or exited non-zero: treat as no open files,
            # matching the old pipeline's empty output on failure.
            output = ''
        # Fifth whitespace-separated column is the file TYPE.
        type_col = [cols[4] for cols in
                    (line.split() for line in output.splitlines())
                    if len(cols) >= 5]
        result[user] = dict((t, type_col.count(t)) for t in types)
    return result
Get the list of users and file types to collect for and collect the data from lsof