text
stringlengths 0
828
|
|---|
stats_this_second = self._rollup_shard_stats_to_instance_stats(
|
{shard.name: future.result() for (shard, future) in zip(shards, fs)})
|
# power nap
|
time.sleep(1)
|
# fetch again
|
fs = []
|
with futures.ThreadPoolExecutor(len(shards)) as executor:
|
for shard in shards:
|
fs.append(executor.submit(shard.get_shard_stats))
|
futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
|
stats_next_second = self._rollup_shard_stats_to_instance_stats(
|
{shard.name: future.result() for (shard, future) in zip(shards, fs)})
|
self._new_relic_stats = self._compile_new_relic_stats(stats_this_second, stats_next_second)
|
else:
|
# fetch stats like we did before (by hitting new_relic_stats API resource)
|
response = requests.get('{}{}'.format(self._url,
|
'new-relic-stats'),
|
**self._instances._default_request_kwargs)
|
self._new_relic_stats = json.loads(response.content).get(
|
'data') if response.status_code == 200 else {}
|
return self._new_relic_stats"
|
4517,"def _rollup_shard_stats_to_instance_stats(self, shard_stats):
|
""""""
|
roll up all shard stats to instance level stats
|
:param shard_stats: dict of {shard_name: shard level stats}
|
""""""
|
instance_stats = {}
|
opcounters_per_node = []
|
# aggregate replication_lag
|
instance_stats['replication_lag'] = max(map(lambda s: s['replication_lag'], shard_stats.values()))
|
aggregate_server_statistics = {}
|
for shard_name, stats in shard_stats.items():
|
for statistic_key in stats.get('shard_stats'):
|
if statistic_key != 'connections' and statistic_key in aggregate_server_statistics:
|
aggregate_server_statistics[statistic_key] = util.sum_values(aggregate_server_statistics[statistic_key],
|
stats.get('shard_stats')[statistic_key])
|
else:
|
aggregate_server_statistics[statistic_key] = stats.get('shard_stats')[statistic_key]
|
# aggregate per_node_stats into opcounters_per_node
|
opcounters_per_node.append({shard_name: {member: node_stats['opcounters']
|
for member, node_stats in stats.get('per_node_stats').items()}})
|
instance_stats['opcounters_per_node'] = opcounters_per_node
|
instance_stats['aggregate_server_statistics'] = aggregate_server_statistics
|
return instance_stats"
|
def _compile_new_relic_stats(self, stats_this_second, stats_next_second):
    """Compute per-second metrics from two instance stats snapshots taken
    one second apart, plus some pass-through aggregate metrics.

    :param dict stats_this_second: instance stats at time T.
    :param dict stats_next_second: instance stats at time T + 1 second.
    :return: compiled instance stats of the form::

        {'opcounters_per_node_per_second': {...},
         'server_statistics_per_second': {...},
         'aggregate_server_statistics': {...},
         'replication_lag': 0.0,
         'aggregate_database_statistics': {}}
    """
    server_statistics_per_second = {}
    opcounters_per_node_per_second = []

    for subdoc in ["opcounters", "network"]:
        first_doc = stats_this_second['aggregate_server_statistics'][subdoc]
        second_doc = stats_next_second['aggregate_server_statistics'][subdoc]
        # BUGFIX: iterate only keys present in BOTH snapshots. The previous
        # code built the UNION of key sets but still indexed first_doc[key]
        # and second_doc[key] directly, raising KeyError for any key present
        # in only one of the two snapshots.
        common_keys = set(first_doc) & set(second_doc)
        server_statistics_per_second[subdoc] = {
            key: int(second_doc[key]) - int(first_doc[key])
            for key in common_keys if isinstance(first_doc[key], int)}

    # Per-node opcounter deltas between the two snapshots.
    # NOTE(review): the zip assumes both snapshots list shards in the same
    # order — confirm upstream ordering guarantees.
    for node1, node2 in zip(stats_this_second['opcounters_per_node'],
                            stats_next_second['opcounters_per_node']):
        node_opcounters_per_second = {}
        for repl, members in node2.items():
            node_opcounters_per_second[repl] = {}
            for member, ops in members.items():
                node_opcounters_per_second[repl][member] = {}
                for op, count in ops.items():
                    node_opcounters_per_second[repl][member][op] = (
                        count - node1[repl][member][op])
        opcounters_per_node_per_second.append(node_opcounters_per_second)

    return {'opcounters_per_node_per_second': opcounters_per_node_per_second,
            'server_statistics_per_second': server_statistics_per_second,
            'aggregate_server_statistics': stats_next_second.get('aggregate_server_statistics'),
            'replication_lag': stats_next_second.get('replication_lag'),
            'aggregate_database_statistics': self.get_aggregate_database_stats()}
|
def get_stepdown_window(self):
    """Fetch and return this instance's stepdown window information.

    Issues a GET against the instance's 'stepdown/' endpoint and returns
    the decoded JSON payload.
    """
    endpoint = '{}stepdown/'.format(self._service_url)
    resp = requests.get(endpoint, **self._instances._default_request_kwargs)
    return resp.json()
|
4520,"def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True):
|
""""""Set the stepdown window for this instance.
|
Date times are assumed to be UTC, so use UTC date times.
|
:param datetime.datetime start: The datetime which the stepdown window is to open.
|
:param datetime.datetime end: The datetime which the stepdown window is to close.
|
:param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.