_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def get_standardized_timestamp(timestamp, ts_format):
    """
    Convert a timestamp string to epoch-milliseconds.

    Returns None for an empty timestamp and -1 when the format cannot be
    determined or the string fails to parse. Time-only values ('%H:%M:%S'
    variants) are anchored to today's date before conversion.

    :param str timestamp: raw timestamp string, or 'now' for the current time
    :param str ts_format: strptime format or one of the epoch pseudo-formats;
                          auto-detected when falsy
    :return: str epoch-ms value, None, or -1 on failure
    """
    if not timestamp:
        return None
    if timestamp == 'now':
        timestamp = str(datetime.datetime.now())
    if not ts_format:
        ts_format = detect_timestamp_format(timestamp)
    try:
        if ts_format == 'unknown':
            logger.error('Unable to determine timestamp format for : %s', timestamp)
            return -1
        if ts_format == 'epoch':
            epoch_ms = int(timestamp) * 1000
        elif ts_format == 'epoch_ms':
            epoch_ms = timestamp
        elif ts_format == 'epoch_fraction':
            # First 10 chars are whole seconds; text after the separator is
            # treated as milliseconds.
            epoch_ms = int(timestamp[:10]) * 1000 + int(timestamp[11:])
        else:
            if ts_format in ('%H:%M:%S', '%H:%M:%S.%f'):
                # Time-only format: prefix today's date so strptime succeeds.
                timestamp = str(datetime.date.today()) + ' ' + timestamp
                ts_format = '%Y-%m-%d ' + ts_format
            parsed = datetime.datetime.strptime(timestamp, ts_format)
            epoch_ms = calendar.timegm(parsed.utctimetuple()) * 1000 + parsed.microsecond / 1000
    except ValueError:
        return -1
    return str(epoch_ms)
"resource": ""
} |
def set_sla(obj, metric, sub_metric, rules):
    """
    Extract SLAs from a whitespace-separated rule string and register them in
    obj.sla_map[metric][sub_metric][stat].

    Rules look like 'p50<100' or 'mean>5'; any other form is unsupported.

    :param obj: object holding an sla_map (and optionally sla_list / logger)
    :param str metric: metric name the SLAs belong to
    :param str sub_metric: sub-metric name the SLAs belong to
    :param str rules: whitespace-separated SLA rule expressions
    :return: False when obj has no sla_map, True otherwise
    """
    if not hasattr(obj, 'sla_map'):
        return False
    for rule in rules.split():
        if '<' in rule:
            stat, threshold = rule.split('<')
            sla = SLA(metric, sub_metric, stat, threshold, 'lt')
        elif '>' in rule:
            stat, threshold = rule.split('>')
            sla = SLA(metric, sub_metric, stat, threshold, 'gt')
        else:
            # BUG FIX: the original fell through with `stat` unbound (NameError
            # on the first rule) or stale from a previous iteration, storing
            # sla=None over an already-registered SLA. Log and skip instead.
            if hasattr(obj, 'logger'):
                obj.logger.error('Unsupported SLA type defined : ' + rule)
            continue
        obj.sla_map[metric][sub_metric][stat] = sla
        if hasattr(obj, 'sla_list'):
            obj.sla_list.append(sla)  # TODO : remove this once report has grading done in the metric tables
    return True
"resource": ""
} |
def aggregate_count_over_time(self, metric_store, line_data, transaction_list, aggregate_timestamp):
    """
    Increment per-transaction request counters for one parsed log line.
    Successful samples (attribute s == 'true') count toward 'qps'; everything
    else counts toward 'eqps' (error qps).

    :param dict metric_store: metric store holding all parsed jmeter log data
    :param dict line_data: extracted k:v pairs from the log line
    :param list transaction_list: transactions this line's count applies to
    :param string aggregate_timestamp: bucketed timestamp for aggregation
    :return: None
    """
    bucket = 'qps' if line_data.get('s') == 'true' else 'eqps'
    for transaction in transaction_list:
        counters = metric_store[bucket][transaction]
        counters[aggregate_timestamp] = counters.get(aggregate_timestamp, 0) + 1
    return None
"resource": ""
} |
def parse(self):
    """
    Validate every input file, then parse the Jmeter JTL output and
    calculate key stats.

    :return: bool status of the metric parse (False when any input file is invalid)
    """
    # all() short-circuits exactly like the original chained `and`.
    if not all(naarad.utils.is_valid_file(infile) for infile in self.infile_list):
        return False
    status = self.parse_xml_jtl(self.aggregation_granularity)
    # Parsing holds large trees in memory; reclaim promptly.
    gc.collect()
    return status
"resource": ""
} |
def parse_xml_jtl(self, granularity):
    """
    Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics
    :param string granularity: The time period over which to aggregate and average the raw data. Valid values are 'hour', 'minute' or 'second'
    :return: status of the metric parse
    """
    data = defaultdict(list)
    # metric_type -> transaction -> timestamp -> values
    processed_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for input_file in self.infile_list:
        logger.info('Processing : %s', input_file)
        timestamp_format = None
        tree = ElementTree.parse(input_file)
        # Both <httpSample> and <sample> elements carry request records.
        samples = tree.findall('./httpSample') + tree.findall('./sample')
        for sample in samples:
            # Detect the timestamp format lazily from the first parsable sample.
            if not timestamp_format or timestamp_format == 'unknown':
                timestamp_format = naarad.utils.detect_timestamp_format(sample.get('ts'))
            if timestamp_format == 'unknown':
                continue
            ts = naarad.utils.get_standardized_timestamp(sample.get('ts'), timestamp_format)
            if ts == -1:
                # Unparsable timestamp: skip this sample.
                continue
            ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
            aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)
            # Count (e)qps and accumulate response time 't' and bytes 'by' for
            # both the individual transaction label and the overall summary.
            self.aggregate_count_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], aggregate_timestamp)
            self.aggregate_values_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], ['t', 'by'], aggregate_timestamp)
        logger.info('Finished parsing : %s', input_file)
    logger.info('Processing metrics for output to csv')
    # NOTE(review): averaging_factor is only bound inside the sample loop; if no
    # file yields a valid sample this raises UnboundLocalError — confirm inputs
    # always contain at least one parsable sample.
    self.average_values_for_plot(processed_data, data, averaging_factor)
    logger.info('Writing time series csv')
    for csv in data.keys():
        self.csv_files.append(csv)
        with open(csv, 'w') as csvf:
            csvf.write('\n'.join(sorted(data[csv])))
    logger.info('Processing raw data for stats')
    self.calculate_key_stats(processed_data)
    return True
"resource": ""
} |
def convert_to_G(self, word):
    """
    Convert a size token such as '2333M' to gigabytes, returned as a string.
    Recognizes G/g, M/m and K/k suffixes; a bare number is assumed to be bytes.
    """
    suffix = word[-1]
    if suffix in ('G', 'g'):
        converted = float(word[:-1])
    elif suffix in ('M', 'm'):
        converted = float(word[:-1]) / 1000.0
    elif suffix in ('K', 'k'):
        converted = float(word[:-1]) / 1000.0 / 1000.0
    else:
        # No unit suffix: treat the value as bytes.
        converted = float(word) / 1000.0 / 1000.0 / 1000.0
    return str(converted)
"resource": ""
} |
def parse(self):
    """
    Parse the top output file
    Return status of the metric parse
    The raw log file is like the following:
    2014-06-23
    top - 00:00:02 up 18 days, 7:08, 19 users, load average: 0.05, 0.03, 0.00
    Tasks: 447 total, 1 running, 443 sleeping, 2 stopped, 1 zombie
    Cpu(s): 1.6%us, 0.5%sy, 0.0%ni, 97.9%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
    Mem: 62.841G total, 15.167G used, 47.675G free, 643.434M buffers
    Swap: 63.998G total, 0.000k used, 63.998G free, 11.324G cached
    PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
    1730 root 20 0 4457m 10m 3328 S 1.9 0.0 80:13.45 lwregd
    The log lines can be generated by echo $t >> $RESULT/top.out &; top -b -n $COUNT -d $INTERVAL | grep -A 40 '^top' >> $RESULT/top.out &
    """
    for infile in self.infile_list:
        logger.info('Processing : %s', infile)
        status = True
        file_status = naarad.utils.is_valid_file(infile)
        if not file_status:
            return False
        with open(infile) as fh:
            # Line-oriented state machine: the first word of each line decides
            # which section handler processes it.
            for line in fh:
                words = line.split()
                if not words:
                    continue
                # Pattern matches line of '2014-02-03'
                if re.match('^\d\d\d\d-\d\d-\d\d$', line):
                    self.ts_date = words[0]
                    continue
                prefix_word = words[0].strip()
                if prefix_word == 'top':
                    self.process_top_line(words)
                    self.saw_pid = False  # Turn off the processing of individual process line
                elif self.ts_valid_lines:
                    if prefix_word == 'Tasks:':
                        self.process_tasks_line(words)
                    elif prefix_word == 'Cpu(s):':
                        self.process_cpu_line(words)
                    elif prefix_word == 'Mem:':
                        self.process_mem_line(words)
                    elif prefix_word == 'Swap:':
                        self.process_swap_line(words)
                    elif prefix_word == 'PID':
                        self.saw_pid = True  # Turn on the processing of individual process line
                        self.process_headers = words
                    else:  # Each individual process line
                        # Only lines with at least as many fields as the header
                        # are valid per-process rows.
                        if self.saw_pid and len(words) >= len(self.process_headers):  # Only valid process lines
                            self.process_individual_command(words)
    # Putting data in csv files;
    for out_csv in self.data.keys():  # All sub_metrics
        self.csv_files.append(out_csv)
        with open(out_csv, 'w') as fh:
            fh.write('\n'.join(self.data[out_csv]))
    gc.collect()
    return status
"resource": ""
} |
def get_urls_from_seed(url):
    """
    get a list of urls from a seeding url, return a list of urls
    :param str url: a full/absolute url, e.g. http://www.cnn.com/logs/
    :return: a list of full/absolute urls.
    """
    # NOTE(review): returns None (not []) for an invalid seed, while all other
    # paths return a list — callers iterating the result should be checked.
    if not url or type(url) != str or not naarad.utils.is_valid_url(url):
        logger.error("get_urls_from_seed() does not have valid seeding url.")
        return
    # Extract the host info of "http://host:port/" in case of href urls are elative urls (e.g., /path/gc.log)
    # Then join (host info and relative urls) to form the complete urls
    base_index = url.find('/', len("https://"))  # get the first "/" after http://" or "https://"; handling both cases.
    base_url = url[:base_index]  # base_url = "http://host:port" or https://host:port" or http://host" (where no port is given)
    # Extract the "href" denoted urls
    urls = []
    try:
        response = urllib2.urlopen(url)
        hp = HTMLLinkExtractor()
        hp.feed(response.read())
        urls = hp.links
        hp.close()
    except urllib2.HTTPError:
        logger.error("Got HTTPError when opening the url of %s" % url)
        return urls
    # Check whether the url is relative or complete
    for i in range(len(urls)):
        if not urls[i].startswith("http://") and not urls[i].startswith("https://"):  # a relative url ?
            # Prefix relative hrefs with the seed's scheme://host[:port].
            urls[i] = base_url + urls[i]
    return urls
"resource": ""
} |
def plot_diff(self, graphing_library='matplotlib'):
    """
    Generate CDF diff plots of the submetrics

    Only submetrics present in BOTH reports (baseline and current) are
    plotted; each plot overlays the two percentile CSVs on one graph.
    :param str graphing_library: key into Diff.graphing_modules
    :return: True (always; individual submetrics are skipped when their CSVs are invalid)
    """
    # Submetrics common to both reports, in stable sorted order.
    diff_datasource = sorted(set(self.reports[0].datasource) & set(self.reports[1].datasource))
    graphed = False
    for submetric in diff_datasource:
        baseline_csv = naarad.utils.get_default_csv(self.reports[0].local_location, (submetric + '.percentiles'))
        current_csv = naarad.utils.get_default_csv(self.reports[1].local_location, (submetric + '.percentiles'))
        # Skip when either side lacks a valid percentiles CSV.
        if (not (naarad.utils.is_valid_file(baseline_csv) & naarad.utils.is_valid_file(current_csv))):
            continue
        baseline_plot = PD(input_csv=baseline_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
                           graph_type='line', plot_label='baseline', x_label='Percentiles')
        current_plot = PD(input_csv=current_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
                          graph_type='line', plot_label='current', x_label='Percentiles')
        graphed, div_file = Diff.graphing_modules[graphing_library].graph_data_on_the_same_graph([baseline_plot, current_plot],
                                                                                                 os.path.join(self.output_directory, self.resource_path),
                                                                                                 self.resource_path, (submetric + '.diff'))
        if graphed:
            self.plot_files.append(div_file)
    return True
"resource": ""
} |
def check_sla(self, sla, diff_metric):
    """
    Check whether the SLA has passed or failed.

    Failures increment self.sla_failures and are recorded in
    self.sla_failure_list.
    :param sla: SLA object with .display and .check_sla_passed()
    :param dict diff_metric: diff entry with 'percent_diff' / 'absolute_diff'
    :return: False when the diff value is not numeric, True otherwise
    """
    try:
        # BUG FIX: was `sla.display is '%'` — identity comparison against a
        # string literal, which only works by CPython interning accident.
        if sla.display == '%':
            diff_val = float(diff_metric['percent_diff'])
        else:
            diff_val = float(diff_metric['absolute_diff'])
    except ValueError:
        return False
    if not (sla.check_sla_passed(diff_val)):
        self.sla_failures += 1
        self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))
    return True
"resource": ""
} |
def get_aggregation_timestamp(self, timestamp, granularity='second'):
    """
    Round an epoch-ms timestamp down to the requested aggregation granularity.

    :param string timestamp: epoch-ms timestamp from the log line
    :param string granularity: 'hour', 'minute', 'second', or None/'none' for raw
    :return: (aggregate_timestamp_ms, averaging_factor_in_seconds)
    """
    ts = int(timestamp)
    if granularity is None or granularity.lower() == 'none':
        return ts, 1
    # BUG FIX: use floor division so the bucketed timestamp stays an int under
    # Python 3 (true division `/` would produce float timestamps).
    if granularity == 'hour':
        return (ts // (3600 * 1000)) * 3600 * 1000, 3600
    if granularity == 'minute':
        return (ts // (60 * 1000)) * 60 * 1000, 60
    return (ts // 1000) * 1000, 1
"resource": ""
} |
def aggregate_count_over_time(self, metric_store, groupby_name, aggregate_timestamp):
    """
    Bump the qps counter for a group at the given aggregated timestamp.

    :param dict metric_store: metric store holding all parsed log data
    :param string groupby_name: group name the log line belongs to
    :param string aggregate_timestamp: bucketed timestamp for aggregation
    :return: None
    """
    counters = metric_store['qps'][groupby_name]
    counters[aggregate_timestamp] = counters.get(aggregate_timestamp, 0) + 1
    return None
"resource": ""
} |
def calc_key_stats(self, metric_store):
    """
    Calculate stats such as percentile and mean
    :param dict metric_store: The metric store used to store all the parsed log data
                              (column -> group -> timestamp -> value(s))
    :return: None
    """
    stats_to_calculate = ['mean', 'std', 'min', 'max']  # TODO: get input from user
    percentiles_to_calculate = range(0, 100, 1)  # TODO: get input from user
    for column, groups_store in metric_store.items():
        for group, time_store in groups_store.items():
            data = metric_store[column][group].values()
            # When grouping is enabled, stats are keyed per group.
            if self.groupby:
                column_name = group + '.' + column
            else:
                column_name = column
            if column.startswith('qps'):
                # qps buckets hold scalar counts per timestamp.
                self.calculated_stats[column_name], self.calculated_percentiles[column_name] = naarad.utils.calculate_stats(data, stats_to_calculate, percentiles_to_calculate)
            else:
                # Other columns hold per-timestamp lists; heapq.merge flattens
                # them into one sorted stream before computing stats.
                self.calculated_stats[column_name], self.calculated_percentiles[column_name] = naarad.utils.calculate_stats(list(heapq.merge(*data)), stats_to_calculate,
                                                                                                                           percentiles_to_calculate)
            self.update_summary_stats(column_name)
"resource": ""
} |
def check_important_sub_metrics(self, sub_metric):
    """
    Return True when sub_metric (or its final dotted component) appears in
    self.important_sub_metrics; False when that list is empty or unset.
    """
    important = self.important_sub_metrics
    if not important:
        return False
    if sub_metric in important:
        return True
    return sub_metric.split('.')[-1] in important
"resource": ""
} |
def plot_cdf(self, graphing_library='matplotlib'):
    """
    plot CDF for important sub-metrics

    Iterates the percentiles CSVs produced earlier, skips columns not listed
    as important, and renders one line chart per remaining sub-metric.
    :param str graphing_library: key into Metric.graphing_modules
    :return: True (always)
    """
    graphed = False
    for percentile_csv in self.percentiles_files:
        csv_filename = os.path.basename(percentile_csv)
        # The last element is .csv, don't need that in the name of the chart
        # Map the percentiles CSV back to its source column name.
        column = self.csv_column_map[percentile_csv.replace(".percentiles.", ".")]
        if not self.check_important_sub_metrics(column):
            continue
        column = naarad.utils.sanitize_string(column)
        graph_title = '.'.join(csv_filename.split('.')[0:-1])
        # Enrich the title / y-label with optional description and unit.
        if self.sub_metric_description and column in self.sub_metric_description.keys():
            graph_title += ' (' + self.sub_metric_description[column] + ')'
        if self.sub_metric_unit and column in self.sub_metric_unit.keys():
            plot_data = [PD(input_csv=percentile_csv, csv_column=1, series_name=graph_title, x_label='Percentiles',
                            y_label=column + ' (' + self.sub_metric_unit[column] + ')', precision=None, graph_height=600, graph_width=1200, graph_type='line')]
        else:
            plot_data = [PD(input_csv=percentile_csv, csv_column=1, series_name=graph_title, x_label='Percentiles', y_label=column, precision=None,
                            graph_height=600, graph_width=1200, graph_type='line')]
        graphed, div_file = Metric.graphing_modules[graphing_library].graph_data_on_the_same_graph(plot_data, self.resource_directory,
                                                                                                  self.resource_path, graph_title)
        if graphed:
            self.plot_files.append(div_file)
    return True
"resource": ""
} |
def _set_scores(self):
    """
    Compute anomaly scores for the time series.

    Score = |derivative - EMA(derivative)| per timestamp, normalized by the
    standard deviation of all scores, then denoised into self.anom_scores.
    """
    anom_scores = {}
    self._compute_derivatives()
    derivatives_ema = utils.compute_ema(self.smoothing_factor, self.derivatives)
    # Deviation of each point's derivative from its smoothed trend.
    for i, (timestamp, value) in enumerate(self.time_series_items):
        anom_scores[timestamp] = abs(self.derivatives[i] - derivatives_ema[i])
    # NOTE(review): Python-2 style — under Python 3, dict .values() is a view;
    # confirm numpy.std handles it (wrapping in list() is the safe form).
    stdev = numpy.std(anom_scores.values())
    if stdev:
        # Normalize scores to units of standard deviation.
        for timestamp in anom_scores.keys():
            anom_scores[timestamp] /= stdev
    self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
"resource": ""
} |
def extract_metric_name(self, metric_name):
    """
    Method to extract SAR metric names from the section given in the config. The SARMetric class assumes that
    the section name will contain the SAR types listed in self.supported_sar_types tuple
    :param str metric_name: Section name from the config
    :return: str which identifies what kind of SAR metric the section represents
    """
    match = next((sar_type for sar_type in self.supported_sar_types if sar_type in metric_name), None)
    if match is not None:
        return match
    logger.error('Section [%s] does not contain a valid metric type, using type: "SAR-generic". Naarad works better '
                 'if it knows the metric type. Valid SAR metric names are: %s', metric_name, self.supported_sar_types)
    return 'SAR-generic'
"resource": ""
} |
def run(self):
    """Perform the Oct2Py speed analysis.
    Uses timeit to test the raw execution of an Octave command,
    Then tests progressively larger array passing.
    """
    print('Oct2Py speed test')
    print('*' * 20)
    time.sleep(1)
    # Baseline: round-trip cost of a trivial Octave evaluation.
    print('Raw speed: ')
    avg = timeit.timeit(self.raw_speed, number=10) / 10
    print('    {0:0.01f} usec per loop'.format(avg * 1e6))
    sides = [1, 10, 100, 1000]
    runs = [10, 10, 10, 5]
    for (side, nruns) in zip(sides, runs):
        # NOTE(review): reshape(..., (-1,)) flattens to 1-D even though the
        # labels say 'side x side' — confirm whether (side, side) was intended.
        self.array = np.reshape(np.arange(side ** 2), (-1))
        print('Put {0}x{1}: '.format(side, side))
        avg = timeit.timeit(self.large_array_put, number=nruns) / nruns
        print('    {0:0.01f} msec'.format(avg * 1e3))
        print('Get {0}x{1}: '.format(side, side))
        avg = timeit.timeit(self.large_array_get, number=nruns) / nruns
        print('    {0:0.01f} msec'.format(avg * 1e3))
    self.octave.exit()
    print('*' * 20)
    print('Test complete!')
"resource": ""
} |
def exit(self):
    """Quits this octave session and cleans up.
    """
    engine = self._engine
    if engine:
        engine.repl.terminate()
        self._engine = None
"resource": ""
} |
def restart(self):
    """Restart an Octave session in a clean state
    """
    # Tear down any existing engine before spawning a new one.
    if self._engine:
        self._engine.repl.terminate()
    # Propagate the configured executable through the environment so the new
    # engine picks it up; fall back to the legacy OCTAVE variable.
    executable = self._executable
    if executable:
        os.environ['OCTAVE_EXECUTABLE'] = executable
    if 'OCTAVE_EXECUTABLE' not in os.environ and 'OCTAVE' in os.environ:
        os.environ['OCTAVE_EXECUTABLE'] = os.environ['OCTAVE']
    self._engine = OctaveEngine(stdin_handler=self._handle_stdin,
                                logger=self.logger)
    # Add local Octave scripts.
    self._engine.eval('addpath("%s");' % HERE.replace(osp.sep, '/'))
"resource": ""
} |
def _feval(self, func_name, func_args=(), dname='', nout=0,
           timeout=None, stream_handler=None, store_as='', plot_dir=None):
    """Run the given function with the given args.

    :param str func_name: Octave function to invoke
    :param tuple func_args: positional arguments for the function
    :param str dname: directory to evaluate in ('' for current)
    :param int nout: number of output arguments requested
    :param timeout: seconds to wait for evaluation (defaults to self.timeout)
    :param stream_handler: callable receiving Octave output (defaults to logger.info)
    :param str store_as: when given, keep the result in this Octave variable
    :param plot_dir: when given, render created figures into this directory
    :raises Oct2PyError: closed session, timeout, dead session, or Octave error
    """
    engine = self._engine
    if engine is None:
        raise Oct2PyError('Session is closed')
    # BUG FIX: resolve the handler once up front. Previously only
    # `engine.stream_handler` received the logger fallback, so the local
    # `stream_handler` could still be None and the interrupt/timeout/EOF
    # handlers below raised TypeError when calling it.
    stream_handler = stream_handler or self.logger.info
    # Set up our mat file paths.
    out_file = osp.join(self.temp_dir, 'writer.mat')
    out_file = out_file.replace(osp.sep, '/')
    in_file = osp.join(self.temp_dir, 'reader.mat')
    in_file = in_file.replace(osp.sep, '/')
    # Replace OctavePtr arguments with their addresses, recording their
    # 1-based (Octave-style) positions.
    func_args = list(func_args)
    ref_indices = []
    for (i, value) in enumerate(func_args):
        if isinstance(value, OctavePtr):
            ref_indices.append(i + 1)
            func_args[i] = value.address
    ref_indices = np.array(ref_indices)
    # Save the request data to the output file.
    req = dict(func_name=func_name, func_args=tuple(func_args),
               dname=dname or '', nout=nout,
               store_as=store_as or '',
               ref_indices=ref_indices)
    write_file(req, out_file, oned_as=self._oned_as,
               convert_to_float=self.convert_to_float)
    # Set up the engine and evaluate the `_pyeval()` function.
    engine.stream_handler = stream_handler
    if timeout is None:
        timeout = self.timeout
    try:
        engine.eval('_pyeval("%s", "%s");' % (out_file, in_file),
                    timeout=timeout)
    except KeyboardInterrupt:
        stream_handler(engine.repl.interrupt())
        raise
    except TIMEOUT:
        stream_handler(engine.repl.interrupt())
        raise Oct2PyError('Timed out, interrupting')
    except EOF:
        stream_handler(engine.repl.child.before)
        self.restart()
        raise Oct2PyError('Session died, restarting')
    # Read in the output.
    resp = read_file(in_file, self)
    if resp['err']:
        msg = self._parse_error(resp['err'])
        raise Oct2PyError(msg)
    result = resp['result'].ravel().tolist()
    if isinstance(result, list) and len(result) == 1:
        result = result[0]
    # Check for the sentinel cell that marks "no return value".
    if (isinstance(result, Cell) and
            result.size == 1 and
            isinstance(result[0], string_types) and
            result[0] == '__no_value__'):
        result = None
    if plot_dir:
        self._engine.make_figures(plot_dir)
    return result
"resource": ""
} |
def _parse_error(self, err):
    """Create a traceback for an Octave evaluation error.

    :param dict err: error record from Octave with 'message' and optional 'stack'
    :return: str formatted error message with call-stack frames when available
    """
    self.logger.debug(err)
    stack = err.get('stack', [])
    # Parse errors already carry their prefix; everything else gets 'error: '.
    # NOTE(review): this mutates the caller's err dict in place.
    if not err['message'].startswith('parse error:'):
        err['message'] = 'error: ' + err['message']
    errmsg = 'Octave evaluation error:\n%s' % err['message']
    if not isinstance(stack, StructArray):
        return errmsg
    errmsg += '\nerror: called from:'
    # The last stack frame is the _pyeval shim itself; skip it.
    for item in stack[:-1]:
        errmsg += '\n %(name)s at line %(line)d' % item
        try:
            # 'column' is not always present in the frame record.
            errmsg += ', column %(column)d' % item
        except Exception:
            pass
    return errmsg
"resource": ""
} |
q261222 | Oct2Py._exist | validation | def _exist(self, name):
"""Test whether a name exists and return the name code.
Raises an error when the name does not exist.
"""
cmd = 'exist("%s")' % name
resp = self._engine.eval(cmd, silent=True).strip()
exist = int(resp.split()[-1])
if exist == 0:
msg = 'Value "%s" does not exist in Octave workspace'
raise Oct2PyError(msg % name)
return exist | python | {
"resource": ""
} |
q261223 | Oct2Py._isobject | validation | def _isobject(self, name, exist):
"""Test whether the name is an object."""
if exist in [2, 5]:
return False
cmd = 'isobject(%s)' % name
resp = self._engine.eval(cmd, silent=True).strip()
return resp == 'ans = 1' | python | {
"resource": ""
} |
q261224 | Oct2Py._get_function_ptr | validation | def _get_function_ptr(self, name):
"""Get or create a function pointer of the given name."""
func = _make_function_ptr_instance
self._function_ptrs.setdefault(name, func(self, name))
return self._function_ptrs[name] | python | {
"resource": ""
} |
q261225 | Oct2Py._get_user_class | validation | def _get_user_class(self, name):
"""Get or create a user class of the given type."""
self._user_classes.setdefault(name, _make_user_class(self, name))
return self._user_classes[name] | python | {
"resource": ""
} |
q261226 | Oct2Py._cleanup | validation | def _cleanup(self):
"""Clean up resources used by the session.
"""
self.exit()
workspace = osp.join(os.getcwd(), 'octave-workspace')
if osp.exists(workspace):
os.remove(workspace) | python | {
"resource": ""
} |
def read_file(path, session=None):
    """Read the data from the given file path.
    """
    try:
        contents = loadmat(path, struct_as_record=True)
    except UnicodeDecodeError as e:
        raise Oct2PyError(str(e))
    # Convert every top-level Octave value into its Python equivalent.
    return {key: _extract(value, session) for (key, value) in contents.items()}
"resource": ""
} |
def write_file(obj, path, oned_as='row', convert_to_float=True):
    """Save a Python object to an Octave file on the given path.
    """
    encoded = _encode(obj, convert_to_float)
    try:
        # scipy.io.savemat is not thread-save.
        # See https://github.com/scipy/scipy/issues/7260
        with _WRITE_LOCK:
            savemat(path, encoded, appendmat=False, oned_as=oned_as,
                    long_field_names=True)
    except KeyError:  # pragma: no cover
        raise Exception('could not save mat file')
"resource": ""
} |
def _extract(data, session=None):
    """Convert the Octave values to values suitable for Python.

    :param data: raw value loaded from a MAT file
    :param session: owning Oct2Py session (needed for user classes / structs)
    :return: the closest native Python / numpy representation
    """
    # Extract each item of a list.
    if isinstance(data, list):
        return [_extract(d, session) for d in data]
    # Ignore leaf objects.
    if not isinstance(data, np.ndarray):
        return data
    # Extract user defined classes.
    if isinstance(data, MatlabObject):
        cls = session._get_user_class(data.classname)
        return cls.from_value(data)
    # Extract struct data.
    if data.dtype.names:
        # Singular struct
        if data.size == 1:
            return _create_struct(data, session)
        # Struct array
        return StructArray(data, session)
    # Extract cells.
    if data.dtype.kind == 'O':
        return Cell(data, session)
    # Compress singleton values.
    if data.size == 1:
        return data.item()
    # Compress empty values.
    if data.size == 0:
        # Empty strings become '', any other empty array becomes [].
        if data.dtype.kind in 'US':
            return ''
        return []
    # Return standard array.
    return data
"resource": ""
} |
def _create_struct(data, session):
    """Create a struct from session data.
    """
    out = Struct()
    for field in data.dtype.names:
        value = data[field]
        # Extract values that are cells (they are doubly wrapped).
        if isinstance(value, np.ndarray) and value.dtype.kind == 'O':
            value = value.squeeze().tolist()
        out[field] = _extract(value, session)
    return out
"resource": ""
} |
def _encode(data, convert_to_float):
    """Convert the Python values to values suitable to send to Octave.

    :param data: Python value to serialize
    :param bool convert_to_float: when True, integer arrays become float64
    :return: a value scipy.io.savemat can write
    :raises Oct2PyError: when asked to write an Octave function pointer
    """
    ctf = convert_to_float
    # Handle variable pointer.
    if isinstance(data, (OctaveVariablePtr)):
        return _encode(data.value, ctf)
    # Handle a user defined object.
    if isinstance(data, OctaveUserClass):
        return _encode(OctaveUserClass.to_value(data), ctf)
    # Handle a function pointer.
    if isinstance(data, (OctaveFunctionPtr, MatlabFunction)):
        raise Oct2PyError('Cannot write Octave functions')
    # Handle matlab objects.
    if isinstance(data, MatlabObject):
        view = data.view(np.ndarray)
        out = MatlabObject(data, data.classname)
        for name in out.dtype.names:
            out[name] = _encode(view[name], ctf)
        return out
    # Handle pandas series and dataframes
    if isinstance(data, (DataFrame, Series)):
        return _encode(data.values, ctf)
    # Extract and encode values from dict-like objects.
    if isinstance(data, dict):
        out = dict()
        for (key, value) in data.items():
            out[key] = _encode(value, ctf)
        return out
    # Send None as nan.
    # NOTE(review): np.NaN was removed in numpy 2.0 (use np.nan) — confirm the
    # supported numpy range before upgrading.
    if data is None:
        return np.NaN
    # Sets are treated like lists.
    if isinstance(data, set):
        return _encode(list(data), ctf)
    # Lists can be interpreted as numeric arrays or cell arrays.
    if isinstance(data, list):
        if _is_simple_numeric(data):
            return _encode(np.array(data), ctf)
        return _encode(tuple(data), ctf)
    # Tuples are handled as cells.
    if isinstance(data, tuple):
        obj = np.empty(len(data), dtype=object)
        for (i, item) in enumerate(data):
            obj[i] = _encode(item, ctf)
        return obj
    # Sparse data must be floating type.
    if isinstance(data, spmatrix):
        return data.astype(np.float64)
    # Return other data types unchanged.
    if not isinstance(data, np.ndarray):
        return data
    # Extract and encode data from object-like arrays.
    if data.dtype.kind in 'OV':
        out = np.empty(data.size, dtype=data.dtype)
        for (i, item) in enumerate(data.ravel()):
            if data.dtype.names:
                # Record arrays: encode each named field of the record.
                for name in data.dtype.names:
                    out[i][name] = _encode(item[name], ctf)
            else:
                out[i] = _encode(item, ctf)
        return out.reshape(data.shape)
    # Complex 128 is the highest supported by savemat.
    if data.dtype.name == 'complex256':
        return data.astype(np.complex128)
    # Convert to float if applicable.
    if ctf and data.dtype.kind in 'ui':
        return data.astype(np.float64)
    # Return standard array.
    return data
"resource": ""
} |
q261232 | _is_simple_numeric | validation | def _is_simple_numeric(data):
"""Test if a list contains simple numeric data."""
for item in data:
if isinstance(item, set):
item = list(item)
if isinstance(item, list):
if not _is_simple_numeric(item):
return False
elif not isinstance(item, (int, float, complex)):
return False
return True | python | {
"resource": ""
} |
def _setup_log():
    """Configure root logger.

    Attaches a stdout StreamHandler at INFO level and disables propagation
    so messages are not duplicated by ancestor loggers.
    """
    try:
        handler = logging.StreamHandler(stream=sys.stdout)
    except TypeError:  # pragma: no cover
        # Very old Python used the 'strm' keyword for StreamHandler.
        handler = logging.StreamHandler(strm=sys.stdout)
    log = get_log()
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.propagate = False
"resource": ""
} |
def _make_user_class(session, name):
    """Make an Octave class for a given class name

    Queries the live session for the class's field and method names, then
    builds a Python proxy type with matching attribute/method descriptors.
    :param session: owning Oct2Py session
    :param str name: Octave class name
    :return: a dynamically created subclass of OctaveUserClass
    """
    attrs = session.eval('fieldnames(%s);' % name, nout=1).ravel().tolist()
    methods = session.eval('methods(%s);' % name, nout=1).ravel().tolist()
    # Hold the session weakly so proxy classes do not keep it alive.
    ref = weakref.ref(session)
    doc = _DocDescriptor(ref, name)
    values = dict(__doc__=doc, _name=name, _ref=ref, _attrs=attrs,
                  __module__='oct2py.dynamic')
    # One dynamically-typed bound method per Octave method.
    for method in methods:
        doc = _MethodDocDescriptor(ref, name, method)
        cls_name = '%s_%s' % (name, method)
        method_values = dict(__doc__=doc)
        method_cls = type(str(cls_name),
                          (OctaveUserClassMethod,), method_values)
        values[method] = method_cls(ref, method, name)
    # One attribute descriptor per Octave field.
    for attr in attrs:
        values[attr] = OctaveUserClassAttr(ref, attr, attr)
    return type(str(name), (OctaveUserClass,), values)
"resource": ""
} |
def from_value(cls, value):
    """This is how an instance is created when we read a
    MatlabObject from a MAT file.

    :param value: MatlabObject loaded from the MAT file
    :return: a proxy instance whose data lives in the Octave workspace
    """
    # Bypass __init__; the instance is backed by an Octave-side variable.
    instance = OctaveUserClass.__new__(cls)
    # Unique Octave variable name for this instance's data.
    instance._address = '%s_%s' % (instance._name, id(instance))
    instance._ref().push(instance._address, value)
    return instance
"resource": ""
} |
def to_value(cls, instance):
    """Convert to a value to send to Octave.

    :param instance: OctaveUserClass proxy instance
    :return: a MatlabObject (or empty dict when there is nothing to convert)
    """
    if not isinstance(instance, OctaveUserClass) or not instance._attrs:
        return dict()
    # Bootstrap a MatlabObject from scipy.io
    # From https://github.com/scipy/scipy/blob/93a0ea9e5d4aba1f661b6bb0e18f9c2d1fce436a/scipy/io/matlab/mio5.py#L435-L443
    # and https://github.com/scipy/scipy/blob/93a0ea9e5d4aba1f661b6bb0e18f9c2d1fce436a/scipy/io/matlab/mio5_params.py#L224
    # Build a one-element record array with one object field per attribute.
    dtype = []
    values = []
    for attr in instance._attrs:
        dtype.append((str(attr), object))
        values.append(getattr(instance, attr))
    struct = np.array([tuple(values)], dtype)
    return MatlabObject(struct, instance._name)
"resource": ""
} |
def to_pointer(cls, instance):
    """Get a pointer to the private object.

    :param instance: OctaveUserClass proxy instance
    :return: OctavePtr referencing the instance's Octave-side variable
    """
    return OctavePtr(instance._ref, instance._name, instance._address)
"resource": ""
} |
def document_func_view(serializer_class=None,
                       response_serializer_class=None,
                       filter_backends=None,
                       permission_classes=None,
                       authentication_classes=None,
                       doc_format_args=list(),
                       doc_format_kwargs=dict()):
    """
    Decorator to make functional view documentable via drf-autodocs

    Each supplied (truthy) attribute is copied onto both func.cls and
    func.view_class; when format args are given, the view docstring is
    rendered through str.format on both targets as well.
    """
    def decorator(func):
        overrides = (('serializer_class', serializer_class),
                     ('response_serializer_class', response_serializer_class),
                     ('filter_backends', filter_backends),
                     ('permission_classes', permission_classes),
                     ('authentication_classes', authentication_classes))
        for attr_name, attr_value in overrides:
            if attr_value:
                setattr(func.cls, attr_name, attr_value)
                setattr(func.view_class, attr_name, attr_value)
        if doc_format_args or doc_format_kwargs:
            formatted = getdoc(func).format(*doc_format_args, **doc_format_kwargs)
            func.cls.__doc__ = func.view_class.__doc__ = formatted
        return func
    return decorator
"resource": ""
} |
def format_docstring(*args, **kwargs):
    """
    Decorator for clean docstring formatting

    Renders the wrapped callable's (dedented) docstring through str.format
    with the given positional and keyword arguments.
    """
    def apply_format(func):
        template = getdoc(func)
        func.__doc__ = template.format(*args, **kwargs)
        return func
    return apply_format
"resource": ""
} |
def is_rarfile(filename):
    """Return True if ``filename`` can be opened as a valid RAR archive."""
    archive_info = unrarlib.RAROpenArchiveDataEx(
        filename, mode=constants.RAR_OM_LIST_INCSPLIT)
    try:
        handle = unrarlib.RAROpenArchiveEx(ctypes.byref(archive_info))
    except unrarlib.UnrarException:
        return False
    unrarlib.RARCloseArchive(handle)
    return archive_info.OpenResult == constants.SUCCESS
def _read_header(self, handle):
    """Read the current member header into a RarInfo object.

    Returns None at end of archive.  Raises RuntimeError for password
    problems and BadRarFile for any other unrar failure.
    """
    raw_header = unrarlib.RARHeaderDataEx()
    try:
        unrarlib.RARReadHeaderEx(handle, ctypes.byref(raw_header))
        info = RarInfo(header=raw_header)
    except unrarlib.ArchiveEnd:
        return None
    except unrarlib.MissingPassword:
        raise RuntimeError("Archive is encrypted, password required")
    except unrarlib.BadPassword:
        raise RuntimeError("Bad password for Archive")
    except unrarlib.UnrarException as err:
        raise BadRarFile(str(err))
    return info
def _process_current(self, handle, op, dest_path=None, dest_name=None):
    """Apply operation ``op`` (skip / test / extract) to the current member."""
    unrarlib.RARProcessFileW(handle, op, dest_path, dest_name)
def _load_metadata(self, handle):
    """Walk every member header, populating filelist and NameToInfo."""
    info = self._read_header(handle)
    while info:
        self.filelist.append(info)
        self.NameToInfo[info.filename] = info
        # Skip the member body; we only want the headers here.
        self._process_current(handle, constants.RAR_SKIP)
        info = self._read_header(handle)
def _open(self, archive):
    """Open the RAR archive, wrapping any unrar error in BadRarFile."""
    try:
        return unrarlib.RAROpenArchiveEx(ctypes.byref(archive))
    except unrarlib.UnrarException:
        raise BadRarFile("Invalid RAR file.")
def open(self, member, pwd=None):
    """Return a file-like object with the content of ``member``.

    ``member`` may be a filename or a RarInfo object.  Raises KeyError
    when the member does not exist, RuntimeError on password/CRC
    problems and BadRarFile on other unrar errors.
    """
    if isinstance(member, RarInfo):
        member = member.filename
    archive = unrarlib.RAROpenArchiveDataEx(
        self.filename, mode=constants.RAR_OM_EXTRACT)
    handle = self._open(archive)
    password = pwd or self.pwd
    if password is not None:
        unrarlib.RARSetPassword(handle, b(password))
    # Collect the decompressed bytes through the unrar callback
    # (based on BrutuZ, https://github.com/matiasb/python-unrar/pull/4,
    # and Cubixmeister's work).
    sink = _ReadIntoMemory()
    c_callback = unrarlib.UNRARCALLBACK(sink._callback)
    unrarlib.RARSetCallback(handle, c_callback, 0)
    try:
        info = self._read_header(handle)
        while info is not None:
            if info.filename == member:
                # RAR_TEST decompresses through the callback without
                # touching the filesystem.
                self._process_current(handle, constants.RAR_TEST)
                break
            self._process_current(handle, constants.RAR_SKIP)
            info = self._read_header(handle)
        if info is None:
            # Exhausted the archive without finding the member.
            sink = None
    except unrarlib.MissingPassword:
        raise RuntimeError("File is encrypted, password required")
    except unrarlib.BadPassword:
        raise RuntimeError("Bad password for File")
    except unrarlib.BadDataError:
        if password is not None:
            raise RuntimeError("File CRC error or incorrect password")
        raise RuntimeError("File CRC error")
    except unrarlib.UnrarException as err:
        raise BadRarFile("Bad RAR archive data: %s" % str(err))
    finally:
        self._close(handle)
    if sink is None:
        raise KeyError('There is no item named %r in the archive' % member)
    return sink.get_bytes()
def namelist(self):
    """Return the member file names, in archive order."""
    return [member.filename for member in self.filelist]
def getinfo(self, name):
    """Return the RarInfo instance for member ``name``.

    Raises KeyError when no such member exists.
    """
    info = self.NameToInfo.get(name)
    if info is None:
        raise KeyError('There is no item named %r in the archive' % name)
    return info
def printdir(self):
    """Print a table of contents for the RAR file to stdout."""
    print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"))
    for info in self.filelist:
        modified = "%d-%02d-%02d %02d:%02d:%02d" % info.date_time[:6]
        print("%-46s %s %12d" % (info.filename, modified, info.file_size))
def extract(self, member, path=None, pwd=None):
    """Extract ``member`` (a filename or RarInfo) using its full name.

    The member's file information is restored as accurately as possible.
    ``path`` selects the destination directory (default: cwd).  Returns
    the path the member was written to.
    """
    if isinstance(member, RarInfo):
        member = member.filename
    target = path if path is not None else os.getcwd()
    self._extract_members([member], target, pwd)
    return os.path.join(target, member)
def _extract_members(self, members, targetpath, pwd):
    """Extract every archive member whose name is in ``members``
    to a physical file under ``targetpath``.
    """
    archive = unrarlib.RAROpenArchiveDataEx(
        self.filename, mode=constants.RAR_OM_EXTRACT)
    handle = self._open(archive)
    password = pwd or self.pwd
    if password is not None:
        unrarlib.RARSetPassword(handle, b(password))
    try:
        info = self._read_header(handle)
        while info is not None:
            # Extract wanted members; skip everything else.
            if info.filename in members:
                self._process_current(
                    handle, constants.RAR_EXTRACT, targetpath)
            else:
                self._process_current(handle, constants.RAR_SKIP)
            info = self._read_header(handle)
    except unrarlib.MissingPassword:
        raise RuntimeError("File is encrypted, password required")
    except unrarlib.BadPassword:
        raise RuntimeError("Bad password for File")
    except unrarlib.BadDataError:
        raise RuntimeError("File CRC Error")
    except unrarlib.UnrarException as err:
        raise BadRarFile("Bad RAR archive data: %s" % str(err))
    finally:
        self._close(handle)
def dostime_to_timetuple(dostime):
    """Convert a 32-bit DOS date/time stamp to a (Y, M, D, h, m, s) tuple.

    The high 16 bits encode the date (bits 0-4 day, 5-8 month, 9-15
    years since 1980); the low 16 bits encode the time (bits 0-4
    seconds/2, 5-10 minutes, 11-15 hours).

    Fix: the original overwrote ``dostime`` with the date word and then
    derived hour/minute/second from those *date* bits, so the
    time-of-day was garbage.
    """
    date = (dostime >> 16) & 0xffff
    time = dostime & 0xffff
    day = date & 0x1f
    month = (date >> 5) & 0xf
    year = 1980 + (date >> 9)
    # DOS stores seconds with 2-second resolution.
    second = 2 * (time & 0x1f)
    minute = (time >> 5) & 0x3f
    hour = time >> 11
    return (year, month, day, hour, minute, second)
q261252 | _c_func | validation | def _c_func(func, restype, argtypes, errcheck=None):
"""Wrap c function setting prototype."""
func.restype = restype
func.argtypes = argtypes
if errcheck is not None:
func.errcheck = errcheck
return func | python | {
"resource": ""
} |
def _load_savefile_header(file_h):
    """
    Load and validate the 24-byte global header of a pcap file.
    """
    try:
        raw = file_h.read(24)
    except UnicodeDecodeError:
        print("\nMake sure the input file is opened in read binary, 'rb'\n")
        raise InvalidEncoding("Could not read file; it might not be opened in binary mode.")
    # The capture may have been written on a machine of either endianness;
    # match the magic number to choose the byte order for the header.
    big_magics = [struct.pack(">I", _MAGIC_NUMBER),
                  struct.pack(">I", _MAGIC_NUMBER_NS)]
    little_magics = [struct.pack("<I", _MAGIC_NUMBER),
                     struct.pack("<I", _MAGIC_NUMBER_NS)]
    if raw[:4] in big_magics:
        byte_order = b'big'
        fields = struct.unpack('>IhhIIII', raw)
    elif raw[:4] in little_magics:
        byte_order = b'little'
        fields = struct.unpack('<IhhIIII', raw)
    else:
        raise UnknownMagicNumber("No supported Magic Number found")
    (magic, major, minor, tz_off, ts_acc, snaplen, ll_type) = fields
    header = __pcap_header__(magic, major, minor, tz_off, ts_acc, snaplen,
                             ll_type, ctypes.c_char_p(byte_order),
                             magic == _MAGIC_NUMBER_NS)
    if not __validate_header__(header):
        raise InvalidHeader("Invalid Header")
    return header
def load_savefile(input_file, layers=0, verbose=False, lazy=False):
    """
    Parse ``input_file`` (a Python file object) as a pcap_savefile.

    Returns the savefile on success and None on failure.  ``verbose``
    prints progress information; ``layers`` sets how many protocol
    layers to descend while decoding each packet; ``lazy`` yields a
    packet generator instead of a fully-loaded list.
    """
    global VERBOSE
    saved_verbosity = VERBOSE
    VERBOSE = verbose
    __TRACE__('[+] attempting to load {:s}', (input_file.name,))
    header = _load_savefile_header(input_file)
    if not __validate_header__(header):
        __TRACE__('[!] invalid savefile')
        sfile = None
    else:
        __TRACE__('[+] found valid header')
        if lazy:
            packets = _generate_packets(input_file, header, layers)
            __TRACE__('[+] created packet generator')
        else:
            packets = _load_packets(input_file, header, layers)
            __TRACE__('[+] loaded {:d} packets', (len(packets),))
        sfile = pcap_savefile(header, packets)
        __TRACE__('[+] finished loading savefile.')
    VERBOSE = saved_verbosity
    return sfile
def _read_a_packet(file_h, hdrp, layers=0):
    """
    Read the next packet record from the capture file.

    The file handle must already be positioned at a per-packet header
    (i.e. somewhere after the global header).  Returns None on EOF or a
    truncated record.
    """
    raw_header = file_h.read(16)
    if not raw_header or len(raw_header) != 16:
        return None
    # Decode the per-packet header using the same byte order as the file.
    fmt = '>IIII' if hdrp[0].byteorder == 'big' else '<IIII'
    (timestamp, timestamp_us, capture_len, packet_len) = struct.unpack(fmt, raw_header)
    raw_data = file_h.read(capture_len)
    if not raw_data or len(raw_data) != capture_len:
        return None
    if layers > 0:
        # Descend one layer: hand the remaining budget to the
        # link-layer constructor for this capture's link type.
        ctor = linklayer.clookup(hdrp[0].ll_type)
        payload = ctor(raw_data, layers=layers - 1)
    else:
        payload = raw_data
    return pcap_packet(hdrp, timestamp, timestamp_us, capture_len,
                       packet_len, payload)
def strip_ip(packet):
    """
    Remove the IP layer and return the transport-layer payload,
    parsing raw input into an IP packet first when necessary.
    """
    parsed = packet if isinstance(packet, IP) else IP(packet)
    return parsed.payload
def strip_ethernet(packet):
    """
    Strip the Ethernet frame and return its payload, parsing raw
    input into an Ethernet frame first when necessary.
    """
    frame = packet if isinstance(packet, Ethernet) else Ethernet(packet)
    return frame.payload
def load_network(self, layers=1):
    """
    Decode this frame's payload as the protocol named by ``self.type``.

    When ``layers`` is non-zero and a constructor is registered for the
    EtherType, the payload (expected to be a hexified string) is
    replaced with the parsed network-layer object, passing the
    remaining layer budget down.  Unknown types leave the packet
    untouched.
    """
    if layers:
        ctor = payload_type(self.type)[0]
        if ctor:
            self.payload = ctor(self.payload, layers - 1)
def heartbeat():
    """Log a call heartbeat notification and acknowledge it with "OK"."""
    print("We got a call heartbeat notification\n")
    # POST parameters arrive in request.form; GET parameters in request.args.
    if request.method == 'POST':
        print(request.form)
    else:
        print(request.args)
    return "OK"
def request(self, path, method=None, data={}):
    """Send a request to the Plivo REST API and return the decoded reply.

    path: the URL (relative to the endpoint URL, after the /v1)
    method: the HTTP method to use, defaults to POST
    data: for POST or PUT, a dict of data to send
    Raises ValueError for an empty path and NotImplementedError for an
    unsupported HTTP method.
    """
    if not path:
        raise ValueError('Invalid path parameter')
    if method and method not in ['GET', 'POST', 'DELETE', 'PUT']:
        raise NotImplementedError(
            'HTTP %s method not implemented' % method)
    uri = self.url + path if path.startswith('/') else self.url + '/' + path
    # Google App Engine must use urlfetch; everywhere else urllib2 works.
    fetch = self._appengine_fetch if APPENGINE else self._urllib2_fetch
    return json.loads(fetch(uri, data, method))
def reload_config(self, call_params):
    """REST Reload Plivo Config helper."""
    return self.request('/%s/ReloadConfig/' % self.api_version, 'POST', call_params)

def reload_cache_config(self, call_params):
    """REST Reload Plivo Cache Config helper."""
    return self.request('/%s/ReloadCacheConfig/' % self.api_version, 'POST', call_params)

def call(self, call_params):
    """REST Call helper."""
    return self.request('/%s/Call/' % self.api_version, 'POST', call_params)

def bulk_call(self, call_params):
    """REST BulkCalls helper."""
    return self.request('/%s/BulkCall/' % self.api_version, 'POST', call_params)

def group_call(self, call_params):
    """REST GroupCalls helper."""
    return self.request('/%s/GroupCall/' % self.api_version, 'POST', call_params)

def transfer_call(self, call_params):
    """REST Transfer Live Call helper."""
    return self.request('/%s/TransferCall/' % self.api_version, 'POST', call_params)

def hangup_all_calls(self):
    """REST Hangup All Live Calls helper."""
    return self.request('/%s/HangupAllCalls/' % self.api_version, 'POST')
def hangup_call(self, call_params):
    """REST Hangup Live Call helper."""
    return self.request('/%s/HangupCall/' % self.api_version, 'POST', call_params)

def schedule_hangup(self, call_params):
    """REST Schedule Hangup helper."""
    return self.request('/%s/ScheduleHangup/' % self.api_version, 'POST', call_params)

def cancel_scheduled_hangup(self, call_params):
    """REST Cancel a Scheduled Hangup helper."""
    return self.request('/%s/CancelScheduledHangup/' % self.api_version, 'POST', call_params)

def record_start(self, call_params):
    """REST RecordStart helper."""
    return self.request('/%s/RecordStart/' % self.api_version, 'POST', call_params)

def conference_mute(self, call_params):
    """REST Conference Mute helper."""
    return self.request('/%s/ConferenceMute/' % self.api_version, 'POST', call_params)

def play(self, call_params):
    """REST Play something on a Call helper."""
    return self.request('/%s/Play/' % self.api_version, 'POST', call_params)

def play_stop(self, call_params):
    """REST PlayStop on a Call helper."""
    return self.request('/%s/PlayStop/' % self.api_version, 'POST', call_params)

def schedule_play(self, call_params):
    """REST Schedule playing something on a call helper."""
    return self.request('/%s/SchedulePlay/' % self.api_version, 'POST', call_params)

def cancel_scheduled_play(self, call_params):
    """REST Cancel a Scheduled Play helper."""
    return self.request('/%s/CancelScheduledPlay/' % self.api_version, 'POST', call_params)

def sound_touch(self, call_params):
    """REST Add soundtouch audio effects to a Call."""
    return self.request('/%s/SoundTouch/' % self.api_version, 'POST', call_params)

def sound_touch_stop(self, call_params):
    """REST Remove soundtouch audio effects on a Call."""
    return self.request('/%s/SoundTouchStop/' % self.api_version, 'POST', call_params)
def send_digits(self, call_params):
    """REST Send digits to a Call."""
    return self.request('/%s/SendDigits/' % self.api_version, 'POST', call_params)

def conference_unmute(self, call_params):
    """REST Conference Unmute helper."""
    return self.request('/%s/ConferenceUnmute/' % self.api_version, 'POST', call_params)

def conference_kick(self, call_params):
    """REST Conference Kick helper."""
    return self.request('/%s/ConferenceKick/' % self.api_version, 'POST', call_params)

def conference_hangup(self, call_params):
    """REST Conference Hangup helper."""
    return self.request('/%s/ConferenceHangup/' % self.api_version, 'POST', call_params)

def conference_deaf(self, call_params):
    """REST Conference Deaf helper."""
    return self.request('/%s/ConferenceDeaf/' % self.api_version, 'POST', call_params)

def conference_undeaf(self, call_params):
    """REST Conference Undeaf helper."""
    return self.request('/%s/ConferenceUndeaf/' % self.api_version, 'POST', call_params)

def conference_record_start(self, call_params):
    """REST Conference RecordStart helper."""
    return self.request('/%s/ConferenceRecordStart/' % self.api_version, 'POST', call_params)

def conference_record_stop(self, call_params):
    """REST Conference RecordStop helper."""
    return self.request('/%s/ConferenceRecordStop/' % self.api_version, 'POST', call_params)

def conference_play(self, call_params):
    """REST Conference Play helper."""
    return self.request('/%s/ConferencePlay/' % self.api_version, 'POST', call_params)

def conference_speak(self, call_params):
    """REST Conference Speak helper."""
    return self.request('/%s/ConferenceSpeak/' % self.api_version, 'POST', call_params)

def conference_list(self, call_params):
    """REST Conference List helper."""
    return self.request('/%s/ConferenceList/' % self.api_version, 'POST', call_params)

def conference_list_members(self, call_params):
    """REST Conference List Members helper."""
    return self.request('/%s/ConferenceListMembers/' % self.api_version, 'POST', call_params)
q261291 | Element._xml | validation | def _xml(self, root):
"""
Return an XML element representing this element
"""
element = root.createElement(self.name)
# Add attributes
keys = self.attrs.keys()
keys.sort()
for a in keys:
element.setAttribute(a, self.attrs[a])
if self.body:
text = root.createTextNode(self.body)
element.appendChild(text)
for c in self.elements:
element.appendChild(c._xml(root))
return element | python | {
"resource": ""
} |
def validateRequest(self, uri, postVars, expectedSignature):
    """Validate that a request genuinely came from Plivo.

    uri: the full URI that Plivo requested on your server
    postVars: POST vars that Plivo sent with the request
    expectedSignature: signature in the HTTP X-Plivo-Signature header
    Returns True if the request passes validation, False if not.
    """
    # The signed payload is the URI followed by every POST key/value
    # pair concatenated in sorted key order.
    payload = uri
    for key, value in sorted(postVars.items()):
        payload += key + value
    digest = hmac.new(self.auth_token, payload, sha1).digest()
    # Fix: base64.encodestring was deprecated since 3.1 and removed in
    # Python 3.9; encodebytes is its byte-identical replacement.
    return base64.encodebytes(digest).strip() == expectedSignature
def DFS_prefix(self, root=None):
    """
    Depth-first search in prefix (pre-order) order.

    .. seealso::
        `Wikipedia DFS descritpion <http://en.wikipedia.org/wiki/Depth-first_search>`_

    :param root: node to start the search from (default: the graph root)
    :return: list of nodes
    """
    start = root if root else self._root
    return self._DFS_prefix(start)
def BFS(self, root=None):
    """
    Breadth-first search.

    .. seealso::
        `Wikipedia BFS descritpion <http://en.wikipedia.org/wiki/Breadth-first_search>`_

    :param root: node to start the search from (default: the graph root)
    :return: list of nodes in visit order
    """
    start = root if root else self.root()
    visited = []
    pending = deque([start])
    while pending:
        current = pending.popleft()
        visited.append(current)
        pending.extend(current.children())
    return visited
def add_node(self, label):
    '''Return the node with ``label``; create and register it if new.'''
    if label in self._nodes:
        return self._nodes[label]
    node = Node()
    node['label'] = label
    self._nodes[label] = node
    return node
def add_edge(self, n1_label, n2_label, directed=False):
    """
    Create an edge between the nodes with the given labels,
    materializing missing nodes on demand via ``add_node``.
    """
    edge = Edge(self.add_node(n1_label), self.add_node(n2_label), directed)
    self._edges.append(edge)
    return edge
def parse_dom(dom):
    """Parse a GraphML DOM into a Graph.

    :param dom: dom as returned by minidom.parse or minidom.parseString
    :return: a Graph representation
    """
    graphml = dom.getElementsByTagName("graphml")[0]
    graph_el = graphml.getElementsByTagName("graph")[0]
    g = Graph(graph_el.getAttribute('id'))
    # Nodes, with their <data> entries copied in as attributes.
    for node_el in graph_el.getElementsByTagName("node"):
        n = g.add_node(id=node_el.getAttribute('id'))
        for data in node_el.getElementsByTagName("data"):
            n[data.getAttribute("key")] = data.firstChild.data if data.firstChild else ""
    # Edges: source/target attributes refer to node IDs, see
    # http://graphml.graphdrawing.org/xmlns/1.1/graphml-structure.xsd
    for edge_el in graph_el.getElementsByTagName("edge"):
        e = g.add_edge_by_id(edge_el.getAttribute('source'),
                             edge_el.getAttribute('target'))
        for data in edge_el.getElementsByTagName("data"):
            e[data.getAttribute("key")] = data.firstChild.data if data.firstChild else ""
    return g
def parse_string(self, string):
    """Parse a GraphML string into a Graph.

    :param string: GraphML document text
    :return: Graph
    """
    return self.parse_dom(minidom.parseString(string))
def node(self, node):
    """
    Return the opposite endpoint of this edge,
    or None if ``node`` is not an endpoint.
    """
    if node == self.node1:
        return self.node2
    if node == self.node2:
        return self.node1
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.