_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def configure_uploads(app, upload_sets):
    """
    Configure the given upload sets on an already-configured app.

    Reads each set's configuration, stores it on the app, and registers
    the uploads blueprint when at least one set needs to be served by the
    application itself. May be called repeatedly with different sets.

    .. versionchanged:: 0.1.3
       The uploads module/blueprint will only be registered if it is needed
       to serve the upload sets.

    :param app: The `~flask.Flask` instance to get the configuration from.
    :param upload_sets: The `UploadSet` instances to configure.
    """
    if isinstance(upload_sets, UploadSet):
        upload_sets = (upload_sets,)
    if not hasattr(app, 'upload_set_config'):
        app.upload_set_config = {}
    set_config = app.upload_set_config
    defaults = {
        'dest': app.config.get('UPLOADS_DEFAULT_DEST'),
        'url': app.config.get('UPLOADS_DEFAULT_URL'),
    }
    for uset in upload_sets:
        set_config[uset.name] = config_for_set(uset, app, defaults)
    needs_serving = any(cfg.base_url is None for cfg in set_config.values())
    if needs_serving and '_uploads' not in app.blueprints:
        app.register_blueprint(uploads_mod)
def config(self):
    """
    Return the active configuration for this upload set.

    An explicitly assigned ``_config`` (an `UploadConfiguration`) takes
    precedence; otherwise the configuration is looked up on the current
    Flask application. Assign ``_config`` directly when working outside
    an application context, and reset it to `None` when done.
    """
    explicit = self._config
    if explicit is not None:
        return explicit
    try:
        return current_app.upload_set_config[self.name]
    except AttributeError:
        raise RuntimeError("cannot access configuration outside request")
def url(self, filename):
    """
    Return the URL at which `filename` in this set would be served.

    No check is made that the file actually exists.

    :param filename: The filename to return the URL for.
    """
    base = self.config.base_url
    if base is not None:
        return base + filename
    return url_for('_uploads.uploaded_file', setname=self.name,
                   filename=filename, _external=True)
def path(self, filename, folder=None):
    """
    Return the absolute path a file in this set would be stored at.

    The file itself is not required to exist.

    :param filename: The filename to return the path for.
    :param folder: Optional subfolder within the upload set.
    """
    destination = self.config.destination
    if folder is not None:
        destination = os.path.join(destination, folder)
    return os.path.join(destination, filename)
def extension_allowed(self, ext):
    """
    Return True when the extension `ext` (without the dot) is permitted.

    Called by `file_allowed`; override that and call back into this to
    keep extension checking.

    :param ext: The extension to check, without the dot.
    """
    if ext in self.config.allow:
        return True
    return ext in self.extensions and ext not in self.config.deny
def resolve_conflict(self, target_folder, basename):
    """
    Return a new basename that does not collide inside `target_folder`.

    Appends ``_<number>`` to the stem (keeping the extension) and counts
    upward until an unused name is found.

    :param target_folder: The absolute path to the target.
    :param basename: The file's original basename.
    """
    stem, ext = os.path.splitext(basename)
    attempt = 0
    while True:
        attempt += 1
        candidate = '%s_%d%s' % (stem, attempt, ext)
        if not os.path.exists(os.path.join(target_folder, candidate)):
            return candidate
def get_vprof_version(filename):
    """Extract the ``__version__`` string declared in `filename`.

    Raises RuntimeError when no version assignment is found.
    """
    with open(filename) as src_file:
        contents = src_file.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError('Unable to find version info.')
    return match.group(1)
q270007 | _remove_duplicates | test | def _remove_duplicates(objects):
"""Removes duplicate objects.
http://www.peterbe.com/plog/uniqifiers-benchmark.
"""
seen, uniq = set(), []
for obj in objects:
obj_id = id(obj)
if obj_id in seen:
continue
seen.add(obj_id)
uniq.append(obj)
return uniq | python | {
"resource": ""
} |
def _get_obj_count_difference(objs1, objs2):
    """Return the per-type object count difference between two collections."""
    count1, count2 = (
        _get_object_count_by_type(_process_in_memory_objects(objs))
        for objs in (objs1, objs2))
    return count1 - count2
q270009 | _format_obj_count | test | def _format_obj_count(objects):
"""Formats object count."""
result = []
regex = re.compile(r'<(?P<type>\w+) \'(?P<name>\S+)\'>')
for obj_type, obj_count in objects.items():
if obj_count != 0:
match = re.findall(regex, repr(obj_type))
if match:
obj_type, obj_name = match[0]
result.append(("%s %s" % (obj_type, obj_name), obj_count))
return sorted(result, key=operator.itemgetter(1), reverse=True) | python | {
"resource": ""
} |
q270010 | _CodeEventsTracker._trace_memory_usage | test | def _trace_memory_usage(self, frame, event, arg): #pylint: disable=unused-argument
"""Checks memory usage when 'line' event occur."""
if event == 'line' and frame.f_code.co_filename in self.target_modules:
self._events_list.append(
(frame.f_lineno, self._process.memory_info().rss,
frame.f_code.co_name, frame.f_code.co_filename))
return self._trace_memory_usage | python | {
"resource": ""
} |
def code_events(self):
    """Return processed memory usage, cached after the first computation.

    Rows have the shape [event_index, lineno, mem_mb, func, fname].
    Consecutive raw events for the same line/function/file are merged,
    keeping the highest memory reading.
    """
    if self._resulting_events:
        return self._resulting_events
    for i, (lineno, mem, func, fname) in enumerate(self._events_list):
        mem_in_mb = float(mem - self.mem_overhead) / _BYTES_IN_MB
        # BUG FIX: rows are appended as [i + 1, lineno, mem_in_mb, func,
        # fname], but the merge condition previously compared lineno/func/
        # fname/mem at offsets 0/2/3/1 (i.e. against the event index and
        # shifted fields), so duplicate rows were never merged. Compare at
        # offsets 1/3/4/2 instead.
        if (self._resulting_events and
                self._resulting_events[-1][1] == lineno and
                self._resulting_events[-1][3] == func and
                self._resulting_events[-1][4] == fname and
                self._resulting_events[-1][2] < mem_in_mb):
            self._resulting_events[-1][2] = mem_in_mb
        else:
            self._resulting_events.append(
                [i + 1, lineno, mem_in_mb, func, fname])
    return self._resulting_events
def obj_overhead(self):
    """Return per-type counts of objects that are profiler overhead.

    The overhead objects are hardcoded for convenience.
    """
    overhead_count = _get_object_count_by_type([
        self,
        self._resulting_events,
        self._events_list,
        self._process,
    ])
    # Account for the reference to __dict__ and for the reference to the
    # current module.
    overhead_count[dict] += 2
    return overhead_count
def compute_mem_overhead(self):
    """Store the profiler's memory overhead (RSS delta since startup)."""
    current_rss = self._process.memory_info().rss
    self.mem_overhead = current_rss - builtins.initial_rss_size
def profile_package(self):
    """Collect memory stats for a package run as ``__main__``.

    Returns a tuple of (event tracker, None).
    """
    modules = base_profiler.get_pkg_module_names(self._run_object)
    try:
        with _CodeEventsTracker(modules) as tracker:
            tracker.compute_mem_overhead()
            runpy.run_path(self._run_object, run_name='__main__')
    except SystemExit:
        pass  # sys.exit() in the profiled package is expected.
    return tracker, None
def profile_module(self):
    """Collect memory stats for a single module.

    Returns a tuple of (event tracker, None).
    """
    modules = {self._run_object}
    try:
        with open(self._run_object, 'rb') as srcfile,\
                _CodeEventsTracker(modules) as tracker:
            code = compile(srcfile.read(), self._run_object, 'exec')
            tracker.compute_mem_overhead()
            exec(code, self._globs, None)
    except SystemExit:
        pass  # sys.exit() in the profiled module is expected.
    return tracker, None
def profile_function(self):
    """Collect memory stats for a function call.

    Returns a tuple of (event tracker, function result).
    """
    modules = {self._run_object.__code__.co_filename}
    with _CodeEventsTracker(modules) as tracker:
        tracker.compute_mem_overhead()
        result = self._run_object(*self._run_args, **self._run_kwargs)
    return tracker, result
def run(self):
    """Collect memory stats for the specified Python program."""
    existing_objects = _get_in_memory_objects()
    prof, result = self.profile()
    new_objects = _get_in_memory_objects()
    leftover_count = _get_obj_count_difference(new_objects, existing_objects)
    object_count = leftover_count - prof.obj_overhead
    # The existing_objects list itself is profiler overhead as well.
    object_count[list] -= 1
    return {
        'objectName': self._object_name,
        'codeEvents': prof.code_events,
        'totalEvents': len(prof.code_events),
        'objectsCount': _format_obj_count(object_count),
        'result': result,
        'timestamp': int(time.time()),
    }
def get_pkg_module_names(package_path):
    """Return absolute filenames of modules found inside a package.

    Args:
        package_path: Path to Python package.
    Returns:
        A set of absolute module filenames.
    """
    filenames = set()
    for importer, modname, _ in pkgutil.iter_modules(path=[package_path]):
        candidate = os.path.join(importer.path, '%s.py' % modname)
        if os.path.exists(candidate):
            filenames.add(os.path.abspath(candidate))
    return filenames
def run_in_separate_process(func, *args, **kwargs):
    """Execute `func` in a separate process and return its output.

    Used instead of a decorator, since the multiprocessing module cannot
    serialize decorated functions on all platforms. Any exception recorded
    by the child process is re-raised in the parent.
    """
    manager = multiprocessing.Manager()
    shared = manager.dict()
    worker = ProcessWithException(
        shared, target=func, args=args, kwargs=kwargs)
    worker.start()
    worker.join()
    child_error = worker.exception
    if child_error:
        raise child_error
    return worker.output
def get_run_object_type(run_object):
    """Classify a run object as 'function', 'package' or 'module'."""
    if isinstance(run_object, tuple):
        return 'function'
    target = run_object.partition(' ')[0]
    return 'package' if os.path.isdir(target) else 'module'
def init_module(self, run_object):
    """Initialize the profiler to run a module.

    Splits `run_object` into the script path and its argument string,
    builds the globals for exec, prepends the script directory to
    sys.path, and rewrites sys.argv.
    """
    self.profile = self.profile_module
    self._run_object, _, self._run_args = run_object.partition(' ')
    self._object_name = '%s (module)' % self._run_object
    self._globs = {
        '__file__': self._run_object,
        '__name__': '__main__',
        '__package__': None,
    }
    script_dir = os.path.dirname(self._run_object)
    if sys.path[0] != script_dir:
        sys.path.insert(0, script_dir)
    self._replace_sysargs()
def init_package(self, run_object):
    """Initialize the profiler to run a package."""
    self.profile = self.profile_package
    self._run_object, _, self._run_args = run_object.partition(' ')
    self._object_name = '%s (package)' % self._run_object
    self._replace_sysargs()
def init_function(self, run_object):
    """Initialize the profiler to run a function.

    `run_object` is a (function, args, kwargs) tuple.
    """
    self.profile = self.profile_function
    self._run_object, self._run_args, self._run_kwargs = run_object
    source_file = inspect.getsourcefile(self._run_object)
    self._object_name = '%s @ %s (function)' % (
        self._run_object.__name__, source_file)
q270024 | BaseProfiler._replace_sysargs | test | def _replace_sysargs(self):
"""Replaces sys.argv with proper args to pass to script."""
sys.argv[:] = [self._run_object]
if self._run_args:
sys.argv += self._run_args.split() | python | {
"resource": ""
} |
def sample(self, signum, frame):  # pylint: disable=unused-argument
    """Signal handler: sample the current stack into self._stats.

    Args:
        signum: Signal that activated the handler.
        frame: Frame on top of the stack when the signal was handled.
    """
    stack = []
    while frame and frame != self.base_frame:
        code = frame.f_code
        stack.append((code.co_name, code.co_filename, code.co_firstlineno))
        frame = frame.f_back
    self._stats[tuple(stack)] += 1
    # Re-arm the profiling timer for the next sample.
    signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
q270026 | _StatProfiler._insert_stack | test | def _insert_stack(stack, sample_count, call_tree):
"""Inserts stack into the call tree.
Args:
stack: Call stack.
sample_count: Sample count of call stack.
call_tree: Call tree.
"""
curr_level = call_tree
for func in stack:
next_level_index = {
node['stack']: node for node in curr_level['children']}
if func not in next_level_index:
new_node = {'stack': func, 'children': [], 'sampleCount': 0}
curr_level['children'].append(new_node)
curr_level = new_node
else:
curr_level = next_level_index[func]
curr_level['sampleCount'] = sample_count | python | {
"resource": ""
} |
q270027 | _StatProfiler._fill_sample_count | test | def _fill_sample_count(self, node):
"""Counts and fills sample counts inside call tree."""
node['sampleCount'] += sum(
self._fill_sample_count(child) for child in node['children'])
return node['sampleCount'] | python | {
"resource": ""
} |
def _format_tree(self, node, total_samples):
    """Reformat a call-tree node (and its whole subtree) for the UI."""
    funcname, filename, _ = node['stack']
    return {
        'stack': node['stack'],
        'children': [self._format_tree(child, total_samples)
                     for child in node['children']],
        'sampleCount': node['sampleCount'],
        'samplePercentage': self._get_percentage(
            node['sampleCount'], total_samples),
        'colorHash': base_profiler.hash_name(
            '%s @ %s' % (funcname, filename)),
    }
def call_tree(self):
    """Build and return the sampled call tree (empty dict if no samples)."""
    root = {'stack': 'base', 'sampleCount': 0, 'children': []}
    for stack, sample_count in self._stats.items():
        self._insert_stack(reversed(stack), sample_count, root)
    self._fill_sample_count(root)
    if not root['children']:
        return {}
    return self._format_tree(root['children'][0], root['sampleCount'])
def _profile_package(self):
    """Run the statistical profiler on a package and format the stats."""
    with _StatProfiler() as prof:
        prof.base_frame = inspect.currentframe()
        try:
            runpy.run_path(self._run_object, run_name='__main__')
        except SystemExit:
            pass
        tree = prof.call_tree
    return {
        'objectName': self._object_name,
        'sampleInterval': _SAMPLE_INTERVAL,
        'runTime': prof.run_time,
        'callStats': tree,
        'totalSamples': tree.get('sampleCount', 0),
        'timestamp': int(time.time()),
    }
def _profile_module(self):
    """Run the statistical profiler on a module and format the stats."""
    with open(self._run_object, 'rb') as srcfile, _StatProfiler() as prof:
        code = compile(srcfile.read(), self._run_object, 'exec')
        prof.base_frame = inspect.currentframe()
        try:
            exec(code, self._globs, None)
        except SystemExit:
            pass
        tree = prof.call_tree
    return {
        'objectName': self._object_name,
        'sampleInterval': _SAMPLE_INTERVAL,
        'runTime': prof.run_time,
        'callStats': tree,
        'totalSamples': tree.get('sampleCount', 0),
        'timestamp': int(time.time()),
    }
def profile_function(self):
    """Run the statistical profiler on a function call."""
    with _StatProfiler() as prof:
        result = self._run_object(*self._run_args, **self._run_kwargs)
        tree = prof.call_tree
    return {
        'objectName': self._object_name,
        'sampleInterval': _SAMPLE_INTERVAL,
        'runTime': prof.run_time,
        'callStats': tree,
        'totalSamples': tree.get('sampleCount', 0),
        'result': result,
        'timestamp': int(time.time()),
    }
def _transform_stats(prof):
    """Convert collected pstats data into sorted records for the UI."""
    total_time = prof.total_tt
    records = []
    for (filename, lineno, funcname), params in prof.stats.items():
        cum_calls, num_calls, time_per_call, cum_time, _ = params
        if total_time == 0:
            percentage = 0
        else:
            percentage = round(100 * (cum_time / total_time), 4)
        color_hash = base_profiler.hash_name(
            '%s @ %s' % (funcname, filename))
        records.append(
            (filename, lineno, funcname, round(cum_time, 4), percentage,
             num_calls, cum_calls, time_per_call, filename, color_hash))
    # Highest cumulative-time percentage first.
    records.sort(key=operator.itemgetter(4), reverse=True)
    return records
q270034 | Profiler._profile_package | test | def _profile_package(self):
"""Runs cProfile on a package."""
prof = cProfile.Profile()
prof.enable()
try:
runpy.run_path(self._run_object, run_name='__main__')
except SystemExit:
pass
prof.disable()
prof_stats = pstats.Stats(prof)
prof_stats.calc_callees()
return {
'objectName': self._object_name,
'callStats': self._transform_stats(prof_stats),
'totalTime': prof_stats.total_tt,
'primitiveCalls': prof_stats.prim_calls,
'totalCalls': prof_stats.total_calls,
'timestamp': int(time.time())
} | python | {
"resource": ""
} |
q270035 | Profiler._profile_module | test | def _profile_module(self):
"""Runs cProfile on a module."""
prof = cProfile.Profile()
try:
with open(self._run_object, 'rb') as srcfile:
code = compile(srcfile.read(), self._run_object, 'exec')
prof.runctx(code, self._globs, None)
except SystemExit:
pass
prof_stats = pstats.Stats(prof)
prof_stats.calc_callees()
return {
'objectName': self._object_name,
'callStats': self._transform_stats(prof_stats),
'totalTime': prof_stats.total_tt,
'primitiveCalls': prof_stats.prim_calls,
'totalCalls': prof_stats.total_calls,
'timestamp': int(time.time())
} | python | {
"resource": ""
} |
def profile_function(self):
    """Run cProfile over a function call and return formatted stats."""
    profile = cProfile.Profile()
    profile.enable()
    result = self._run_object(*self._run_args, **self._run_kwargs)
    profile.disable()
    stats = pstats.Stats(profile)
    stats.calc_callees()
    return {
        'objectName': self._object_name,
        'callStats': self._transform_stats(stats),
        'totalTime': stats.total_tt,
        'primitiveCalls': stats.prim_calls,
        'totalCalls': stats.total_calls,
        'result': result,
        'timestamp': int(time.time()),
    }
def init_db():
    """Create the guestbook schema on a fresh DB connection."""
    with contextlib.closing(connect_to_db()) as conn:
        conn.cursor().executescript(DB_SCHEMA)
        conn.commit()
def show_guestbook():
    """Render all existing guestbook records, newest first."""
    cursor = flask.g.db.execute(
        'SELECT name, message FROM entry ORDER BY id DESC;')
    entries = [{'name': name, 'message': message}
               for name, message in cursor.fetchall()]
    return jinja2.Template(LAYOUT).render(entries=entries)
def add_entry():
    """Insert a single guestbook record and redirect to the index."""
    form = flask.request.form
    flask.g.db.execute(
        'INSERT INTO entry (name, message) VALUES (?, ?)',
        (form['name'], form['message']))
    flask.g.db.commit()
    return flask.redirect('/')
def profiler_handler(uri):
    """Profile the guestbook handler matching `uri`, then redirect home."""
    if uri == 'main':
        # show_guestbook is reached via HTTP GET.
        runner.run(show_guestbook, 'cmhp')
    elif uri == 'add':
        # add_entry is reached via HTTP POST.
        runner.run(add_entry, 'cmhp')
    return flask.redirect('/')
def start(host, port, profiler_stats, dont_start_browser, debug_mode):
    """Start the stats HTTP server with the given parameters.

    Args:
        host: Server host name.
        port: Server port.
        profiler_stats: A dict with collected program stats.
        dont_start_browser: Don't open a browser after profiling.
        debug_mode: When False, stderr is redirected to /dev/null.
    """
    handler = functools.partial(StatsHandler, profiler_stats)
    if not debug_mode:
        sys.stderr = open(os.devnull, 'w')
    print('Starting HTTP server...')
    if not dont_start_browser:
        webbrowser.open('http://{}:{}/'.format(host, port))
    try:
        StatsServer((host, port), handler).serve_forever()
    except KeyboardInterrupt:
        print('Stopping...')
        sys.exit(0)
def _handle_root():
    """Serve the profiler's index.html page."""
    page_path = os.path.join(os.path.dirname(__file__), _PROFILE_HTML)
    with io.open(page_path, 'rb') as page_file:
        return page_file.read(), 'text/html'
def _handle_other(self):
    """Serve a static file resolved from the request path."""
    static_path = os.path.join(
        os.path.dirname(__file__), _STATIC_DIR, self.path[1:])
    with io.open(static_path, 'rb') as static_file:
        content = static_file.read()
    extension = os.path.splitext(self.path)[1]
    return content, 'text/%s' % extension[1:]
def do_GET(self):
    """Serve an HTTP GET request with gzip-compressed content."""
    handler = self.uri_map.get(self.path) or self._handle_other
    content, content_type = handler()
    payload = gzip.compress(content)
    self._send_response(
        200, headers=(('Content-type', '%s; charset=utf-8' % content_type),
                      ('Content-Encoding', 'gzip'),
                      ('Content-Length', len(payload))))
    self.wfile.write(payload)
def do_POST(self):
    """Handle an HTTP POST that carries gzipped JSON profiler stats."""
    payload = self.rfile.read(int(self.headers['Content-Length']))
    stats_json = gzip.decompress(payload)
    self._profile_json.update(json.loads(stats_json.decode('utf-8')))
    self._send_response(
        200, headers=(('Content-type', '%s; charset=utf-8' % 'text/json'),
                      ('Content-Encoding', 'gzip'),
                      ('Content-Length', len(payload))))
q270046 | StatsHandler._send_response | test | def _send_response(self, http_code, message=None, headers=None):
"""Sends HTTP response code, message and headers."""
self.send_response(http_code, message)
if headers:
for header in headers:
self.send_header(*header)
self.end_headers() | python | {
"resource": ""
} |
def check_standard_dir(module_path):
    """Return True if `module_path` is in stdlib or installed packages."""
    if 'site-packages' in module_path:
        return True
    return any(fnmatch.fnmatchcase(module_path, prefix + '*')
               for prefix in _STDLIB_PATHS)
def record_line(self, frame, event, arg):  # pylint: disable=unused-argument
    """Trace callback: accumulate per-line execution time.

    Charges the wall time elapsed since the previous 'line' event to the
    previously executing line, then remembers the current line.
    """
    if event == 'line':
        if self.prev_timestamp:
            elapsed = time.time() - self.prev_timestamp
            self.lines.append([self.prev_path, self.prev_lineno, elapsed])
        self.prev_lineno = frame.f_lineno
        self.prev_path = frame.f_code.co_filename
        self.prev_timestamp = time.time()
    return self.record_line
def lines_without_stdlib(self):
    """Yield recorded lines with stdlib entries folded into predecessors.

    Standard-library lines (and this module's own lines) are not yielded
    themselves; their runtime is added to the pending non-stdlib line.
    """
    pending = None
    this_module = inspect.getabsfile(inspect.currentframe())
    for module_path, lineno, runtime in self.lines:
        abspath = os.path.abspath(module_path)
        if not pending:
            pending = [abspath, lineno, runtime]
        elif (not check_standard_dir(module_path) and
                abspath != this_module):
            yield pending
            pending = [abspath, lineno, runtime]
        else:
            pending[2] += runtime
    yield pending
def fill_heatmap(self):
    """Populate the heatmap and execution-count mappings from events."""
    for path, line_number, elapsed in self.lines_without_stdlib:
        self._execution_count[path][line_number] += 1
        self._heatmap[path][line_number] += elapsed
q270051 | CodeHeatmapProfiler._skip_lines | test | def _skip_lines(src_code, skip_map):
"""Skips lines in src_code specified by skip map."""
if not skip_map:
return [['line', j + 1, l] for j, l in enumerate(src_code)]
code_with_skips, i = [], 0
for line, length in skip_map:
code_with_skips.extend(
['line', i + j + 1, l] for j, l in enumerate(src_code[i:line]))
if (code_with_skips
and code_with_skips[-1][0] == 'skip'): # Merge skips.
code_with_skips[-1][1] += length
else:
code_with_skips.append(['skip', length])
i = line + length
code_with_skips.extend(
['line', i + j + 1, l] for j, l in enumerate(src_code[i:]))
return code_with_skips | python | {
"resource": ""
} |
def _profile_package(self):
    """Calculate the code heatmap for a package run."""
    with _CodeHeatmapCalculator() as calc:
        try:
            runpy.run_path(self._run_object, run_name='__main__')
        except SystemExit:
            pass
    heatmaps = [
        self._format_heatmap(fname, heat, calc.execution_count[fname])
        for fname, heat in calc.heatmap.items()
        if os.path.isfile(fname)
    ]
    return {
        'objectName': self._run_object,
        'runTime': sum(entry['runTime'] for entry in heatmaps),
        'heatmaps': heatmaps,
    }
q270053 | CodeHeatmapProfiler._format_heatmap | test | def _format_heatmap(self, filename, heatmap, execution_count):
"""Formats heatmap for UI."""
with open(filename) as src_file:
file_source = src_file.read().split('\n')
skip_map = self._calc_skips(heatmap, len(file_source))
run_time = sum(time for time in heatmap.values())
return {
'name': filename,
'heatmap': heatmap,
'executionCount': execution_count,
'srcCode': self._skip_lines(file_source, skip_map),
'runTime': run_time
} | python | {
"resource": ""
} |
def _profile_module(self):
    """Calculate the code heatmap for a single module."""
    with open(self._run_object, 'r') as srcfile:
        code = compile(srcfile.read(), self._run_object, 'exec')
    try:
        with _CodeHeatmapCalculator() as calc:
            exec(code, self._globs, None)
    except SystemExit:
        pass
    heatmaps = [
        self._format_heatmap(fname, heat, calc.execution_count[fname])
        for fname, heat in calc.heatmap.items()
        if os.path.isfile(fname)
    ]
    return {
        'objectName': self._run_object,
        'runTime': sum(entry['runTime'] for entry in heatmaps),
        'heatmaps': heatmaps,
    }
def profile_function(self):
    """Calculate the code heatmap for a function call."""
    with _CodeHeatmapCalculator() as calc:
        result = self._run_object(*self._run_args, **self._run_kwargs)
    code_lines, start_line = inspect.getsourcelines(self._run_object)
    source_lines = [('line', start_line + offset, text)
                    for offset, text in enumerate(code_lines)]
    filename = os.path.abspath(inspect.getsourcefile(self._run_object))
    heatmap = calc.heatmap[filename]
    run_time = sum(heatmap.values())
    return {
        'objectName': self._object_name,
        'runTime': run_time,
        'result': result,
        'timestamp': int(time.time()),
        'heatmaps': [{
            'name': self._object_name,
            'heatmap': heatmap,
            'executionCount': calc.execution_count[filename],
            'srcCode': source_lines,
            'runTime': run_time,
        }],
    }
def run_profilers(run_object, prof_config, verbose=False):
    """Run the configured profilers on run_object.

    Args:
        run_object: An object (string or tuple) for profiling.
        prof_config: A string with single-letter profiler options.
        verbose: Print each profiler's name before running it.
    Returns:
        An ordered dict mapping option letters to collected stats.
    Raises:
        AmbiguousConfigurationError: when prof_config repeats an option.
        BadOptionError: when prof_config contains an unknown option.
    """
    if len(set(prof_config)) != len(prof_config):
        raise AmbiguousConfigurationError(
            'Profiler configuration %s is ambiguous' % prof_config)
    known_options = {option for option, _ in _PROFILERS}
    for option in prof_config:
        if option not in known_options:
            raise BadOptionError('Unknown option: %s' % option)
    run_stats = OrderedDict()
    for option, profiler_class in _PROFILERS:
        if option not in prof_config:
            continue
        profiler = profiler_class(run_object)
        if verbose:
            print('Running %s...' % profiler.__class__.__name__)
        run_stats[option] = profiler.run()
    return run_stats
def run(func, options, args=(), kwargs=None, host='localhost', port=8000):
    """Run profilers on a function and send the stats to a remote host.

    Args:
        func: A Python function.
        options: A string with profilers configuration (i.e. 'cmh').
        args: func non-keyword arguments.
        kwargs: func keyword arguments; None means no keyword arguments.
        host: Host name to send collected data.
        port: Port number to send collected data.
    Returns:
        A result of func execution.
    """
    # BUG FIX: the default used to be a shared mutable dict ({}), which any
    # callee mutation would leak across calls; use a None sentinel instead.
    if kwargs is None:
        kwargs = {}
    run_stats = run_profilers((func, args, kwargs), options)
    result = None
    for prof in run_stats:
        if not result:
            result = run_stats[prof]['result']
        del run_stats[prof]['result']  # Don't send result to remote host.
    post_data = gzip.compress(
        json.dumps(run_stats).encode('utf-8'))
    urllib.request.urlopen('http://%s:%s' % (host, port), post_data)
    return result
def predict_proba(self, X):
    """
    Return probability estimates for the RDD containing test vector X.

    Parameters
    ----------
    X : RDD containing array-like items, shape = [m_samples, n_features]

    Returns
    -------
    C : RDD with array-like items, shape = [n_samples, n_classes]
        Per-block class membership probabilities; columns correspond to
        the classes in sorted order, as in the `classes_` attribute.
    """
    check_rdd(X, (sp.spmatrix, np.ndarray))
    return X.map(
        lambda block: super(SparkBaseNB, self).predict_proba(block))
def predict_log_proba(self, X):
    """
    Return log-probability estimates for the RDD containing test vector X.

    Parameters
    ----------
    X : RDD containing array-like items, shape = [m_samples, n_features]

    Returns
    -------
    C : RDD with array-like items, shape = [n_samples, n_classes]
        Per-block class log-probabilities; columns correspond to the
        classes in sorted order, as in the `classes_` attribute.
    """
    # scikit-learn calls self.predict_log_proba(X) from predict_proba, so
    # plain (non-RDD) input must keep the superclass behavior.
    if not isinstance(X, BlockRDD):
        return super(SparkBaseNB, self).predict_log_proba(X)
    check_rdd(X, (sp.spmatrix, np.ndarray))
    return X.map(
        lambda block: super(SparkBaseNB, self).predict_log_proba(block))
def fit(self, Z, classes=None):
    """Fit Gaussian Naive Bayes according to blockwise (X, y) data.

    Parameters
    ----------
    Z : DictRDD with 'X' (training vectors) and 'y' (targets) columns.
    classes : array-like, optional
        All target classes, passed through to partial_fit.

    Returns
    -------
    self : object
        Returns self, updated with the averaged fitted state.
    """
    check_rdd(Z, {'X': (sp.spmatrix, np.ndarray),
                  'y': (sp.spmatrix, np.ndarray)})
    models = Z[:, ['X', 'y']].map(
        lambda X_y: self.partial_fit(X_y[0], X_y[1], classes))
    averaged = models.reduce(operator.add)
    self.__dict__.update(averaged.__dict__)
    return self
q270061 | SparkCountVectorizer._count_vocab | test | def _count_vocab(self, analyzed_docs):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
vocabulary = self.vocabulary_
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in analyzed_docs:
for feature in doc:
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
if self.binary:
X.data.fill(1)
return X | python | {
"resource": ""
} |
q270062 | SparkCountVectorizer._sort_features | test | def _sort_features(self, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return map_index | python | {
"resource": ""
} |
q270063 | SparkCountVectorizer._limit_features | test | def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = X.map(_document_frequency).sum()
tfs = X.map(lambda x: np.asarray(x.sum(axis=0))).sum().ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return kept_indices, removed_terms | python | {
"resource": ""
} |
q270064 | SparkCountVectorizer.fit_transform | test | def fit_transform(self, Z):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
Z : iterable or DictRDD with column 'X'
An iterable of raw_documents which yields either str, unicode or
file objects; or a DictRDD with column 'X' containing such
iterables.
Returns
-------
X : array, [n_samples, n_features] or DictRDD
Document-term matrix.
"""
self._validate_vocabulary()
# map analyzer and cache result
analyze = self.build_analyzer()
A = Z.transform(lambda X: list(map(analyze, X)), column='X').persist()
# create vocabulary
X = A[:, 'X'] if isinstance(A, DictRDD) else A
self.vocabulary_ = self._init_vocab(X)
# transform according to vocabulary
mapper = self.broadcast(self._count_vocab, A.context)
Z = A.transform(mapper, column='X', dtype=sp.spmatrix)
if not self.fixed_vocabulary_:
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
# limit features according to min_df, max_df parameters
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
kept_indices, self.stop_words_ = self._limit_features(
X, self.vocabulary_, max_doc_count, min_doc_count, max_features)
# sort features
map_index = self._sort_features(self.vocabulary_)
# combined mask
mask = kept_indices[map_index]
Z = Z.transform(lambda x: x[:, mask], column='X', dtype=sp.spmatrix)
A.unpersist()
return Z | python | {
"resource": ""
} |
q270065 | SparkCountVectorizer.transform | test | def transform(self, Z):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
analyze = self.build_analyzer()
mapper = self.broadcast(self._count_vocab, Z.context)
Z = Z.transform(lambda X: list(map(analyze, X)), column='X') \
.transform(mapper, column='X', dtype=sp.spmatrix)
return Z | python | {
"resource": ""
} |
q270066 | SparkStandardScaler.to_scikit | test | def to_scikit(self):
"""
Convert to equivalent StandardScaler
"""
scaler = StandardScaler(with_mean=self.with_mean,
with_std=self.with_std,
copy=self.copy)
scaler.__dict__ = self.__dict__
return scaler | python | {
"resource": ""
} |
q270067 | SparkLinearModelMixin._spark_fit | test | def _spark_fit(self, cls, Z, *args, **kwargs):
"""Wraps a Scikit-learn Linear model's fit method to use with RDD
input.
Parameters
----------
cls : class object
The sklearn linear model's class to wrap.
Z : TupleRDD or DictRDD
The distributed train data in a DictRDD.
Returns
-------
self: the wrapped class
"""
mapper = lambda X_y: super(cls, self).fit(
X_y[0], X_y[1], *args, **kwargs
)
models = Z.map(mapper)
avg = models.reduce(operator.add) / models.count()
self.__dict__.update(avg.__dict__)
return self | python | {
"resource": ""
} |
q270068 | SparkLinearModelMixin._spark_predict | test | def _spark_predict(self, cls, X, *args, **kwargs):
"""Wraps a Scikit-learn Linear model's predict method to use with RDD
input.
Parameters
----------
cls : class object
The sklearn linear model's class to wrap.
Z : ArrayRDD
The distributed data to predict in a DictRDD.
Returns
-------
self: the wrapped class
"""
return X.map(lambda X: super(cls, self).predict(X, *args, **kwargs)) | python | {
"resource": ""
} |
q270069 | SparkLinearRegression.fit | test | def fit(self, Z):
"""
Fit linear model.
Parameters
----------
Z : DictRDD with (X, y) values
X containing numpy array or sparse matrix - The training data
y containing the target values
Returns
-------
self : returns an instance of self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
return self._spark_fit(SparkLinearRegression, Z) | python | {
"resource": ""
} |
q270070 | SparkPipeline.fit | test | def fit(self, Z, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
Z : ArrayRDD, TupleRDD or DictRDD
Input data in blocked distributed format.
Returns
-------
self : SparkPipeline
"""
Zt, fit_params = self._pre_transform(Z, **fit_params)
self.steps[-1][-1].fit(Zt, **fit_params)
Zt.unpersist()
return self | python | {
"resource": ""
} |
q270071 | SparkPipeline.fit_transform | test | def fit_transform(self, Z, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator."""
Zt, fit_params = self._pre_transform(Z, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Zt, **fit_params)
else:
return self.steps[-1][-1].fit(Zt, **fit_params).transform(Zt) | python | {
"resource": ""
} |
q270072 | SparkPipeline.score | test | def score(self, Z):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score."""
Zt = Z
for name, transform in self.steps[:-1]:
Zt = transform.transform(Zt)
return self.steps[-1][-1].score(Zt) | python | {
"resource": ""
} |
q270073 | SparkGridSearchCV._fit | test | def _fit(self, Z, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
cv = self.cv
cv = _check_cv(cv, Z)
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch, backend="threading"
)(
delayed(_fit_and_score)(clone(base_estimator), Z, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
best_estimator.fit(Z, **self.fit_params)
self.best_estimator_ = best_estimator
return self | python | {
"resource": ""
} |
q270074 | _score | test | def _score(estimator, Z_test, scorer):
"""Compute the score of an estimator on a given test set."""
score = scorer(estimator, Z_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score | python | {
"resource": ""
} |
q270075 | SparkKMeans.fit | test | def fit(self, Z):
"""Compute k-means clustering.
Parameters
----------
Z : ArrayRDD or DictRDD containing array-like or sparse matrix
Train data.
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (np.ndarray, sp.spmatrix))
if self.init == 'k-means||':
self._mllib_model = MLlibKMeans.train(
X.unblock(),
self.n_clusters,
maxIterations=self.max_iter,
initializationMode="k-means||")
self.cluster_centers_ = self._mllib_model.centers
else:
models = X.map(lambda X: super(SparkKMeans, self).fit(X))
models = models.map(lambda model: model.cluster_centers_).collect()
return super(SparkKMeans, self).fit(np.concatenate(models)) | python | {
"resource": ""
} |
q270076 | SparkKMeans.predict | test | def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : ArrayRDD containing array-like, sparse matrix
New data to predict.
Returns
-------
labels : ArrayRDD with predictions
Index of the cluster each sample belongs to.
"""
check_rdd(X, (np.ndarray, sp.spmatrix))
if hasattr(self, '_mllib_model'):
if isinstance(X, ArrayRDD):
X = X.unblock()
return X.map(lambda x: self._mllib_model.predict(x))
else:
rdd = X.map(lambda X: super(SparkKMeans, self).predict(X))
return ArrayRDD(rdd) | python | {
"resource": ""
} |
q270077 | SparkSGDClassifier.predict | test | def predict(self, X):
"""Distributed method to predict class labels for samples in X.
Parameters
----------
X : ArrayRDD containing {array-like, sparse matrix}
Samples.
Returns
-------
C : ArrayRDD
Predicted class label per sample.
"""
check_rdd(X, (sp.spmatrix, np.ndarray))
return self._spark_predict(SparkSGDClassifier, X) | python | {
"resource": ""
} |
q270078 | check_rdd_dtype | test | def check_rdd_dtype(rdd, expected_dtype):
"""Checks if the blocks in the RDD matches the expected types.
Parameters:
-----------
rdd: splearn.BlockRDD
The RDD to check
expected_dtype: {type, list of types, tuple of types, dict of types}
Expected type(s). If the RDD is a DictRDD the parameter type is
restricted to dict.
Returns:
--------
accept: bool
Returns if the types are matched.
"""
if not isinstance(rdd, BlockRDD):
raise TypeError("Expected {0} for parameter rdd, got {1}."
.format(BlockRDD, type(rdd)))
if isinstance(rdd, DictRDD):
if not isinstance(expected_dtype, dict):
raise TypeError('Expected {0} for parameter '
'expected_dtype, got {1}.'
.format(dict, type(expected_dtype)))
accept = True
types = dict(list(zip(rdd.columns, rdd.dtype)))
for key, values in expected_dtype.items():
if not isinstance(values, (tuple, list)):
values = [values]
accept = accept and types[key] in values
return accept
if not isinstance(expected_dtype, (tuple, list)):
expected_dtype = [expected_dtype]
return rdd.dtype in expected_dtype | python | {
"resource": ""
} |
q270079 | SparkDictVectorizer.fit | test | def fit(self, Z):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
Z : DictRDD with column 'X'
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
"""Create vocabulary
"""
class SetAccum(AccumulatorParam):
def zero(self, initialValue):
return set(initialValue)
def addInPlace(self, v1, v2):
v1 |= v2
return v1
accum = X.context.accumulator(set(), SetAccum())
def mapper(X, separator=self.separator):
feature_names = []
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
feature_names.append(f)
accum.add(set(feature_names))
X.foreach(mapper) # init vocabulary
feature_names = list(accum.value)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self | python | {
"resource": ""
} |
q270080 | SparkVarianceThreshold.fit | test | def fit(self, Z):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (np.ndarray, sp.spmatrix))
def mapper(X):
"""Calculate statistics for every numpy or scipy blocks."""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
mean, var = mean_variance_axis(X, axis=0)
else:
mean, var = np.mean(X, axis=0), np.var(X, axis=0)
return X.shape[0], mean, var
def reducer(a, b):
"""Calculate the combined statistics."""
n_a, mean_a, var_a = a
n_b, mean_b, var_b = b
n_ab = n_a + n_b
mean_ab = ((mean_a * n_a) + (mean_b * n_b)) / n_ab
var_ab = (((n_a * var_a) + (n_b * var_b)) / n_ab) + \
((n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
return (n_ab, mean_ab, var_ab)
_, _, self.variances_ = X.map(mapper).treeReduce(reducer)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self | python | {
"resource": ""
} |
q270081 | SparkTruncatedSVD.fit_transform | test | def fit_transform(self, Z):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (sp.spmatrix, np.ndarray))
if self.algorithm == "em":
X = X.persist() # boosting iterative svm
Sigma, V = svd_em(X, k=self.n_components, maxiter=self.n_iter,
tol=self.tol, compute_u=False,
seed=self.random_state)
self.components_ = V
X.unpersist()
return self.transform(Z)
else:
# TODO: raise warning non distributed
return super(SparkTruncatedSVD, self).fit_transform(X.tosparse()) | python | {
"resource": ""
} |
q270082 | SparkTruncatedSVD.transform | test | def transform(self, Z):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (sp.spmatrix, np.ndarray))
mapper = self.broadcast(
super(SparkTruncatedSVD, self).transform, Z.context)
return Z.transform(mapper, column='X', dtype=np.ndarray) | python | {
"resource": ""
} |
q270083 | _block_collection | test | def _block_collection(iterator, dtype, bsize=-1):
"""Pack rdd with a specific collection constructor."""
i = 0
accumulated = []
for a in iterator:
if (bsize > 0) and (i >= bsize):
yield _pack_accumulated(accumulated, dtype)
accumulated = []
i = 0
accumulated.append(a)
i += 1
if i > 0:
yield _pack_accumulated(accumulated, dtype) | python | {
"resource": ""
} |
q270084 | _block_tuple | test | def _block_tuple(iterator, dtypes, bsize=-1):
"""Pack rdd of tuples as tuples of arrays or scipy.sparse matrices."""
i = 0
blocked_tuple = None
for tuple_i in iterator:
if blocked_tuple is None:
blocked_tuple = tuple([] for _ in range(len(tuple_i)))
if (bsize > 0) and (i >= bsize):
yield tuple(_pack_accumulated(x, dtype)
for x, dtype in zip(blocked_tuple, dtypes))
blocked_tuple = tuple([] for _ in range(len(tuple_i)))
i = 0
for x_j, x in zip(tuple_i, blocked_tuple):
x.append(x_j)
i += 1
if i > 0:
yield tuple(_pack_accumulated(x, dtype)
for x, dtype in zip(blocked_tuple, dtypes)) | python | {
"resource": ""
} |
q270085 | block | test | def block(rdd, bsize=-1, dtype=None):
"""Block an RDD
Parameters
----------
rdd : RDD
RDD of data points to block into either numpy arrays,
scipy sparse matrices, or pandas data frames.
Type of data point will be automatically inferred
and blocked accordingly.
bsize : int, optional, default None
Size of each block (number of elements), if None all data points
from each partition will be combined in a block.
Returns
-------
rdd : ArrayRDD or TupleRDD or DictRDD
The transformed rdd with added functionality
"""
try:
entry = rdd.first()
except IndexError:
# empty RDD: do not block
return rdd
# do different kinds of block depending on the type
if isinstance(entry, dict):
rdd = rdd.map(lambda x: list(x.values()))
return DictRDD(rdd, list(entry.keys()), bsize, dtype)
elif isinstance(entry, tuple):
return DictRDD(rdd, bsize=bsize, dtype=dtype)
elif sp.issparse(entry):
return SparseRDD(rdd, bsize)
elif isinstance(entry, np.ndarray):
return ArrayRDD(rdd, bsize)
else:
return BlockRDD(rdd, bsize, dtype) | python | {
"resource": ""
} |
q270086 | BlockRDD.transform | test | def transform(self, fn, dtype=None, *args, **kwargs):
"""Equivalent to map, compatibility purpose only.
Column parameter ignored.
"""
rdd = self._rdd.map(fn)
if dtype is None:
return self.__class__(rdd, noblock=True, **self.get_params())
if dtype is np.ndarray:
return ArrayRDD(rdd, bsize=self.bsize, noblock=True)
elif dtype is sp.spmatrix:
return SparseRDD(rdd, bsize=self.bsize, noblock=True)
else:
return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True) | python | {
"resource": ""
} |
q270087 | ArrayLikeRDDMixin.shape | test | def shape(self):
"""Returns the shape of the data."""
# TODO cache
first = self.first().shape
shape = self._rdd.map(lambda x: x.shape[0]).sum()
return (shape,) + first[1:] | python | {
"resource": ""
} |
q270088 | SparseRDD.toarray | test | def toarray(self):
"""Returns the data as numpy.array from each partition."""
rdd = self._rdd.map(lambda x: x.toarray())
return np.concatenate(rdd.collect()) | python | {
"resource": ""
} |
q270089 | DictRDD.transform | test | def transform(self, fn, column=None, dtype=None):
"""Execute a transformation on a column or columns. Returns the modified
DictRDD.
Parameters
----------
f : function
The function to execute on the columns.
column : {str, list or None}
The column(s) to transform. If None is specified the method is
equivalent to map.
column : {str, list or None}
The dtype of the column(s) to transform.
Returns
-------
result : DictRDD
DictRDD with transformed column(s).
TODO: optimize
"""
dtypes = self.dtype
if column is None:
indices = list(range(len(self.columns)))
else:
if not type(column) in (list, tuple):
column = [column]
indices = [self.columns.index(c) for c in column]
if dtype is not None:
if not type(dtype) in (list, tuple):
dtype = [dtype]
dtypes = [dtype[indices.index(i)] if i in indices else t
for i, t in enumerate(self.dtype)]
def mapper(values):
result = fn(*[values[i] for i in indices])
if len(indices) == 1:
result = (result,)
elif not isinstance(result, (tuple, list)):
raise ValueError("Transformer function must return an"
" iterable!")
elif len(result) != len(indices):
raise ValueError("Transformer result's length must be"
" equal to the given columns length!")
return tuple(result[indices.index(i)] if i in indices else v
for i, v in enumerate(values))
return DictRDD(self._rdd.map(mapper),
columns=self.columns, dtype=dtypes,
bsize=self.bsize, noblock=True) | python | {
"resource": ""
} |
q270090 | bitperm | test | def bitperm(s, perm, pos):
"""Returns zero if there are no permissions for a bit of the perm. of a file. Otherwise it returns a positive value
:param os.stat_result s: os.stat(file) object
:param str perm: R (Read) or W (Write) or X (eXecute)
:param str pos: USR (USeR) or GRP (GRouP) or OTH (OTHer)
:return: mask value
:rtype: int
"""
perm = perm.upper()
pos = pos.upper()
assert perm in ['R', 'W', 'X']
assert pos in ['USR', 'GRP', 'OTH']
return s.st_mode & getattr(stat, 'S_I{}{}'.format(perm, pos)) | python | {
"resource": ""
} |
q270091 | only_root_write | test | def only_root_write(path):
"""File is only writable by root
:param str path: Path to file
:return: True if only root can write
:rtype: bool
"""
s = os.stat(path)
for ug, bp in [(s.st_uid, bitperm(s, 'w', 'usr')), (s.st_gid, bitperm(s, 'w', 'grp'))]:
# User id (is not root) and bit permission
if ug and bp:
return False
if bitperm(s, 'w', 'oth'):
return False
return True | python | {
"resource": ""
} |
q270092 | check_config | test | def check_config(file, printfn=print):
"""Command to check configuration file. Raises InvalidConfig on error
:param str file: path to config file
:param printfn: print function for success message
:return: None
"""
Config(file).read()
printfn('The configuration file "{}" is correct'.format(file)) | python | {
"resource": ""
} |
q270093 | Config.read | test | def read(self):
"""Parse and validate the config file. The read data is accessible as a dictionary in this instance
:return: None
"""
try:
data = load(open(self.file), Loader)
except (UnicodeDecodeError, YAMLError) as e:
raise InvalidConfig(self.file, '{}'.format(e))
try:
validate(data, SCHEMA)
except ValidationError as e:
raise InvalidConfig(self.file, e)
self.update(data) | python | {
"resource": ""
} |
q270094 | run_as_cmd | test | def run_as_cmd(cmd, user, shell='bash'):
"""Get the arguments to execute a command as a user
:param str cmd: command to execute
:param user: User for use
:param shell: Bash, zsh, etc.
:return: arguments
:rtype: list
"""
to_execute = get_shell(shell) + [EXECUTE_SHELL_PARAM, cmd]
if user == 'root':
return to_execute
return ['sudo', '-s', '--set-home', '-u', user] + to_execute | python | {
"resource": ""
} |
q270095 | execute_cmd | test | def execute_cmd(cmd, cwd=None, timeout=5):
"""Excecute command on thread
:param cmd: Command to execute
:param cwd: current working directory
:return: None
"""
p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
p.wait(timeout=timeout)
except subprocess.TimeoutExpired:
return None
else:
stdout, stderr = p.stdout.read(), p.stderr.read()
if sys.version_info >= (3,):
stdout, stderr = stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore')
if p.returncode:
raise ExecuteError('Error running command {}: The error code {} has returned. Stderr: {}'.format(
' '.join(cmd), p.returncode, stderr
))
else:
return stdout, stderr | python | {
"resource": ""
} |
q270096 | execute_over_ssh | test | def execute_over_ssh(cmd, ssh, cwd=None, shell='bash'):
"""Excecute command on remote machine using SSH
:param cmd: Command to execute
:param ssh: Server to connect. Port is optional
:param cwd: current working directory
:return: None
"""
port = None
parts = ssh.split(':', 1)
if len(parts) > 1 and not parts[1].isdigit():
raise InvalidConfig(extra_body='Invalid port number on ssh config: {}'.format(parts[1]))
elif len(parts) > 1:
port = parts[1]
quoted_cmd = ' '.join([x.replace("'", """'"'"'""") for x in cmd.split(' ')])
remote_cmd = ' '.join([
' '.join(get_shell(shell)), # /usr/bin/env bash
' '.join([EXECUTE_SHELL_PARAM, "'", ' '.join((['cd', cwd, ';'] if cwd else []) + [quoted_cmd]), "'"])],
)
return ['ssh', parts[0]] + (['-p', port] if port else []) + ['-C'] + [remote_cmd] | python | {
"resource": ""
} |
q270097 | ExecuteUrl.validate | test | def validate(self):
"""Check self.data. Raise InvalidConfig on error
:return: None
"""
if (self.data.get('content-type') or self.data.get('body')) and \
self.data.get('method', '').lower() not in CONTENT_TYPE_METHODS:
raise InvalidConfig(
extra_body='The body/content-type option only can be used with the {} methods. The device is {}. '
'Check the configuration file.'.format(', '.join(CONTENT_TYPE_METHODS), self.name)
)
self.data['content-type'] = CONTENT_TYPE_ALIASES.get(self.data.get('content-type'),
self.data.get('content-type'))
form_type = CONTENT_TYPE_ALIASES['form']
if self.data.get('body') and (self.data.get('content-type') or form_type) == form_type:
try:
self.data['body'] = json.loads(self.data['body'])
except JSONDecodeError:
raise InvalidConfig(
extra_body='Invalid JSON body on {} device.'.format(self.name)
) | python | {
"resource": ""
} |
q270098 | ExecuteUrlServiceBase.get_headers | test | def get_headers(self):
"""Get HTTP Headers to send. By default default_headers
:return: HTTP Headers
:rtype: dict
"""
headers = copy.copy(self.default_headers or {})
headers.update(self.data.get('headers') or {})
return headers | python | {
"resource": ""
} |
q270099 | ExecuteOwnApiBase.get_body | test | def get_body(self):
"""Return "data" value on self.data
:return: data to send
:rtype: str
"""
if self.default_body:
return self.default_body
data = self.data.get('data')
if isinstance(data, dict):
return json.dumps(data)
return data | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.