repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
blockstack/virtualchain
virtualchain/lib/ecdsalib.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/ecdsalib.py#L221-L232
def ecdsa_private_key(privkey_str=None, compressed=None):
    """
    Build an ECDSA private key object, enforcing the following rule:
    * unless the key's hex encoding specifically ends in '01', treat it as
      uncompressed.

    When `compressed` is given explicitly, it is honored as-is.
    """
    if compressed is None:
        # Auto-detect: a 66-char hex string ending in '01' marks a compressed key.
        compressed = (
            privkey_str is not None
            and len(privkey_str) == 66
            and privkey_str.endswith('01')
        )
    return _ECPrivateKey(privkey_str, compressed=compressed)
[ "def", "ecdsa_private_key", "(", "privkey_str", "=", "None", ",", "compressed", "=", "None", ")", ":", "if", "compressed", "is", "None", ":", "compressed", "=", "False", "if", "privkey_str", "is", "not", "None", ":", "if", "len", "(", "privkey_str", ")", ...
Make a private key, but enforce the following rule: * unless the key's hex encoding specifically ends in '01', treat it as uncompressed.
[ "Make", "a", "private", "key", "but", "enforce", "the", "following", "rule", ":", "*", "unless", "the", "key", "s", "hex", "encoding", "specifically", "ends", "in", "01", "treat", "it", "as", "uncompressed", "." ]
python
train
saltstack/salt
salt/modules/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutron.py#L1462-L1489
def create_firewall_rule(protocol, action, profile=None, **kwargs):
    '''
    Create a new firewall rule

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_firewall_rule protocol action
                tenant_id=TENANT_ID name=NAME description=DESCRIPTION ip_version=IP_VERSION
                source_ip_address=SOURCE_IP_ADDRESS destination_ip_address=DESTINATION_IP_ADDRESS source_port=SOURCE_PORT
                destination_port=DESTINATION_PORT shared=SHARED enabled=ENABLED

    :param protocol: Protocol for the firewall rule, choose "tcp","udp","icmp" or "None".
    :param action: Action for the firewall rule, choose "allow" or "deny".
    :param tenant_id: The owner tenant ID. (Optional)
    :param name: Name for the firewall rule. (Optional)
    :param description: Description for the firewall rule. (Optional)
    :param ip_version: IP protocol version, default: 4. (Optional)
    :param source_ip_address: Source IP address or subnet. (Optional)
    :param destination_ip_address: Destination IP address or subnet. (Optional)
    :param source_port: Source port (integer in [1, 65535] or range in a:b). (Optional)
    :param destination_port: Destination port (integer in [1, 65535] or range in a:b). (Optional)
    :param shared: Set shared to True, default: False. (Optional)
    :param enabled: To enable this rule, default: True. (Optional)
    '''
    # Authenticate against the configured profile and delegate straight to
    # the neutron client; all optional rule attributes ride along in kwargs.
    return _auth(profile).create_firewall_rule(protocol, action, **kwargs)
[ "def", "create_firewall_rule", "(", "protocol", ",", "action", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "create_firewall_rule", "(", "protocol", ",", "action", ",", ...
Creates a new firewall rule CLI Example: .. code-block:: bash salt '*' neutron.create_firewall_rule protocol action tenant_id=TENANT_ID name=NAME description=DESCRIPTION ip_version=IP_VERSION source_ip_address=SOURCE_IP_ADDRESS destination_ip_address=DESTINATION_IP_ADDRESS source_port=SOURCE_PORT destination_port=DESTINATION_PORT shared=SHARED enabled=ENABLED :param protocol: Protocol for the firewall rule, choose "tcp","udp","icmp" or "None". :param action: Action for the firewall rule, choose "allow" or "deny". :param tenant_id: The owner tenant ID. (Optional) :param name: Name for the firewall rule. (Optional) :param description: Description for the firewall rule. (Optional) :param ip_version: IP protocol version, default: 4. (Optional) :param source_ip_address: Source IP address or subnet. (Optional) :param destination_ip_address: Destination IP address or subnet. (Optional) :param source_port: Source port (integer in [1, 65535] or range in a:b). (Optional) :param destination_port: Destination port (integer in [1, 65535] or range in a:b). (Optional) :param shared: Set shared to True, default: False. (Optional) :param enabled: To enable this rule, default: True. (Optional)
[ "Creates", "a", "new", "firewall", "rule" ]
python
train
Qiskit/qiskit-terra
qiskit/qobj/converters/pulse_instruction.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qobj/converters/pulse_instruction.py#L160-L175
def convert_frame_change(self, shift, instruction):
    """Return converted `FrameChangeInstruction`.

    Args:
        shift (int): Offset time.
        instruction (FrameChangeInstruction): Frame change instruction.

    Returns:
        dict: Dictionary of required parameters.
    """
    # 't0' is the absolute start time: the caller-supplied offset plus the
    # instruction's own start time; the frame change targets the first channel.
    return self._qobj_model(
        name='fc',
        t0=shift + instruction.start_time,
        ch=instruction.channels[0].name,
        phase=instruction.command.phase,
    )
[ "def", "convert_frame_change", "(", "self", ",", "shift", ",", "instruction", ")", ":", "command_dict", "=", "{", "'name'", ":", "'fc'", ",", "'t0'", ":", "shift", "+", "instruction", ".", "start_time", ",", "'ch'", ":", "instruction", ".", "channels", "["...
Return converted `FrameChangeInstruction`. Args: shift(int): Offset time. instruction (FrameChangeInstruction): frame change instruction. Returns: dict: Dictionary of required parameters.
[ "Return", "converted", "FrameChangeInstruction", "." ]
python
test
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/core/scripts.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/scripts.py#L674-L744
def save_image_to_disk(self, filename_1 = None, filename_2 = None):
    """
    Creates images using the script's plot function and writes them to disk.

    For single plots (plot_type: 'main', 'aux'):
        - if no filename provided take default name
    For double plots (plot_type: 'main', 'aux'):
        - if no filenames provided take default names
        - if only one filename provided save only the plot for which a name
          is provided
    Args:
        filename_1: filename for figure 1
        filename_2: filename for figure 2

    Returns: None

    """
    def axes_empty(ax):
        """
        Takes a list of axes objects and checks if all of them are empty.
        An axes object is considered empty if it doesn't contain any of the
        following:
            - lines
            - images
            - patches
        Returns: True when every axes in `ax` is empty (or `ax` is None/empty)
        """
        is_empty = True
        if ax is not None and len(ax)>0:
            for a in ax:
                if len(a.lines)+len(a.images)+len(a.patches) != 0:
                    is_empty = False
        return is_empty

    # create and save images
    # NOTE(review): both defaults are always filled in here, so the later
    # `is not None` checks before savefig can never be False — the docstring's
    # "save only the plot for which a name is provided" case looks unreachable.
    if (filename_1 is None):
        filename_1 = self.filename('-plt1.png')
    if (filename_2 is None):
        filename_2 = self.filename('-plt2.png')

    # windows can't deal with long filenames so we have to use the prefix '\\\\?\\'
    # if len(filename_1.split('\\\\?\\')) == 1:
    #     filename_1 = '\\\\?\\' + filename_1
    # if len(filename_2.split('\\\\?\\')) == 1:
    #     filename_2 = '\\\\?\\' + filename_2

    # make sure the target directories exist before writing
    if os.path.exists(os.path.dirname(filename_1)) is False:
        os.makedirs(os.path.dirname(filename_1))
    if os.path.exists(os.path.dirname(filename_2)) is False:
        os.makedirs(os.path.dirname(filename_2))

    # constructing the canvas attaches it to the figure, which is what makes
    # savefig below possible without a GUI backend (canvas vars are otherwise unused)
    fig_1 = Figure()
    canvas_1 = FigureCanvas(fig_1)
    fig_2 = Figure()
    canvas_2 = FigureCanvas(fig_2)

    # refresh script state, then let the script draw into both figures
    self.force_update()
    self.plot([fig_1, fig_2])

    # only write figures that actually received content
    if filename_1 is not None and not axes_empty(fig_1.axes):
        fig_1.savefig(filename_1)
    if filename_2 is not None and not axes_empty(fig_2.axes):
        fig_2.savefig(filename_2)
[ "def", "save_image_to_disk", "(", "self", ",", "filename_1", "=", "None", ",", "filename_2", "=", "None", ")", ":", "def", "axes_empty", "(", "ax", ")", ":", "\"\"\"\n takes an axes object and checks if it is empty\n the axes object is considered empty i...
creates an image using the scripts plot function and writes it to the disk for single plots (plot_type: 'main', 'aux') - if no filname provided take default name for double plots (plot_type: 'main', 'aux') - if no filnames provided take default name - if only one filname provided save only the plot for which name is provided Args: filename_1: filname for figure 1 filename_2: filname for figure 1 Returns: None
[ "creates", "an", "image", "using", "the", "scripts", "plot", "function", "and", "writes", "it", "to", "the", "disk", "for", "single", "plots", "(", "plot_type", ":", "main", "aux", ")", "-", "if", "no", "filname", "provided", "take", "default", "name", "...
python
train
wandb/client
wandb/vendor/prompt_toolkit/shortcuts.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/shortcuts.py#L129-L145
def create_asyncio_eventloop(loop=None):
    """
    Returns an asyncio :class:`~prompt_toolkit.eventloop.EventLoop` instance
    for usage in a :class:`~prompt_toolkit.interface.CommandLineInterface`. It
    is a wrapper around an asyncio loop.

    :param loop: The asyncio eventloop (or `None` if the default asyncioloop
                 should be used.)
    """
    # Import lazily so that merely loading this module keeps working on
    # Python 2, where asyncio is not available.
    if is_windows():
        from prompt_toolkit.eventloop.asyncio_win32 import Win32AsyncioEventLoop
        loop_cls = Win32AsyncioEventLoop
    else:
        from prompt_toolkit.eventloop.asyncio_posix import PosixAsyncioEventLoop
        loop_cls = PosixAsyncioEventLoop
    return loop_cls(loop)
[ "def", "create_asyncio_eventloop", "(", "loop", "=", "None", ")", ":", "# Inline import, to make sure the rest doesn't break on Python 2. (Where", "# asyncio is not available.)", "if", "is_windows", "(", ")", ":", "from", "prompt_toolkit", ".", "eventloop", ".", "asyncio_win3...
Returns an asyncio :class:`~prompt_toolkit.eventloop.EventLoop` instance for usage in a :class:`~prompt_toolkit.interface.CommandLineInterface`. It is a wrapper around an asyncio loop. :param loop: The asyncio eventloop (or `None` if the default asyncioloop should be used.)
[ "Returns", "an", "asyncio", ":", "class", ":", "~prompt_toolkit", ".", "eventloop", ".", "EventLoop", "instance", "for", "usage", "in", "a", ":", "class", ":", "~prompt_toolkit", ".", "interface", ".", "CommandLineInterface", ".", "It", "is", "a", "wrapper", ...
python
train
grahame/sedge
sedge/cli.py
https://github.com/grahame/sedge/blob/60dc6a0c5ef3bf802fe48a2571a8524a6ea33878/sedge/cli.py#L112-L128
def init(config):
    """
    Initialise ~./sedge/config file if none exists. Good for first time sedge usage
    """
    from pkg_resources import resource_stream
    import shutil

    target_path = Path(config.config_file)
    # Guard: never clobber an existing config.
    if target_path.is_file():
        click.echo('{} already exists, maybe you want $ sedge update'.format(target_path))
        sys.exit()
    target_path.parent.mkdir(parents=True, exist_ok=True)
    # Copy the packaged template into place as the initial config.
    with resource_stream(__name__, 'sedge_template.conf') as template, \
            open(config.config_file, 'wb') as out:
        shutil.copyfileobj(template, out)
[ "def", "init", "(", "config", ")", ":", "from", "pkg_resources", "import", "resource_stream", "import", "shutil", "config_file", "=", "Path", "(", "config", ".", "config_file", ")", "if", "config_file", ".", "is_file", "(", ")", ":", "click", ".", "echo", ...
Initialise ~./sedge/config file if none exists. Good for first time sedge usage
[ "Initialise", "~", ".", "/", "sedge", "/", "config", "file", "if", "none", "exists", ".", "Good", "for", "first", "time", "sedge", "usage" ]
python
train
klahnakoski/mo-logs
mo_logs/strings.py
https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/strings.py#L442-L454
def comma(value):
    """
    FORMAT WITH THOUSANDS COMMA (,) SEPARATOR
    """
    try:
        as_float = float(value)
        if as_float == _round(as_float, 0):
            # Whole number: render without a fractional part.
            return "{:,}".format(int(value))
        return "{:,}".format(as_float)
    except Exception:
        # Not numeric: fall back to its text form.
        return text_type(value)
[ "def", "comma", "(", "value", ")", ":", "try", ":", "if", "float", "(", "value", ")", "==", "_round", "(", "float", "(", "value", ")", ",", "0", ")", ":", "output", "=", "\"{:,}\"", ".", "format", "(", "int", "(", "value", ")", ")", "else", ":"...
FORMAT WITH THOUSANDS COMMA (,) SEPARATOR
[ "FORMAT", "WITH", "THOUSANDS", "COMMA", "(", ")", "SEPARATOR" ]
python
train
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L666-L686
def sizeOfOverlap(self, e):
    """
    Get the size of the overlap between self and e.

    :return: the number of bases that are shared in common between self and e.
    """
    # disjoint intervals share nothing
    if not self.intersects(e):
        return 0

    # one interval entirely inside the other
    if e.start >= self.start and e.end <= self.end:
        return len(e)
    if self.start >= e.start and self.end <= e.end:
        return len(self)

    # partial overlap: exactly one end of each interval lies inside the other
    if e.start > self.start:
        return self.end - e.start
    if self.start > e.start:
        return e.end - self.start
[ "def", "sizeOfOverlap", "(", "self", ",", "e", ")", ":", "# no overlap", "if", "not", "self", ".", "intersects", "(", "e", ")", ":", "return", "0", "# complete inclusion..", "if", "e", ".", "start", ">=", "self", ".", "start", "and", "e", ".", "end", ...
Get the size of the overlap between self and e. :return: the number of bases that are shared in common between self and e.
[ "Get", "the", "size", "of", "the", "overlap", "between", "self", "and", "e", "." ]
python
train
mikedh/trimesh
trimesh/scene/scene.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/scene.py#L859-L887
def show(self, viewer=None, **kwargs):
    """
    Display the current scene.

    Parameters
    -----------
    viewer: str
      'gl': open a pyglet window
      str,'notebook': return ipython.display.HTML
      None: automatically pick based on whether or not
            we are in an ipython notebook
    smooth : bool
      Turn on or off automatic smooth shading
    """
    if viewer is None:
        # pick a viewer automatically based on the environment
        from ..viewer import in_notebook
        viewer = 'notebook' if in_notebook() else 'gl'

    if viewer == 'gl':
        # this imports pyglet and will raise ImportError if unavailable
        from ..viewer import SceneViewer
        return SceneViewer(self, **kwargs)
    if viewer == 'notebook':
        from ..viewer import scene_to_notebook
        return scene_to_notebook(self, **kwargs)
    raise ValueError('viewer must be "gl", "notebook", or None')
[ "def", "show", "(", "self", ",", "viewer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "viewer", "is", "None", ":", "# check to see if we are in a notebook or not", "from", ".", ".", "viewer", "import", "in_notebook", "viewer", "=", "[", "'gl'", ...
Display the current scene. Parameters ----------- viewer: str 'gl': open a pyglet window str,'notebook': return ipython.display.HTML None: automatically pick based on whether or not we are in an ipython notebook smooth : bool Turn on or off automatic smooth shading
[ "Display", "the", "current", "scene", "." ]
python
train
jaegertracing/jaeger-client-python
jaeger_client/config.py
https://github.com/jaegertracing/jaeger-client-python/blob/06face094757c645a6d81f0e073c001931a22a05/jaeger_client/config.py#L358-L403
def new_tracer(self, io_loop=None):
    """
    Create a new Jaeger Tracer based on the passed `jaeger_client.Config`.
    Does not set `opentracing.tracer` global variable.

    :param io_loop: optional IO loop handed to the local agent channel.
    :return: result of `self.create_tracer` wired with reporter, sampler
        and throttler built from this config.
    """
    # All components below talk to the agent through this shared channel.
    channel = self._create_local_agent_channel(io_loop=io_loop)

    # Use the explicitly configured sampler when present; otherwise fall
    # back to a sampler whose strategy is fetched remotely over `channel`.
    sampler = self.sampler
    if not sampler:
        sampler = RemoteControlledSampler(
            channel=channel,
            service_name=self.service_name,
            logger=logger,
            metrics_factory=self._metrics_factory,
            error_reporter=self.error_reporter,
            sampling_refresh_interval=self.sampling_refresh_interval,
            max_operations=self.max_operations)
    logger.info('Using sampler %s', sampler)

    # Reporter batches finished spans and ships them via the channel.
    reporter = Reporter(
        channel=channel,
        queue_capacity=self.reporter_queue_size,
        batch_size=self.reporter_batch_size,
        flush_interval=self.reporter_flush_interval,
        logger=logger,
        metrics_factory=self._metrics_factory,
        error_reporter=self.error_reporter)

    if self.logging:
        # Also echo spans to the logger, in addition to the real reporter.
        reporter = CompositeReporter(reporter, LoggingReporter(logger))

    # Throttling of debug spans is optional; only build the remote
    # throttler when a throttler group is configured.
    if not self.throttler_group() is None:
        throttler = RemoteThrottler(
            channel,
            self.service_name,
            refresh_interval=self.throttler_refresh_interval,
            logger=logger,
            metrics_factory=self._metrics_factory,
            error_reporter=self.error_reporter)
    else:
        throttler = None

    return self.create_tracer(
        reporter=reporter,
        sampler=sampler,
        throttler=throttler,
    )
[ "def", "new_tracer", "(", "self", ",", "io_loop", "=", "None", ")", ":", "channel", "=", "self", ".", "_create_local_agent_channel", "(", "io_loop", "=", "io_loop", ")", "sampler", "=", "self", ".", "sampler", "if", "not", "sampler", ":", "sampler", "=", ...
Create a new Jaeger Tracer based on the passed `jaeger_client.Config`. Does not set `opentracing.tracer` global variable.
[ "Create", "a", "new", "Jaeger", "Tracer", "based", "on", "the", "passed", "jaeger_client", ".", "Config", ".", "Does", "not", "set", "opentracing", ".", "tracer", "global", "variable", "." ]
python
train
bioasp/caspo
caspo/core/clamping.py
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L303-L324
def drop_literals(self, literals):
    """
    Returns a new list of clampings without the given literals

    Parameters
    ----------
    literals : iterable[:class:`caspo.core.literal.Literal`]
        Iterable of literals to be removed from each clamping

    Returns
    -------
    caspo.core.clamping.ClampingList
        The new list of clampings
    """
    # Drop the literals from every clamping, discarding clampings that
    # end up empty.
    reduced = (clamping.drop_literals(literals) for clamping in self)
    return ClampingList([c for c in reduced if len(c) > 0])
[ "def", "drop_literals", "(", "self", ",", "literals", ")", ":", "clampings", "=", "[", "]", "for", "clamping", "in", "self", ":", "c", "=", "clamping", ".", "drop_literals", "(", "literals", ")", "if", "len", "(", "c", ")", ">", "0", ":", "clampings"...
Returns a new list of clampings without the given literals Parameters ---------- literals : iterable[:class:`caspo.core.literal.Literal`] Iterable of literals to be removed from each clamping Returns ------- caspo.core.clamping.ClampingList The new list of clampings
[ "Returns", "a", "new", "list", "of", "clampings", "without", "the", "given", "literals" ]
python
train
edx/pa11ycrawler
pa11ycrawler/html.py
https://github.com/edx/pa11ycrawler/blob/fc672d4524463bc050ade4c7c97801c0d5bf8c9e/pa11ycrawler/html.py#L91-L158
def render_html(data_dir, output_dir):
    """
    The main workhorse of this script. Finds all the JSON data files from
    pa11ycrawler, and transforms them into HTML files via Jinja2 templating.

    :param data_dir: path object whose `*.json` files hold per-page pa11y data
    :param output_dir: path object the rendered HTML files are written under
    """
    env = Environment(loader=PackageLoader('pa11ycrawler', 'templates'))
    # expose the WCAG reference table to every template
    env.globals["wcag_refs"] = wcag_refs

    pages = []
    counter = collections.Counter()
    # violation type -> {violation_id -> violation dict (with 'pages' list)}
    grouped_violations = collections.defaultdict(dict)

    # render detail templates
    for data_file in data_dir.files('*.json'):
        data = json.load(data_file.open())
        num_error, num_warning, num_notice = pa11y_counts(data['pa11y'])
        data["num_error"] = num_error
        data["num_warning"] = num_warning
        data["num_notice"] = num_notice
        fname = data_file.namebase + ".html"
        html_path = output_dir / fname
        render_template(env, html_path, 'detail.html', data)
        data["filename"] = fname
        pages.append(data)
        for violation in data['pa11y']:
            # de-duplicate violations across pages by (selector, code)
            violation_id = hashlib.md5(
                (violation['selector'] + violation['code']).encode('utf-8')
            ).hexdigest()
            if violation_id not in grouped_violations[violation['type']]:
                # first sighting: register the violation and count it once
                violation['pages'] = []
                grouped_violations[violation['type']][violation_id] = violation
                counter[violation['type']] += 1
            # every sighting records which page it occurred on
            grouped_violations[violation['type']][violation_id]['pages'].append({
                'url': data['url'],
                'page_title': data['page_title']
            })

    def extract_nums(page):
        "Used to sort pages by violation counts"
        return (
            page["num_error"],
            page["num_warning"],
            page["num_notice"],
        )

    # index page: all pages, worst offenders first
    index_path = output_dir / INDEX_TEMPLATE
    render_template(env, index_path, INDEX_TEMPLATE, {
        "pages": sorted(pages, key=extract_nums, reverse=True),
        "num_error": counter["error"],
        "num_warning": counter["warning"],
        "num_notice": counter["notice"]
    })

    # one summary page per violation type (errors.html, warnings.html, ...),
    # each listing its unique violations ordered by how many pages they hit
    for violation_type in grouped_violations:
        unique_path = output_dir / u'{}s.html'.format(violation_type)
        render_template(env, unique_path, UNIQUE_TEMPLATE, {
            "grouped_violations": sorted(
                grouped_violations[violation_type].values(),
                key=lambda item: len(item['pages']),
                reverse=True
            ),
            "current_type": violation_type,
            "violation_counts": counter
        })
[ "def", "render_html", "(", "data_dir", ",", "output_dir", ")", ":", "env", "=", "Environment", "(", "loader", "=", "PackageLoader", "(", "'pa11ycrawler'", ",", "'templates'", ")", ")", "env", ".", "globals", "[", "\"wcag_refs\"", "]", "=", "wcag_refs", "page...
The main workhorse of this script. Finds all the JSON data files from pa11ycrawler, and transforms them into HTML files via Jinja2 templating.
[ "The", "main", "workhorse", "of", "this", "script", ".", "Finds", "all", "the", "JSON", "data", "files", "from", "pa11ycrawler", "and", "transforms", "them", "into", "HTML", "files", "via", "Jinja2", "templating", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L3251-L3276
def get_groups_of_account_user(self, account_id, user_id, **kwargs):  # noqa: E501
    """Get groups of the user.  # noqa: E501

    An endpoint for retrieving groups of the user.   **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True
    >>> thread = api.get_groups_of_account_user(account_id, user_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str user_id: The ID of the user whose details are retrieved. (required)
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently supported: total_count
    :return: GroupSummaryList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the lower layer for the data only (no status/headers).
    kwargs['_return_http_data_only'] = True
    # The underlying call is identical in both modes: with asynchronous=True
    # (still present in kwargs) it returns the request thread, otherwise it
    # returns the resolved data directly.
    return self.get_groups_of_account_user_with_http_info(account_id, user_id, **kwargs)  # noqa: E501
[ "def", "get_groups_of_account_user", "(", "self", ",", "account_id", ",", "user_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", ...
Get groups of the user. # noqa: E501 An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_groups_of_account_user(account_id, user_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str user_id: The ID of the user whose details are retrieved. (required) :param int limit: The number of results to return (2-1000), default is 50. :param str after: The entity ID to fetch after the given one. :param str order: The order of the records based on creation time, ASC or DESC; by default ASC :param str include: Comma separated additional data to return. Currently supported: total_count :return: GroupSummaryList If the method is called asynchronously, returns the request thread.
[ "Get", "groups", "of", "the", "user", ".", "#", "noqa", ":", "E501" ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/eventloop/posix.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/eventloop/posix.py#L228-L249
def call_from_executor(self, callback, _max_postpone_until=None):
    """
    Call this function in the main event loop. Similar to Twisted's
    ``callFromThread``.

    :param _max_postpone_until: `None` or `time.time` value. For interal use.
        If the eventloop is saturated, consider this task to be low priority
        and postpone maximum until this timestamp. (For instance, repaint is
        done using low priority.)
    """
    assert _max_postpone_until is None or isinstance(_max_postpone_until, float)

    self._calls_from_executor.append((callback, _max_postpone_until))
    if not self._schedule_pipe:
        return
    try:
        # Wake the main loop by writing one byte into the schedule pipe.
        os.write(self._schedule_pipe[1], b'x')
    except (AttributeError, IndexError, OSError):
        # Race with another thread:
        # - `_schedule_pipe` could have become None in the meantime.
        # - OSError (actually BrokenPipeError) if the main thread already
        #   closed the pipe.
        pass
[ "def", "call_from_executor", "(", "self", ",", "callback", ",", "_max_postpone_until", "=", "None", ")", ":", "assert", "_max_postpone_until", "is", "None", "or", "isinstance", "(", "_max_postpone_until", ",", "float", ")", "self", ".", "_calls_from_executor", "."...
Call this function in the main event loop. Similar to Twisted's ``callFromThread``. :param _max_postpone_until: `None` or `time.time` value. For interal use. If the eventloop is saturated, consider this task to be low priority and postpone maximum until this timestamp. (For instance, repaint is done using low priority.)
[ "Call", "this", "function", "in", "the", "main", "event", "loop", ".", "Similar", "to", "Twisted", "s", "callFromThread", "." ]
python
train
Clinical-Genomics/scout
scout/adapter/mongo/hgnc.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hgnc.py#L27-L45
def load_hgnc_bulk(self, gene_objs):
    """Insert a batch of hgnc gene objects in a single bulk operation.

    Raises IntegrityError if there are any write concerns

    Args:
        gene_objs(iterable(scout.models.hgnc_gene))

    Returns:
        result (pymongo.results.InsertManyResult)
    """
    LOG.info("Loading gene bulk with length %s", len(gene_objs))
    try:
        return self.hgnc_collection.insert_many(gene_objs)
    except (DuplicateKeyError, BulkWriteError) as err:
        # Surface pymongo write failures as the adapter's own error type.
        raise IntegrityError(err)
[ "def", "load_hgnc_bulk", "(", "self", ",", "gene_objs", ")", ":", "LOG", ".", "info", "(", "\"Loading gene bulk with length %s\"", ",", "len", "(", "gene_objs", ")", ")", "try", ":", "result", "=", "self", ".", "hgnc_collection", ".", "insert_many", "(", "ge...
Load a bulk of hgnc gene objects Raises IntegrityError if there are any write concerns Args: gene_objs(iterable(scout.models.hgnc_gene)) Returns: result (pymongo.results.InsertManyResult)
[ "Load", "a", "bulk", "of", "hgnc", "gene", "objects", "Raises", "IntegrityError", "if", "there", "are", "any", "write", "concerns" ]
python
test
lmjohns3/theanets
theanets/trainer.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/trainer.py#L93-L158
def itertrain(self, train, valid=None, **kwargs):
    '''Train a model using a training and validation set.

    This method yields a series of monitor values to the caller. After every
    iteration, a pair of monitor dictionaries is generated: one evaluated on
    the training dataset, and another evaluated on the validation dataset.
    The validation monitors might not be updated during every training
    iteration; in this case, the most recent validation monitors will be
    yielded along with the training monitors.

    Parameters
    ----------
    train : :class:`Dataset <theanets.dataset.Dataset>`
        A set of training data for computing updates to model parameters.
    valid : :class:`Dataset <theanets.dataset.Dataset>`
        A set of validation data for computing monitor values and
        determining when the loss has stopped improving.

    Yields
    ------
    training : dict
        A dictionary mapping monitor names to values, evaluated on the
        training dataset.
    validation : dict
        A dictionary containing monitor values evaluated on the validation
        dataset.
    '''
    ifci = itertools.chain.from_iterable

    # Datasets may yield bare arrays or (input, ..., target) tuples;
    # `first` picks the input part, `last` the target part.
    def first(x):
        return x[0] if isinstance(x, (tuple, list)) else x

    def last(x):
        return x[-1] if isinstance(x, (tuple, list)) else x

    # Infer input/output dimensionality from the last batch seen.
    odim = idim = None
    for t in train:
        idim = first(t).shape[-1]
        odim = last(t).shape[-1]

    # Accept an RNG instance, a seed int, or nothing.
    rng = kwargs.get('rng')
    if rng is None or isinstance(rng, int):
        rng = np.random.RandomState(rng)

    # set output (decoding) weights on the network: initialize from a
    # reservoir sample of target vectors, normalized row-wise.
    samples = ifci(last(t) for t in train)
    for param in self.network.layers[-1].params:
        shape = param.get_value(borrow=True).shape
        if len(shape) == 2 and shape[1] == odim:
            arr = np.vstack(SampleTrainer.reservoir(samples, shape[0], rng))
            util.log('setting {}: {}', param.name, shape)
            param.set_value(arr / np.sqrt((arr * arr).sum(axis=1))[:, None])

    # set input (encoding) weights on the network: sample input vectors,
    # then feed forward to re-sample activations for each successive layer.
    samples = ifci(first(t) for t in train)
    for layer in self.network.layers:
        for param in layer.params:
            shape = param.get_value(borrow=True).shape
            if len(shape) == 2 and shape[0] == idim:
                arr = np.vstack(SampleTrainer.reservoir(samples, shape[1], rng)).T
                util.log('setting {}: {}', param.name, shape)
                param.set_value(arr / np.sqrt((arr * arr).sum(axis=0)))
                # NOTE(review): `i` is not bound anywhere in this function as
                # shown — this loop presumably should read
                # `for i, layer in enumerate(self.network.layers):`; as
                # written this line would raise NameError. Confirm upstream.
                samples = ifci(self.network.feed_forward(
                    first(t))[i-1] for t in train)

    yield dict(loss=0), dict(loss=0)
[ "def", "itertrain", "(", "self", ",", "train", ",", "valid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ifci", "=", "itertools", ".", "chain", ".", "from_iterable", "def", "first", "(", "x", ")", ":", "return", "x", "[", "0", "]", "if", "is...
Train a model using a training and validation set. This method yields a series of monitor values to the caller. After every iteration, a pair of monitor dictionaries is generated: one evaluated on the training dataset, and another evaluated on the validation dataset. The validation monitors might not be updated during every training iteration; in this case, the most recent validation monitors will be yielded along with the training monitors. Parameters ---------- train : :class:`Dataset <theanets.dataset.Dataset>` A set of training data for computing updates to model parameters. valid : :class:`Dataset <theanets.dataset.Dataset>` A set of validation data for computing monitor values and determining when the loss has stopped improving. Yields ------ training : dict A dictionary mapping monitor names to values, evaluated on the training dataset. validation : dict A dictionary containing monitor values evaluated on the validation dataset.
[ "Train", "a", "model", "using", "a", "training", "and", "validation", "set", "." ]
python
test
Xion/taipan
taipan/algorithms.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/algorithms.py#L103-L115
def iterate(iterator, n=None):
    """Efficiently advances the iterator N times; by default goes to its end.

    The actual loop is done "in C" and hence it is faster than equivalent 'for'.

    :param n: How much the iterator should be advanced.
              If None, it will be advanced until the end.
    """
    ensure_iterable(iterator)
    if n is not None:
        # islice positioned at index n yields nothing until n items have
        # been consumed; `next` with a default swallows StopIteration.
        next(islice(iterator, n, n), None)
    else:
        # A zero-length deque exhausts the iterator at C speed without
        # storing anything.
        deque(iterator, maxlen=0)
[ "def", "iterate", "(", "iterator", ",", "n", "=", "None", ")", ":", "ensure_iterable", "(", "iterator", ")", "if", "n", "is", "None", ":", "deque", "(", "iterator", ",", "maxlen", "=", "0", ")", "else", ":", "next", "(", "islice", "(", "iterator", ...
Efficiently advances the iterator N times; by default goes to its end. The actual loop is done "in C" and hence it is faster than equivalent 'for'. :param n: How much the iterator should be advanced. If None, it will be advanced until the end.
[ "Efficiently", "advances", "the", "iterator", "N", "times", ";", "by", "default", "goes", "to", "its", "end", "." ]
python
train
KelSolaar/Umbra
umbra/components/addons/projects_explorer/projects_explorer.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/addons/projects_explorer/projects_explorer.py#L965-L982
def __rename_file(self, source, target): """ Renames a file using given source and target names. :param source: Source file. :type source: unicode :param target: Target file. :type target: unicode """ for file_node in self.__script_editor.model.get_file_nodes(source, self.__script_editor.model.root_node): self.__script_editor.unregister_node_path(file_node) self.__rename_path(source, target) self.__script_editor.register_node_path(file_node) if self.__script_editor.model.is_authoring_node(file_node): self.__set_authoring_nodes(source, target) else: self.__script_editor.model.update_project_nodes(file_node.parent)
[ "def", "__rename_file", "(", "self", ",", "source", ",", "target", ")", ":", "for", "file_node", "in", "self", ".", "__script_editor", ".", "model", ".", "get_file_nodes", "(", "source", ",", "self", ".", "__script_editor", ".", "model", ".", "root_node", ...
Renames a file using given source and target names. :param source: Source file. :type source: unicode :param target: Target file. :type target: unicode
[ "Renames", "a", "file", "using", "given", "source", "and", "target", "names", "." ]
python
train
rocky/python-filecache
pyficache/main.py
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L292-L307
def cache_file(filename, reload_on_change=False, opts=default_opts): """Cache filename if it is not already cached. Return the expanded filename for it in the cache or nil if we can not find the file.""" filename = pyc2py(filename) if filename in file_cache: if reload_on_change: checkcache(filename) pass else: opts['use_linecache_lines'] = True update_cache(filename, opts) pass if filename in file_cache: return file_cache[filename].path else: return None return
[ "def", "cache_file", "(", "filename", ",", "reload_on_change", "=", "False", ",", "opts", "=", "default_opts", ")", ":", "filename", "=", "pyc2py", "(", "filename", ")", "if", "filename", "in", "file_cache", ":", "if", "reload_on_change", ":", "checkcache", ...
Cache filename if it is not already cached. Return the expanded filename for it in the cache or nil if we can not find the file.
[ "Cache", "filename", "if", "it", "is", "not", "already", "cached", ".", "Return", "the", "expanded", "filename", "for", "it", "in", "the", "cache", "or", "nil", "if", "we", "can", "not", "find", "the", "file", "." ]
python
train
a2liu/mr-clean
mr_clean/core/functions/basics.py
https://github.com/a2liu/mr-clean/blob/0ee4ee5639f834dec4b59b94442fa84373f3c176/mr_clean/core/functions/basics.py#L140-L155
def col_to_cat(df,col_name, dest = False): """ Coerces a column in a DataFrame to categorical Parameters: df - DataFrame DataFrame to operate on col_name - string Name of column to coerce dest - bool, default False Whether to apply the result to the DataFrame or return it. True is apply, False is return. """ new_col = df[col_name].astype('category') if dest: set_col(df,col_name,new_col) else: return new_col
[ "def", "col_to_cat", "(", "df", ",", "col_name", ",", "dest", "=", "False", ")", ":", "new_col", "=", "df", "[", "col_name", "]", ".", "astype", "(", "'category'", ")", "if", "dest", ":", "set_col", "(", "df", ",", "col_name", ",", "new_col", ")", ...
Coerces a column in a DataFrame to categorical Parameters: df - DataFrame DataFrame to operate on col_name - string Name of column to coerce dest - bool, default False Whether to apply the result to the DataFrame or return it. True is apply, False is return.
[ "Coerces", "a", "column", "in", "a", "DataFrame", "to", "categorical", "Parameters", ":", "df", "-", "DataFrame", "DataFrame", "to", "operate", "on", "col_name", "-", "string", "Name", "of", "column", "to", "coerce", "dest", "-", "bool", "default", "False", ...
python
train
MartinThoma/hwrt
hwrt/train.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/train.py#L111-L119
def train_model(model_folder): """Train the model in ``model_folder``.""" os.chdir(model_folder) training = generate_training_command(model_folder) if training is None: return -1 logging.info(training) os.chdir(model_folder) os.system(training)
[ "def", "train_model", "(", "model_folder", ")", ":", "os", ".", "chdir", "(", "model_folder", ")", "training", "=", "generate_training_command", "(", "model_folder", ")", "if", "training", "is", "None", ":", "return", "-", "1", "logging", ".", "info", "(", ...
Train the model in ``model_folder``.
[ "Train", "the", "model", "in", "model_folder", "." ]
python
train
mongodb/mongo-python-driver
pymongo/message.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L813-L821
def _delete_uncompressed( collection_name, spec, safe, last_error_args, opts, flags=0): """Internal delete message helper.""" op_delete, max_bson_size = _delete(collection_name, spec, opts, flags) rid, msg = __pack_message(2006, op_delete) if safe: rid, gle, _ = __last_error(collection_name, last_error_args) return rid, msg + gle, max_bson_size return rid, msg, max_bson_size
[ "def", "_delete_uncompressed", "(", "collection_name", ",", "spec", ",", "safe", ",", "last_error_args", ",", "opts", ",", "flags", "=", "0", ")", ":", "op_delete", ",", "max_bson_size", "=", "_delete", "(", "collection_name", ",", "spec", ",", "opts", ",", ...
Internal delete message helper.
[ "Internal", "delete", "message", "helper", "." ]
python
train
apache/incubator-heron
heron/tools/common/src/python/access/heron_api.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/common/src/python/access/heron_api.py#L211-L225
def get_logical_plan(cluster, environ, topology, role=None): ''' Get the logical plan state of a topology in a cluster :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(LOGICALPLAN_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
[ "def", "get_logical_plan", "(", "cluster", ",", "environ", ",", "topology", ",", "role", "=", "None", ")", ":", "params", "=", "dict", "(", "cluster", "=", "cluster", ",", "environ", "=", "environ", ",", "topology", "=", "topology", ")", "if", "role", ...
Get the logical plan state of a topology in a cluster :param cluster: :param environ: :param topology: :param role: :return:
[ "Get", "the", "logical", "plan", "state", "of", "a", "topology", "in", "a", "cluster", ":", "param", "cluster", ":", ":", "param", "environ", ":", ":", "param", "topology", ":", ":", "param", "role", ":", ":", "return", ":" ]
python
valid
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L832-L858
def show_info(self, **kwargs): """ Print info on the flow i.e. total number of tasks, works, tasks grouped by class. Example: Task Class Number ------------ -------- ScfTask 1 NscfTask 1 ScrTask 2 SigmaTask 6 """ stream = kwargs.pop("stream", sys.stdout) lines = [str(self)] app = lines.append app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks) ) app("Number of tasks with a given class:\n") # Build Table data = [[cls.__name__, len(tasks)] for cls, tasks in self.groupby_task_class().items()] app(str(tabulate(data, headers=["Task Class", "Number"]))) stream.write("\n".join(lines))
[ "def", "show_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "stream", "=", "kwargs", ".", "pop", "(", "\"stream\"", ",", "sys", ".", "stdout", ")", "lines", "=", "[", "str", "(", "self", ")", "]", "app", "=", "lines", ".", "append", "app"...
Print info on the flow i.e. total number of tasks, works, tasks grouped by class. Example: Task Class Number ------------ -------- ScfTask 1 NscfTask 1 ScrTask 2 SigmaTask 6
[ "Print", "info", "on", "the", "flow", "i", ".", "e", ".", "total", "number", "of", "tasks", "works", "tasks", "grouped", "by", "class", "." ]
python
train
Shizmob/pydle
pydle/features/rfc1459/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/rfc1459/client.py#L275-L283
async def kick(self, channel, target, reason=None): """ Kick user from channel. """ if not self.in_channel(channel): raise NotInChannel(channel) if reason: await self.rawmsg('KICK', channel, target, reason) else: await self.rawmsg('KICK', channel, target)
[ "async", "def", "kick", "(", "self", ",", "channel", ",", "target", ",", "reason", "=", "None", ")", ":", "if", "not", "self", ".", "in_channel", "(", "channel", ")", ":", "raise", "NotInChannel", "(", "channel", ")", "if", "reason", ":", "await", "s...
Kick user from channel.
[ "Kick", "user", "from", "channel", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xorbrecordbox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordbox.py#L665-L730
def refresh(self, records): """ Refreshs the current user interface to match the latest settings. """ self._loaded = True if self.isLoading(): return # load the information if RecordSet.typecheck(records): table = records.table() self.setTableType(table) if self.order(): records.setOrder(self.order()) # load specific data for this record box if self.specifiedColumnsOnly(): records.setColumns(map(lambda x: x.name(), self.specifiedColumns())) # load the records asynchronously if self.isThreadEnabled() and table: try: thread_enabled = table.getDatabase().isThreadEnabled() except AttributeError: thread_enabled = False if thread_enabled: # ensure we have a worker thread running self.worker() # assign ordering based on tree table if self.showTreePopup(): tree = self.treePopupWidget() if tree.isSortingEnabled(): col = tree.sortColumn() colname = tree.headerItem().text(col) column = table.schema().column(colname) if column: if tree.sortOrder() == Qt.AscendingOrder: sort_order = 'asc' else: sort_order = 'desc' records.setOrder([(column.name(), sort_order)]) self.loadRequested.emit(records) return # load the records synchronously self.loadingStarted.emit() curr_record = self.currentRecord() self.blockSignals(True) self.setUpdatesEnabled(False) self.clear() use_dummy = not self.isRequired() or self.isCheckable() if use_dummy: self.addItem('') self.addRecords(records) self.setUpdatesEnabled(True) self.blockSignals(False) self.setCurrentRecord(curr_record) self.loadingFinished.emit()
[ "def", "refresh", "(", "self", ",", "records", ")", ":", "self", ".", "_loaded", "=", "True", "if", "self", ".", "isLoading", "(", ")", ":", "return", "# load the information\r", "if", "RecordSet", ".", "typecheck", "(", "records", ")", ":", "table", "="...
Refreshs the current user interface to match the latest settings.
[ "Refreshs", "the", "current", "user", "interface", "to", "match", "the", "latest", "settings", "." ]
python
train
digidotcom/python-devicecloud
devicecloud/monitor_tcp.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/monitor_tcp.py#L564-L579
def stop(self): """Stops all session activity. Blocks until io and writer thread dies """ if self._io_thread is not None: self.log.info("Waiting for I/O thread to stop...") self.closed = True self._io_thread.join() if self._writer_thread is not None: self.log.info("Waiting for Writer Thread to stop...") self.closed = True self._writer_thread.join() self.log.info("All worker threads stopped.")
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_io_thread", "is", "not", "None", ":", "self", ".", "log", ".", "info", "(", "\"Waiting for I/O thread to stop...\"", ")", "self", ".", "closed", "=", "True", "self", ".", "_io_thread", ".", "join...
Stops all session activity. Blocks until io and writer thread dies
[ "Stops", "all", "session", "activity", "." ]
python
train
goose3/goose3
goose3/configuration.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/configuration.py#L216-L249
def known_context_patterns(self, val): ''' val must be an ArticleContextPattern, a dictionary, or list of \ dictionaries e.g., {'attr': 'class', 'value': 'my-article-class'} or [{'attr': 'class', 'value': 'my-article-class'}, {'attr': 'id', 'value': 'my-article-id'}] ''' def create_pat_from_dict(val): '''Helper function used to create an ArticleContextPattern from a dictionary ''' if "tag" in val: pat = ArticleContextPattern(tag=val["tag"]) if "attr" in val: pat.attr = val["attr"] pat.value = val["value"] elif "attr" in val: pat = ArticleContextPattern(attr=val["attr"], value=val["value"]) if "domain" in val: pat.domain = val["domain"] return pat if isinstance(val, list): self._known_context_patterns = [ x if isinstance(x, ArticleContextPattern) else create_pat_from_dict(x) for x in val ] + self.known_context_patterns elif isinstance(val, ArticleContextPattern): self._known_context_patterns.insert(0, val) elif isinstance(val, dict): self._known_context_patterns.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use a ArticleContextPattern.".format(type(val)))
[ "def", "known_context_patterns", "(", "self", ",", "val", ")", ":", "def", "create_pat_from_dict", "(", "val", ")", ":", "'''Helper function used to create an ArticleContextPattern from a dictionary\n '''", "if", "\"tag\"", "in", "val", ":", "pat", "=", "Articl...
val must be an ArticleContextPattern, a dictionary, or list of \ dictionaries e.g., {'attr': 'class', 'value': 'my-article-class'} or [{'attr': 'class', 'value': 'my-article-class'}, {'attr': 'id', 'value': 'my-article-id'}]
[ "val", "must", "be", "an", "ArticleContextPattern", "a", "dictionary", "or", "list", "of", "\\", "dictionaries", "e", ".", "g", ".", "{", "attr", ":", "class", "value", ":", "my", "-", "article", "-", "class", "}", "or", "[", "{", "attr", ":", "class...
python
valid
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1401-L1404
def add_event(self, name, time, chan): """Action: add a single event.""" self.annot.add_event(name, time, chan=chan) self.update_annotations()
[ "def", "add_event", "(", "self", ",", "name", ",", "time", ",", "chan", ")", ":", "self", ".", "annot", ".", "add_event", "(", "name", ",", "time", ",", "chan", "=", "chan", ")", "self", ".", "update_annotations", "(", ")" ]
Action: add a single event.
[ "Action", ":", "add", "a", "single", "event", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/interconnect.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/interconnect.py#L449-L479
def send_last_message(self, msg, connection_id=None): """ Should be used instead of send_message, when you want to close the connection once the message is sent. :param msg: protobuf validator_pb2.Message """ zmq_identity = None if connection_id is not None and self._connections is not None: if connection_id in self._connections: connection_info = self._connections.get(connection_id) if connection_info.connection_type == \ ConnectionType.ZMQ_IDENTITY: zmq_identity = connection_info.connection del self._connections[connection_id] else: LOGGER.debug("Can't send to %s, not in self._connections", connection_id) return self._ready.wait() try: asyncio.run_coroutine_threadsafe( self._send_last_message(zmq_identity, msg), self._event_loop) except RuntimeError: # run_coroutine_threadsafe will throw a RuntimeError if # the eventloop is closed. This occurs on shutdown. pass
[ "def", "send_last_message", "(", "self", ",", "msg", ",", "connection_id", "=", "None", ")", ":", "zmq_identity", "=", "None", "if", "connection_id", "is", "not", "None", "and", "self", ".", "_connections", "is", "not", "None", ":", "if", "connection_id", ...
Should be used instead of send_message, when you want to close the connection once the message is sent. :param msg: protobuf validator_pb2.Message
[ "Should", "be", "used", "instead", "of", "send_message", "when", "you", "want", "to", "close", "the", "connection", "once", "the", "message", "is", "sent", "." ]
python
train
saltstack/salt
salt/pillar/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L861-L910
def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors
[ "def", "render_pillar", "(", "self", ",", "matches", ",", "errors", "=", "None", ")", ":", "pillar", "=", "copy", ".", "copy", "(", "self", ".", "pillar_override", ")", "if", "errors", "is", "None", ":", "errors", "=", "[", "]", "for", "saltenv", ","...
Extract the sls pillar files from the matches and render them into the pillar
[ "Extract", "the", "sls", "pillar", "files", "from", "the", "matches", "and", "render", "them", "into", "the", "pillar" ]
python
train
materialsvirtuallab/pyhull
pyhull/simplex.py
https://github.com/materialsvirtuallab/pyhull/blob/01d4ee2c108ab3d8faa9b9ff476290ffee90073f/pyhull/simplex.py#L51-L55
def volume(self): """ Volume of the simplex. """ return abs(np.linalg.det(self.T)) / math.factorial(self.space_dim)
[ "def", "volume", "(", "self", ")", ":", "return", "abs", "(", "np", ".", "linalg", ".", "det", "(", "self", ".", "T", ")", ")", "/", "math", ".", "factorial", "(", "self", ".", "space_dim", ")" ]
Volume of the simplex.
[ "Volume", "of", "the", "simplex", "." ]
python
train
Vauxoo/cfdilib
cfdilib/cfdilib.py
https://github.com/Vauxoo/cfdilib/blob/acd73d159f62119f3100d963a061820bbe3f93ea/cfdilib/cfdilib.py#L188-L217
def set_xml(self): """Set document xml just rendered already validated against xsd to be signed. :params boolean debug_mode: Either if you want the rendered template to be saved either it is valid or not with the given schema. :returns boolean: Either was valid or not the generated document. """ cached = StringIO() document = u'' try: document = self.template.render(inv=self) except UndefinedError as ups: self.ups = ups # TODO: Here should be called the cleanup 'Just before the validation'. valid = self.validate(self.schema, document) self.document = document if valid: document = etree.XML(document) self.document = etree.tostring(document, pretty_print=True, xml_declaration=True, encoding='utf-8') # TODO: When Document Generated, this this should not fail either. # Caching just when valid then. cached.write(self.document is not None and self.document or u'') cached.seek(0) self.document_path = cached
[ "def", "set_xml", "(", "self", ")", ":", "cached", "=", "StringIO", "(", ")", "document", "=", "u''", "try", ":", "document", "=", "self", ".", "template", ".", "render", "(", "inv", "=", "self", ")", "except", "UndefinedError", "as", "ups", ":", "se...
Set document xml just rendered already validated against xsd to be signed. :params boolean debug_mode: Either if you want the rendered template to be saved either it is valid or not with the given schema. :returns boolean: Either was valid or not the generated document.
[ "Set", "document", "xml", "just", "rendered", "already", "validated", "against", "xsd", "to", "be", "signed", "." ]
python
train
cs01/pygdbmi
pygdbmi/StringStream.py
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/StringStream.py#L60-L92
def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None): """characters that gdb escapes that should not be escaped by this parser """ if chars_to_remove_gdb_escape is None: chars_to_remove_gdb_escape = ['"'] buf = "" while True: c = self.raw_text[self.index] self.index += 1 logging.debug("%s", fmt_cyan(c)) if c == "\\": # We are on a backslash and there is another character after the backslash # to parse. Handle this case specially since gdb escaped it for us # Get the next char that is being escaped c2 = self.raw_text[self.index] self.index += 1 # only store the escaped character in the buffer; don't store the backslash # (don't leave it escaped) buf += c2 elif c == '"': # Quote is closed. Exit (and don't include the end quote). break else: # capture this character, and keep capturing buf += c return buf
[ "def", "advance_past_string_with_gdb_escapes", "(", "self", ",", "chars_to_remove_gdb_escape", "=", "None", ")", ":", "if", "chars_to_remove_gdb_escape", "is", "None", ":", "chars_to_remove_gdb_escape", "=", "[", "'\"'", "]", "buf", "=", "\"\"", "while", "True", ":"...
characters that gdb escapes that should not be escaped by this parser
[ "characters", "that", "gdb", "escapes", "that", "should", "not", "be", "escaped", "by", "this", "parser" ]
python
valid
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_kvlayer_keyword_search.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L135-L172
def index(self, si): '''Record index records for a single document. Which indexes this creates depends on the parameters to the constructor. This records all of the requested indexes for a single document. ''' if not si.body.clean_visible: logger.warn('stream item %s has no clean_visible part, ' 'skipping keyword indexing', si.stream_id) return # Count tokens in si.clean_visible # We will recycle hash==0 for "# of documents" hash_counts = defaultdict(int) hash_counts[DOCUMENT_HASH_KEY] = 1 hash_kw = defaultdict(int) words = self.collect_words(si) for tok, count in words.iteritems(): (tok, tok_hash) = self.make_hash_kw(tok) hash_counts[tok_hash] += count hash_kw[tok] = tok_hash # Convert this and write it out if self.hash_docs: (k1, k2) = key_for_stream_item(si) kvps = [((h, k1, k2), n) for (h, n) in hash_counts.iteritems() if h != DOCUMENT_HASH_KEY] self.client.put(HASH_TF_INDEX_TABLE, *kvps) if self.hash_frequencies: kvps = [((h,), 1) for h in hash_counts.iterkeys()] self.client.increment(HASH_FREQUENCY_TABLE, *kvps) if self.hash_keywords: kvps = [((h, t), 1) for (t, h) in hash_kw.iteritems()] self.client.increment(HASH_KEYWORD_INDEX_TABLE, *kvps)
[ "def", "index", "(", "self", ",", "si", ")", ":", "if", "not", "si", ".", "body", ".", "clean_visible", ":", "logger", ".", "warn", "(", "'stream item %s has no clean_visible part, '", "'skipping keyword indexing'", ",", "si", ".", "stream_id", ")", "return", ...
Record index records for a single document. Which indexes this creates depends on the parameters to the constructor. This records all of the requested indexes for a single document.
[ "Record", "index", "records", "for", "a", "single", "document", "." ]
python
test
ArduPilot/MAVProxy
MAVProxy/modules/lib/MacOS/backend_wxagg.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wxagg.py#L113-L121
def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. """ frame = FigureFrameWxAgg(num, figure) figmgr = frame.get_figure_manager() if matplotlib.is_interactive(): figmgr.frame.Show() return figmgr
[ "def", "new_figure_manager_given_figure", "(", "num", ",", "figure", ")", ":", "frame", "=", "FigureFrameWxAgg", "(", "num", ",", "figure", ")", "figmgr", "=", "frame", ".", "get_figure_manager", "(", ")", "if", "matplotlib", ".", "is_interactive", "(", ")", ...
Create a new figure manager instance for the given figure.
[ "Create", "a", "new", "figure", "manager", "instance", "for", "the", "given", "figure", "." ]
python
train
nuagenetworks/bambou
bambou/nurest_object.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_object.py#L490-L495
def is_owned_by_current_user(self): """ Check if the current user owns the object """ from bambou.nurest_root_object import NURESTRootObject root_object = NURESTRootObject.get_default_root_object() return self._owner == root_object.id
[ "def", "is_owned_by_current_user", "(", "self", ")", ":", "from", "bambou", ".", "nurest_root_object", "import", "NURESTRootObject", "root_object", "=", "NURESTRootObject", ".", "get_default_root_object", "(", ")", "return", "self", ".", "_owner", "==", "root_object",...
Check if the current user owns the object
[ "Check", "if", "the", "current", "user", "owns", "the", "object" ]
python
train
Workable/flask-log-request-id
flask_log_request_id/ctx_fetcher.py
https://github.com/Workable/flask-log-request-id/blob/3aaea86dfe2621ecc443a1e739ae6a27ae1187be/flask_log_request_id/ctx_fetcher.py#L28-L35
def register_fetcher(self, ctx_fetcher): """ Register another context-specialized fetcher :param Callable ctx_fetcher: A callable that will return the id or raise ExecutedOutsideContext if it was executed outside its context """ if ctx_fetcher not in self.ctx_fetchers: self.ctx_fetchers.append(ctx_fetcher)
[ "def", "register_fetcher", "(", "self", ",", "ctx_fetcher", ")", ":", "if", "ctx_fetcher", "not", "in", "self", ".", "ctx_fetchers", ":", "self", ".", "ctx_fetchers", ".", "append", "(", "ctx_fetcher", ")" ]
Register another context-specialized fetcher :param Callable ctx_fetcher: A callable that will return the id or raise ExecutedOutsideContext if it was executed outside its context
[ "Register", "another", "context", "-", "specialized", "fetcher", ":", "param", "Callable", "ctx_fetcher", ":", "A", "callable", "that", "will", "return", "the", "id", "or", "raise", "ExecutedOutsideContext", "if", "it", "was", "executed", "outside", "its", "cont...
python
train
apache/incubator-mxnet
example/gluon/style_transfer/utils.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/style_transfer/utils.py#L69-L78
def subtract_imagenet_mean_preprocess_batch(batch): """Subtract ImageNet mean pixel-wise from a BGR image.""" batch = F.swapaxes(batch,0, 1) (r, g, b) = F.split(batch, num_outputs=3, axis=0) r = r - 123.680 g = g - 116.779 b = b - 103.939 batch = F.concat(b, g, r, dim=0) batch = F.swapaxes(batch,0, 1) return batch
[ "def", "subtract_imagenet_mean_preprocess_batch", "(", "batch", ")", ":", "batch", "=", "F", ".", "swapaxes", "(", "batch", ",", "0", ",", "1", ")", "(", "r", ",", "g", ",", "b", ")", "=", "F", ".", "split", "(", "batch", ",", "num_outputs", "=", "...
Subtract ImageNet mean pixel-wise from a BGR image.
[ "Subtract", "ImageNet", "mean", "pixel", "-", "wise", "from", "a", "BGR", "image", "." ]
python
train
kwikteam/phy
phy/plot/panzoom.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L374-L378
def reset(self): """Reset the view.""" self.pan = (0., 0.) self.zoom = self._default_zoom self.update()
[ "def", "reset", "(", "self", ")", ":", "self", ".", "pan", "=", "(", "0.", ",", "0.", ")", "self", ".", "zoom", "=", "self", ".", "_default_zoom", "self", ".", "update", "(", ")" ]
Reset the view.
[ "Reset", "the", "view", "." ]
python
train
mbr/tinyrpc
tinyrpc/dispatch/__init__.py
https://github.com/mbr/tinyrpc/blob/59ccf62452b3f37e8411ff0309a3a99857d05e19/tinyrpc/dispatch/__init__.py#L132-L156
def get_method(self, name): """Retrieve a previously registered method. Checks if a method matching ``name`` has been registered. If :py:func:`get_method` cannot find a method, every subdispatcher with a prefix matching the method name is checked as well. :param str name: Function to find. :returns: The callable implementing the function. :rtype: callable :raises: :py:exc:`~tinyrpc.exc.MethodNotFoundError` """ if name in self.method_map: return self.method_map[name] for prefix, subdispatchers in self.subdispatchers.items(): if name.startswith(prefix): for sd in subdispatchers: try: return sd.get_method(name[len(prefix):]) except exc.MethodNotFoundError: pass raise exc.MethodNotFoundError(name)
[ "def", "get_method", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "method_map", ":", "return", "self", ".", "method_map", "[", "name", "]", "for", "prefix", ",", "subdispatchers", "in", "self", ".", "subdispatchers", ".", "items",...
Retrieve a previously registered method. Checks if a method matching ``name`` has been registered. If :py:func:`get_method` cannot find a method, every subdispatcher with a prefix matching the method name is checked as well. :param str name: Function to find. :returns: The callable implementing the function. :rtype: callable :raises: :py:exc:`~tinyrpc.exc.MethodNotFoundError`
[ "Retrieve", "a", "previously", "registered", "method", "." ]
python
train
ArchiveTeam/wpull
wpull/url.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/url.py#L432-L443
def normalize_hostname(hostname): '''Normalizes a hostname so that it is ASCII and valid domain name.''' try: new_hostname = hostname.encode('idna').decode('ascii').lower() except UnicodeError as error: raise UnicodeError('Hostname {} rejected: {}'.format(hostname, error)) from error if hostname != new_hostname: # Check for round-trip. May raise UnicodeError new_hostname.encode('idna') return new_hostname
[ "def", "normalize_hostname", "(", "hostname", ")", ":", "try", ":", "new_hostname", "=", "hostname", ".", "encode", "(", "'idna'", ")", ".", "decode", "(", "'ascii'", ")", ".", "lower", "(", ")", "except", "UnicodeError", "as", "error", ":", "raise", "Un...
Normalizes a hostname so that it is ASCII and valid domain name.
[ "Normalizes", "a", "hostname", "so", "that", "it", "is", "ASCII", "and", "valid", "domain", "name", "." ]
python
train
eternnoir/pyTelegramBotAPI
telebot/__init__.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L799-L817
def send_venue(self, chat_id, latitude, longitude, title, address, foursquare_id=None, disable_notification=None, reply_to_message_id=None, reply_markup=None): """ Use this method to send information about a venue. :param chat_id: Integer or String : Unique identifier for the target chat or username of the target channel :param latitude: Float : Latitude of the venue :param longitude: Float : Longitude of the venue :param title: String : Name of the venue :param address: String : Address of the venue :param foursquare_id: String : Foursquare identifier of the venue :param disable_notification: :param reply_to_message_id: :param reply_markup: :return: """ return types.Message.de_json( apihelper.send_venue(self.token, chat_id, latitude, longitude, title, address, foursquare_id, disable_notification, reply_to_message_id, reply_markup) )
[ "def", "send_venue", "(", "self", ",", "chat_id", ",", "latitude", ",", "longitude", ",", "title", ",", "address", ",", "foursquare_id", "=", "None", ",", "disable_notification", "=", "None", ",", "reply_to_message_id", "=", "None", ",", "reply_markup", "=", ...
Use this method to send information about a venue. :param chat_id: Integer or String : Unique identifier for the target chat or username of the target channel :param latitude: Float : Latitude of the venue :param longitude: Float : Longitude of the venue :param title: String : Name of the venue :param address: String : Address of the venue :param foursquare_id: String : Foursquare identifier of the venue :param disable_notification: :param reply_to_message_id: :param reply_markup: :return:
[ "Use", "this", "method", "to", "send", "information", "about", "a", "venue", ".", ":", "param", "chat_id", ":", "Integer", "or", "String", ":", "Unique", "identifier", "for", "the", "target", "chat", "or", "username", "of", "the", "target", "channel", ":",...
python
train
mdsol/rwslib
rwslib/builders/metadata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L1687-L1697
def build(self, builder): """Build XML by appending to builder""" params = dict( Namespace=self.namespace, Name=self.name, Value=self.value, TransactionType=self.transaction_type, ) builder.start("mdsol:Attribute", params) builder.end("mdsol:Attribute")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "params", "=", "dict", "(", "Namespace", "=", "self", ".", "namespace", ",", "Name", "=", "self", ".", "name", ",", "Value", "=", "self", ".", "value", ",", "TransactionType", "=", "self", ".", ...
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
python
train
datadotworld/data.world-py
datadotworld/__init__.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/__init__.py#L104-L143
def query(dataset_key, query, query_type='sql', profile='default', parameters=None, **kwargs): """Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = 'sql') :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters, if SQL query, then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :param profile: Configuration profile (account) to use. (Default value = 'default') :type profile: str, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs Examples -------- >>> import datadotworld as dw >>> results = dw.query( ... 'jonloyens/an-intro-to-dataworld-dataset', ... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` ' ... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name') >>> df = results.dataframe >>> df.shape (8, 6) """ return _get_instance(profile, **kwargs).query(dataset_key, query, query_type=query_type, parameters=parameters, **kwargs)
[ "def", "query", "(", "dataset_key", ",", "query", ",", "query_type", "=", "'sql'", ",", "profile", "=", "'default'", ",", "parameters", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_get_instance", "(", "profile", ",", "*", "*", "kwargs", ...
Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = 'sql') :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters, if SQL query, then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :param profile: Configuration profile (account) to use. (Default value = 'default') :type profile: str, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs Examples -------- >>> import datadotworld as dw >>> results = dw.query( ... 'jonloyens/an-intro-to-dataworld-dataset', ... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` ' ... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name') >>> df = results.dataframe >>> df.shape (8, 6)
[ "Query", "an", "existing", "dataset" ]
python
train
Neurita/boyle
boyle/image/base.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/image/base.py#L178-L182
def smooth_fwhm(self, fwhm): """ Set a smoothing Gaussian kernel given its FWHM in mm. """ if fwhm != self._smooth_fwhm: self._is_data_smooth = False self._smooth_fwhm = fwhm
[ "def", "smooth_fwhm", "(", "self", ",", "fwhm", ")", ":", "if", "fwhm", "!=", "self", ".", "_smooth_fwhm", ":", "self", ".", "_is_data_smooth", "=", "False", "self", ".", "_smooth_fwhm", "=", "fwhm" ]
Set a smoothing Gaussian kernel given its FWHM in mm.
[ "Set", "a", "smoothing", "Gaussian", "kernel", "given", "its", "FWHM", "in", "mm", "." ]
python
valid
StackStorm/pybind
pybind/nos/v6_0_2f/interface/fortygigabitethernet/storm_control/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/fortygigabitethernet/storm_control/__init__.py#L92-L113
def _set_ingress(self, v, load=False): """ Setter method for ingress, mapped from YANG variable /interface/fortygigabitethernet/storm_control/ingress (list) If this variable is read-only (config: false) in the source YANG file, then _set_ingress is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ingress() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("protocol_type",ingress.ingress, yang_name="ingress", rest_name="ingress", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='protocol-type', extensions={u'tailf-common': {u'info': u'Ingress Direction', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="ingress", rest_name="ingress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Ingress Direction', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bum-storm-control', defining_module='brocade-bum-storm-control', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ingress must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("protocol_type",ingress.ingress, yang_name="ingress", rest_name="ingress", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='protocol-type', extensions={u'tailf-common': {u'info': u'Ingress Direction', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, 
u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="ingress", rest_name="ingress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Ingress Direction', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bum-storm-control', defining_module='brocade-bum-storm-control', yang_type='list', is_config=True)""", }) self.__ingress = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ingress", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base",...
Setter method for ingress, mapped from YANG variable /interface/fortygigabitethernet/storm_control/ingress (list) If this variable is read-only (config: false) in the source YANG file, then _set_ingress is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ingress() directly.
[ "Setter", "method", "for", "ingress", "mapped", "from", "YANG", "variable", "/", "interface", "/", "fortygigabitethernet", "/", "storm_control", "/", "ingress", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false...
python
train
gem/oq-engine
openquake/hmtk/plotting/seismicity/catalogue_plots.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/plotting/seismicity/catalogue_plots.py#L117-L158
def plot_depth_histogram( catalogue, bin_width, normalisation=False, bootstrap=None, filename=None, figure_size=(8, 6), filetype='png', dpi=300, ax=None): """ Creates a histogram of the depths in the catalogue :param catalogue: Earthquake catalogue as instance of :class: openquake.hmtk.seismicity.catalogue.Catalogue :param float bin_width: Width of the histogram for the depth bins :param bool normalisation: Normalise the histogram to give output as PMF (True) or count (False) :param int bootstrap: To sample depth uncertainty choose number of samples """ if ax is None: fig, ax = plt.subplots(figsize=figure_size) else: fig = ax.get_figure() # Create depth range if len(catalogue.data['depth']) == 0: # pylint: disable=len-as-condition raise ValueError('No depths reported in catalogue!') depth_bins = np.arange(0., np.max(catalogue.data['depth']) + bin_width, bin_width) depth_hist = catalogue.get_depth_distribution(depth_bins, normalisation, bootstrap) ax.bar(depth_bins[:-1], depth_hist, width=0.95 * bin_width, edgecolor='k') ax.set_xlabel('Depth (km)') if normalisation: ax.set_ylabel('Probability Mass Function') else: ax.set_ylabel('Count') ax.set_title('Depth Histogram') _save_image(fig, filename, filetype, dpi)
[ "def", "plot_depth_histogram", "(", "catalogue", ",", "bin_width", ",", "normalisation", "=", "False", ",", "bootstrap", "=", "None", ",", "filename", "=", "None", ",", "figure_size", "=", "(", "8", ",", "6", ")", ",", "filetype", "=", "'png'", ",", "dpi...
Creates a histogram of the depths in the catalogue :param catalogue: Earthquake catalogue as instance of :class: openquake.hmtk.seismicity.catalogue.Catalogue :param float bin_width: Width of the histogram for the depth bins :param bool normalisation: Normalise the histogram to give output as PMF (True) or count (False) :param int bootstrap: To sample depth uncertainty choose number of samples
[ "Creates", "a", "histogram", "of", "the", "depths", "in", "the", "catalogue" ]
python
train
ejhigson/nestcheck
nestcheck/estimators.py
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/estimators.py#L176-L203
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0): """Mean of the square of single parameter (second moment of its posterior distribution). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. param_ind: int, optional Index of parameter for which the second moment should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) w_relative = np.exp(logw - logw.max()) # protect against overflow w_relative /= np.sum(w_relative) return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
[ "def", "param_squared_mean", "(", "ns_run", ",", "logw", "=", "None", ",", "simulate", "=", "False", ",", "param_ind", "=", "0", ")", ":", "if", "logw", "is", "None", ":", "logw", "=", "nestcheck", ".", "ns_run_utils", ".", "get_logw", "(", "ns_run", "...
Mean of the square of single parameter (second moment of its posterior distribution). Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. param_ind: int, optional Index of parameter for which the second moment should be calculated. This corresponds to the column of ns_run['theta'] which contains the parameter. Returns ------- float
[ "Mean", "of", "the", "square", "of", "single", "parameter", "(", "second", "moment", "of", "its", "posterior", "distribution", ")", "." ]
python
train
neithere/monk
monk/compat.py
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/compat.py#L50-L60
def safe_str(value): """ Returns: * a `str` instance (bytes) in Python 2.x, or * a `str` instance (Unicode) in Python 3.x. """ if sys.version_info < (3,0) and isinstance(value, unicode): return value.encode('utf-8') else: return str(value)
[ "def", "safe_str", "(", "value", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", "and", "isinstance", "(", "value", ",", "unicode", ")", ":", "return", "value", ".", "encode", "(", "'utf-8'", ")", "else", ":", "return", "s...
Returns: * a `str` instance (bytes) in Python 2.x, or * a `str` instance (Unicode) in Python 3.x.
[ "Returns", ":" ]
python
train
zblz/naima
naima/plot.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/plot.py#L34-L60
def plot_chain(sampler, p=None, **kwargs): """Generate a diagnostic plot of the sampler chains. Parameters ---------- sampler : `emcee.EnsembleSampler` Sampler containing the chains to be plotted. p : int (optional) Index of the parameter to plot. If omitted, all chains are plotted. last_step : bool (optional) Whether to plot the last step of the chain or the complete chain (default). Returns ------- figure : `matplotlib.figure.Figure` Figure """ if p is None: npars = sampler.chain.shape[-1] for pp in six.moves.range(npars): _plot_chain_func(sampler, pp, **kwargs) fig = None else: fig = _plot_chain_func(sampler, p, **kwargs) return fig
[ "def", "plot_chain", "(", "sampler", ",", "p", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "p", "is", "None", ":", "npars", "=", "sampler", ".", "chain", ".", "shape", "[", "-", "1", "]", "for", "pp", "in", "six", ".", "moves", ".", ...
Generate a diagnostic plot of the sampler chains. Parameters ---------- sampler : `emcee.EnsembleSampler` Sampler containing the chains to be plotted. p : int (optional) Index of the parameter to plot. If omitted, all chains are plotted. last_step : bool (optional) Whether to plot the last step of the chain or the complete chain (default). Returns ------- figure : `matplotlib.figure.Figure` Figure
[ "Generate", "a", "diagnostic", "plot", "of", "the", "sampler", "chains", "." ]
python
train
HazyResearch/fonduer
src/fonduer/learning/disc_models/modules/sparse_linear.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/modules/sparse_linear.py#L53-L67
def forward(self, x, w): """Forward function. :param x: Feature indices. :type x: torch.Tensor of shape (batch_size * length) :param w: Feature weights. :type w: torch.Tensor of shape (batch_size * length) :return: Output of linear layer. :rtype: torch.Tensor of shape (batch_size, num_classes) """ if self.bias is None: return (w.unsqueeze(2) * self.weight(x)).sum(dim=1) else: return (w.unsqueeze(2) * self.weight(x)).sum(dim=1) + self.bias
[ "def", "forward", "(", "self", ",", "x", ",", "w", ")", ":", "if", "self", ".", "bias", "is", "None", ":", "return", "(", "w", ".", "unsqueeze", "(", "2", ")", "*", "self", ".", "weight", "(", "x", ")", ")", ".", "sum", "(", "dim", "=", "1"...
Forward function. :param x: Feature indices. :type x: torch.Tensor of shape (batch_size * length) :param w: Feature weights. :type w: torch.Tensor of shape (batch_size * length) :return: Output of linear layer. :rtype: torch.Tensor of shape (batch_size, num_classes)
[ "Forward", "function", "." ]
python
train
cokelaer/spectrum
doc/sphinxext/sphinx_gallery/gen_rst.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/doc/sphinxext/sphinx_gallery/gen_rst.py#L444-L517
def execute_script(code_block, example_globals, image_path, fig_count, src_file, gallery_conf): """Executes the code block of the example file""" time_elapsed = 0 stdout = '' # We need to execute the code print('plotting code blocks in %s' % src_file) plt.close('all') cwd = os.getcwd() # Redirect output to stdout and orig_stdout = sys.stdout try: # First cd in the original example dir, so that any file # created by the example get created in this directory os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout t_start = time() exec(code_block, example_globals) time_elapsed = time() - t_start sys.stdout = orig_stdout my_stdout = my_buffer.getvalue().strip().expandtabs() if my_stdout: stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4)) os.chdir(cwd) figure_list = save_figures(image_path, fig_count, gallery_conf) # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. 
image_list = "" if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') elif len(figure_list) > 1: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') except Exception: formatted_exception = traceback.format_exc() print(80 * '_') print('%s is not compiling:' % src_file) print(formatted_exception) print(80 * '_') figure_list = [] image_list = codestr2rst(formatted_exception, lang='pytb') # Overrides the output thumbnail in the gallery for easy identification broken_img = os.path.join(glr_path_static(), 'broken_example.png') shutil.copyfile(broken_img, os.path.join(cwd, image_path.format(1))) fig_count += 1 # raise count to avoid overwriting image # Breaks build on first example error if gallery_conf['abort_on_example_error']: raise finally: os.chdir(cwd) sys.stdout = orig_stdout print(" - time elapsed : %.2g sec" % time_elapsed) code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout) return code_output, time_elapsed, fig_count + len(figure_list)
[ "def", "execute_script", "(", "code_block", ",", "example_globals", ",", "image_path", ",", "fig_count", ",", "src_file", ",", "gallery_conf", ")", ":", "time_elapsed", "=", "0", "stdout", "=", "''", "# We need to execute the code", "print", "(", "'plotting code blo...
Executes the code block of the example file
[ "Executes", "the", "code", "block", "of", "the", "example", "file" ]
python
valid
jithurjacob/Windows-10-Toast-Notifications
win10toast/__init__.py
https://github.com/jithurjacob/Windows-10-Toast-Notifications/blob/9d52b73f1af6c60162cf09b99269c4f7b13cdb00/win10toast/__init__.py#L117-L135
def show_toast(self, title="Notification", msg="Here comes the message", icon_path=None, duration=5, threaded=False): """Notification settings. :title: notification title :msg: notification message :icon_path: path to the .ico file to custom notification :duration: delay in seconds before notification self-destruction """ if not threaded: self._show_toast(title, msg, icon_path, duration) else: if self.notification_active(): # We have an active notification, let is finish so we don't spam them return False self._thread = threading.Thread(target=self._show_toast, args=(title, msg, icon_path, duration)) self._thread.start() return True
[ "def", "show_toast", "(", "self", ",", "title", "=", "\"Notification\"", ",", "msg", "=", "\"Here comes the message\"", ",", "icon_path", "=", "None", ",", "duration", "=", "5", ",", "threaded", "=", "False", ")", ":", "if", "not", "threaded", ":", "self",...
Notification settings. :title: notification title :msg: notification message :icon_path: path to the .ico file to custom notification :duration: delay in seconds before notification self-destruction
[ "Notification", "settings", "." ]
python
train
openvax/isovar
isovar/variant_sequence_in_reading_frame.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L216-L233
def count_mismatches_before_variant(reference_prefix, cdna_prefix): """ Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus. Parameters ---------- reference_prefix : str cDNA sequence of a reference transcript before a variant locus cdna_prefix : str cDNA sequence detected from RNAseq before a variant locus """ if len(reference_prefix) != len(cdna_prefix): raise ValueError( "Expected reference prefix '%s' to be same length as %s" % ( reference_prefix, cdna_prefix)) return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix))
[ "def", "count_mismatches_before_variant", "(", "reference_prefix", ",", "cdna_prefix", ")", ":", "if", "len", "(", "reference_prefix", ")", "!=", "len", "(", "cdna_prefix", ")", ":", "raise", "ValueError", "(", "\"Expected reference prefix '%s' to be same length as %s\"",...
Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus. Parameters ---------- reference_prefix : str cDNA sequence of a reference transcript before a variant locus cdna_prefix : str cDNA sequence detected from RNAseq before a variant locus
[ "Computes", "the", "number", "of", "mismatching", "nucleotides", "between", "two", "cDNA", "sequences", "before", "a", "variant", "locus", "." ]
python
train
klahnakoski/pyLibrary
mo_math/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/stats.py#L295-L350
def median(values, simple=True, mean_weight=0.0): """ RETURN MEDIAN VALUE IF simple=False THEN IN THE EVENT MULTIPLE INSTANCES OF THE MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION IN THE MEDIAN RANGE mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE) """ if OR(v == None for v in values): Log.error("median is not ready to handle None") try: if not values: return Null l = len(values) _sorted = sorted(values) middle = int(l / 2) _median = float(_sorted[middle]) if len(_sorted) == 1: return _median if simple: if l % 2 == 0: return (_sorted[middle - 1] + _median) / 2 return _median # FIND RANGE OF THE median start_index = middle - 1 while start_index > 0 and _sorted[start_index] == _median: start_index -= 1 start_index += 1 stop_index = middle + 1 while stop_index < l and _sorted[stop_index] == _median: stop_index += 1 num_middle = stop_index - start_index if l % 2 == 0: if num_middle == 1: return (_sorted[middle - 1] + _median) / 2 else: return (_median - 0.5) + (middle - start_index) / num_middle else: if num_middle == 1: return (1 - mean_weight) * _median + mean_weight * (_sorted[middle - 1] + _sorted[middle + 1]) / 2 else: return (_median - 0.5) + (middle + 0.5 - start_index) / num_middle except Exception as e: Log.error("problem with median of {{values}}", values= values, cause=e)
[ "def", "median", "(", "values", ",", "simple", "=", "True", ",", "mean_weight", "=", "0.0", ")", ":", "if", "OR", "(", "v", "==", "None", "for", "v", "in", "values", ")", ":", "Log", ".", "error", "(", "\"median is not ready to handle None\"", ")", "tr...
RETURN MEDIAN VALUE IF simple=False THEN IN THE EVENT MULTIPLE INSTANCES OF THE MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION IN THE MEDIAN RANGE mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
[ "RETURN", "MEDIAN", "VALUE" ]
python
train
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/schedulermanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/schedulermanagementservice.py#L115-L125
def get_cloud_service(self, cloud_service_id): ''' The Get Cloud Service operation gets all the resources (job collections) in the cloud service. cloud_service_id: The cloud service id ''' _validate_not_none('cloud_service_id', cloud_service_id) path = self._get_cloud_services_path(cloud_service_id) return self._perform_get(path, CloudService)
[ "def", "get_cloud_service", "(", "self", ",", "cloud_service_id", ")", ":", "_validate_not_none", "(", "'cloud_service_id'", ",", "cloud_service_id", ")", "path", "=", "self", ".", "_get_cloud_services_path", "(", "cloud_service_id", ")", "return", "self", ".", "_pe...
The Get Cloud Service operation gets all the resources (job collections) in the cloud service. cloud_service_id: The cloud service id
[ "The", "Get", "Cloud", "Service", "operation", "gets", "all", "the", "resources", "(", "job", "collections", ")", "in", "the", "cloud", "service", "." ]
python
test
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L3078-L3136
def query_log(self, filter=None, query=None, count=None, offset=None, sort=None, **kwargs): """ Search the query and event log. Searches the query and event log to find query sessions that match the specified criteria. Searching the **logs** endpoint uses the standard Discovery query syntax for the parameters that are supported. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param int count: Number of results to return. The maximum for the **count** and **offset** values together in any one query is **10000**. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. The maximum for the **count** and **offset** values together in any one query is **10000**. :param list[str] sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'query_log') headers.update(sdk_headers) params = { 'version': self.version, 'filter': filter, 'query': query, 'count': count, 'offset': offset, 'sort': self._convert_list(sort) } url = '/v1/logs' response = self.request( method='GET', url=url, headers=headers, params=params, accept_json=True) return response
[ "def", "query_log", "(", "self", ",", "filter", "=", "None", ",", "query", "=", "None", ",", "count", "=", "None", ",", "offset", "=", "None", ",", "sort", "=", "None", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "}", "if", "'headers'...
Search the query and event log. Searches the query and event log to find query sessions that match the specified criteria. Searching the **logs** endpoint uses the standard Discovery query syntax for the parameters that are supported. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param int count: Number of results to return. The maximum for the **count** and **offset** values together in any one query is **10000**. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. The maximum for the **count** and **offset** values together in any one query is **10000**. :param list[str] sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
[ "Search", "the", "query", "and", "event", "log", "." ]
python
train
rueckstiess/mtools
mtools/mplotqueries/plottypes/scatter_type.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/scatter_type.py#L54-L59
def accept_line(self, logevent): """Return True if the log line has the nominated yaxis field.""" if self.regex_mode: return bool(re.search(self.field, logevent.line_str)) else: return getattr(logevent, self.field) is not None
[ "def", "accept_line", "(", "self", ",", "logevent", ")", ":", "if", "self", ".", "regex_mode", ":", "return", "bool", "(", "re", ".", "search", "(", "self", ".", "field", ",", "logevent", ".", "line_str", ")", ")", "else", ":", "return", "getattr", "...
Return True if the log line has the nominated yaxis field.
[ "Return", "True", "if", "the", "log", "line", "has", "the", "nominated", "yaxis", "field", "." ]
python
train
SmileyChris/easy-thumbnails
easy_thumbnails/files.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/files.py#L673-L684
def delete(self, *args, **kwargs): """ Delete the image, along with any generated thumbnails. """ source_cache = self.get_source_cache() # First, delete any related thumbnails. self.delete_thumbnails(source_cache) # Next, delete the source image. super(ThumbnailerFieldFile, self).delete(*args, **kwargs) # Finally, delete the source cache entry. if source_cache and source_cache.pk is not None: source_cache.delete()
[ "def", "delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source_cache", "=", "self", ".", "get_source_cache", "(", ")", "# First, delete any related thumbnails.", "self", ".", "delete_thumbnails", "(", "source_cache", ")", "# Next, del...
Delete the image, along with any generated thumbnails.
[ "Delete", "the", "image", "along", "with", "any", "generated", "thumbnails", "." ]
python
train
kodexlab/reliure
reliure/pipeline.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/pipeline.py#L264-L285
def set_options_values(self, options, parse=False, strict=False): """ Set the options from a dict of values (in string). :param option_values: the values of options (in format `{"opt_name": "new_value"}`) :type option_values: dict :param parse: whether to parse the given value :type parse: bool :param strict: if True the given `option_values` dict should only contains existing options (no other key) :type strict: bool """ if strict: for opt_name in options.keys(): if not self.has_option(opt_name): raise ValueError("'%s' is not a option of the component" % opt_name) elif self.option_is_hidden(opt_name): raise ValueError("'%s' is hidden, you can't set it" % opt_name) for opt_name, opt in self._options.items(): if opt.hidden: continue if opt_name in options: opt.set(options[opt_name], parse=parse)
[ "def", "set_options_values", "(", "self", ",", "options", ",", "parse", "=", "False", ",", "strict", "=", "False", ")", ":", "if", "strict", ":", "for", "opt_name", "in", "options", ".", "keys", "(", ")", ":", "if", "not", "self", ".", "has_option", ...
Set the options from a dict of values (in string). :param option_values: the values of options (in format `{"opt_name": "new_value"}`) :type option_values: dict :param parse: whether to parse the given value :type parse: bool :param strict: if True the given `option_values` dict should only contains existing options (no other key) :type strict: bool
[ "Set", "the", "options", "from", "a", "dict", "of", "values", "(", "in", "string", ")", ".", ":", "param", "option_values", ":", "the", "values", "of", "options", "(", "in", "format", "{", "opt_name", ":", "new_value", "}", ")", ":", "type", "option_va...
python
train
daviddrysdale/python-phonenumbers
tools/python/buildmetadatafromxml.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/tools/python/buildmetadatafromxml.py#L169-L186
def _extract_lengths(ll): """Extract list of possible lengths from string""" results = set() if ll is None: return [] for val in ll.split(','): m = _NUM_RE.match(val) if m: results.add(int(val)) else: m = _RANGE_RE.match(val) if m is None: raise Exception("Unrecognized length specification %s" % ll) min = int(m.group('min')) max = int(m.group('max')) for ii in range(min, max + 1): results.add(ii) return sorted(list(results))
[ "def", "_extract_lengths", "(", "ll", ")", ":", "results", "=", "set", "(", ")", "if", "ll", "is", "None", ":", "return", "[", "]", "for", "val", "in", "ll", ".", "split", "(", "','", ")", ":", "m", "=", "_NUM_RE", ".", "match", "(", "val", ")"...
Extract list of possible lengths from string
[ "Extract", "list", "of", "possible", "lengths", "from", "string" ]
python
train
has2k1/mizani
mizani/palettes.py
https://github.com/has2k1/mizani/blob/312d0550ee0136fd1b0384829b33f3b2065f47c8/mizani/palettes.py#L227-L266
def grey_pal(start=0.2, end=0.8): """ Utility for creating continuous grey scale palette Parameters ---------- start : float grey value at low end of palette end : float grey value at high end of palette Returns ------- out : function Continuous color palette that takes a single :class:`int` parameter ``n`` and returns ``n`` equally spaced colors. Examples -------- >>> palette = grey_pal() >>> palette(5) ['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc'] """ gamma = 2.2 ends = ((0.0, start, start), (1.0, end, end)) cdict = {'red': ends, 'green': ends, 'blue': ends} grey_cmap = mcolors.LinearSegmentedColormap('grey', cdict) def continuous_grey_palette(n): colors = [] # The grey scale points are linearly separated in # gamma encoded space for x in np.linspace(start**gamma, end**gamma, n): # Map points onto the [0, 1] palette domain x = (x ** (1./gamma) - start) / (end - start) colors.append(mcolors.rgb2hex(grey_cmap(x))) return colors return continuous_grey_palette
[ "def", "grey_pal", "(", "start", "=", "0.2", ",", "end", "=", "0.8", ")", ":", "gamma", "=", "2.2", "ends", "=", "(", "(", "0.0", ",", "start", ",", "start", ")", ",", "(", "1.0", ",", "end", ",", "end", ")", ")", "cdict", "=", "{", "'red'", ...
Utility for creating continuous grey scale palette Parameters ---------- start : float grey value at low end of palette end : float grey value at high end of palette Returns ------- out : function Continuous color palette that takes a single :class:`int` parameter ``n`` and returns ``n`` equally spaced colors. Examples -------- >>> palette = grey_pal() >>> palette(5) ['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc']
[ "Utility", "for", "creating", "continuous", "grey", "scale", "palette" ]
python
valid
agoragames/chai
chai/stub.py
https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L108-L212
def _stub_obj(obj): ''' Stub an object directly. ''' # Annoying circular reference requires importing here. Would like to see # this cleaned up. @AW from .mock import Mock # Return an existing stub if isinstance(obj, Stub): return obj # If a Mock object, stub its __call__ if isinstance(obj, Mock): return stub(obj.__call__) # If passed-in a type, assume that we're going to stub out the creation. # See StubNew for the awesome sauce. # if isinstance(obj, types.TypeType): if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType): return StubNew(obj) elif hasattr(__builtins__, 'type') and \ isinstance(obj, __builtins__.type): return StubNew(obj) elif inspect.isclass(obj): return StubNew(obj) # I thought that types.UnboundMethodType differentiated these cases but # apparently not. if isinstance(obj, types.MethodType): # Handle differently if unbound because it's an implicit "any instance" if getattr(obj, 'im_self', None) is None: # Handle the python3 case and py2 filter if hasattr(obj, '__self__'): if obj.__self__ is not None: return StubMethod(obj) if sys.version_info.major == 2: return StubUnboundMethod(obj) else: return StubMethod(obj) # These aren't in the types library if type(obj).__name__ == 'method-wrapper': return StubMethodWrapper(obj) if type(obj).__name__ == 'wrapper_descriptor': raise UnsupportedStub( "must call stub(obj,'%s') for slot wrapper on %s", obj.__name__, obj.__objclass__.__name__) # (Mostly) Lastly, look for properties. # First look for the situation where there's a reference back to the # property. prop = obj if isinstance(getattr(obj, '__self__', None), property): obj = prop.__self__ # Once we've found a property, we have to figure out how to reference # back to the owning class. This is a giant pain and we have to use gc # to find out where it comes from. 
This code is dense but resolves to # something like this: # >>> gc.get_referrers( foo.x ) # [{'__dict__': <attribute '__dict__' of 'foo' objects>, # 'x': <property object at 0x7f68c99a16d8>, # '__module__': '__main__', # '__weakref__': <attribute '__weakref__' of 'foo' objects>, # '__doc__': None}] if isinstance(obj, property): klass, attr = None, None for ref in gc.get_referrers(obj): if klass and attr: break if isinstance(ref, dict) and ref.get('prop', None) is obj: klass = getattr( ref.get('__dict__', None), '__objclass__', None) for name, val in getattr(klass, '__dict__', {}).items(): if val is obj: attr = name break # In the case of PyPy, we have to check all types that refer to # the property, and see if any of their attrs are the property elif isinstance(ref, type): # Use dir as a means to quickly walk through the class tree for name in dir(ref): if getattr(ref, name) == obj: klass = ref attr = name break if klass and attr: rval = stub(klass, attr) if prop != obj: return stub(rval, prop.__name__) return rval # If a function and it has an associated module, we can mock directly. # Note that this *must* be after properties, otherwise it conflicts with # stubbing out the deleter methods and such # Sadly, builtin functions and methods have the same type, so we have to # use the same stub class even though it's a bit ugly if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType, types.BuiltinMethodType)) and hasattr(obj, '__module__'): return StubFunction(obj) raise UnsupportedStub("can't stub %s", obj)
[ "def", "_stub_obj", "(", "obj", ")", ":", "# Annoying circular reference requires importing here. Would like to see", "# this cleaned up. @AW", "from", ".", "mock", "import", "Mock", "# Return an existing stub", "if", "isinstance", "(", "obj", ",", "Stub", ")", ":", "retu...
Stub an object directly.
[ "Stub", "an", "object", "directly", "." ]
python
train
uktrade/directory-components
scripts/upgrade_header_footer.py
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/scripts/upgrade_header_footer.py#L45-L48
def get_file_string(filepath): """Get string from file.""" with open(os.path.abspath(filepath)) as f: return f.read()
[ "def", "get_file_string", "(", "filepath", ")", ":", "with", "open", "(", "os", ".", "path", ".", "abspath", "(", "filepath", ")", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Get string from file.
[ "Get", "string", "from", "file", "." ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L2410-L2428
def Network_setCacheDisabled(self, cacheDisabled): """ Function path: Network.setCacheDisabled Domain: Network Method name: setCacheDisabled Parameters: Required arguments: 'cacheDisabled' (type: boolean) -> Cache disabled state. No return value. Description: Toggles ignoring cache for each request. If <code>true</code>, cache will not be used. """ assert isinstance(cacheDisabled, (bool,) ), "Argument 'cacheDisabled' must be of type '['bool']'. Received type: '%s'" % type( cacheDisabled) subdom_funcs = self.synchronous_command('Network.setCacheDisabled', cacheDisabled=cacheDisabled) return subdom_funcs
[ "def", "Network_setCacheDisabled", "(", "self", ",", "cacheDisabled", ")", ":", "assert", "isinstance", "(", "cacheDisabled", ",", "(", "bool", ",", ")", ")", ",", "\"Argument 'cacheDisabled' must be of type '['bool']'. Received type: '%s'\"", "%", "type", "(", "cacheDi...
Function path: Network.setCacheDisabled Domain: Network Method name: setCacheDisabled Parameters: Required arguments: 'cacheDisabled' (type: boolean) -> Cache disabled state. No return value. Description: Toggles ignoring cache for each request. If <code>true</code>, cache will not be used.
[ "Function", "path", ":", "Network", ".", "setCacheDisabled", "Domain", ":", "Network", "Method", "name", ":", "setCacheDisabled", "Parameters", ":", "Required", "arguments", ":", "cacheDisabled", "(", "type", ":", "boolean", ")", "-", ">", "Cache", "disabled", ...
python
train
EmbodiedCognition/pagoda
pagoda/physics.py
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/physics.py#L553-L569
def axes(self, axes): '''Set the axes for this object's degrees of freedom. Parameters ---------- axes : list of axes specifications A list of axis values to set. This list must have the same number of elements as the degrees of freedom of the underlying ODE object. Each element can be (a) None, which has no effect on the corresponding axis, or (b) three floats specifying the axis to set. ''' assert self.ADOF == len(axes) or self.LDOF == len(axes) for i, axis in enumerate(axes): if axis is not None: self.ode_obj.setAxis(i, 0, axis)
[ "def", "axes", "(", "self", ",", "axes", ")", ":", "assert", "self", ".", "ADOF", "==", "len", "(", "axes", ")", "or", "self", ".", "LDOF", "==", "len", "(", "axes", ")", "for", "i", ",", "axis", "in", "enumerate", "(", "axes", ")", ":", "if", ...
Set the axes for this object's degrees of freedom. Parameters ---------- axes : list of axes specifications A list of axis values to set. This list must have the same number of elements as the degrees of freedom of the underlying ODE object. Each element can be (a) None, which has no effect on the corresponding axis, or (b) three floats specifying the axis to set.
[ "Set", "the", "axes", "for", "this", "object", "s", "degrees", "of", "freedom", "." ]
python
valid
ayust/kitnirc
kitnirc/client.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L756-L760
def _parse_welcome(client, command, actor, args): """Parse a WELCOME and update user state, then dispatch a WELCOME event.""" _, _, hostmask = args.rpartition(' ') client.user.update_from_hostmask(hostmask) client.dispatch_event("WELCOME", hostmask)
[ "def", "_parse_welcome", "(", "client", ",", "command", ",", "actor", ",", "args", ")", ":", "_", ",", "_", ",", "hostmask", "=", "args", ".", "rpartition", "(", "' '", ")", "client", ".", "user", ".", "update_from_hostmask", "(", "hostmask", ")", "cli...
Parse a WELCOME and update user state, then dispatch a WELCOME event.
[ "Parse", "a", "WELCOME", "and", "update", "user", "state", "then", "dispatch", "a", "WELCOME", "event", "." ]
python
train
scraperwiki/dumptruck
dumptruck/convert.py
https://github.com/scraperwiki/dumptruck/blob/ac5855e34d4dffc7e53a13ff925ccabda19604fc/dumptruck/convert.py#L72-L90
def quote(text): 'Handle quote characters' # Convert to unicode. if not isinstance(text, unicode): text = text.decode('utf-8') # Look for quote characters. Keep the text as is if it's already quoted. for qp in QUOTEPAIRS: if text[0] == qp[0] and text[-1] == qp[-1] and len(text) >= 2: return text # If it's not quoted, try quoting for qp in QUOTEPAIRS: if qp[1] not in text: return qp[0] + text + qp[1] #Darn raise ValueError(u'The value "%s" is not quoted and contains too many quote characters to quote' % text)
[ "def", "quote", "(", "text", ")", ":", "# Convert to unicode.", "if", "not", "isinstance", "(", "text", ",", "unicode", ")", ":", "text", "=", "text", ".", "decode", "(", "'utf-8'", ")", "# Look for quote characters. Keep the text as is if it's already quoted.", "fo...
Handle quote characters
[ "Handle", "quote", "characters" ]
python
train
ly0/baidupcsapi
baidupcsapi/api.py
https://github.com/ly0/baidupcsapi/blob/6f6feeef0767a75b3b968924727460eb09242d76/baidupcsapi/api.py#L1251-L1279
def move(self, path_list, dest, **kwargs): """ 移动文件或文件夹 :param path_list: 在百度盘上要移动的源文件path :type path_list: list :param dest: 要移动到的目录 :type dest: str """ def __path(path): if path.endswith('/'): return path.split('/')[-2] else: return os.path.basename(path) params = { 'opera': 'move' } data = { 'filelist': json.dumps([{ "path": path, "dest": dest, "newname": __path(path)} for path in path_list]), } url = 'http://{0}/api/filemanager'.format(BAIDUPAN_SERVER) return self._request('filemanager', 'move', url=url, data=data, extra_params=params, **kwargs)
[ "def", "move", "(", "self", ",", "path_list", ",", "dest", ",", "*", "*", "kwargs", ")", ":", "def", "__path", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "'/'", ")", ":", "return", "path", ".", "split", "(", "'/'", ")", "[", "-"...
移动文件或文件夹 :param path_list: 在百度盘上要移动的源文件path :type path_list: list :param dest: 要移动到的目录 :type dest: str
[ "移动文件或文件夹" ]
python
train
MIT-LCP/wfdb-python
wfdb/processing/qrs.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/qrs.py#L148-L162
def _bandpass(self, fc_low=5, fc_high=20): """ Apply a bandpass filter onto the signal, and save the filtered signal. """ self.fc_low = fc_low self.fc_high = fc_high b, a = signal.butter(2, [float(fc_low) * 2 / self.fs, float(fc_high) * 2 / self.fs], 'pass') self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto], axis=0) # Save the passband gain (x2 due to double filtering) self.filter_gain = get_filter_gain(b, a, np.mean([fc_low, fc_high]), self.fs) * 2
[ "def", "_bandpass", "(", "self", ",", "fc_low", "=", "5", ",", "fc_high", "=", "20", ")", ":", "self", ".", "fc_low", "=", "fc_low", "self", ".", "fc_high", "=", "fc_high", "b", ",", "a", "=", "signal", ".", "butter", "(", "2", ",", "[", "float",...
Apply a bandpass filter onto the signal, and save the filtered signal.
[ "Apply", "a", "bandpass", "filter", "onto", "the", "signal", "and", "save", "the", "filtered", "signal", "." ]
python
train
3ll3d00d/vibe
backend/src/recorder/common/config.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/recorder/common/config.py#L88-L95
def _loadRecordingDevices(self): """ Loads the recordingDevices specified in the configuration. :param: handlers the loaded handlers. :return: the constructed recordingDevices in a dict keyed by name. """ return {device.name: device for device in [self.createDevice(deviceCfg) for deviceCfg in self.config['accelerometers']]}
[ "def", "_loadRecordingDevices", "(", "self", ")", ":", "return", "{", "device", ".", "name", ":", "device", "for", "device", "in", "[", "self", ".", "createDevice", "(", "deviceCfg", ")", "for", "deviceCfg", "in", "self", ".", "config", "[", "'acceleromete...
Loads the recordingDevices specified in the configuration. :param: handlers the loaded handlers. :return: the constructed recordingDevices in a dict keyed by name.
[ "Loads", "the", "recordingDevices", "specified", "in", "the", "configuration", ".", ":", "param", ":", "handlers", "the", "loaded", "handlers", ".", ":", "return", ":", "the", "constructed", "recordingDevices", "in", "a", "dict", "keyed", "by", "name", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py#L93-L118
def createActionItem( self, key ): """ Creates a new action item for the inputed key. :param key | <str> :return <QTreeWidgetItem> """ action = self._actions.get(key) if ( not action ): text = 'Missing Action: %s' % key item = QTreeWidgetItem([text]) ico = projexui.resources.find('img/log/warning.png') item.setIcon(0, QIcon(ico)) else: item = QTreeWidgetItem([nativestring(action.text()).replace('&', '')]) item.setIcon(0, action.icon()) item.setSizeHint(0, QSize(120, 20)) item.setData(0, Qt.UserRole, wrapVariant(key)) flags = item.flags() flags ^= Qt.ItemIsDropEnabled item.setFlags(flags) return item
[ "def", "createActionItem", "(", "self", ",", "key", ")", ":", "action", "=", "self", ".", "_actions", ".", "get", "(", "key", ")", "if", "(", "not", "action", ")", ":", "text", "=", "'Missing Action: %s'", "%", "key", "item", "=", "QTreeWidgetItem", "(...
Creates a new action item for the inputed key. :param key | <str> :return <QTreeWidgetItem>
[ "Creates", "a", "new", "action", "item", "for", "the", "inputed", "key", ".", ":", "param", "key", "|", "<str", ">", ":", "return", "<QTreeWidgetItem", ">" ]
python
train
agoragames/leaderboard-python
leaderboard/leaderboard.py
https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L975-L983
def around_me(self, member, **options): ''' Retrieve a page of leaders from the leaderboard around a given member. @param member [String] Member name. @param options [Hash] Options to be used when retrieving the page from the leaderboard. @return a page of leaders from the leaderboard around a given member. ''' return self.around_me_in(self.leaderboard_name, member, **options)
[ "def", "around_me", "(", "self", ",", "member", ",", "*", "*", "options", ")", ":", "return", "self", ".", "around_me_in", "(", "self", ".", "leaderboard_name", ",", "member", ",", "*", "*", "options", ")" ]
Retrieve a page of leaders from the leaderboard around a given member. @param member [String] Member name. @param options [Hash] Options to be used when retrieving the page from the leaderboard. @return a page of leaders from the leaderboard around a given member.
[ "Retrieve", "a", "page", "of", "leaders", "from", "the", "leaderboard", "around", "a", "given", "member", "." ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/sip/domain/auth_types/auth_calls_mapping/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/domain/auth_types/auth_calls_mapping/__init__.py#L40-L53
def credential_list_mappings(self): """ Access the credential_list_mappings :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingList """ if self._credential_list_mappings is None: self._credential_list_mappings = AuthCallsCredentialListMappingList( self._version, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], ) return self._credential_list_mappings
[ "def", "credential_list_mappings", "(", "self", ")", ":", "if", "self", ".", "_credential_list_mappings", "is", "None", ":", "self", ".", "_credential_list_mappings", "=", "AuthCallsCredentialListMappingList", "(", "self", ".", "_version", ",", "account_sid", "=", "...
Access the credential_list_mappings :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingList :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingList
[ "Access", "the", "credential_list_mappings" ]
python
train
saltstack/salt
salt/modules/mac_timezone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_timezone.py#L78-L105
def set_date(date): ''' Set the current month, day, and year :param str date: The date to set. Valid date formats are: - %m:%d:%y - %m:%d:%Y - %m/%d/%y - %m/%d/%Y :return: True if successful, False if not :rtype: bool :raises: SaltInvocationError on Invalid Date format :raises: CommandExecutionError on failure CLI Example: .. code-block:: bash salt '*' timezone.set_date 1/13/2016 ''' date_format = _get_date_time_format(date) dt_obj = datetime.strptime(date, date_format) cmd = 'systemsetup -setdate {0}'.format(dt_obj.strftime('%m:%d:%Y')) return salt.utils.mac_utils.execute_return_success(cmd)
[ "def", "set_date", "(", "date", ")", ":", "date_format", "=", "_get_date_time_format", "(", "date", ")", "dt_obj", "=", "datetime", ".", "strptime", "(", "date", ",", "date_format", ")", "cmd", "=", "'systemsetup -setdate {0}'", ".", "format", "(", "dt_obj", ...
Set the current month, day, and year :param str date: The date to set. Valid date formats are: - %m:%d:%y - %m:%d:%Y - %m/%d/%y - %m/%d/%Y :return: True if successful, False if not :rtype: bool :raises: SaltInvocationError on Invalid Date format :raises: CommandExecutionError on failure CLI Example: .. code-block:: bash salt '*' timezone.set_date 1/13/2016
[ "Set", "the", "current", "month", "day", "and", "year" ]
python
train
inveniosoftware/invenio-pidstore
invenio_pidstore/models.py
https://github.com/inveniosoftware/invenio-pidstore/blob/8bf35f4e62d5dcaf1a2cfe5803245ba5220a9b78/invenio_pidstore/models.py#L243-L289
def assign(self, object_type, object_uuid, overwrite=False): """Assign this persistent identifier to a given object. Note, the persistent identifier must first have been reserved. Also, if an existing object is already assigned to the pid, it will raise an exception unless overwrite=True. :param object_type: The object type is a string that identify its type. :param object_uuid: The object UUID. :param overwrite: Force PID overwrites in case was previously assigned. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID was previously deleted. :raises invenio_pidstore.errors.PIDObjectAlreadyAssigned: If the PID was previously assigned with a different type/uuid. :returns: `True` if the PID is successfully assigned. """ if self.is_deleted(): raise PIDInvalidAction( "You cannot assign objects to a deleted/redirected persistent" " identifier." ) if not isinstance(object_uuid, uuid.UUID): object_uuid = uuid.UUID(object_uuid) if self.object_type or self.object_uuid: # The object is already assigned to this pid. if object_type == self.object_type and \ object_uuid == self.object_uuid: return True if not overwrite: raise PIDObjectAlreadyAssigned(object_type, object_uuid) self.unassign() try: with db.session.begin_nested(): self.object_type = object_type self.object_uuid = object_uuid db.session.add(self) except SQLAlchemyError: logger.exception("Failed to assign {0}:{1}".format( object_type, object_uuid), extra=dict(pid=self)) raise logger.info("Assigned object {0}:{1}".format( object_type, object_uuid), extra=dict(pid=self)) return True
[ "def", "assign", "(", "self", ",", "object_type", ",", "object_uuid", ",", "overwrite", "=", "False", ")", ":", "if", "self", ".", "is_deleted", "(", ")", ":", "raise", "PIDInvalidAction", "(", "\"You cannot assign objects to a deleted/redirected persistent\"", "\" ...
Assign this persistent identifier to a given object. Note, the persistent identifier must first have been reserved. Also, if an existing object is already assigned to the pid, it will raise an exception unless overwrite=True. :param object_type: The object type is a string that identify its type. :param object_uuid: The object UUID. :param overwrite: Force PID overwrites in case was previously assigned. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID was previously deleted. :raises invenio_pidstore.errors.PIDObjectAlreadyAssigned: If the PID was previously assigned with a different type/uuid. :returns: `True` if the PID is successfully assigned.
[ "Assign", "this", "persistent", "identifier", "to", "a", "given", "object", "." ]
python
train
coinkite/connectrum
connectrum/svr_info.py
https://github.com/coinkite/connectrum/blob/99948f92cc5c3ecb1a8a70146294014e608e50fc/connectrum/svr_info.py#L238-L244
def save_json(self, fname='servers.json'): ''' Write out to a CSV file. ''' rows = sorted(self.keys()) with open(fname, 'wt') as fp: json.dump([self[k] for k in rows], fp, indent=1)
[ "def", "save_json", "(", "self", ",", "fname", "=", "'servers.json'", ")", ":", "rows", "=", "sorted", "(", "self", ".", "keys", "(", ")", ")", "with", "open", "(", "fname", ",", "'wt'", ")", "as", "fp", ":", "json", ".", "dump", "(", "[", "self"...
Write out to a CSV file.
[ "Write", "out", "to", "a", "CSV", "file", "." ]
python
train
gc3-uzh-ch/elasticluster
elasticluster/providers/libcloud_provider.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/libcloud_provider.py#L261-L273
def __get_name_or_id(values, known): """ Return list of values that match attribute ``.id`` or ``.name`` of any object in list `known`. :param str values: comma-separated list (i.e., a Python string) of items :param list known: list of libcloud items to filter :return: list of the libcloud items that match the given values """ result = list() for element in [e.strip() for e in values.split(',')]: for item in [i for i in known if i.name == element or i.id == element]: result.append(item) return result
[ "def", "__get_name_or_id", "(", "values", ",", "known", ")", ":", "result", "=", "list", "(", ")", "for", "element", "in", "[", "e", ".", "strip", "(", ")", "for", "e", "in", "values", ".", "split", "(", "','", ")", "]", ":", "for", "item", "in",...
Return list of values that match attribute ``.id`` or ``.name`` of any object in list `known`. :param str values: comma-separated list (i.e., a Python string) of items :param list known: list of libcloud items to filter :return: list of the libcloud items that match the given values
[ "Return", "list", "of", "values", "that", "match", "attribute", ".", "id", "or", ".", "name", "of", "any", "object", "in", "list", "known", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_genobstacles.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_genobstacles.py#L409-L436
def mavlink_packet(self, m): '''trigger sends from ATTITUDE packets''' if not self.have_home and m.get_type() == 'GPS_RAW_INT' and m.fix_type >= 3: gen_settings.home_lat = m.lat * 1.0e-7 gen_settings.home_lon = m.lon * 1.0e-7 self.have_home = True if self.pending_start: self.start() if m.get_type() != 'ATTITUDE': return t = self.get_time() dt = t - self.last_t if dt < 0 or dt > 10: self.last_t = t return if dt > 10 or dt < 0.9: return self.last_t = t for a in self.aircraft: if not gen_settings.stop: a.update(1.0) self.pkt_queue.append(a.pickled()) while len(self.pkt_queue) > len(self.aircraft)*2: self.pkt_queue.pop(0) if self.module('map') is not None and not self.menu_added_map: self.menu_added_map = True self.module('map').add_menu(self.menu)
[ "def", "mavlink_packet", "(", "self", ",", "m", ")", ":", "if", "not", "self", ".", "have_home", "and", "m", ".", "get_type", "(", ")", "==", "'GPS_RAW_INT'", "and", "m", ".", "fix_type", ">=", "3", ":", "gen_settings", ".", "home_lat", "=", "m", "."...
trigger sends from ATTITUDE packets
[ "trigger", "sends", "from", "ATTITUDE", "packets" ]
python
train
hotdogee/gff3-py
gff3/gff3.py
https://github.com/hotdogee/gff3-py/blob/d239bc9ed1eb7014c174f5fbed754f0f02d6e1b9/gff3/gff3.py#L258-L382
def check_reference(self, sequence_region=False, fasta_embedded=False, fasta_external=False, check_bounds=True, check_n=True, allowed_num_of_n=0, feature_types=('CDS',)): """ Check seqid, bounds and the number of Ns in each feature using one or more reference sources. Seqid check: check if the seqid can be found in the reference sources. Bounds check: check the start and end fields of each features and log error if the values aren't within the seqid sequence length, requires at least one of these sources: ##sequence-region, embedded #FASTA, or external FASTA file. Ns check: count the number of Ns in each feature with the type specified in *line_types (default: 'CDS') and log an error if the number is greater than allowed_num_of_n (default: 0), requires at least one of these sources: embedded #FASTA, or external FASTA file. When called with all source parameters set as False (default), check all available sources, and log debug message if unable to perform a check due to none of the reference sources being available. If any source parameter is set to True, check only those sources marked as True, log error if those sources don't exist. 
:param sequence_region: check bounds using the ##sequence-region directive (default: False) :param fasta_embedded: check bounds using the embedded fasta specified by the ##FASTA directive (default: False) :param fasta_external: check bounds using the external fasta given by the self.parse_fasta_external (default: False) :param check_bounds: If False, don't run the bounds check (default: True) :param check_n: If False, don't run the Ns check (default: True) :param allowed_num_of_n: only report features with a number of Ns greater than the specified value (default: 0) :param feature_types: only check features of these feature_types, multiple types may be specified, if none are specified, check only 'CDS' :return: error_lines: a set of line_index(int) with errors detected by check_reference """ # collect lines with errors in this set error_lines = set() # check if we have a parsed gff3 if not self.lines: self.logger.debug('.parse(gff_file) before calling .check_bounds()') return error_lines # setup default line_types check_n_feature_types = set(feature_types) if len(check_n_feature_types) == 0: check_n_feature_types.add('CDS') # compile regex n_segments_finditer = re.compile(r'[Nn]+').finditer # check_all_sources mode check_all_sources = True if sequence_region or fasta_embedded or fasta_external: check_all_sources = False # get a list of line_data with valid start and end coordinates and unescape the seqid start_end_error_locations = set(('start', 'end', 'start,end')) valid_line_data_seqid = [(line_data, unquote(line_data['seqid'])) for line_data in self.lines if line_data['line_type'] == 'feature' and line_data['seqid'] != '.' 
and (not line_data['line_errors'] or not [error_info for error_info in line_data['line_errors'] if 'location' in error_info and error_info['location'] in start_end_error_locations])] checked_at_least_one_source = False # check directive # don't use any directives with errors valid_sequence_regions = dict([(unquote(line_data['seqid']), line_data) for line_data in self.lines if line_data['directive'] == '##sequence-region' and not line_data['line_errors']]) unresolved_seqid = set() if (check_all_sources or sequence_region) and valid_sequence_regions: checked_at_least_one_source = True for line_data, seqid in valid_line_data_seqid: if seqid not in valid_sequence_regions and seqid not in unresolved_seqid: unresolved_seqid.add(seqid) error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': u'Seqid not found in any ##sequence-region: {0:s}'.format( seqid), 'error_type': 'BOUNDS', 'location': 'sequence_region'}) continue if line_data['start'] < valid_sequence_regions[seqid]['start']: error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'Start is less than the ##sequence-region start: %d' % valid_sequence_regions[seqid]['start'], 'error_type': 'BOUNDS', 'location': 'sequence_region'}) if line_data['end'] > valid_sequence_regions[seqid]['end']: error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'End is greater than the ##sequence-region end: %d' % valid_sequence_regions[seqid]['end'], 'error_type': 'BOUNDS', 'location': 'sequence_region'}) elif sequence_region: self.logger.debug('##sequence-region not found in GFF3') # check fasta_embedded unresolved_seqid = set() if (check_all_sources or fasta_embedded) and self.fasta_embedded: checked_at_least_one_source = True for line_data, seqid in valid_line_data_seqid: if seqid not in self.fasta_embedded and seqid not in unresolved_seqid: unresolved_seqid.add(seqid) error_lines.add(line_data['line_index']) self.add_line_error(line_data, 
{'message': 'Seqid not found in the embedded ##FASTA: %s' % seqid, 'error_type': 'BOUNDS', 'location': 'fasta_embedded'}) continue # check bounds if line_data['end'] > len(self.fasta_embedded[seqid]['seq']): error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'End is greater than the embedded ##FASTA sequence length: %d' % len(self.fasta_embedded[seqid]['seq']), 'error_type': 'BOUNDS', 'location': 'fasta_embedded'}) # check n if check_n and line_data['type'] in check_n_feature_types: """ >>> timeit("a.lower().count('n')", "import re; a = ('ASDKADSJHFIUDNNNNNNNnnnnSHFD'*50)") 5.540903252684302 >>> timeit("a.count('n'); a.count('N')", "import re; a = ('ASDKADSJHFIUDNNNNNNNnnnnSHFD'*50)") 2.3504867946058425 >>> timeit("re.findall('[Nn]+', a)", "import re; a = ('ASDKADSJHFIUDNNNNNNNnnnnSHFD'*50)") 30.60731204915959 """ n_count = self.fasta_embedded[seqid]['seq'].count('N', line_data['start'] - 1, line_data['end']) + self.fasta_embedded[seqid]['seq'].count('n', line_data['start'] - 1, line_data['end']) if n_count > allowed_num_of_n: # get detailed segments info n_segments = [(m.start(), m.end() - m.start()) for m in n_segments_finditer(self.fasta_embedded[seqid]['seq'], line_data['start'] - 1, line_data['end'])] n_segments_str = ['(%d, %d)' % (m[0], m[1]) for m in n_segments] error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'Found %d Ns in %s feature of length %d using the embedded ##FASTA, consists of %d segment (start, length): %s' % (n_count, line_data['type'], line_data['end'] - line_data['start'], len(n_segments), ', '.join(n_segments_str)), 'error_type': 'N_COUNT', 'n_segments': n_segments, 'location': 'fasta_embedded'}) elif fasta_embedded: self.logger.debug('Embedded ##FASTA not found in GFF3') # check fasta_external unresolved_seqid = set() if (check_all_sources or fasta_external) and self.fasta_external: checked_at_least_one_source = True for line_data, seqid in valid_line_data_seqid: if seqid 
not in self.fasta_external and seqid not in unresolved_seqid: unresolved_seqid.add(seqid) error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'Seqid not found in the external FASTA file: %s' % seqid, 'error_type': 'BOUNDS', 'location': 'fasta_external'}) continue # check bounds if line_data['end'] > len(self.fasta_external[seqid]['seq']): error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'End is greater than the external FASTA sequence length: %d' % len(self.fasta_external[seqid]['seq']), 'error_type': 'BOUNDS', 'location': 'fasta_external'}) # check n if check_n and line_data['type'] in check_n_feature_types: n_count = self.fasta_external[seqid]['seq'].count('N', line_data['start'] - 1, line_data['end']) + self.fasta_external[seqid]['seq'].count('n', line_data['start'] - 1, line_data['end']) if n_count > allowed_num_of_n: # get detailed segments info n_segments = [(m.start(), m.end() - m.start()) for m in n_segments_finditer(self.fasta_external[seqid]['seq'], line_data['start'] - 1, line_data['end'])] n_segments_str = ['(%d, %d)' % (m[0], m[1]) for m in n_segments] error_lines.add(line_data['line_index']) self.add_line_error(line_data, {'message': 'Found %d Ns in %s feature of length %d using the external FASTA, consists of %d segment (start, length): %s' % (n_count, line_data['type'], line_data['end'] - line_data['start'], len(n_segments), ', '.join(n_segments_str)), 'error_type': 'N_COUNT', 'n_segments': n_segments, 'location': 'fasta_external'}) elif fasta_external: self.logger.debug('External FASTA file not given') if check_all_sources and not checked_at_least_one_source: self.logger.debug('Unable to perform bounds check, requires at least one of the following sources: ##sequence-region, embedded ##FASTA, or external FASTA file') return error_lines
[ "def", "check_reference", "(", "self", ",", "sequence_region", "=", "False", ",", "fasta_embedded", "=", "False", ",", "fasta_external", "=", "False", ",", "check_bounds", "=", "True", ",", "check_n", "=", "True", ",", "allowed_num_of_n", "=", "0", ",", "fea...
Check seqid, bounds and the number of Ns in each feature using one or more reference sources. Seqid check: check if the seqid can be found in the reference sources. Bounds check: check the start and end fields of each features and log error if the values aren't within the seqid sequence length, requires at least one of these sources: ##sequence-region, embedded #FASTA, or external FASTA file. Ns check: count the number of Ns in each feature with the type specified in *line_types (default: 'CDS') and log an error if the number is greater than allowed_num_of_n (default: 0), requires at least one of these sources: embedded #FASTA, or external FASTA file. When called with all source parameters set as False (default), check all available sources, and log debug message if unable to perform a check due to none of the reference sources being available. If any source parameter is set to True, check only those sources marked as True, log error if those sources don't exist. :param sequence_region: check bounds using the ##sequence-region directive (default: False) :param fasta_embedded: check bounds using the embedded fasta specified by the ##FASTA directive (default: False) :param fasta_external: check bounds using the external fasta given by the self.parse_fasta_external (default: False) :param check_bounds: If False, don't run the bounds check (default: True) :param check_n: If False, don't run the Ns check (default: True) :param allowed_num_of_n: only report features with a number of Ns greater than the specified value (default: 0) :param feature_types: only check features of these feature_types, multiple types may be specified, if none are specified, check only 'CDS' :return: error_lines: a set of line_index(int) with errors detected by check_reference
[ "Check", "seqid", "bounds", "and", "the", "number", "of", "Ns", "in", "each", "feature", "using", "one", "or", "more", "reference", "sources", "." ]
python
valid
zhammer/faaspact-verifier
faaspact_verifier/delivery/github_prs.py
https://github.com/zhammer/faaspact-verifier/blob/f2b7accb869bcadbe4aecbce1ca8e89d47843b44/faaspact_verifier/delivery/github_prs.py#L73-L97
def _pluck_pull_request_info(pull_request_url: str) -> PullRequestInfo: """ # Plucks a PullRequestInfo from a valid >>> _pluck_pull_request_info('https://github.com/zhammer/morning-cd/pull/17') PullRequestInfo(owner='zhammer', repo='morning-cd', number=17) # Raises a GithubPrError on bad urls >>> _pluck_pull_request_info('bad url') Traceback (most recent call last): ... faaspact_verifier.delivery.github_prs.GithubPrError: ... """ match = re.search( r'github\.com/(?P<owner>[\w-]+)/(?P<repo>[\w-]+)/pull/(?P<number>\d+)', pull_request_url ) if not match: raise GithubPrError(f'Couldnt parse url: {pull_request_url}') return PullRequestInfo( owner=match.group('owner'), repo=match.group('repo'), number=int(match.group('number')) )
[ "def", "_pluck_pull_request_info", "(", "pull_request_url", ":", "str", ")", "->", "PullRequestInfo", ":", "match", "=", "re", ".", "search", "(", "r'github\\.com/(?P<owner>[\\w-]+)/(?P<repo>[\\w-]+)/pull/(?P<number>\\d+)'", ",", "pull_request_url", ")", "if", "not", "mat...
# Plucks a PullRequestInfo from a valid >>> _pluck_pull_request_info('https://github.com/zhammer/morning-cd/pull/17') PullRequestInfo(owner='zhammer', repo='morning-cd', number=17) # Raises a GithubPrError on bad urls >>> _pluck_pull_request_info('bad url') Traceback (most recent call last): ... faaspact_verifier.delivery.github_prs.GithubPrError: ...
[ "#", "Plucks", "a", "PullRequestInfo", "from", "a", "valid", ">>>", "_pluck_pull_request_info", "(", "https", ":", "//", "github", ".", "com", "/", "zhammer", "/", "morning", "-", "cd", "/", "pull", "/", "17", ")", "PullRequestInfo", "(", "owner", "=", "...
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L469-L478
def VRRPNewMaster_VRRPSessionId(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") VRRPNewMaster = ET.SubElement(config, "VRRPNewMaster", xmlns="http://brocade.com/ns/brocade-notification-stream") VRRPSessionId = ET.SubElement(VRRPNewMaster, "VRRPSessionId") VRRPSessionId.text = kwargs.pop('VRRPSessionId') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "VRRPNewMaster_VRRPSessionId", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "VRRPNewMaster", "=", "ET", ".", "SubElement", "(", "config", ",", "\"VRRPNewMaster\"", ",", "xmlns", "=", "\...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
pip-services3-python/pip-services3-commons-python
pip_services3_commons/refer/Descriptor.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/refer/Descriptor.py#L117-L130
def match(self, descriptor): """ Partially matches this descriptor to another descriptor. Fields that contain "*" or null are excluded from the match. :param descriptor: the descriptor to match this one against. :return: true if descriptors match and false otherwise """ return self._match_field(self._group, descriptor.get_group()) \ and self._match_field(self._type, descriptor.get_type()) \ and self._match_field(self._kind, descriptor.get_kind()) \ and self._match_field(self._name, descriptor.get_name()) \ and self._match_field(self._version, descriptor.get_version())
[ "def", "match", "(", "self", ",", "descriptor", ")", ":", "return", "self", ".", "_match_field", "(", "self", ".", "_group", ",", "descriptor", ".", "get_group", "(", ")", ")", "and", "self", ".", "_match_field", "(", "self", ".", "_type", ",", "descri...
Partially matches this descriptor to another descriptor. Fields that contain "*" or null are excluded from the match. :param descriptor: the descriptor to match this one against. :return: true if descriptors match and false otherwise
[ "Partially", "matches", "this", "descriptor", "to", "another", "descriptor", ".", "Fields", "that", "contain", "*", "or", "null", "are", "excluded", "from", "the", "match", "." ]
python
train
1flow/python-ftr
ftr/extractor.py
https://github.com/1flow/python-ftr/blob/90a2108c5ee005f1bf66dbe8cce68f2b7051b839/ftr/extractor.py#L292-L323
def _extract_author(self): """ Extract author(s) if not already done. """ if bool(self.author): return for pattern in self.config.author: items = self.parsed_tree.xpath(pattern) if isinstance(items, basestring): # In case xpath returns only one element. items = [items] for item in items: if isinstance(item, basestring): # '_ElementStringResult' object has no attribute 'text' stripped_author = unicode(item).strip() else: try: stripped_author = item.text.strip() except AttributeError: # We got a <div>… stripped_author = etree.tostring(item) if stripped_author: self.author.add(stripped_author) LOGGER.info(u'Author extracted: %s.', stripped_author, extra={'siteconfig': self.config.host})
[ "def", "_extract_author", "(", "self", ")", ":", "if", "bool", "(", "self", ".", "author", ")", ":", "return", "for", "pattern", "in", "self", ".", "config", ".", "author", ":", "items", "=", "self", ".", "parsed_tree", ".", "xpath", "(", "pattern", ...
Extract author(s) if not already done.
[ "Extract", "author", "(", "s", ")", "if", "not", "already", "done", "." ]
python
train
flrt/hl7tersely
hl7tersely/hl7parser.py
https://github.com/flrt/hl7tersely/blob/1ff848dfb10e7e59a9f5c664eb6b9d95e432cafb/hl7tersely/hl7parser.py#L41-L54
def extractSeparators(self, hl7dict, msg): """ Read from the MSH (Message Header) the separators used to separate the pieces of data Ex: In the message MSH|^~\&|OF|Chemistry|ORT||200309060825||ORU^R01^ORU_R01|msgOF105|T|2.5|123||||USA||EN The values separator are ^~\&| See HL7 Chapter 2 """ assert msg[:self.segment_len] == self.header_segment, \ "Message MUST start with the %s segment : Here %s" % (self.header_segment, msg[:self.segment_len]) hl7dict.separators = msg[self.segment_len:self.segment_len+self.separator_count]
[ "def", "extractSeparators", "(", "self", ",", "hl7dict", ",", "msg", ")", ":", "assert", "msg", "[", ":", "self", ".", "segment_len", "]", "==", "self", ".", "header_segment", ",", "\"Message MUST start with the %s segment : Here %s\"", "%", "(", "self", ".", ...
Read from the MSH (Message Header) the separators used to separate the pieces of data Ex: In the message MSH|^~\&|OF|Chemistry|ORT||200309060825||ORU^R01^ORU_R01|msgOF105|T|2.5|123||||USA||EN The values separator are ^~\&| See HL7 Chapter 2
[ "Read", "from", "the", "MSH", "(", "Message", "Header", ")", "the", "separators", "used", "to", "separate", "the", "pieces", "of", "data", "Ex", ":", "In", "the", "message", "MSH|^~", "\\", "&|OF|Chemistry|ORT||200309060825||ORU^R01^ORU_R01|msgOF105|T|2", ".", "5|...
python
train
thebigmunch/google-music
src/google_music/clients/musicmanager.py
https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/musicmanager.py#L122-L152
def songs(self, *, uploaded=True, purchased=True): """Get a listing of Music Library songs. Returns: list: Song dicts. """ if not uploaded and not purchased: raise ValueError("'uploaded' and 'purchased' cannot both be False.") if purchased and uploaded: song_list = [] for chunk in self.songs_iter(export_type=1): song_list.extend(chunk) elif purchased: song_list = [] for chunk in self.songs_iter(export_type=2): song_list.extend(chunk) elif uploaded: purchased_songs = [] for chunk in self.songs_iter(export_type=2): purchased_songs.extend(chunk) song_list = [ song for chunk in self.songs_iter(export_type=1) for song in chunk if song not in purchased_songs ] return song_list
[ "def", "songs", "(", "self", ",", "*", ",", "uploaded", "=", "True", ",", "purchased", "=", "True", ")", ":", "if", "not", "uploaded", "and", "not", "purchased", ":", "raise", "ValueError", "(", "\"'uploaded' and 'purchased' cannot both be False.\"", ")", "if"...
Get a listing of Music Library songs. Returns: list: Song dicts.
[ "Get", "a", "listing", "of", "Music", "Library", "songs", "." ]
python
train
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L2306-L2360
def language_create(name, maintenance_db, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Installs a language into a database CLI Example: .. code-block:: bash salt '*' postgres.language_create plpgsql dbname name Language to install maintenance_db The database to install the language in user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' if language_exists(name, maintenance_db): log.info('Language %s already exists in %s', name, maintenance_db) return False query = 'CREATE LANGUAGE {0}'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
[ "def", "language_create", "(", "name", ",", "maintenance_db", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ")", ":", "if", "language_exists", "(", "name", ",", ...
.. versionadded:: 2016.3.0 Installs a language into a database CLI Example: .. code-block:: bash salt '*' postgres.language_create plpgsql dbname name Language to install maintenance_db The database to install the language in user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
DasIch/argvard
argvard/utils.py
https://github.com/DasIch/argvard/blob/2603e323a995e0915ce41fcf49e2a82519556195/argvard/utils.py#L52-L62
def is_python3_identifier(possible_identifier): """ Returns `True` if the given `possible_identifier` can be used as an identifier in Python 3. """ possible_identifier = unicodedata.normalize('NFKC', possible_identifier) return ( bool(possible_identifier) and _is_in_id_start(possible_identifier[0]) and all(map(_is_in_id_continue, possible_identifier[1:])) ) and not iskeyword(possible_identifier)
[ "def", "is_python3_identifier", "(", "possible_identifier", ")", ":", "possible_identifier", "=", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "possible_identifier", ")", "return", "(", "bool", "(", "possible_identifier", ")", "and", "_is_in_id_start", "(", ...
Returns `True` if the given `possible_identifier` can be used as an identifier in Python 3.
[ "Returns", "True", "if", "the", "given", "possible_identifier", "can", "be", "used", "as", "an", "identifier", "in", "Python", "3", "." ]
python
train
dylanaraps/pywal
pywal/backends/haishoku.py
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/backends/haishoku.py#L34-L37
def get(img, light=False): """Get colorscheme.""" cols = gen_colors(img) return adjust(cols, light)
[ "def", "get", "(", "img", ",", "light", "=", "False", ")", ":", "cols", "=", "gen_colors", "(", "img", ")", "return", "adjust", "(", "cols", ",", "light", ")" ]
Get colorscheme.
[ "Get", "colorscheme", "." ]
python
train
NoviceLive/intellicoder
intellicoder/utils.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L76-L81
def get_parent_dir(name): """Get the parent directory of a filename.""" parent_dir = os.path.dirname(os.path.dirname(name)) if parent_dir: return parent_dir return os.path.abspath('.')
[ "def", "get_parent_dir", "(", "name", ")", ":", "parent_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "name", ")", ")", "if", "parent_dir", ":", "return", "parent_dir", "return", "os", ".", "path", ".", "a...
Get the parent directory of a filename.
[ "Get", "the", "parent", "directory", "of", "a", "filename", "." ]
python
train
fiesta/fiesta-python
fiesta/fiesta.py
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L92-L109
def _make_request(self, request): """ Does the magic of actually sending the request and parsing the response """ # TODO: I'm sure all kinds of error checking needs to go here try: response_raw = urllib2.urlopen(request) except urllib2.HTTPError, e: print e.read() raise response_str = response_raw.read() response = json.loads(response_str) self._last_request = request self._last_response = response_raw self._last_response_str = response_str return response
[ "def", "_make_request", "(", "self", ",", "request", ")", ":", "# TODO: I'm sure all kinds of error checking needs to go here", "try", ":", "response_raw", "=", "urllib2", ".", "urlopen", "(", "request", ")", "except", "urllib2", ".", "HTTPError", ",", "e", ":", "...
Does the magic of actually sending the request and parsing the response
[ "Does", "the", "magic", "of", "actually", "sending", "the", "request", "and", "parsing", "the", "response" ]
python
train
cwaldbieser/jhub_cas_authenticator
jhub_cas_authenticator/cas_auth.py
https://github.com/cwaldbieser/jhub_cas_authenticator/blob/b483ac85d16dad2532ef76846268c5660ddd5611/jhub_cas_authenticator/cas_auth.py#L107-L148
def validate_service_ticket(self, ticket): """ Validate a CAS service ticket. Returns (is_valid, user, attribs). `is_valid` - boolean `attribs` - set of attribute-value tuples. """ app_log = logging.getLogger("tornado.application") http_client = AsyncHTTPClient() service = self.make_service_url() qs_dict = dict(service=service, ticket=ticket) qs = urllib.parse.urlencode(qs_dict) cas_validate_url = self.authenticator.cas_service_validate_url + "?" + qs response = None app_log.debug("Validate URL: {0}".format(cas_validate_url)) try: response = yield http_client.fetch( cas_validate_url, method="GET", ca_certs=self.authenticator.cas_client_ca_certs) app_log.debug("Response was successful: {0}".format(response)) except HTTPError as ex: app_log.debug("Response was unsuccessful: {0}".format(response)) return (False, None, None) parser = etree.XMLParser() root = etree.fromstring(response.body, parser=parser) auth_result_elm = root[0] is_success = (etree.QName(auth_result_elm).localname == 'authenticationSuccess') if not is_success: return (False, None, None) user_elm = find_child_element(auth_result_elm, "user") user = user_elm.text.lower() attrib_results = set([]) attribs = find_child_element(auth_result_elm, "attributes") if attribs is None: attribs = [] for attrib in attribs: name = etree.QName(attrib).localname value = attrib.text attrib_results.add((name, value)) return (True, user, attrib_results)
[ "def", "validate_service_ticket", "(", "self", ",", "ticket", ")", ":", "app_log", "=", "logging", ".", "getLogger", "(", "\"tornado.application\"", ")", "http_client", "=", "AsyncHTTPClient", "(", ")", "service", "=", "self", ".", "make_service_url", "(", ")", ...
Validate a CAS service ticket. Returns (is_valid, user, attribs). `is_valid` - boolean `attribs` - set of attribute-value tuples.
[ "Validate", "a", "CAS", "service", "ticket", "." ]
python
train
IBMStreams/pypi.streamsx
streamsx/topology/topology.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/topology.py#L793-L802
def _add_job_control_plane(self): """ Add a JobControlPlane operator to the topology, if one has not already been added. If a JobControlPlane operator has already been added, this has no effect. """ if not self._has_jcp: jcp = self.graph.addOperator(kind="spl.control::JobControlPlane", name="JobControlPlane") jcp.viewable = False self._has_jcp = True
[ "def", "_add_job_control_plane", "(", "self", ")", ":", "if", "not", "self", ".", "_has_jcp", ":", "jcp", "=", "self", ".", "graph", ".", "addOperator", "(", "kind", "=", "\"spl.control::JobControlPlane\"", ",", "name", "=", "\"JobControlPlane\"", ")", "jcp", ...
Add a JobControlPlane operator to the topology, if one has not already been added. If a JobControlPlane operator has already been added, this has no effect.
[ "Add", "a", "JobControlPlane", "operator", "to", "the", "topology", "if", "one", "has", "not", "already", "been", "added", ".", "If", "a", "JobControlPlane", "operator", "has", "already", "been", "added", "this", "has", "no", "effect", "." ]
python
train
hardbyte/python-can
can/io/sqlite.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/io/sqlite.py#L157-L182
def _create_db(self): """Creates a new databae or opens a connection to an existing one. .. note:: You can't share sqlite3 connections between threads (by default) hence we setup the db here. It has the upside of running async. """ log.debug("Creating sqlite database") self._conn = sqlite3.connect(self._db_filename) # create table structure self._conn.cursor().execute(""" CREATE TABLE IF NOT EXISTS {} ( ts REAL, arbitration_id INTEGER, extended INTEGER, remote INTEGER, error INTEGER, dlc INTEGER, data BLOB ) """.format(self.table_name)) self._conn.commit() self._insert_template = "INSERT INTO {} VALUES (?, ?, ?, ?, ?, ?, ?)".format(self.table_name)
[ "def", "_create_db", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Creating sqlite database\"", ")", "self", ".", "_conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "_db_filename", ")", "# create table structure", "self", ".", "_conn", ".", "cu...
Creates a new databae or opens a connection to an existing one. .. note:: You can't share sqlite3 connections between threads (by default) hence we setup the db here. It has the upside of running async.
[ "Creates", "a", "new", "databae", "or", "opens", "a", "connection", "to", "an", "existing", "one", "." ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/table.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L836-L870
def from_api_repr(cls, resource): """Factory: construct a table given its API representation Args: resource (Dict[str, object]): Table resource representation from the API Returns: google.cloud.bigquery.table.Table: Table parsed from ``resource``. Raises: KeyError: If the ``resource`` lacks the key ``'tableReference'``, or if the ``dict`` stored within the key ``'tableReference'`` lacks the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``. """ from google.cloud.bigquery import dataset if ( "tableReference" not in resource or "tableId" not in resource["tableReference"] ): raise KeyError( "Resource lacks required identity information:" '["tableReference"]["tableId"]' ) project_id = resource["tableReference"]["projectId"] table_id = resource["tableReference"]["tableId"] dataset_id = resource["tableReference"]["datasetId"] dataset_ref = dataset.DatasetReference(project_id, dataset_id) table = cls(dataset_ref.table(table_id)) table._properties = resource return table
[ "def", "from_api_repr", "(", "cls", ",", "resource", ")", ":", "from", "google", ".", "cloud", ".", "bigquery", "import", "dataset", "if", "(", "\"tableReference\"", "not", "in", "resource", "or", "\"tableId\"", "not", "in", "resource", "[", "\"tableReference\...
Factory: construct a table given its API representation Args: resource (Dict[str, object]): Table resource representation from the API Returns: google.cloud.bigquery.table.Table: Table parsed from ``resource``. Raises: KeyError: If the ``resource`` lacks the key ``'tableReference'``, or if the ``dict`` stored within the key ``'tableReference'`` lacks the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``.
[ "Factory", ":", "construct", "a", "table", "given", "its", "API", "representation" ]
python
train
saltstack/salt
salt/fileserver/roots.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/roots.py#L143-L213
def update(): ''' When we are asked to update (regular interval) lets reap the cache ''' try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'roots', 'hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass mtime_map_path = os.path.join(__opts__['cachedir'], 'roots', 'mtime_map') # data to send on event data = {'changed': False, 'files': {'changed': []}, 'backend': 'roots'} # generate the new map new_mtime_map = salt.fileserver.generate_mtime_map(__opts__, __opts__['file_roots']) old_mtime_map = {} # if you have an old map, load that if os.path.exists(mtime_map_path): with salt.utils.files.fopen(mtime_map_path, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) try: file_path, mtime = line.replace('\n', '').split(':', 1) old_mtime_map[file_path] = mtime if mtime != new_mtime_map.get(file_path, mtime): data['files']['changed'].append(file_path) except ValueError: # Document the invalid entry in the log log.warning( 'Skipped invalid cache mtime entry in %s: %s', mtime_map_path, line ) # compare the maps, set changed to the return value data['changed'] = salt.fileserver.diff_mtime_map(old_mtime_map, new_mtime_map) # compute files that were removed and added old_files = set(old_mtime_map.keys()) new_files = set(new_mtime_map.keys()) data['files']['removed'] = list(old_files - new_files) data['files']['added'] = list(new_files - old_files) # write out the new map mtime_map_path_dir = os.path.dirname(mtime_map_path) if not os.path.exists(mtime_map_path_dir): os.makedirs(mtime_map_path_dir) with salt.utils.files.fopen(mtime_map_path, 'wb') as fp_: for file_path, mtime in six.iteritems(new_mtime_map): fp_.write( salt.utils.stringutils.to_bytes( '{0}:{1}\n'.format(file_path, mtime) ) ) if __opts__.get('fileserver_events', False): # if there is a change, fire an event event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], 
opts=__opts__, listen=False) event.fire_event(data, salt.utils.event.tagify(['roots', 'update'], prefix='fileserver'))
[ "def", "update", "(", ")", ":", "try", ":", "salt", ".", "fileserver", ".", "reap_fileserver_cache_dir", "(", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'roots'", ",", "'hash'", ")", ",", "find_file", ")", "except", ...
When we are asked to update (regular interval) lets reap the cache
[ "When", "we", "are", "asked", "to", "update", "(", "regular", "interval", ")", "lets", "reap", "the", "cache" ]
python
train
MakerReduxCorp/PLOD
PLOD/__init__.py
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/__init__.py#L793-L872
def contains(self, key, value, findAll=False, exclude=False, includeMissing=False): '''Return entries that: * have the key * key points to a list, and * value is found in the list. If value is also a list itself, then the list entry is selected if any of the values match. If findAll is set to True, then all the entries must be found. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12] }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).contains("wigs", [1, 12]).returnString() [ {age: 18, income: 93000, name: 'Jim', wigs: [9, 12] }, {age: 20, income: 15000, name: 'Joe', wigs: [1, 2, 3]} ] .. versionadded:: 0.1.3b :param key: The dictionary key (or cascading list of keys) that should point to a list. :param value: The value to locate in the list. This argument can be an immutable value such as a string, tuple, or number. If this argument is a list of values instead, then this method will search for any of the values in that list. If the optional 'findAll' parameter is set to True, then all of the values in that list must be found. Optional named arguments: :param finalAll: If True, then all the values in the 'value' parameter must be found. :param exclude: If 'exclude' is True, then the entries that do NOT match the above conditions are returned. :param includeMissing: If 'includeMissing' is True, then if the key is missing then that entry is included in the results. However, it does not include entries that have the key but its value is for a non-list or empty list. 
:returns: self ''' result = [] result_index = [] for counter, row in enumerate(self.table): (target, tkey, target_list) = internal.dict_crawl(row, key) if target: if findAll: success = internal.list_match_all(target_list, value) else: success = internal.list_match_any(target_list, value) if exclude: success = not success if success: result.append(row) result_index.append(self.index_track[counter]) else: # item missing from list, so skip over pass else: if includeMissing: result.append(row) result_index.append(self.index_track[counter]) else: pass self.table = result self.index_track = result_index return self
[ "def", "contains", "(", "self", ",", "key", ",", "value", ",", "findAll", "=", "False", ",", "exclude", "=", "False", ",", "includeMissing", "=", "False", ")", ":", "result", "=", "[", "]", "result_index", "=", "[", "]", "for", "counter", ",", "row",...
Return entries that: * have the key * key points to a list, and * value is found in the list. If value is also a list itself, then the list entry is selected if any of the values match. If findAll is set to True, then all the entries must be found. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": [9, 12] }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).contains("wigs", [1, 12]).returnString() [ {age: 18, income: 93000, name: 'Jim', wigs: [9, 12] }, {age: 20, income: 15000, name: 'Joe', wigs: [1, 2, 3]} ] .. versionadded:: 0.1.3b :param key: The dictionary key (or cascading list of keys) that should point to a list. :param value: The value to locate in the list. This argument can be an immutable value such as a string, tuple, or number. If this argument is a list of values instead, then this method will search for any of the values in that list. If the optional 'findAll' parameter is set to True, then all of the values in that list must be found. Optional named arguments: :param finalAll: If True, then all the values in the 'value' parameter must be found. :param exclude: If 'exclude' is True, then the entries that do NOT match the above conditions are returned. :param includeMissing: If 'includeMissing' is True, then if the key is missing then that entry is included in the results. However, it does not include entries that have the key but its value is for a non-list or empty list. :returns: self
[ "Return", "entries", "that", ":", "*", "have", "the", "key", "*", "key", "points", "to", "a", "list", "and", "*", "value", "is", "found", "in", "the", "list", "." ]
python
train
andreikop/qutepart
qutepart/completer.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/completer.py#L466-L473
def _onCompletionListItemSelected(self, index): """Item selected. Insert completion to editor """ model = self._widget.model() selectedWord = model.words[index] textToInsert = selectedWord[len(model.typedText()):] self._qpart.textCursor().insertText(textToInsert) self._closeCompletion()
[ "def", "_onCompletionListItemSelected", "(", "self", ",", "index", ")", ":", "model", "=", "self", ".", "_widget", ".", "model", "(", ")", "selectedWord", "=", "model", ".", "words", "[", "index", "]", "textToInsert", "=", "selectedWord", "[", "len", "(", ...
Item selected. Insert completion to editor
[ "Item", "selected", ".", "Insert", "completion", "to", "editor" ]
python
train
ckcollab/brains-cli
brains.py
https://github.com/ckcollab/brains-cli/blob/8dc512c32fc83ecc3a80bf7fa2b474d142d99b0e/brains.py#L59-L77
def init(name, languages, run): """Initializes your CONFIG_FILE for the current submission""" contents = [file_name for file_name in glob.glob("*.*") if file_name != "brains.yaml"] with open(CONFIG_FILE, "w") as output: output.write(yaml.safe_dump({ "run": run, "name": name, "languages": languages, # automatically insert all root files into contents "contents": contents, }, default_flow_style=False)) print "" cprint("Automatically including the follow files in brain contents:", "cyan") for file_name in contents: print "\t", file_name print "" cprint("done! brains.yaml created", 'green')
[ "def", "init", "(", "name", ",", "languages", ",", "run", ")", ":", "contents", "=", "[", "file_name", "for", "file_name", "in", "glob", ".", "glob", "(", "\"*.*\"", ")", "if", "file_name", "!=", "\"brains.yaml\"", "]", "with", "open", "(", "CONFIG_FILE"...
Initializes your CONFIG_FILE for the current submission
[ "Initializes", "your", "CONFIG_FILE", "for", "the", "current", "submission" ]
python
train